Document Bucket API v1beta2 spec

Signed-off-by: Hidde Beydals <hello@hidde.co>
This commit is contained in:
Hidde Beydals 2022-02-03 23:47:42 +01:00
parent ccadce6d16
commit 5832296ef5
14 changed files with 1083 additions and 229 deletions

View File

@ -23,32 +23,31 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
) )
// Artifact represents the output of a Source synchronisation. // Artifact represents the output of a Source reconciliation.
type Artifact struct { type Artifact struct {
// Path is the relative file path of this Artifact. // Path is the relative file path of the Artifact. It can be used to locate
// It can be used to locate the Artifact file in the root of the Artifact // the file in the root of the Artifact storage on the local file system of
// storage on the local file system of the controller managing the Source. // the controller managing the Source.
// +required // +required
Path string `json:"path"` Path string `json:"path"`
// URL is the HTTP address of this artifact. // URL is the HTTP address of the Artifact as exposed by the controller
// It is used by the consumers of the artifacts to fetch and use the // managing the Source. It can be used to retrieve the Artifact for
// artifacts. It is expected to be resolvable from within the cluster. // consumption, e.g. by another controller applying the Artifact contents.
// +required // +required
URL string `json:"url"` URL string `json:"url"`
// Revision is a human readable identifier traceable in the origin source // Revision is a human-readable identifier traceable in the origin source
// system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm // system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
// chart version, etc.
// +optional // +optional
Revision string `json:"revision"` Revision string `json:"revision"`
// Checksum is the SHA256 checksum of the artifact. // Checksum is the SHA256 checksum of the Artifact file.
// +optional // +optional
Checksum string `json:"checksum"` Checksum string `json:"checksum"`
// LastUpdateTime is the timestamp corresponding to the last update of this // LastUpdateTime is the timestamp corresponding to the last update of the
// artifact. // Artifact.
// +required // +required
LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
@ -67,14 +66,14 @@ func (in *Artifact) HasRevision(revision string) bool {
} }
// ArtifactDir returns the artifact dir path in the form of // ArtifactDir returns the artifact dir path in the form of
// <source-kind>/<source-namespace>/<source-name>. // '<kind>/<namespace>/<name>'.
func ArtifactDir(kind, namespace, name string) string { func ArtifactDir(kind, namespace, name string) string {
kind = strings.ToLower(kind) kind = strings.ToLower(kind)
return path.Join(kind, namespace, name) return path.Join(kind, namespace, name)
} }
// ArtifactPath returns the artifact path in the form of // ArtifactPath returns the artifact path in the form of
// <source-kind>/<source-namespace>/<source-name>/<artifact-filename>. // '<kind>/<namespace>/<name>/<filename>'.
func ArtifactPath(kind, namespace, name, filename string) string { func ArtifactPath(kind, namespace, name, filename string) string {
return path.Join(ArtifactDir(kind, namespace, name), filename) return path.Join(ArtifactDir(kind, namespace, name), filename)
} }

View File

@ -31,46 +31,57 @@ const (
) )
const ( const (
// GenericBucketProvider for any S3 API compatible storage Bucket.
GenericBucketProvider string = "generic" GenericBucketProvider string = "generic"
// AmazonBucketProvider for an AWS S3 object storage Bucket.
// Provides support for retrieving credentials from the AWS EC2 service.
AmazonBucketProvider string = "aws" AmazonBucketProvider string = "aws"
// GoogleBucketProvider for a Google Cloud Storage Bucket.
// Provides support for authentication using a workload identity.
GoogleBucketProvider string = "gcp" GoogleBucketProvider string = "gcp"
// AzureBucketProvider for an Azure Blob Storage Bucket.
// Provides support for authentication using a Service Principal,
// Managed Identity or Shared Key.
AzureBucketProvider string = "azure" AzureBucketProvider string = "azure"
) )
// BucketSpec defines the desired state of an S3 compatible bucket // BucketSpec specifies the required configuration to produce an Artifact for
// an object storage bucket.
type BucketSpec struct { type BucketSpec struct {
// The S3 compatible storage provider name, default ('generic'). // Provider of the object storage bucket.
// Defaults to 'generic', which expects an S3 (API) compatible object
// storage.
// +kubebuilder:validation:Enum=generic;aws;gcp;azure // +kubebuilder:validation:Enum=generic;aws;gcp;azure
// +kubebuilder:default:=generic // +kubebuilder:default:=generic
// +optional // +optional
Provider string `json:"provider,omitempty"` Provider string `json:"provider,omitempty"`
// The bucket name. // BucketName is the name of the object storage bucket.
// +required // +required
BucketName string `json:"bucketName"` BucketName string `json:"bucketName"`
// The bucket endpoint address. // Endpoint is the object storage address the BucketName is located at.
// +required // +required
Endpoint string `json:"endpoint"` Endpoint string `json:"endpoint"`
// Insecure allows connecting to a non-TLS S3 HTTP endpoint. // Insecure allows connecting to a non-TLS HTTP Endpoint.
// +optional // +optional
Insecure bool `json:"insecure,omitempty"` Insecure bool `json:"insecure,omitempty"`
// The bucket region. // Region of the Endpoint where the BucketName is located.
// +optional // +optional
Region string `json:"region,omitempty"` Region string `json:"region,omitempty"`
// The name of the secret containing authentication credentials // SecretRef specifies the Secret containing authentication credentials
// for the Bucket. // for the Bucket.
// +optional // +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// The interval at which to check for bucket updates. // Interval at which to check the Endpoint for updates.
// +required // +required
Interval metav1.Duration `json:"interval"` Interval metav1.Duration `json:"interval"`
// The timeout for fetch operations, defaults to 60s. // Timeout for fetch operations, defaults to 60s.
// +kubebuilder:default="60s" // +kubebuilder:default="60s"
// +optional // +optional
Timeout *metav1.Duration `json:"timeout,omitempty"` Timeout *metav1.Duration `json:"timeout,omitempty"`
@ -81,18 +92,21 @@ type BucketSpec struct {
// +optional // +optional
Ignore *string `json:"ignore,omitempty"` Ignore *string `json:"ignore,omitempty"`
// This flag tells the controller to suspend the reconciliation of this source. // Suspend tells the controller to suspend the reconciliation of this
// Bucket.
// +optional // +optional
Suspend bool `json:"suspend,omitempty"` Suspend bool `json:"suspend,omitempty"`
// AccessFrom defines an Access Control List for allowing cross-namespace references to this object. // AccessFrom specifies an Access Control List for allowing cross-namespace
// references to this object.
// NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
// +optional // +optional
AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
} }
// BucketStatus defines the observed state of a bucket // BucketStatus records the observed state of a Bucket.
type BucketStatus struct { type BucketStatus struct {
// ObservedGeneration is the last observed generation. // ObservedGeneration is the last observed generation of the Bucket object.
// +optional // +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"` ObservedGeneration int64 `json:"observedGeneration,omitempty"`
@ -100,11 +114,13 @@ type BucketStatus struct {
// +optional // +optional
Conditions []metav1.Condition `json:"conditions,omitempty"` Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the fetch link for the artifact output of the last Bucket sync. // URL is the dynamic fetch link for the latest Artifact.
// It is provided on a "best effort" basis, and using the precise
// BucketStatus.Artifact data is recommended.
// +optional // +optional
URL string `json:"url,omitempty"` URL string `json:"url,omitempty"`
// Artifact represents the output of the last successful Bucket sync. // Artifact represents the last successful Bucket reconciliation.
// +optional // +optional
Artifact *Artifact `json:"artifact,omitempty"` Artifact *Artifact `json:"artifact,omitempty"`
@ -112,12 +128,12 @@ type BucketStatus struct {
} }
const ( const (
// BucketOperationSucceededReason represents the fact that the bucket listing and // BucketOperationSucceededReason signals that the Bucket listing and fetch
// fetch operations succeeded. // operations succeeded.
BucketOperationSucceededReason string = "BucketOperationSucceeded" BucketOperationSucceededReason string = "BucketOperationSucceeded"
// BucketOperationFailedReason represents the fact that the bucket listing or // BucketOperationFailedReason signals that the Bucket listing or fetch
// fetch operations failed. // operations failed.
BucketOperationFailedReason string = "BucketOperationFailed" BucketOperationFailedReason string = "BucketOperationFailed"
) )
@ -136,23 +152,11 @@ func (in Bucket) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration return in.Spec.Interval.Duration
} }
// GetInterval returns the interval at which the source is reconciled.
// Deprecated: use GetRequeueAfter instead.
func (in Bucket) GetInterval() metav1.Duration {
return in.Spec.Interval
}
// GetArtifact returns the latest artifact from the source if present in the status sub-resource. // GetArtifact returns the latest artifact from the source if present in the status sub-resource.
func (in *Bucket) GetArtifact() *Artifact { func (in *Bucket) GetArtifact() *Artifact {
return in.Status.Artifact return in.Status.Artifact
} }
// GetStatusConditions returns a pointer to the Status.Conditions slice.
// Deprecated: use GetConditions instead.
func (in *Bucket) GetStatusConditions() *[]metav1.Condition {
return &in.Status.Conditions
}
// +genclient // +genclient
// +genclient:Namespaced // +genclient:Namespaced
// +kubebuilder:storageversion // +kubebuilder:storageversion
@ -163,7 +167,7 @@ func (in *Bucket) GetStatusConditions() *[]metav1.Condition {
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// Bucket is the Schema for the buckets API // Bucket is the Schema for the buckets API.
type Bucket struct { type Bucket struct {
metav1.TypeMeta `json:",inline"` metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"` metav1.ObjectMeta `json:"metadata,omitempty"`
@ -173,9 +177,8 @@ type Bucket struct {
Status BucketStatus `json:"status,omitempty"` Status BucketStatus `json:"status,omitempty"`
} }
// BucketList contains a list of Bucket objects.
// +kubebuilder:object:root=true // +kubebuilder:object:root=true
// BucketList contains a list of Bucket
type BucketList struct { type BucketList struct {
metav1.TypeMeta `json:",inline"` metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"` metav1.ListMeta `json:"metadata,omitempty"`

View File

@ -19,33 +19,41 @@ package v1beta2
const SourceFinalizer = "finalizers.fluxcd.io" const SourceFinalizer = "finalizers.fluxcd.io"
const ( const (
// ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. // ArtifactOutdatedCondition indicates the current Artifact of the Source
// This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. // is outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
ArtifactOutdatedCondition string = "ArtifactOutdated" ArtifactOutdatedCondition string = "ArtifactOutdated"
// SourceVerifiedCondition indicates the integrity of the Source has been verified. If True, the integrity check // SourceVerifiedCondition indicates the integrity of the Source has been
// succeeded. If False, it failed. The Condition is only present on the resource if the integrity has been verified. // verified. If True, the integrity check succeeded. If False, it failed.
// The Condition is only present on the resource if the integrity has been
// verified.
SourceVerifiedCondition string = "SourceVerified" SourceVerifiedCondition string = "SourceVerified"
// FetchFailedCondition indicates a transient or persistent fetch failure of an upstream Source. // FetchFailedCondition indicates a transient or persistent fetch failure
// If True, observations on the upstream Source revision may be impossible, and the Artifact available for the // of an upstream Source.
// Source may be outdated. // If True, observations on the upstream Source revision may be impossible,
// This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. // and the Artifact available for the Source may be outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
FetchFailedCondition string = "FetchFailed" FetchFailedCondition string = "FetchFailed"
// BuildFailedCondition indicates a transient or persistent build failure of a Source's Artifact. // BuildFailedCondition indicates a transient or persistent build failure
// If True, the Source can be in an ArtifactOutdatedCondition // of a Source's Artifact.
// If True, the Source can be in an ArtifactOutdatedCondition.
BuildFailedCondition string = "BuildFailed" BuildFailedCondition string = "BuildFailed"
) )
const ( const (
// URLInvalidReason represents the fact that a given source has an invalid URL. // URLInvalidReason signals that a given Source has an invalid URL.
URLInvalidReason string = "URLInvalid" URLInvalidReason string = "URLInvalid"
// StorageOperationFailedReason signals a failure caused by a storage operation. // StorageOperationFailedReason signals a failure caused by a storage
// operation.
StorageOperationFailedReason string = "StorageOperationFailed" StorageOperationFailedReason string = "StorageOperationFailed"
// AuthenticationFailedReason represents the fact that a given secret does not // AuthenticationFailedReason signals that a Secret does not have the
// have the required fields or the provided credentials do not match. // required fields, or the provided credentials do not match.
AuthenticationFailedReason string = "AuthenticationFailed" AuthenticationFailedReason string = "AuthenticationFailed"
) )

View File

@ -19,26 +19,27 @@ package v1beta2
import ( import (
"time" "time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
) )
const ( const (
// SourceIndexKey is the key used for indexing resources // SourceIndexKey is the key used for indexing objects based on their
// resources based on their Source. // referenced Source.
SourceIndexKey string = ".metadata.source" SourceIndexKey string = ".metadata.source"
) )
// Source interface must be supported by all API types. // Source interface must be supported by all API types.
// Source is the interface that provides generic access to the Artifact and
// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
// API group.
//
// +k8s:deepcopy-gen=false // +k8s:deepcopy-gen=false
type Source interface { type Source interface {
runtime.Object runtime.Object
// GetRequeueAfter returns the duration after which the source must be reconciled again. // GetRequeueAfter returns the duration after which the source must be
// reconciled again.
GetRequeueAfter() time.Duration GetRequeueAfter() time.Duration
// GetArtifact returns the latest artifact from the source if present in the // GetArtifact returns the latest artifact from the source if present in
// status sub-resource. // the status sub-resource.
GetArtifact() *Artifact GetArtifact() *Artifact
// GetInterval returns the interval at which the source is updated.
// Deprecated: use GetRequeueAfter instead.
GetInterval() metav1.Duration
} }

View File

@ -266,7 +266,7 @@ spec:
name: v1beta2 name: v1beta2
schema: schema:
openAPIV3Schema: openAPIV3Schema:
description: Bucket is the Schema for the buckets API description: Bucket is the Schema for the buckets API.
properties: properties:
apiVersion: apiVersion:
description: 'APIVersion defines the versioned schema of this representation description: 'APIVersion defines the versioned schema of this representation
@ -281,12 +281,13 @@ spec:
metadata: metadata:
type: object type: object
spec: spec:
description: BucketSpec defines the desired state of an S3 compatible description: BucketSpec specifies the required configuration to produce
bucket an Artifact for an object storage bucket.
properties: properties:
accessFrom: accessFrom:
description: AccessFrom defines an Access Control List for allowing description: 'AccessFrom specifies an Access Control List for allowing
cross-namespace references to this object. cross-namespace references to this object. NOTE: Not implemented,
provisional as of https://github.com/fluxcd/flux2/pull/2092'
properties: properties:
namespaceSelectors: namespaceSelectors:
description: NamespaceSelectors is the list of namespace selectors description: NamespaceSelectors is the list of namespace selectors
@ -312,10 +313,11 @@ spec:
- namespaceSelectors - namespaceSelectors
type: object type: object
bucketName: bucketName:
description: The bucket name. description: BucketName is the name of the object storage bucket.
type: string type: string
endpoint: endpoint:
description: The bucket endpoint address. description: Endpoint is the object storage address the BucketName
is located at.
type: string type: string
ignore: ignore:
description: Ignore overrides the set of excluded patterns in the description: Ignore overrides the set of excluded patterns in the
@ -324,14 +326,15 @@ spec:
to find out what those are. to find out what those are.
type: string type: string
insecure: insecure:
description: Insecure allows connecting to a non-TLS S3 HTTP endpoint. description: Insecure allows connecting to a non-TLS HTTP Endpoint.
type: boolean type: boolean
interval: interval:
description: The interval at which to check for bucket updates. description: Interval at which to check the Endpoint for updates.
type: string type: string
provider: provider:
default: generic default: generic
description: The S3 compatible storage provider name, default ('generic'). description: Provider of the object storage bucket. Defaults to 'generic',
which expects an S3 (API) compatible object storage.
enum: enum:
- generic - generic
- aws - aws
@ -339,11 +342,12 @@ spec:
- azure - azure
type: string type: string
region: region:
description: The bucket region. description: Region of the Endpoint where the BucketName is
located.
type: string type: string
secretRef: secretRef:
description: The name of the secret containing authentication credentials description: SecretRef specifies the Secret containing authentication
for the Bucket. credentials for the Bucket.
properties: properties:
name: name:
description: Name of the referent. description: Name of the referent.
@ -352,12 +356,12 @@ spec:
- name - name
type: object type: object
suspend: suspend:
description: This flag tells the controller to suspend the reconciliation description: Suspend tells the controller to suspend the reconciliation
of this source. of this Bucket.
type: boolean type: boolean
timeout: timeout:
default: 60s default: 60s
description: The timeout for fetch operations, defaults to 60s. description: Timeout for fetch operations, defaults to 60s.
type: string type: string
required: required:
- bucketName - bucketName
@ -367,39 +371,38 @@ spec:
status: status:
default: default:
observedGeneration: -1 observedGeneration: -1
description: BucketStatus defines the observed state of a bucket description: BucketStatus records the observed state of a Bucket.
properties: properties:
artifact: artifact:
description: Artifact represents the output of the last successful description: Artifact represents the last successful Bucket reconciliation.
Bucket sync.
properties: properties:
checksum: checksum:
description: Checksum is the SHA256 checksum of the artifact. description: Checksum is the SHA256 checksum of the Artifact file.
type: string type: string
lastUpdateTime: lastUpdateTime:
description: LastUpdateTime is the timestamp corresponding to description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact. the last update of the Artifact.
format: date-time format: date-time
type: string type: string
path: path:
description: Path is the relative file path of this Artifact. description: Path is the relative file path of the Artifact. It
It can be used to locate the Artifact file in the root of the can be used to locate the file in the root of the Artifact storage
Artifact storage on the local file system of the controller on the local file system of the controller managing the Source.
managing the Source.
type: string type: string
revision: revision:
description: Revision is a human readable identifier traceable description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc. tag, a Helm chart version, etc.
type: string type: string
size: size:
description: Size is the number of bytes in the file. description: Size is the number of bytes in the file.
format: int64 format: int64
type: integer type: integer
url: url:
description: URL is the HTTP address of this artifact. It is used description: URL is the HTTP address of the Artifact as exposed
by the consumers of the artifacts to fetch and use the artifacts. by the controller managing the Source. It can be used to retrieve
It is expected to be resolvable from within the cluster. the Artifact for consumption, e.g. by another controller applying
the Artifact contents.
type: string type: string
required: required:
- path - path
@ -481,12 +484,14 @@ spec:
be detected. be detected.
type: string type: string
observedGeneration: observedGeneration:
description: ObservedGeneration is the last observed generation. description: ObservedGeneration is the last observed generation of
the Bucket object.
format: int64 format: int64
type: integer type: integer
url: url:
description: URL is the fetch link for the artifact output of the description: URL is the dynamic fetch link for the latest Artifact.
last Bucket sync. It is provided on a "best effort" basis, and using the precise BucketStatus.Artifact
data is recommended.
type: string type: string
type: object type: object
type: object type: object

View File

@ -541,32 +541,32 @@ spec:
repository sync. repository sync.
properties: properties:
checksum: checksum:
description: Checksum is the SHA256 checksum of the artifact. description: Checksum is the SHA256 checksum of the Artifact file.
type: string type: string
lastUpdateTime: lastUpdateTime:
description: LastUpdateTime is the timestamp corresponding to description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact. the last update of the Artifact.
format: date-time format: date-time
type: string type: string
path: path:
description: Path is the relative file path of this Artifact. description: Path is the relative file path of the Artifact. It
It can be used to locate the Artifact file in the root of the can be used to locate the file in the root of the Artifact storage
Artifact storage on the local file system of the controller on the local file system of the controller managing the Source.
managing the Source.
type: string type: string
revision: revision:
description: Revision is a human readable identifier traceable description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc. tag, a Helm chart version, etc.
type: string type: string
size: size:
description: Size is the number of bytes in the file. description: Size is the number of bytes in the file.
format: int64 format: int64
type: integer type: integer
url: url:
description: URL is the HTTP address of this artifact. It is used description: URL is the HTTP address of the Artifact as exposed
by the consumers of the artifacts to fetch and use the artifacts. by the controller managing the Source. It can be used to retrieve
It is expected to be resolvable from within the cluster. the Artifact for consumption, e.g. by another controller applying
the Artifact contents.
type: string type: string
required: required:
- path - path
@ -646,36 +646,37 @@ spec:
description: IncludedArtifacts represents the included artifacts from description: IncludedArtifacts represents the included artifacts from
the last successful repository sync. the last successful repository sync.
items: items:
description: Artifact represents the output of a Source synchronisation. description: Artifact represents the output of a Source reconciliation.
properties: properties:
checksum: checksum:
description: Checksum is the SHA256 checksum of the artifact. description: Checksum is the SHA256 checksum of the Artifact
file.
type: string type: string
lastUpdateTime: lastUpdateTime:
description: LastUpdateTime is the timestamp corresponding to description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact. the last update of the Artifact.
format: date-time format: date-time
type: string type: string
path: path:
description: Path is the relative file path of this Artifact. description: Path is the relative file path of the Artifact.
It can be used to locate the Artifact file in the root of It can be used to locate the file in the root of the Artifact
the Artifact storage on the local file system of the controller storage on the local file system of the controller managing
managing the Source. the Source.
type: string type: string
revision: revision:
description: Revision is a human readable identifier traceable description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc. tag, a Helm chart version, etc.
type: string type: string
size: size:
description: Size is the number of bytes in the file. description: Size is the number of bytes in the file.
format: int64 format: int64
type: integer type: integer
url: url:
description: URL is the HTTP address of this artifact. It is description: URL is the HTTP address of the Artifact as exposed
used by the consumers of the artifacts to fetch and use the by the controller managing the Source. It can be used to retrieve
artifacts. It is expected to be resolvable from within the the Artifact for consumption, e.g. by another controller applying
cluster. the Artifact contents.
type: string type: string
required: required:
- path - path

View File

@ -420,32 +420,32 @@ spec:
chart sync. chart sync.
properties: properties:
checksum: checksum:
description: Checksum is the SHA256 checksum of the artifact. description: Checksum is the SHA256 checksum of the Artifact file.
type: string type: string
lastUpdateTime: lastUpdateTime:
description: LastUpdateTime is the timestamp corresponding to description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact. the last update of the Artifact.
format: date-time format: date-time
type: string type: string
path: path:
description: Path is the relative file path of this Artifact. description: Path is the relative file path of the Artifact. It
It can be used to locate the Artifact file in the root of the can be used to locate the file in the root of the Artifact storage
Artifact storage on the local file system of the controller on the local file system of the controller managing the Source.
managing the Source.
type: string type: string
revision: revision:
description: Revision is a human readable identifier traceable description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc. tag, a Helm chart version, etc.
type: string type: string
size: size:
description: Size is the number of bytes in the file. description: Size is the number of bytes in the file.
format: int64 format: int64
type: integer type: integer
url: url:
description: URL is the HTTP address of this artifact. It is used description: URL is the HTTP address of the Artifact as exposed
by the consumers of the artifacts to fetch and use the artifacts. by the controller managing the Source. It can be used to retrieve
It is expected to be resolvable from within the cluster. the Artifact for consumption, e.g. by another controller applying
the Artifact contents.
type: string type: string
required: required:
- path - path

View File

@ -346,32 +346,32 @@ spec:
repository sync. repository sync.
properties: properties:
checksum: checksum:
description: Checksum is the SHA256 checksum of the artifact. description: Checksum is the SHA256 checksum of the Artifact file.
type: string type: string
lastUpdateTime: lastUpdateTime:
description: LastUpdateTime is the timestamp corresponding to description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact. the last update of the Artifact.
format: date-time format: date-time
type: string type: string
path: path:
description: Path is the relative file path of this Artifact. description: Path is the relative file path of the Artifact. It
It can be used to locate the Artifact file in the root of the can be used to locate the file in the root of the Artifact storage
Artifact storage on the local file system of the controller on the local file system of the controller managing the Source.
managing the Source.
type: string type: string
revision: revision:
description: Revision is a human readable identifier traceable description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc. tag, a Helm chart version, etc.
type: string type: string
size: size:
description: Size is the number of bytes in the file. description: Size is the number of bytes in the file.
format: int64 format: int64
type: integer type: integer
url: url:
description: URL is the HTTP address of this artifact. It is used description: URL is the HTTP address of the Artifact as exposed
by the consumers of the artifacts to fetch and use the artifacts. by the controller managing the Source. It can be used to retrieve
It is expected to be resolvable from within the cluster. the Artifact for consumption, e.g. by another controller applying
the Artifact contents.
type: string type: string
required: required:
- path - path

View File

@ -69,9 +69,9 @@ import (
// -> s > 100 // -> s > 100
const maxConcurrentBucketFetches = 100 const maxConcurrentBucketFetches = 100
// bucketReadyConditions contains all the conditions information needed // bucketReadyCondition contains the information required to summarize a
// for Bucket Ready status conditions summary calculation. // v1beta2.Bucket Ready Condition.
var bucketReadyConditions = summarize.Conditions{ var bucketReadyCondition = summarize.Conditions{
Target: meta.ReadyCondition, Target: meta.ReadyCondition,
Owned: []string{ Owned: []string{
sourcev1.ArtifactOutdatedCondition, sourcev1.ArtifactOutdatedCondition,
@ -99,7 +99,7 @@ var bucketReadyConditions = summarize.Conditions{
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
// BucketReconciler reconciles a Bucket object // BucketReconciler reconciles a v1beta2.Bucket object.
type BucketReconciler struct { type BucketReconciler struct {
client.Client client.Client
kuberecorder.EventRecorder kuberecorder.EventRecorder
@ -135,9 +135,10 @@ type BucketProvider interface {
Close(context.Context) Close(context.Context)
} }
// bucketReconcilerFunc is the function type for all the bucket reconciler // bucketReconcileFunc is the function type for all the v1beta2.Bucket
// functions. // (sub)reconcile functions. The type implementations are grouped and
type bucketReconcilerFunc func(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error) // executed serially to perform the complete reconcile of the object.
type bucketReconcileFunc func(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error)
// etagIndex is an index of storage object keys and their Etag values. // etagIndex is an index of storage object keys and their Etag values.
type etagIndex struct { type etagIndex struct {
@ -260,7 +261,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
defer func() { defer func() {
summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper)
summarizeOpts := []summarize.Option{ summarizeOpts := []summarize.Option{
summarize.WithConditions(bucketReadyConditions), summarize.WithConditions(bucketReadyCondition),
summarize.WithReconcileResult(recResult), summarize.WithReconcileResult(recResult),
summarize.WithReconcileError(retErr), summarize.WithReconcileError(retErr),
summarize.WithIgnoreNotFound(), summarize.WithIgnoreNotFound(),
@ -268,7 +269,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
summarize.RecordContextualError, summarize.RecordContextualError,
summarize.RecordReconcileReq, summarize.RecordReconcileReq,
), ),
summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
summarize.WithPatchFieldOwner(r.ControllerName), summarize.WithPatchFieldOwner(r.ControllerName),
} }
result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)
@ -292,7 +293,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
} }
// Reconcile actual object // Reconcile actual object
reconcilers := []bucketReconcilerFunc{ reconcilers := []bucketReconcileFunc{
r.reconcileStorage, r.reconcileStorage,
r.reconcileSource, r.reconcileSource,
r.reconcileArtifact, r.reconcileArtifact,
@ -301,10 +302,10 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
return return
} }
// reconcile steps iterates through the actual reconciliation tasks for objec, // reconcile iterates through the gitRepositoryReconcileFunc tasks for the
// it returns early on the first step that returns ResultRequeue or produces an // object. It returns early on the first call that returns
// error. // reconcile.ResultRequeue, or produces an error.
func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcilerFunc) (sreconcile.Result, error) { func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) {
if obj.Generation != obj.Status.ObservedGeneration { if obj.Generation != obj.Status.ObservedGeneration {
conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation)
} }
@ -317,7 +318,11 @@ func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket,
Reason: sourcev1.StorageOperationFailedReason, Reason: sourcev1.StorageOperationFailedReason,
} }
} }
defer os.RemoveAll(tmpDir) defer func() {
if err = os.RemoveAll(tmpDir); err != nil {
ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary working directory")
}
}()
// Run the sub-reconcilers and build the result of reconciliation. // Run the sub-reconcilers and build the result of reconciliation.
var ( var (
@ -345,11 +350,17 @@ func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket,
return res, resErr return res, resErr
} }
// reconcileStorage ensures the current state of the storage matches the desired and previously observed state. // reconcileStorage ensures the current state of the storage matches the
// desired and previously observed state.
// //
// All artifacts for the resource except for the current one are garbage collected from the storage. // All Artifacts for the object except for the current one in the Status are
// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. // garbage collected from the Storage.
// If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. // If the Artifact in the Status of the object disappeared from the Storage,
// it is removed from the object.
// If the object does not have an Artifact in its Status, a Reconciling
// condition is added.
// The hostname of any URL in the Status of the object are updated, to ensure
// they match the Storage server hostname of current runtime.
func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket, _ *etagIndex, _ string) (sreconcile.Result, error) { func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket, _ *etagIndex, _ string) (sreconcile.Result, error) {
// Garbage collect previous advertised artifact(s) from storage // Garbage collect previous advertised artifact(s) from storage
_ = r.garbageCollect(ctx, obj) _ = r.garbageCollect(ctx, obj)
@ -374,10 +385,11 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B
return sreconcile.ResultSuccess, nil return sreconcile.ResultSuccess, nil
} }
// reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the // reconcileSource fetches the upstream bucket contents with the client for the
// result. // given object's Provider, and returns the result.
// If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret // When a SecretRef is defined, it attempts to fetch the Secret before calling
// fails, it records v1beta1.FetchFailedCondition=True and returns early. // the provider. If this fails, it records v1beta2.FetchFailedCondition=True on
// the object and returns early.
func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error) { func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error) {
secret, err := r.getBucketSecret(ctx, obj) secret, err := r.getBucketSecret(ctx, obj)
if err != nil { if err != nil {
@ -470,13 +482,15 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu
return sreconcile.ResultSuccess, nil return sreconcile.ResultSuccess, nil
} }
// reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the // reconcileArtifact archives a new Artifact to the Storage, if the current
// given data. // (Status) data on the object does not match the given.
// //
// The inspection of the given data to the object is differed, ensuring any stale observations as // The inspection of the given data to the object is differed, ensuring any
// If the given artifact does not differ from the object's current, it returns early. // stale observations like v1beta2.ArtifactOutdatedCondition are removed.
// On a successful archive, the artifact in the status of the given object is set, and the symlink in the storage is // If the given Artifact does not differ from the object's current, it returns
// updated to its path. // early.
// On a successful archive, the Artifact in the Status of the object is set,
// and the symlink in the Storage is updated to its path.
func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error) { func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error) {
// Calculate revision // Calculate revision
revision, err := index.Revision() revision, err := index.Revision()
@ -561,8 +575,9 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.
return sreconcile.ResultSuccess, nil return sreconcile.ResultSuccess, nil
} }
// reconcileDelete handles the deletion of an object. It first garbage collects all artifacts for the object from the // reconcileDelete handles the deletion of the object.
// artifact storage, if successful, the finalizer is removed from the object. // It first garbage collects all Artifacts for the object from the Storage.
// Removing the finalizer from the object if successful.
func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) { func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) {
// Garbage collect the resource's artifacts // Garbage collect the resource's artifacts
if err := r.garbageCollect(ctx, obj); err != nil { if err := r.garbageCollect(ctx, obj); err != nil {
@ -577,9 +592,11 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bu
return sreconcile.ResultEmpty, nil return sreconcile.ResultEmpty, nil
} }
// garbageCollect performs a garbage collection for the given v1beta1.Bucket. It removes all but the current // garbageCollect performs a garbage collection for the given object.
// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the //
// resource. // It removes all but the current Artifact from the Storage, unless the
// deletion timestamp on the object is set. Which will result in the
// removal of all Artifacts for the objects.
func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error {
if !obj.DeletionTimestamp.IsZero() { if !obj.DeletionTimestamp.IsZero() {
if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
@ -626,6 +643,10 @@ func (r *BucketReconciler) getBucketSecret(ctx context.Context, obj *sourcev1.Bu
} }
// eventLogf records event and logs at the same time. // eventLogf records event and logs at the same time.
//
// This log is different from the debug log in the EventRecorder, in the sense
// that this is a simple log. While the debug log contains complete details
// about the event.
func (r *BucketReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { func (r *BucketReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) {
r.annotatedEventLogf(ctx, obj, nil, eventType, reason, messageFmt, args...) r.annotatedEventLogf(ctx, obj, nil, eventType, reason, messageFmt, args...)
} }

View File

@ -124,7 +124,7 @@ func TestBucketReconciler_Reconcile(t *testing.T) {
}, timeout).Should(BeTrue()) }, timeout).Should(BeTrue())
// Check if the object status is valid. // Check if the object status is valid.
condns := &status.Conditions{NegativePolarity: bucketReadyConditions.NegativePolarity} condns := &status.Conditions{NegativePolarity: bucketReadyCondition.NegativePolarity}
checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns)
checker.CheckErr(ctx, obj) checker.CheckErr(ctx, obj)

View File

@ -19,7 +19,7 @@ Resource Types:
</li></ul> </li></ul>
<h3 id="source.toolkit.fluxcd.io/v1beta2.Bucket">Bucket <h3 id="source.toolkit.fluxcd.io/v1beta2.Bucket">Bucket
</h3> </h3>
<p>Bucket is the Schema for the buckets API</p> <p>Bucket is the Schema for the buckets API.</p>
<div class="md-typeset__scrollwrap"> <div class="md-typeset__scrollwrap">
<div class="md-typeset__table"> <div class="md-typeset__table">
<table> <table>
@ -83,7 +83,9 @@ string
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>The S3 compatible storage provider name, default (&lsquo;generic&rsquo;).</p> <p>Provider of the object storage bucket.
Defaults to &lsquo;generic&rsquo;, which expects an S3 (API) compatible object
storage.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -94,7 +96,7 @@ string
</em> </em>
</td> </td>
<td> <td>
<p>The bucket name.</p> <p>BucketName is the name of the object storage bucket.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -105,7 +107,7 @@ string
</em> </em>
</td> </td>
<td> <td>
<p>The bucket endpoint address.</p> <p>Endpoint is the object storage address the BucketName is located at.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -117,7 +119,7 @@ bool
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>Insecure allows connecting to a non-TLS S3 HTTP endpoint.</p> <p>Insecure allows connecting to a non-TLS HTTP Endpoint.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -129,7 +131,7 @@ string
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>The bucket region.</p> <p>Region of the Endpoint where the BucketName is located in.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -143,7 +145,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>The name of the secret containing authentication credentials <p>SecretRef specifies the Secret containing authentication credentials
for the Bucket.</p> for the Bucket.</p>
</td> </td>
</tr> </tr>
@ -157,7 +159,7 @@ Kubernetes meta/v1.Duration
</em> </em>
</td> </td>
<td> <td>
<p>The interval at which to check for bucket updates.</p> <p>Interval at which to check the Endpoint for updates.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -171,7 +173,7 @@ Kubernetes meta/v1.Duration
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>The timeout for fetch operations, defaults to 60s.</p> <p>Timeout for fetch operations, defaults to 60s.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -197,7 +199,8 @@ bool
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>This flag tells the controller to suspend the reconciliation of this source.</p> <p>Suspend tells the controller to suspend the reconciliation of this
Bucket.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -211,7 +214,9 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>AccessFrom defines an Access Control List for allowing cross-namespace references to this object.</p> <p>AccessFrom specifies an Access Control List for allowing cross-namespace
references to this object.
NOTE: Not implemented, provisional as of <a href="https://github.com/fluxcd/flux2/pull/2092">https://github.com/fluxcd/flux2/pull/2092</a></p>
</td> </td>
</tr> </tr>
</table> </table>
@ -854,7 +859,7 @@ HelmRepositoryStatus
<a href="#source.toolkit.fluxcd.io/v1beta2.HelmChartStatus">HelmChartStatus</a>, <a href="#source.toolkit.fluxcd.io/v1beta2.HelmChartStatus">HelmChartStatus</a>,
<a href="#source.toolkit.fluxcd.io/v1beta2.HelmRepositoryStatus">HelmRepositoryStatus</a>) <a href="#source.toolkit.fluxcd.io/v1beta2.HelmRepositoryStatus">HelmRepositoryStatus</a>)
</p> </p>
<p>Artifact represents the output of a Source synchronisation.</p> <p>Artifact represents the output of a Source reconciliation.</p>
<div class="md-typeset__scrollwrap"> <div class="md-typeset__scrollwrap">
<div class="md-typeset__table"> <div class="md-typeset__table">
<table> <table>
@ -873,9 +878,9 @@ string
</em> </em>
</td> </td>
<td> <td>
<p>Path is the relative file path of this Artifact. <p>Path is the relative file path of the Artifact. It can be used to locate
It can be used to locate the Artifact file in the root of the Artifact the file in the root of the Artifact storage on the local file system of
storage on the local file system of the controller managing the Source.</p> the controller managing the Source.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -886,9 +891,9 @@ string
</em> </em>
</td> </td>
<td> <td>
<p>URL is the HTTP address of this artifact. <p>URL is the HTTP address of the Artifact as exposed by the controller
It is used by the consumers of the artifacts to fetch and use the managing the Source. It can be used to retrieve the Artifact for
artifacts. It is expected to be resolvable from within the cluster.</p> consumption, e.g. by another controller applying the Artifact contents.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -900,9 +905,8 @@ string
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>Revision is a human readable identifier traceable in the origin source <p>Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.</p>
chart version, etc.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -914,7 +918,7 @@ string
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>Checksum is the SHA256 checksum of the artifact.</p> <p>Checksum is the SHA256 checksum of the Artifact file.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -927,8 +931,8 @@ Kubernetes meta/v1.Time
</em> </em>
</td> </td>
<td> <td>
<p>LastUpdateTime is the timestamp corresponding to the last update of this <p>LastUpdateTime is the timestamp corresponding to the last update of the
artifact.</p> Artifact.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -953,7 +957,8 @@ int64
(<em>Appears on:</em> (<em>Appears on:</em>
<a href="#source.toolkit.fluxcd.io/v1beta2.Bucket">Bucket</a>) <a href="#source.toolkit.fluxcd.io/v1beta2.Bucket">Bucket</a>)
</p> </p>
<p>BucketSpec defines the desired state of an S3 compatible bucket</p> <p>BucketSpec specifies the required configuration to produce an Artifact for
an object storage bucket.</p>
<div class="md-typeset__scrollwrap"> <div class="md-typeset__scrollwrap">
<div class="md-typeset__table"> <div class="md-typeset__table">
<table> <table>
@ -973,7 +978,9 @@ string
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>The S3 compatible storage provider name, default (&lsquo;generic&rsquo;).</p> <p>Provider of the object storage bucket.
Defaults to &lsquo;generic&rsquo;, which expects an S3 (API) compatible object
storage.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -984,7 +991,7 @@ string
</em> </em>
</td> </td>
<td> <td>
<p>The bucket name.</p> <p>BucketName is the name of the object storage bucket.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -995,7 +1002,7 @@ string
</em> </em>
</td> </td>
<td> <td>
<p>The bucket endpoint address.</p> <p>Endpoint is the object storage address the BucketName is located at.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -1007,7 +1014,7 @@ bool
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>Insecure allows connecting to a non-TLS S3 HTTP endpoint.</p> <p>Insecure allows connecting to a non-TLS HTTP Endpoint.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -1019,7 +1026,7 @@ string
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>The bucket region.</p> <p>Region of the Endpoint where the BucketName is located in.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -1033,7 +1040,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>The name of the secret containing authentication credentials <p>SecretRef specifies the Secret containing authentication credentials
for the Bucket.</p> for the Bucket.</p>
</td> </td>
</tr> </tr>
@ -1047,7 +1054,7 @@ Kubernetes meta/v1.Duration
</em> </em>
</td> </td>
<td> <td>
<p>The interval at which to check for bucket updates.</p> <p>Interval at which to check the Endpoint for updates.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -1061,7 +1068,7 @@ Kubernetes meta/v1.Duration
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>The timeout for fetch operations, defaults to 60s.</p> <p>Timeout for fetch operations, defaults to 60s.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -1087,7 +1094,8 @@ bool
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>This flag tells the controller to suspend the reconciliation of this source.</p> <p>Suspend tells the controller to suspend the reconciliation of this
Bucket.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -1101,7 +1109,9 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>AccessFrom defines an Access Control List for allowing cross-namespace references to this object.</p> <p>AccessFrom specifies an Access Control List for allowing cross-namespace
references to this object.
NOTE: Not implemented, provisional as of <a href="https://github.com/fluxcd/flux2/pull/2092">https://github.com/fluxcd/flux2/pull/2092</a></p>
</td> </td>
</tr> </tr>
</tbody> </tbody>
@ -1114,7 +1124,7 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
(<em>Appears on:</em> (<em>Appears on:</em>
<a href="#source.toolkit.fluxcd.io/v1beta2.Bucket">Bucket</a>) <a href="#source.toolkit.fluxcd.io/v1beta2.Bucket">Bucket</a>)
</p> </p>
<p>BucketStatus defines the observed state of a bucket</p> <p>BucketStatus records the observed state of a Bucket.</p>
<div class="md-typeset__scrollwrap"> <div class="md-typeset__scrollwrap">
<div class="md-typeset__table"> <div class="md-typeset__table">
<table> <table>
@ -1134,7 +1144,7 @@ int64
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>ObservedGeneration is the last observed generation.</p> <p>ObservedGeneration is the last observed generation of the Bucket object.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -1160,7 +1170,9 @@ string
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>URL is the fetch link for the artifact output of the last Bucket sync.</p> <p>URL is the dynamic fetch link for the latest Artifact.
It is provided on a &ldquo;best effort&rdquo; basis, and using the precise
BucketStatus.Artifact data is recommended.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -1174,7 +1186,7 @@ Artifact
</td> </td>
<td> <td>
<em>(Optional)</em> <em>(Optional)</em>
<p>Artifact represents the output of the last successful Bucket sync.</p> <p>Artifact represents the last successful Bucket reconciliation.</p>
</td> </td>
</tr> </tr>
<tr> <tr>
@ -2178,7 +2190,10 @@ string
</div> </div>
<h3 id="source.toolkit.fluxcd.io/v1beta2.Source">Source <h3 id="source.toolkit.fluxcd.io/v1beta2.Source">Source
</h3> </h3>
<p>Source interface must be supported by all API types.</p> <p>Source interface must be supported by all API types.
Source is the interface that provides generic access to the Artifact and
interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
API group.</p>
<div class="admonition note"> <div class="admonition note">
<p class="last">This page was automatically generated with <code>gen-crd-api-reference-docs</code></p> <p class="last">This page was automatically generated with <code>gen-crd-api-reference-docs</code></p>
</div> </div>

View File

@ -20,6 +20,7 @@ of the components using them.
## API Specification ## API Specification
* [v1beta2](v1beta2/README.md)
* [v1beta1](v1beta1/README.md) * [v1beta1](v1beta1/README.md)
## Implementation ## Implementation

View File

@ -0,0 +1,20 @@
# source.toolkit.fluxcd.io/v1beta2
This is the v1beta2 API specification for defining the desired state of sources of Kubernetes clusters.
## Specification
* Source kinds:
+ GitRepository
+ HelmRepository
+ HelmChart
+ [Bucket](buckets.md)
## Implementation
* [source-controller](https://github.com/fluxcd/source-controller/)
## Consumers
* [kustomize-controller](https://github.com/fluxcd/kustomize-controller/)
* [helm-controller](https://github.com/fluxcd/helm-controller/)

View File

@ -0,0 +1,780 @@
# Buckets
The `Bucket` API defines a Source to produce an Artifact for objects from storage
solutions like Amazon S3, Google Cloud Storage buckets, or any other solution
with an S3-compatible API such as Minio, Alibaba Cloud OSS and others.
## Example
The following is an example of a Bucket. It creates a tarball (`.tar.gz`)
Artifact with the fetched objects from an object storage with an S3
compatible API (e.g. [Minio](https://min.io)):
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket
metadata:
name: minio-bucket
namespace: default
spec:
interval: 5m0s
endpoint: minio.example.com
insecure: true
secretRef:
name: minio-bucket-secret
bucketName: example
---
apiVersion: v1
kind: Secret
metadata:
name: minio-bucket-secret
namespace: default
type: Opaque
stringData:
accesskey: <access key>
secretkey: <secret key>
```
In the above example:
- A Bucket named `minio-bucket` is created, indicated by the
`.metadata.name` field.
- The source-controller checks the object storage bucket every five minutes,
indicated by the `.spec.interval` field.
- It authenticates to the `minio.example.com` endpoint with
  the static credentials from the `minio-bucket-secret` Secret data, indicated by
  the `.spec.endpoint` and `.spec.secretRef.name` fields.
- A list of object keys and their [etags](https://en.wikipedia.org/wiki/HTTP_ETag)
in the `.spec.bucketName` bucket is compiled, while filtering the keys using
[default ignore rules](#default-exclusions).
- The SHA256 sum of the list is used as Artifact revision, reported
in-cluster in the `.status.artifact.revision` field.
- When the current Bucket revision differs from the latest calculated revision,
all objects are fetched and archived.
- The new Artifact is reported in the `.status.artifact` field.
You can run this example by saving the manifest into `bucket.yaml`, and
changing the Bucket and Secret values to target a Minio instance you have
control over.
**Note:** For more advanced examples targeting e.g. Amazon S3 or GCP, see
[Provider](#provider).
1. Apply the resource on the cluster:
```sh
kubectl apply -f bucket.yaml
```
2. Run `kubectl get buckets` to see the Bucket:
```console
NAME ENDPOINT READY STATUS AGE
minio-bucket minio.example.com True stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' 34s
```
3. Run `kubectl describe bucket minio-bucket` to see the [Artifact](#artifact)
and [Conditions](#conditions) in the Bucket's Status:
```console
...
Status:
Artifact:
Checksum: 72aa638abb455ca5f9ef4825b949fd2de4d4be0a74895bf7ed2338622cd12686
Last Update Time: 2022-02-01T23:43:38Z
Path: bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz
Revision: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz
Conditions:
Last Transition Time: 2022-02-01T23:43:38Z
Message: stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
Observed Generation: 1
Reason: Succeeded
Status: True
Type: Ready
Observed Generation: 1
URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal BucketOperationSucceed 43s source-controller downloaded 16 files with revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' from 'my-minio-bucket'
Normal NewArtifact 43s source-controller stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
## Writing a Bucket spec
As with all other Kubernetes config, a Bucket needs `apiVersion`, `kind`, and
`metadata` fields. The name of a Bucket object must be a valid
[DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
A Bucket also needs a
[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status).
### Provider
The `.spec.provider` field allows for specifying a Provider to enable provider
specific configurations, for example to communicate with a non-S3 compatible
API endpoint, or to change the authentication method.
Supported options are:
- [Generic](#generic)
- [AWS](#aws)
- [GCP](#gcp)
If you do not specify `.spec.provider`, it defaults to `generic`.
#### Generic
When a Bucket's `spec.provider` is set to `generic`, the controller will
attempt to communicate with the specified [Endpoint](#endpoint) using the
[Minio Client SDK](https://github.com/minio/minio-go), which can communicate
with any Amazon S3 compatible object storage (including
[GCS](https://cloud.google.com/storage/docs/interoperability),
[Wasabi](https://wasabi-support.zendesk.com/hc/en-us/articles/360002079671-How-do-I-use-Minio-Client-with-Wasabi-),
and many others).
The `generic` Provider _requires_ a [Secret reference](#secret-reference) to a
Secret with `.data.accesskey` and `.data.secretkey` values, used to
authenticate with static credentials.
The Provider allows for specifying a region the bucket is in using the
[`.spec.region` field](#region), if required by the [Endpoint](#endpoint).
##### Generic example
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket
metadata:
name: generic-insecure
namespace: default
spec:
provider: generic
interval: 5m0s
bucketName: podinfo
endpoint: minio.minio.svc.cluster.local:9000
timeout: 60s
insecure: true
secretRef:
name: minio-credentials
---
apiVersion: v1
kind: Secret
metadata:
name: minio-credentials
namespace: default
type: Opaque
data:
accesskey: <BASE64>
secretkey: <BASE64>
```
#### AWS
When a Bucket's `.spec.provider` field is set to `aws`, the source-controller
will attempt to communicate with the specified [Endpoint](#endpoint) using the
[Minio Client SDK](https://github.com/minio/minio-go).
Without a [Secret reference](#secret-reference), authorization using
credentials retrieved from the AWS EC2 service is attempted by default. When
a reference is specified, it expects a Secret with `.data.accesskey` and
`.data.secretkey` values, used to authenticate with static credentials.
The Provider allows for specifying the
[Amazon AWS Region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions)
using the [`.spec.region` field](#region).
##### AWS EC2 example
**Note:** On EKS you have to create an [IAM role](#aws-iam-role-example) for
the source-controller service account that grants access to the bucket.
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket
metadata:
name: aws
namespace: default
spec:
interval: 5m0s
provider: aws
bucketName: podinfo
endpoint: s3.amazonaws.com
region: us-east-1
timeout: 30s
```
##### AWS IAM role example
Replace `<bucket-name>` with the specified `.spec.bucketName`.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::<bucket-name>/*"
},
{
"Sid": "",
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::<bucket-name>"
}
]
}
```
##### AWS static auth example
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket
metadata:
name: aws
namespace: default
spec:
interval: 5m0s
provider: aws
bucketName: podinfo
endpoint: s3.amazonaws.com
region: us-east-1
secretRef:
name: aws-credentials
---
apiVersion: v1
kind: Secret
metadata:
name: aws-credentials
namespace: default
type: Opaque
data:
accesskey: <BASE64>
secretkey: <BASE64>
```
#### GCP
When a Bucket's `.spec.provider` is set to `gcp`, the source-controller will
attempt to communicate with the specified [Endpoint](#endpoint) using the
[Google Client SDK](https://github.com/googleapis/google-api-go-client).
Without a [Secret reference](#secret-reference), authorization using a
workload identity is attempted by default. The workload identity is obtained
using the `GOOGLE_APPLICATION_CREDENTIALS` environment variable, falling back
to the Google Application Credential file in the config directory.
When a reference is specified, it expects a Secret with a `.data.serviceaccount`
value with a GCP service account JSON file.
The Provider allows for specifying the
[Bucket location](https://cloud.google.com/storage/docs/locations) using the
[`.spec.region` field](#region).
##### GCP example
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket
metadata:
name: gcp-workload-identity
namespace: default
spec:
interval: 5m0s
provider: gcp
bucketName: podinfo
endpoint: storage.googleapis.com
region: us-east-1
timeout: 30s
```
##### GCP static auth example
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket
metadata:
name: gcp-secret
namespace: default
spec:
interval: 5m0s
provider: gcp
bucketName: <bucket-name>
endpoint: storage.googleapis.com
region: <bucket-region>
secretRef:
name: gcp-service-account
---
apiVersion: v1
kind: Secret
metadata:
name: gcp-service-account
namespace: default
type: Opaque
data:
serviceaccount: <BASE64>
```
Where the (base64 decoded) value of `.data.serviceaccount` looks like this:
```json
{
"type": "service_account",
"project_id": "example",
"private_key_id": "28qwgh3gdf5hj3gb5fj3gsu5yfgh34f45324568hy2",
"private_key": "-----BEGIN PRIVATE KEY-----\nHwethgy123hugghhhbdcu6356dgyjhsvgvGFDHYgcdjbvcdhbsx63c\n76tgycfehuhVGTFYfw6t7ydgyVgydheyhuggycuhejwy6t35fthyuhegvcetf\nTFUHGTygghubhxe65ygt6tgyedgy326hucyvsuhbhcvcsjhcsjhcsvgdtHFCGi\nHcye6tyyg3gfyuhchcsbhygcijdbhyyTF66tuhcevuhdcbhuhhvftcuhbh3uh7t6y\nggvftUHbh6t5rfthhuGVRtfjhbfcrd5r67yuhuvgFTYjgvtfyghbfcdrhyjhbfctfdfyhvfg\ntgvggtfyghvft6tugvTF5r66tujhgvfrtyhhgfct6y7ytfr5ctvghbhhvtghhjvcttfycf\nffxfghjbvgcgyt67ujbgvctfyhVC7uhvgcyjvhhjvyujc\ncgghgvgcfhgg765454tcfthhgftyhhvvyvvffgfryyu77reredswfthhgfcftycfdrttfhf/\n-----END PRIVATE KEY-----\n",
"client_email": "test@example.iam.gserviceaccount.com",
"client_id": "32657634678762536746",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%40podinfo.iam.gserviceaccount.com"
}
```
### Interval
`.spec.interval` is a required field that specifies the interval at which the
object storage bucket must be consulted.
After successfully reconciling a Bucket object, the source-controller requeues
the object for inspection after the specified interval. The value must be in a
[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration),
e.g. `10m0s` to look at the object storage bucket every 10 minutes.
If the `.metadata.generation` of a resource changes (due to e.g. the apply of a
change to the spec), this is handled instantly outside of the interval window.
### Endpoint
`.spec.endpoint` is a required field that specifies the HTTP/S object storage
endpoint to connect to and fetch objects from. Connecting to an (insecure)
HTTP endpoint requires enabling [`.spec.insecure`](#insecure).
Some endpoints require the specification of a [`.spec.region`](#region),
see [Provider](#provider) for more (provider specific) examples.
### Bucket name
`.spec.bucketName` is a required field that specifies which object storage
bucket on the [Endpoint](#endpoint) objects should be fetched from.
See [Provider](#provider) for more (provider specific) examples.
### Region
`.spec.region` is an optional field to specify the region a
[`.spec.bucketName`](#bucket-name) is located in.
See [Provider](#provider) for more (provider specific) examples.
### Insecure
`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP)
[endpoint](#endpoint), if set to `true`. The default value is `false`,
denying insecure (HTTP) connections.
### Timeout
`.spec.timeout` is an optional field to specify a timeout for object storage
fetch operations. The value must be in a
[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration),
e.g. `1m30s` for a timeout of one minute and thirty seconds.
The default value is `60s`.
### Secret reference
`.spec.secretRef.name` is an optional field to specify a name reference to a
Secret in the same namespace as the Bucket, containing authentication
credentials for the object storage. For some `.spec.provider` implementations
the presence of the field is required, see [Provider](#provider) for more
details and examples.
### Ignore
`.spec.ignore` is an optional field to specify rules in [the `.gitignore`
pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Storage
objects whose keys match the defined rules are excluded while fetching.
When specified, `.spec.ignore` overrides the [default exclusion
list](#default-exclusions), and may overrule the [`.sourceignore` file
exclusions](#sourceignore-file). See [excluding files](#excluding-files)
for more information.
### Suspend
`.spec.suspend` is an optional field to suspend the reconciliation of a Bucket.
When set to `true`, the controller will stop reconciling the Bucket, and changes
to the resource or in the object storage bucket will not result in a new
Artifact. When the field is set to `false` or removed, it will resume.
For practical information, see
[suspending and resuming](#suspending-and-resuming).
## Working with Buckets
### Excluding files
By default, storage bucket objects which match the [default exclusion
rules](#default-exclusions) are excluded while fetching. It is possible to
overwrite and/or overrule the default exclusions using a file in the bucket
and/or an in-spec set of rules.
#### `.sourceignore` file
Excluding files is possible by adding a `.sourceignore` file in the root of the
object storage bucket. The `.sourceignore` file follows [the `.gitignore`
pattern format](https://git-scm.com/docs/gitignore#_pattern_format), and
pattern entries may overrule [default exclusions](#default-exclusions).
#### Ignore spec
Another option is to define the exclusions within the Bucket spec, using the
[`.spec.ignore` field](#ignore). Specified rules override the
[default exclusion list](#default-exclusions), and may overrule `.sourceignore`
file exclusions.
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket
metadata:
name: <bucket-name>
spec:
ignore: |
# exclude all
/*
# include deploy dir
!/deploy
# exclude file extensions from deploy dir
/deploy/**/*.md
/deploy/**/*.txt
```
### Triggering a reconcile
To manually tell the source-controller to reconcile a Bucket outside of the
[specified interval window](#interval), a Bucket can be annotated with
`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
queues the Bucket for reconciliation if the `<arbitrary value>` differs from
the last value the controller acted on, as reported in
[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
Using `kubectl`:
```sh
kubectl annotate --overwrite bucket/<bucket-name> reconcile.fluxcd.io/requestedAt="$(date +%s)"
```
Using `flux`:
```sh
flux reconcile source bucket <bucket-name>
```
### Waiting for `Ready`
When a change is applied, it is possible to wait for the Bucket to reach a
[ready state](#ready-bucket) using `kubectl`:
```sh
kubectl wait bucket/<bucket-name> --for=condition=ready --timeout=1m
```
### Suspending and resuming
When you find yourself in a situation where you temporarily want to pause the
reconciliation of a Bucket, you can suspend it using the [`.spec.suspend`
field](#suspend).
#### Suspend a Bucket
In your YAML declaration:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket
metadata:
name: <bucket-name>
spec:
suspend: true
```
Using `kubectl`:
```sh
kubectl patch bucket <bucket-name> -p '{"spec": {"suspend": true}}'
```
Using `flux`:
```sh
flux suspend source bucket <bucket-name>
```
**Note:** When a Bucket has an Artifact and is suspended, and this Artifact
later disappears from the storage due to e.g. the source-controller Pod being
evicted from a Node, this will not be reflected in the Bucket's Status until it
is resumed.
#### Resume a Bucket
In your YAML declaration, comment out (or remove) the field:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket
metadata:
name: <bucket-name>
spec:
# suspend: true
```
**Note:** Setting the field value to `false` has the same effect as removing
it, but does not allow for "hot patching" using e.g. `kubectl` while practicing
GitOps; as the manually applied patch would be overwritten by the declared
state in Git.
Using `kubectl`:
```sh
kubectl patch bucket <bucket-name> -p '{"spec": {"suspend": false}}'
```
Using `flux`:
```sh
flux resume source bucket <bucket-name>
```
### Debugging a Bucket
There are several ways to gather information about a Bucket for debugging
purposes.
#### Describe the Bucket
Describing a Bucket using `kubectl describe bucket <bucket-name>` displays the
latest recorded information for the resource in the `Status` and `Events`
sections:
```console
...
Status:
...
Conditions:
Last Transition Time: 2022-02-02T13:26:55Z
Message: reconciling new generation 2
Observed Generation: 2
Reason: NewGeneration
Status: True
Type: Reconciling
Last Transition Time: 2022-02-02T13:26:55Z
Message: bucket 'my-new-bucket' does not exist
Observed Generation: 2
Reason: BucketOperationFailed
Status: False
Type: Ready
Last Transition Time: 2022-02-02T13:26:55Z
Message: bucket 'my-new-bucket' does not exist
Observed Generation: 2
Reason: BucketOperationFailed
Status: True
Type: FetchFailed
Observed Generation: 1
URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning BucketOperationFailed 37s (x11 over 42s) source-controller bucket 'my-new-bucket' does not exist
```
#### Trace emitted Events
To view events for specific Bucket(s), `kubectl get events` can be used in
combination with `--field-selector` to list the Events for specific objects.
For example, running
```sh
kubectl get events --field-selector involvedObject.kind=Bucket,involvedObject.name=<bucket-name>
```
lists
```console
LAST SEEN TYPE REASON OBJECT MESSAGE
2m30s Normal BucketOperationSucceed bucket/<bucket-name> downloaded 16 files with revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' from 'my-minio-bucket'
2m30s Normal NewArtifact bucket/<bucket-name> stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
18s Warning BucketOperationFailed bucket/<bucket-name> bucket 'my-new-bucket' does not exist
```
Besides being reported in Events, the reconciliation errors are also logged by
the controller. The Flux CLI offers commands for filtering the logs for a
specific Bucket, e.g. `flux logs --level=error --kind=Bucket --name=<bucket-name>`.
## Bucket Status
### Artifact
The Bucket reports the latest synchronized state from the object storage
bucket as an Artifact object in the `.status.artifact` of the resource.
The Artifact file is a gzip compressed TAR archive
(`<calculated revision>.tar.gz`), and can be retrieved in-cluster from the
`.status.artifact.url` HTTP address.
#### Artifact example
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket
metadata:
name: <bucket-name>
status:
artifact:
checksum: cbec34947cc2f36dee8adcdd12ee62ca6a8a36699fc6e56f6220385ad5bd421a
lastUpdateTime: "2022-01-28T10:30:30Z"
path: bucket/<namespace>/<bucket-name>/c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz
revision: c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
url: http://source-controller.<namespace>.svc.cluster.local./bucket/<namespace>/<bucket-name>/c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz
```
#### Default exclusions
The following files and extensions are excluded from the Artifact by
default:
- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`)
- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`)
- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`)
- CLI configs (`.goreleaser.yml, .sops.yaml`)
- Flux v1 config (`.flux.yaml`)
To define your own exclusion rules, see [excluding files](#excluding-files).
### Conditions
A Bucket enters various states during its lifecycle, reflected as
[Kubernetes Conditions][typical-status-properties].
It can be [reconciling](#reconciling-bucket) while fetching storage objects,
it can be [ready](#ready-bucket), or it can [fail during
reconciliation](#failed-bucket).
The Bucket API is compatible with the [kstatus specification][kstatus-spec],
and reports `Reconciling` and `Stalled` conditions where applicable to
provide better (timeout) support to solutions polling the Bucket to become
`Ready`.
#### Reconciling Bucket
The source-controller marks a Bucket as _reconciling_ when one of the following
is true:
- There is no current Artifact for the Bucket, or the reported Artifact is
determined to have disappeared from the storage.
- The generation of the Bucket is newer than the [Observed Generation](#observed-generation).
- The newly calculated Artifact revision differs from the current Artifact.
When the Bucket is "reconciling", the `Ready` Condition status becomes `False`,
and the controller adds a Condition with the following attributes to the
Bucket's `.status.conditions`:
- `type: Reconciling`
- `status: "True"`
- `reason: NewGeneration` | `reason: NoArtifact` | `reason: NewRevision`
If the reconciling state is due to a new revision, an additional Condition is
added with the following attributes:
- `type: ArtifactOutdated`
- `status: "True"`
- `reason: NewRevision`
Both Conditions have a ["negative polarity"][typical-status-properties],
and are only present on the Bucket while their status value is `"True"`.
#### Ready Bucket
The source-controller marks a Bucket as _ready_ when it has the following
characteristics:
- The Bucket reports an [Artifact](#artifact).
- The reported Artifact exists in the controller's Artifact storage.
- The Bucket was able to communicate with the Bucket's object storage endpoint
using the current spec.
- The revision of the reported Artifact is up-to-date with the latest
calculated revision of the object storage bucket.
When the Bucket is "ready", the controller sets a Condition with the following
attributes in the Bucket's `.status.conditions`:
- `type: Ready`
- `status: "True"`
- `reason: Succeeded`
This `Ready` Condition will retain a status value of `"True"` until the Bucket
is marked as [reconciling](#reconciling-bucket), or e.g. a
[transient error](#failed-bucket) occurs due to a temporary network issue.
#### Failed Bucket
The source-controller may get stuck trying to produce an Artifact for a Bucket
without completing. This can occur due to some of the following factors:
- The object storage [Endpoint](#endpoint) is temporarily unavailable.
- The specified object storage bucket does not exist.
- The [Secret reference](#secret-reference) contains a reference to a
non-existing Secret.
- The credentials in the referenced Secret are invalid.
- The Bucket spec contains a generic misconfiguration.
When this happens, the controller sets the `Ready` Condition status to `False`,
and adds a Condition with the following attributes to the Bucket's
`.status.conditions`:
- `type: FetchFailed`
- `status: "True"`
- `reason: AuthenticationFailed` | `reason: BucketOperationFailed`
This condition has a ["negative polarity"][typical-status-properties],
and is only present on the Bucket while the status value is `"True"`.
While the Bucket has this Condition, the controller will continue to attempt
to produce an Artifact for the resource with an exponential backoff, until
it succeeds and the Bucket is marked as [ready](#ready-bucket).
Note that a Bucket can be [reconciling](#reconciling-bucket) while failing at
the same time, for example due to a newly introduced configuration issue in the
Bucket spec.
### Observed Generation
The source-controller reports an
[observed generation][typical-status-properties]
in the Bucket's `.status.observedGeneration`. The observed generation is the
latest `.metadata.generation` which resulted in either a
[ready state](#ready-bucket), or stalled due to an error it can not recover
from without human intervention.
### Last Handled Reconcile At
The source-controller reports the last `reconcile.fluxcd.io/requestedAt`
annotation value it acted on in the `.status.lastHandledReconcileAt` field.
For practical information about this field, see [triggering a
reconcile](#triggering-a-reconcile).
[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties
[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus