Document Bucket API v1beta2 spec

Signed-off-by: Hidde Beydals <hello@hidde.co>

parent ba5c0b8500
commit 67d005a65e
@@ -23,32 +23,31 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Artifact represents the output of a Source synchronisation.
// Artifact represents the output of a Source reconciliation.
type Artifact struct {
// Path is the relative file path of this Artifact.
// It can be used to locate the Artifact file in the root of the Artifact
// storage on the local file system of the controller managing the Source.
// Path is the relative file path of the Artifact. It can be used to locate
// the file in the root of the Artifact storage on the local file system of
// the controller managing the Source.
// +required
Path string `json:"path"`

// URL is the HTTP address of this artifact.
// It is used by the consumers of the artifacts to fetch and use the
// artifacts. It is expected to be resolvable from within the cluster.
// URL is the HTTP address of the Artifact as exposed by the controller
// managing the Source. It can be used to retrieve the Artifact for
// consumption, e.g. by another controller applying the Artifact contents.
// +required
URL string `json:"url"`

// Revision is a human readable identifier traceable in the origin source
// system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm
// chart version, etc.
// Revision is a human-readable identifier traceable in the origin source
// system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
// +optional
Revision string `json:"revision"`

// Checksum is the SHA256 checksum of the artifact.
// Checksum is the SHA256 checksum of the Artifact file.
// +optional
Checksum string `json:"checksum"`

// LastUpdateTime is the timestamp corresponding to the last update of this
// artifact.
// LastUpdateTime is the timestamp corresponding to the last update of the
// Artifact.
// +required
LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
}

@@ -63,14 +62,14 @@ func (in *Artifact) HasRevision(revision string) bool {
}

// ArtifactDir returns the artifact dir path in the form of
// <source-kind>/<source-namespace>/<source-name>.
// '<kind>/<namespace>/<name>'.
func ArtifactDir(kind, namespace, name string) string {
kind = strings.ToLower(kind)
return path.Join(kind, namespace, name)
}

// ArtifactPath returns the artifact path in the form of
// <source-kind>/<source-namespace>/<source-name>/<artifact-filename>.
// '<kind>/<namespace>/name>/<filename>'.
func ArtifactPath(kind, namespace, name, filename string) string {
return path.Join(ArtifactDir(kind, namespace, name), filename)
}
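The two path helpers above define the storage layout for all sources. A minimal sketch of how they resolve for a Bucket source; the kind, namespace, name and filename values below are illustrative, not taken from the commit:

package main

import (
	"fmt"

	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
)

func main() {
	// ArtifactDir lowercases the kind before joining the path elements.
	fmt.Println(sourcev1.ArtifactDir("Bucket", "default", "my-bucket"))
	// bucket/default/my-bucket

	// ArtifactPath appends the artifact filename to that directory.
	fmt.Println(sourcev1.ArtifactPath("Bucket", "default", "my-bucket", "latest.tar.gz"))
	// bucket/default/my-bucket/latest.tar.gz
}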
@@ -31,45 +31,53 @@ const (
)

const (
// GenericBucketProvider for any S3 API compatible storage Bucket.
GenericBucketProvider string = "generic"
AmazonBucketProvider string = "aws"
GoogleBucketProvider string = "gcp"
// AmazonBucketProvider for an AWS S3 object storage Bucket.
// Provides support for retrieving credentials from the AWS EC2 service.
AmazonBucketProvider string = "aws"
// GoogleBucketProvider for a Google Cloud Storage Bucket.
// Provides support for authentication using a workload identity.
GoogleBucketProvider string = "gcp"
)

// BucketSpec defines the desired state of an S3 compatible bucket
// BucketSpec specifies the required configuration to produce an Artifact for
// an object storage bucket.
type BucketSpec struct {
// The S3 compatible storage provider name, default ('generic').
// Provider of the object storage bucket.
// Defaults to 'generic', which expects an S3 (API) compatible object
// storage.
// +kubebuilder:validation:Enum=generic;aws;gcp
// +kubebuilder:default:=generic
// +optional
Provider string `json:"provider,omitempty"`

// The bucket name.
// BucketName is the name of the object storage bucket.
// +required
BucketName string `json:"bucketName"`

// The bucket endpoint address.
// Endpoint is the object storage address the BucketName is located at.
// +required
Endpoint string `json:"endpoint"`

// Insecure allows connecting to a non-TLS S3 HTTP endpoint.
// Insecure allows connecting to a non-TLS HTTP Endpoint.
// +optional
Insecure bool `json:"insecure,omitempty"`

// The bucket region.
// Region on the Endpoint the BucketName is located in.
// +optional
Region string `json:"region,omitempty"`

// The name of the secret containing authentication credentials
// SecretRef specifies the Secret containing authentication credentials
// for the Bucket.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

// The interval at which to check for bucket updates.
// Interval at which to check the Endpoint for updates.
// +required
Interval metav1.Duration `json:"interval"`

// The timeout for fetch operations, defaults to 60s.
// Timeout for fetch operations, defaults to 60s.
// +kubebuilder:default="60s"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`

@@ -80,18 +88,21 @@ type BucketSpec struct {
// +optional
Ignore *string `json:"ignore,omitempty"`

// This flag tells the controller to suspend the reconciliation of this source.
// Suspend tells the controller to suspend the reconciliation of this
// Bucket.
// +optional
Suspend bool `json:"suspend,omitempty"`

// AccessFrom defines an Access Control List for allowing cross-namespace references to this object.
// AccessFrom specifies an Access Control List for allowing cross-namespace
// references to this object.
// NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
// +optional
AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
}

// BucketStatus defines the observed state of a bucket
// BucketStatus records the observed state of a Bucket.
type BucketStatus struct {
// ObservedGeneration is the last observed generation.
// ObservedGeneration is the last observed generation of the Bucket object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`

@@ -99,11 +110,13 @@ type BucketStatus struct {
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`

// URL is the fetch link for the artifact output of the last Bucket sync.
// URL is the dynamic fetch link for the latest Artifact.
// It is provided on a "best effort" basis, and using the precise
// BucketStatus.Artifact data is recommended.
// +optional
URL string `json:"url,omitempty"`

// Artifact represents the output of the last successful Bucket sync.
// Artifact represents the last successful Bucket reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`

@@ -111,12 +124,12 @@ type BucketStatus struct {
}

const (
// BucketOperationSucceededReason represents the fact that the bucket listing and
// fetch operations succeeded.
// BucketOperationSucceededReason signals that the Bucket listing and fetch
// operations succeeded.
BucketOperationSucceededReason string = "BucketOperationSucceeded"

// BucketOperationFailedReason represents the fact that the bucket listing or
// fetch operations failed.
// BucketOperationFailedReason signals that the Bucket listing or fetch
// operations failed.
BucketOperationFailedReason string = "BucketOperationFailed"
)

@@ -135,23 +148,11 @@ func (in Bucket) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}

// GetInterval returns the interval at which the source is reconciled.
// Deprecated: use GetRequeueAfter instead.
func (in Bucket) GetInterval() metav1.Duration {
return in.Spec.Interval
}

// GetArtifact returns the latest artifact from the source if present in the status sub-resource.
func (in *Bucket) GetArtifact() *Artifact {
return in.Status.Artifact
}

// GetStatusConditions returns a pointer to the Status.Conditions slice.
// Deprecated: use GetConditions instead.
func (in *Bucket) GetStatusConditions() *[]metav1.Condition {
return &in.Status.Conditions
}

// +genclient
// +genclient:Namespaced
// +kubebuilder:storageversion

@@ -162,7 +163,7 @@ func (in *Bucket) GetStatusConditions() *[]metav1.Condition {
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""

// Bucket is the Schema for the buckets API
// Bucket is the Schema for the buckets API.
type Bucket struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

@@ -172,9 +173,8 @@ type Bucket struct {
Status BucketStatus `json:"status,omitempty"`
}

// BucketList contains a list of Bucket objects.
// +kubebuilder:object:root=true

// BucketList contains a list of Bucket
type BucketList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
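For orientation, a minimal sketch of a Bucket object built against the Go API defined above; the object name, bucket, endpoint and Secret name are illustrative values, not taken from the commit:

package main

import (
	"time"

	"github.com/fluxcd/pkg/apis/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
)

// exampleBucket returns a Bucket that points at a generic (S3 API compatible)
// endpoint and is checked for updates every five minutes.
func exampleBucket() *sourcev1.Bucket {
	return &sourcev1.Bucket{
		ObjectMeta: metav1.ObjectMeta{Name: "my-bucket", Namespace: "default"},
		Spec: sourcev1.BucketSpec{
			Provider:   sourcev1.GenericBucketProvider,
			BucketName: "artifacts",
			Endpoint:   "minio.example.com",
			Interval:   metav1.Duration{Duration: 5 * time.Minute},
			SecretRef:  &meta.LocalObjectReference{Name: "minio-credentials"},
		},
	}
}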
@@ -19,33 +19,41 @@ package v1beta2
const SourceFinalizer = "finalizers.fluxcd.io"

const (
// ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True.
// ArtifactOutdatedCondition indicates the current Artifact of the Source
// is outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
ArtifactOutdatedCondition string = "ArtifactOutdated"

// SourceVerifiedCondition indicates the integrity of the Source has been verified. If True, the integrity check
// succeeded. If False, it failed. The Condition is only present on the resource if the integrity has been verified.
// SourceVerifiedCondition indicates the integrity of the Source has been
// verified. If True, the integrity check succeeded. If False, it failed.
// The Condition is only present on the resource if the integrity has been
// verified.
SourceVerifiedCondition string = "SourceVerified"

// FetchFailedCondition indicates a transient or persistent fetch failure of an upstream Source.
// If True, observations on the upstream Source revision may be impossible, and the Artifact available for the
// Source may be outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True.
// FetchFailedCondition indicates a transient or persistent fetch failure
// of an upstream Source.
// If True, observations on the upstream Source revision may be impossible,
// and the Artifact available for the Source may be outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
FetchFailedCondition string = "FetchFailed"

// BuildFailedCondition indicates a transient or persistent build failure of a Source's Artifact.
// If True, the Source can be in an ArtifactOutdatedCondition
// BuildFailedCondition indicates a transient or persistent build failure
// of a Source's Artifact.
// If True, the Source can be in an ArtifactOutdatedCondition.
BuildFailedCondition string = "BuildFailed"
)

const (
// URLInvalidReason represents the fact that a given source has an invalid URL.
// URLInvalidReason signals that a given Source has an invalid URL.
URLInvalidReason string = "URLInvalid"

// StorageOperationFailedReason signals a failure caused by a storage operation.
// StorageOperationFailedReason signals a failure caused by a storage
// operation.
StorageOperationFailedReason string = "StorageOperationFailed"

// AuthenticationFailedReason represents the fact that a given secret does not
// have the required fields or the provided credentials do not match.
// AuthenticationFailedReason signals that a Secret does not have the
// required fields, or the provided credentials do not match.
AuthenticationFailedReason string = "AuthenticationFailed"
)
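Because Bucket's Status.Conditions is a plain []metav1.Condition slice (see the BucketStatus hunk above), consumers can inspect these Condition types with the standard apimachinery helpers. A minimal sketch; the helper package and the reportFetchFailure name are assumptions for illustration:

package main

import (
	"fmt"

	apimeta "k8s.io/apimachinery/pkg/api/meta"

	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
)

// reportFetchFailure prints a message when the "abnormal-true" FetchFailed
// condition is present and True on a Bucket.
func reportFetchFailure(obj *sourcev1.Bucket) {
	if apimeta.IsStatusConditionTrue(obj.Status.Conditions, sourcev1.FetchFailedCondition) {
		cond := apimeta.FindStatusCondition(obj.Status.Conditions, sourcev1.FetchFailedCondition)
		fmt.Printf("fetch failed: %s: %s\n", cond.Reason, cond.Message)
	}
}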
@@ -19,26 +19,27 @@ package v1beta2
import (
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)

const (
// SourceIndexKey is the key used for indexing resources
// resources based on their Source.
// SourceIndexKey is the key used for indexing objects based on their
// referenced Source.
SourceIndexKey string = ".metadata.source"
)

// Source interface must be supported by all API types.
// Source is the interface that provides generic access to the Artifact and
// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
// API group.
//
// +k8s:deepcopy-gen=false
type Source interface {
runtime.Object
// GetRequeueAfter returns the duration after which the source must be reconciled again.
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
GetRequeueAfter() time.Duration
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
// GetArtifact returns the latest artifact from the source if present in
// the status sub-resource.
GetArtifact() *Artifact
// GetInterval returns the interval at which the source is updated.
// Deprecated: use GetRequeueAfter instead.
GetInterval() metav1.Duration
}
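The Source interface lets callers treat any source kind uniformly, without knowing the concrete type. A minimal sketch of a consumer; describeSource is an illustrative name, not part of the API:

package main

import (
	"fmt"
	"time"

	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
)

// describeSource works against any kind implementing the Source interface,
// for example a *sourcev1.Bucket.
func describeSource(src sourcev1.Source) {
	if artifact := src.GetArtifact(); artifact != nil {
		fmt.Printf("latest artifact: %s (revision %s)\n", artifact.URL, artifact.Revision)
	}
	next := time.Now().Add(src.GetRequeueAfter())
	fmt.Printf("next reconciliation no later than: %s\n", next.Format(time.RFC3339))
}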
@@ -266,7 +266,7 @@ spec:
name: v1beta2
schema:
openAPIV3Schema:
description: Bucket is the Schema for the buckets API
description: Bucket is the Schema for the buckets API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation

@@ -281,12 +281,13 @@ spec:
metadata:
type: object
spec:
description: BucketSpec defines the desired state of an S3 compatible
bucket
description: BucketSpec specifies the required configuration to produce
an Artifact for an object storage bucket.
properties:
accessFrom:
description: AccessFrom defines an Access Control List for allowing
cross-namespace references to this object.
description: 'AccessFrom specifies an Access Control List for allowing
cross-namespace references to this object. NOTE: Not implemented,
provisional as of https://github.com/fluxcd/flux2/pull/2092'
properties:
namespaceSelectors:
description: NamespaceSelectors is the list of namespace selectors

@@ -312,10 +313,11 @@ spec:
- namespaceSelectors
type: object
bucketName:
description: The bucket name.
description: BucketName is the name of the object storage bucket.
type: string
endpoint:
description: The bucket endpoint address.
description: Endpoint is the object storage address the BucketName
is located at.
type: string
ignore:
description: Ignore overrides the set of excluded patterns in the

@@ -324,25 +326,26 @@ spec:
to find out what those are.
type: string
insecure:
description: Insecure allows connecting to a non-TLS S3 HTTP endpoint.
description: Insecure allows connecting to a non-TLS HTTP Endpoint.
type: boolean
interval:
description: The interval at which to check for bucket updates.
description: Interval at which to check the Endpoint for updates.
type: string
provider:
default: generic
description: The S3 compatible storage provider name, default ('generic').
description: Provider of the object storage bucket. Defaults to 'generic',
which expects an S3 (API) compatible object storage.
enum:
- generic
- aws
- gcp
type: string
region:
description: The bucket region.
description: Region on the Endpoint the BucketName is located in.
type: string
secretRef:
description: The name of the secret containing authentication credentials
for the Bucket.
description: SecretRef specifies the Secret containing authentication
credentials for the Bucket.
properties:
name:
description: Name of the referent.

@@ -351,12 +354,12 @@ spec:
- name
type: object
suspend:
description: This flag tells the controller to suspend the reconciliation
of this source.
description: Suspend tells the controller to suspend the reconciliation
of this Bucket.
type: boolean
timeout:
default: 60s
description: The timeout for fetch operations, defaults to 60s.
description: Timeout for fetch operations, defaults to 60s.
type: string
required:
- bucketName

@@ -366,35 +369,34 @@ spec:
status:
default:
observedGeneration: -1
description: BucketStatus defines the observed state of a bucket
description: BucketStatus records the observed state of a Bucket.
properties:
artifact:
description: Artifact represents the output of the last successful
Bucket sync.
description: Artifact represents the last successful Bucket reconciliation.
properties:
checksum:
description: Checksum is the SHA256 checksum of the artifact.
description: Checksum is the SHA256 checksum of the Artifact file.
type: string
lastUpdateTime:
description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact.
the last update of the Artifact.
format: date-time
type: string
path:
description: Path is the relative file path of this Artifact.
It can be used to locate the Artifact file in the root of the
Artifact storage on the local file system of the controller
managing the Source.
description: Path is the relative file path of the Artifact. It
can be used to locate the file in the root of the Artifact storage
on the local file system of the controller managing the Source.
type: string
revision:
description: Revision is a human readable identifier traceable
description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc.
tag, a Helm chart version, etc.
type: string
url:
description: URL is the HTTP address of this artifact. It is used
by the consumers of the artifacts to fetch and use the artifacts.
It is expected to be resolvable from within the cluster.
description: URL is the HTTP address of the Artifact as exposed
by the controller managing the Source. It can be used to retrieve
the Artifact for consumption, e.g. by another controller applying
the Artifact contents.
type: string
required:
- path

@@ -476,12 +478,14 @@ spec:
be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
description: ObservedGeneration is the last observed generation of
the Bucket object.
format: int64
type: integer
url:
description: URL is the fetch link for the artifact output of the
last Bucket sync.
description: URL is the dynamic fetch link for the latest Artifact.
It is provided on a "best effort" basis, and using the precise BucketStatus.Artifact
data is recommended.
type: string
type: object
type: object

@@ -541,28 +541,28 @@ spec:
repository sync.
properties:
checksum:
description: Checksum is the SHA256 checksum of the artifact.
description: Checksum is the SHA256 checksum of the Artifact file.
type: string
lastUpdateTime:
description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact.
the last update of the Artifact.
format: date-time
type: string
path:
description: Path is the relative file path of this Artifact.
It can be used to locate the Artifact file in the root of the
Artifact storage on the local file system of the controller
managing the Source.
description: Path is the relative file path of the Artifact. It
can be used to locate the file in the root of the Artifact storage
on the local file system of the controller managing the Source.
type: string
revision:
description: Revision is a human readable identifier traceable
description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc.
tag, a Helm chart version, etc.
type: string
url:
description: URL is the HTTP address of this artifact. It is used
by the consumers of the artifacts to fetch and use the artifacts.
It is expected to be resolvable from within the cluster.
description: URL is the HTTP address of the Artifact as exposed
by the controller managing the Source. It can be used to retrieve
the Artifact for consumption, e.g. by another controller applying
the Artifact contents.
type: string
required:
- path

@@ -642,32 +642,33 @@ spec:
description: IncludedArtifacts represents the included artifacts from
the last successful repository sync.
items:
description: Artifact represents the output of a Source synchronisation.
description: Artifact represents the output of a Source reconciliation.
properties:
checksum:
description: Checksum is the SHA256 checksum of the artifact.
description: Checksum is the SHA256 checksum of the Artifact
file.
type: string
lastUpdateTime:
description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact.
the last update of the Artifact.
format: date-time
type: string
path:
description: Path is the relative file path of this Artifact.
It can be used to locate the Artifact file in the root of
the Artifact storage on the local file system of the controller
managing the Source.
description: Path is the relative file path of the Artifact.
It can be used to locate the file in the root of the Artifact
storage on the local file system of the controller managing
the Source.
type: string
revision:
description: Revision is a human readable identifier traceable
description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc.
tag, a Helm chart version, etc.
type: string
url:
description: URL is the HTTP address of this artifact. It is
used by the consumers of the artifacts to fetch and use the
artifacts. It is expected to be resolvable from within the
cluster.
description: URL is the HTTP address of the Artifact as exposed
by the controller managing the Source. It can be used to retrieve
the Artifact for consumption, e.g. by another controller applying
the Artifact contents.
type: string
required:
- path

@@ -420,28 +420,28 @@ spec:
chart sync.
properties:
checksum:
description: Checksum is the SHA256 checksum of the artifact.
description: Checksum is the SHA256 checksum of the Artifact file.
type: string
lastUpdateTime:
description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact.
the last update of the Artifact.
format: date-time
type: string
path:
description: Path is the relative file path of this Artifact.
It can be used to locate the Artifact file in the root of the
Artifact storage on the local file system of the controller
managing the Source.
description: Path is the relative file path of the Artifact. It
can be used to locate the file in the root of the Artifact storage
on the local file system of the controller managing the Source.
type: string
revision:
description: Revision is a human readable identifier traceable
description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc.
tag, a Helm chart version, etc.
type: string
url:
description: URL is the HTTP address of this artifact. It is used
by the consumers of the artifacts to fetch and use the artifacts.
It is expected to be resolvable from within the cluster.
description: URL is the HTTP address of the Artifact as exposed
by the controller managing the Source. It can be used to retrieve
the Artifact for consumption, e.g. by another controller applying
the Artifact contents.
type: string
required:
- path

@@ -346,28 +346,28 @@ spec:
repository sync.
properties:
checksum:
description: Checksum is the SHA256 checksum of the artifact.
description: Checksum is the SHA256 checksum of the Artifact file.
type: string
lastUpdateTime:
description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact.
the last update of the Artifact.
format: date-time
type: string
path:
description: Path is the relative file path of this Artifact.
It can be used to locate the Artifact file in the root of the
Artifact storage on the local file system of the controller
managing the Source.
description: Path is the relative file path of the Artifact. It
can be used to locate the file in the root of the Artifact storage
on the local file system of the controller managing the Source.
type: string
revision:
description: Revision is a human readable identifier traceable
description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc.
tag, a Helm chart version, etc.
type: string
url:
description: URL is the HTTP address of this artifact. It is used
by the consumers of the artifacts to fetch and use the artifacts.
It is expected to be resolvable from within the cluster.
description: URL is the HTTP address of the Artifact as exposed
by the controller managing the Source. It can be used to retrieve
the Artifact for consumption, e.g. by another controller applying
the Artifact contents.
type: string
required:
- path
@@ -59,9 +59,9 @@ import (
"github.com/fluxcd/source-controller/pkg/sourceignore"
)

// bucketReadyConditions contains all the conditions information needed
// for Bucket Ready status conditions summary calculation.
var bucketReadyConditions = summarize.Conditions{
// bucketReadyCondition contains the information required to summarize a
// v1beta2.Bucket Ready Condition.
var bucketReadyCondition = summarize.Conditions{
Target: meta.ReadyCondition,
Owned: []string{
sourcev1.ArtifactOutdatedCondition,

@@ -89,7 +89,7 @@ var bucketReadyConditions = summarize.Conditions{
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch

// BucketReconciler reconciles a Bucket object
// BucketReconciler reconciles a v1beta2.Bucket object.
type BucketReconciler struct {
client.Client
kuberecorder.EventRecorder

@@ -102,9 +102,10 @@ type BucketReconcilerOptions struct {
MaxConcurrentReconciles int
}

// bucketReconcilerFunc is the function type for all the bucket reconciler
// functions.
type bucketReconcilerFunc func(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error)
// bucketReconcileFunc is the function type for all the v1beta2.Bucket
// (sub)reconcile functions. The type implementations are grouped and
// executed serially to perform the complete reconcile of the object.
type bucketReconcileFunc func(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error)

func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error {
return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{})

@@ -151,7 +152,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
defer func() {
summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper)
summarizeOpts := []summarize.Option{
summarize.WithConditions(bucketReadyConditions),
summarize.WithConditions(bucketReadyCondition),
summarize.WithReconcileResult(recResult),
summarize.WithReconcileError(retErr),
summarize.WithIgnoreNotFound(),

@@ -159,7 +160,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
summarize.RecordContextualError,
summarize.RecordReconcileReq,
),
summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}),
summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
}
result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)

@@ -182,7 +183,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
}

// Reconcile actual object
reconcilers := []bucketReconcilerFunc{
reconcilers := []bucketReconcileFunc{
r.reconcileStorage,
r.reconcileSource,
r.reconcileArtifact,

@@ -191,16 +192,14 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
return
}

// reconcile steps iterates through the actual reconciliation tasks for objec,
// it returns early on the first step that returns ResultRequeue or produces an
// error.
func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcilerFunc) (sreconcile.Result, error) {
// reconcile iterates through the gitRepositoryReconcileFunc tasks for the
// object. It returns early on the first call that returns
// reconcile.ResultRequeue, or produces an error.
func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) {
if obj.Generation != obj.Status.ObservedGeneration {
conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation)
}

var artifact sourcev1.Artifact

// Create temp working dir
tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name))
if err != nil {

@@ -209,11 +208,19 @@ func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket,
Reason: sourcev1.StorageOperationFailedReason,
}
}
defer os.RemoveAll(tmpDir)
defer func() {
if err = os.RemoveAll(tmpDir); err != nil {
ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary working directory")
}
}()

// Run the sub-reconcilers and build the result of reconciliation.
var res sreconcile.Result
var resErr error
var (
artifact sourcev1.Artifact

res sreconcile.Result
resErr error
)
for _, rec := range reconcilers {
recResult, err := rec(ctx, obj, &artifact, tmpDir)
// Exit immediately on ResultRequeue.

@@ -233,12 +240,18 @@ func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket,
return res, resErr
}

// reconcileStorage ensures the current state of the storage matches the desired and previously observed state.
// reconcileStorage ensures the current state of the storage matches the
// desired and previously observed state.
//
// All artifacts for the resource except for the current one are garbage collected from the storage.
// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object.
// If the hostname of the URLs on the object do not match the current storage server hostname, they are updated.
func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) {
// All Artifacts for the object except for the current one in the Status are
// garbage collected from the Storage.
// If the Artifact in the Status of the object disappeared from the Storage,
// it is removed from the object.
// If the object does not have an Artifact in its Status, a Reconciling
// condition is added.
// The hostname of any URL in the Status of the object are updated, to ensure
// they match the Storage server hostname of current runtime.
func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket, _ *sourcev1.Artifact, _ string) (sreconcile.Result, error) {
// Garbage collect previous advertised artifact(s) from storage
_ = r.garbageCollect(ctx, obj)

@@ -262,10 +275,11 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B
return sreconcile.ResultSuccess, nil
}

// reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the
// result.
// If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret
// fails, it records v1beta1.FetchFailedCondition=True and returns early.
// reconcileSource fetches the upstream bucket contents with the client for the
// given object's Provider, and returns the result.
// When a SecretRef is defined, it attempts to fetch the Secret before calling
// the provider. If this fails, it records v1beta2.FetchFailedCondition=True on
// the object and returns early.
func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) {
var secret *corev1.Secret
if obj.Spec.SecretRef != nil {

@@ -293,15 +307,18 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu
}
}

// reconcileMinioSource ensures the upstream Minio client compatible bucket can be reached and downloaded from using the
// declared configuration, and observes its state.
// reconcileMinioSource fetches the object storage bucket contents using a
// Minio client constructed with the specified configuration from the
// v1beta2.Bucket.
//
// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into
// account. In case of an error during the download process (including transient errors), it records
// v1beta1.FetchFailedCondition=True and returns early.
// On a successful download, it removes v1beta1.FetchFailedCondition, and compares the current revision of HEAD to
// the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ.
// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata.
// The contents are fetched to the given dir while taking ignore rules into
// account. In case of an error during the fetch process (including transient
// ones), it records v1beta2.FetchFailedCondition=True and returns early.
// When successful, it removes v1beta2.FetchFailedCondition, and compares the
// current calculated revision to the current Artifact on the object. Recording
// v1beta2.ArtifactOutdatedCondition if they differ.
// Lastly, the given Artifact pointer is set to a new Artifact object with the
// available metadata.
func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact,
secret *corev1.Secret, dir string) (sreconcile.Result, error) {
// Build the client with the configuration from the object and secret

@@ -448,15 +465,18 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source
return sreconcile.ResultSuccess, nil
}

// reconcileGCPSource ensures the upstream Google Cloud Storage bucket can be reached and downloaded from using the
// declared configuration, and observes its state.
// reconcileGCPSource fetches the object storage bucket contents using a
// Google Cloud client constructed with the specified configuration from the
// v1beta2.Bucket.
//
// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into
// account. In case of an error during the download process (including transient errors), it records
// v1beta1.DownloadFailedCondition=True and returns early.
// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to
// the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ.
// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata.
// The contents are fetched to the given dir while taking ignore rules into
// account. In case of an error during the fetch process (including transient
// ones), it records v1beta2.FetchFailedCondition=True and returns early.
// When successful, it removes v1beta2.FetchFailedCondition, and compares the
// current calculated revision to the current Artifact on the object. Recording
// v1beta2.ArtifactOutdatedCondition if they differ.
// Lastly, the given Artifact pointer is set to a new Artifact object with the
// available metadata.
func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact,
secret *corev1.Secret, dir string) (sreconcile.Result, error) {
gcpClient, err := r.buildGCPClient(ctx, secret)

@@ -603,13 +623,15 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1
return sreconcile.ResultSuccess, nil
}

// reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the
// given data.
// reconcileArtifact archives a new Artifact to the Storage, if the current
// (Status) data on the object does not match the given.
//
// The inspection of the given data to the object is differed, ensuring any stale observations as
// If the given artifact does not differ from the object's current, it returns early.
// On a successful archive, the artifact in the status of the given object is set, and the symlink in the storage is
// updated to its path.
// The inspection of the given data to the object is differed, ensuring any
// stale observations like v1beta2.ArtifactOutdatedCondition are removed.
// If the given Artifact does not differ from the object's current, it returns
// early.
// On a successful archive, the Artifact in the Status of the object is set,
// and the symlink in the Storage is updated to its path.
func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) {
// Always restore the Ready condition in case it got removed due to a transient error
defer func() {

@@ -686,8 +708,9 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.
return sreconcile.ResultSuccess, nil
}

// reconcileDelete handles the deletion of an object. It first garbage collects all artifacts for the object from the
// artifact storage, if successful, the finalizer is removed from the object.
// reconcileDelete handles the deletion of the object.
// It first garbage collects all Artifacts for the object from the Storage.
// Removing the finalizer from the object if successful.
func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) {
// Garbage collect the resource's artifacts
if err := r.garbageCollect(ctx, obj); err != nil {

@@ -702,9 +725,11 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bu
return sreconcile.ResultEmpty, nil
}

// garbageCollect performs a garbage collection for the given v1beta1.Bucket. It removes all but the current
// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the
// resource.
// garbageCollect performs a garbage collection for the given object.
//
// It removes all but the current Artifact from the Storage, unless the
// deletion timestamp on the object is set. Which will result in the
// removal of all Artifacts for the objects.
func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error {
if !obj.DeletionTimestamp.IsZero() {
if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {

@@ -733,9 +758,9 @@ func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Buc
return nil
}

// buildMinioClient constructs a minio.Client with the data from the given object and Secret.
// It returns an error if the Secret does not have the required fields, or if there is no credential handler
// configured.
// buildMinioClient constructs a minio.Client with the data from the given
// object and Secret. It returns an error if the Secret does not have the
// required fields, or if there is no credential handler configured.
func (r *BucketReconciler) buildMinioClient(obj *sourcev1.Bucket, secret *corev1.Secret) (*minio.Client, error) {
opts := minio.Options{
Region: obj.Spec.Region,

@@ -759,26 +784,27 @@ func (r *BucketReconciler) buildMinioClient(obj *sourcev1.Bucket, secret *corev1
return minio.New(obj.Spec.Endpoint, &opts)
}

// buildGCPClient constructs a gcp.GCPClient with the data from the given Secret.
// It returns an error if the Secret does not have the required field, or if the client construction fails.
// buildGCPClient constructs a gcp.GCPClient with the data from the given
// Secret. It returns an error if the Secret does not pass validation, or if
// the client construction fails.
func (r *BucketReconciler) buildGCPClient(ctx context.Context, secret *corev1.Secret) (*gcp.GCPClient, error) {
var client *gcp.GCPClient
var gClient *gcp.GCPClient
var err error
if secret != nil {
if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil {
return nil, err
}
client, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"]))
gClient, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"]))
if err != nil {
return nil, err
}
} else {
client, err = gcp.NewClient(ctx)
gClient, err = gcp.NewClient(ctx)
if err != nil {
return nil, err
}
}
return client, nil
return gClient, nil
}

// etagIndex is an index of bucket keys and their Etag values.

@@ -803,9 +829,11 @@ func (i etagIndex) Revision() (string, error) {
return fmt.Sprintf("%x", sum.Sum(nil)), nil
}
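The Revision() hunk above shows that a Bucket revision ends up as a hex-encoded digest computed over the fetched object index. A minimal sketch of that idea; the exact hashing scheme here (SHA-256 over sorted key/etag pairs) is an assumption for illustration, not lifted from the commit:

package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// revisionOf sketches how an index of bucket object keys to etags could be
// reduced to a single, reproducible revision string.
func revisionOf(index map[string]string) string {
	keys := make([]string, 0, len(index))
	for k := range index {
		keys = append(keys, k)
	}
	sort.Strings(keys) // assumption: iterate in a stable order

	sum := sha256.New()
	for _, k := range keys {
		fmt.Fprintf(sum, "%s %s\n", k, index[k])
	}
	return fmt.Sprintf("%x", sum.Sum(nil)) // same hex encoding as in the hunk above
}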
// eventLog records event and logs at the same time. This log is different from
// the debug log in the event recorder in the sense that this is a simple log,
// the event recorder debug log contains complete details about the event.
// eventLogf records event and logs at the same time.
//
// This log is different from the debug log in the EventRecorder, in the sense
// that this is a simple log. While the debug log contains complete details
// about the event.
func (r *BucketReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) {
msg := fmt.Sprintf(messageFmt, args...)
// Log and emit event.

@@ -126,7 +126,7 @@ func TestBucketReconciler_Reconcile(t *testing.T) {
}, timeout).Should(BeTrue())

// Check if the object status is valid.
condns := &status.Conditions{NegativePolarity: bucketReadyConditions.NegativePolarity}
condns := &status.Conditions{NegativePolarity: bucketReadyCondition.NegativePolarity}
checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns)
checker.CheckErr(ctx, obj)
@@ -19,7 +19,7 @@ Resource Types:
</li></ul>
<h3 id="source.toolkit.fluxcd.io/v1beta2.Bucket">Bucket
</h3>
<p>Bucket is the Schema for the buckets API</p>
<p>Bucket is the Schema for the buckets API.</p>
<div class="md-typeset__scrollwrap">
<div class="md-typeset__table">
<table>

@@ -83,7 +83,9 @@ string
</td>
<td>
<em>(Optional)</em>
<p>The S3 compatible storage provider name, default (‘generic’).</p>
<p>Provider of the object storage bucket.
Defaults to ‘generic’, which expects an S3 (API) compatible object
storage.</p>
</td>
</tr>
<tr>

@@ -94,7 +96,7 @@ string
</em>
</td>
<td>
<p>The bucket name.</p>
<p>BucketName is the name of the object storage bucket.</p>
</td>
</tr>
<tr>

@@ -105,7 +107,7 @@ string
</em>
</td>
<td>
<p>The bucket endpoint address.</p>
<p>Endpoint is the object storage address the BucketName is located at.</p>
</td>
</tr>
<tr>

@@ -117,7 +119,7 @@ bool
</td>
<td>
<em>(Optional)</em>
<p>Insecure allows connecting to a non-TLS S3 HTTP endpoint.</p>
<p>Insecure allows connecting to a non-TLS HTTP Endpoint.</p>
</td>
</tr>
<tr>

@@ -129,7 +131,7 @@ string
</td>
<td>
<em>(Optional)</em>
<p>The bucket region.</p>
<p>Region on the Endpoint the BucketName is located in.</p>
</td>
</tr>
<tr>

@@ -143,7 +145,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</td>
<td>
<em>(Optional)</em>
<p>The name of the secret containing authentication credentials
<p>SecretRef specifies the Secret containing authentication credentials
for the Bucket.</p>
</td>
</tr>

@@ -157,7 +159,7 @@ Kubernetes meta/v1.Duration
</em>
</td>
<td>
<p>The interval at which to check for bucket updates.</p>
<p>Interval at which to check the Endpoint for updates.</p>
</td>
</tr>
<tr>

@@ -171,7 +173,7 @@ Kubernetes meta/v1.Duration
</td>
<td>
<em>(Optional)</em>
<p>The timeout for fetch operations, defaults to 60s.</p>
<p>Timeout for fetch operations, defaults to 60s.</p>
</td>
</tr>
<tr>

@@ -197,7 +199,8 @@ bool
</td>
<td>
<em>(Optional)</em>
<p>This flag tells the controller to suspend the reconciliation of this source.</p>
<p>Suspend tells the controller to suspend the reconciliation of this
Bucket.</p>
</td>
</tr>
<tr>

@@ -211,7 +214,9 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
</td>
<td>
<em>(Optional)</em>
<p>AccessFrom defines an Access Control List for allowing cross-namespace references to this object.</p>
<p>AccessFrom specifies an Access Control List for allowing cross-namespace
references to this object.
NOTE: Not implemented, provisional as of <a href="https://github.com/fluxcd/flux2/pull/2092">https://github.com/fluxcd/flux2/pull/2092</a></p>
</td>
</tr>
</table>

@@ -854,7 +859,7 @@ HelmRepositoryStatus
<a href="#source.toolkit.fluxcd.io/v1beta2.HelmChartStatus">HelmChartStatus</a>,
<a href="#source.toolkit.fluxcd.io/v1beta2.HelmRepositoryStatus">HelmRepositoryStatus</a>)
</p>
<p>Artifact represents the output of a Source synchronisation.</p>
<p>Artifact represents the output of a Source reconciliation.</p>
<div class="md-typeset__scrollwrap">
<div class="md-typeset__table">
<table>

@@ -873,9 +878,9 @@ string
</em>
</td>
<td>
<p>Path is the relative file path of this Artifact.
It can be used to locate the Artifact file in the root of the Artifact
storage on the local file system of the controller managing the Source.</p>
<p>Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.</p>
</td>
</tr>
<tr>

@@ -886,9 +891,9 @@ string
</em>
</td>
<td>
<p>URL is the HTTP address of this artifact.
It is used by the consumers of the artifacts to fetch and use the
artifacts. It is expected to be resolvable from within the cluster.</p>
<p>URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.</p>
</td>
</tr>
<tr>

@@ -900,9 +905,8 @@ string
</td>
<td>
<em>(Optional)</em>
<p>Revision is a human readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm
chart version, etc.</p>
<p>Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.</p>
</td>
</tr>
<tr>

@@ -914,7 +918,7 @@ string
</td>
<td>
<em>(Optional)</em>
<p>Checksum is the SHA256 checksum of the artifact.</p>
<p>Checksum is the SHA256 checksum of the Artifact file.</p>
</td>
</tr>
<tr>

@@ -927,8 +931,8 @@ Kubernetes meta/v1.Time
</em>
</td>
<td>
<p>LastUpdateTime is the timestamp corresponding to the last update of this
artifact.</p>
<p>LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.</p>
</td>
</tr>
</tbody>

@@ -941,7 +945,8 @@ artifact.</p>
(<em>Appears on:</em>
<a href="#source.toolkit.fluxcd.io/v1beta2.Bucket">Bucket</a>)
</p>
<p>BucketSpec defines the desired state of an S3 compatible bucket</p>
<p>BucketSpec specifies the required configuration to produce an Artifact for
an object storage bucket.</p>
<div class="md-typeset__scrollwrap">
<div class="md-typeset__table">
<table>

@@ -961,7 +966,9 @@ string
</td>
<td>
<em>(Optional)</em>
<p>The S3 compatible storage provider name, default (‘generic’).</p>
<p>Provider of the object storage bucket.
Defaults to ‘generic’, which expects an S3 (API) compatible object
storage.</p>
</td>
</tr>
<tr>

@@ -972,7 +979,7 @@ string
</em>
</td>
<td>
<p>The bucket name.</p>
<p>BucketName is the name of the object storage bucket.</p>
</td>
</tr>
<tr>

@@ -983,7 +990,7 @@ string
</em>
</td>
<td>
<p>The bucket endpoint address.</p>
<p>Endpoint is the object storage address the BucketName is located at.</p>
</td>
</tr>
<tr>

@@ -995,7 +1002,7 @@ bool
</td>
<td>
<em>(Optional)</em>
<p>Insecure allows connecting to a non-TLS S3 HTTP endpoint.</p>
<p>Insecure allows connecting to a non-TLS HTTP Endpoint.</p>
</td>
</tr>
<tr>

@@ -1007,7 +1014,7 @@ string
</td>
<td>
<em>(Optional)</em>
<p>The bucket region.</p>
<p>Region on the Endpoint the BucketName is located in.</p>
</td>
</tr>
<tr>

@@ -1021,7 +1028,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</td>
<td>
<em>(Optional)</em>
<p>The name of the secret containing authentication credentials
<p>SecretRef specifies the Secret containing authentication credentials
for the Bucket.</p>
</td>
</tr>

@@ -1035,7 +1042,7 @@ Kubernetes meta/v1.Duration
</em>
</td>
<td>
<p>The interval at which to check for bucket updates.</p>
<p>Interval at which to check the Endpoint for updates.</p>
</td>
</tr>
<tr>

@@ -1049,7 +1056,7 @@ Kubernetes meta/v1.Duration
</td>
<td>
<em>(Optional)</em>
<p>The timeout for fetch operations, defaults to 60s.</p>
<p>Timeout for fetch operations, defaults to 60s.</p>
</td>
</tr>
<tr>

@@ -1075,7 +1082,8 @@ bool
</td>
<td>
<em>(Optional)</em>
<p>This flag tells the controller to suspend the reconciliation of this source.</p>
<p>Suspend tells the controller to suspend the reconciliation of this
Bucket.</p>
</td>
</tr>
<tr>

@@ -1089,7 +1097,9 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
</td>
<td>
<em>(Optional)</em>
<p>AccessFrom defines an Access Control List for allowing cross-namespace references to this object.</p>
<p>AccessFrom specifies an Access Control List for allowing cross-namespace
references to this object.
NOTE: Not implemented, provisional as of <a href="https://github.com/fluxcd/flux2/pull/2092">https://github.com/fluxcd/flux2/pull/2092</a></p>
</td>
</tr>
</tbody>

@@ -1102,7 +1112,7 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
(<em>Appears on:</em>
<a href="#source.toolkit.fluxcd.io/v1beta2.Bucket">Bucket</a>)
</p>
<p>BucketStatus defines the observed state of a bucket</p>
<p>BucketStatus records the observed state of a Bucket.</p>
<div class="md-typeset__scrollwrap">
<div class="md-typeset__table">
<table>

@@ -1122,7 +1132,7 @@ int64
</td>
<td>
<em>(Optional)</em>
<p>ObservedGeneration is the last observed generation.</p>
<p>ObservedGeneration is the last observed generation of the Bucket object.</p>
</td>
</tr>
<tr>

@@ -1148,7 +1158,9 @@ string
</td>
<td>
<em>(Optional)</em>
<p>URL is the fetch link for the artifact output of the last Bucket sync.</p>
<p>URL is the dynamic fetch link for the latest Artifact.
It is provided on a “best effort” basis, and using the precise
BucketStatus.Artifact data is recommended.</p>
</td>
</tr>
<tr>

@@ -1162,7 +1174,7 @@ Artifact
</td>
<td>
<em>(Optional)</em>
<p>Artifact represents the output of the last successful Bucket sync.</p>
<p>Artifact represents the last successful Bucket reconciliation.</p>
</td>
</tr>
<tr>

@@ -1568,8 +1580,8 @@ Artifact
<td>
<code>includedArtifacts</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1beta2.*github.com/fluxcd/source-controller/api/v1beta2.Artifact">
[]*github.com/fluxcd/source-controller/api/v1beta2.Artifact
<a href="#source.toolkit.fluxcd.io/v1beta2.*./api/v1beta2.Artifact">
[]*./api/v1beta2.Artifact
</a>
</em>
</td>

@@ -2166,7 +2178,10 @@ string
</div>
<h3 id="source.toolkit.fluxcd.io/v1beta2.Source">Source
</h3>
<p>Source interface must be supported by all API types.</p>
<p>Source interface must be supported by all API types.
Source is the interface that provides generic access to the Artifact and
interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
API group.</p>
<div class="admonition note">
<p class="last">This page was automatically generated with <code>gen-crd-api-reference-docs</code></p>
</div>
@ -20,6 +20,7 @@ of the components using them.
|
|||
|
||||
## API Specification
|
||||
|
||||
* [v1beta2](v1beta2/README.md)
|
||||
* [v1beta1](v1beta1/README.md)
|
||||
|
||||
## Implementation
|
||||
|
|
|
@ -0,0 +1,20 @@
|
|||
# source.toolkit.fluxcd.io/v1beta2
|
||||
|
||||
This is the v1beta2 API specification for defining the desired state sources of Kubernetes clusters.
|
||||
|
||||
## Specification
|
||||
|
||||
* Source kinds:
|
||||
+ GitRepository
|
||||
+ HelmRepository
|
||||
+ HelmChart
|
||||
+ [Bucket](buckets.md)
|
||||
|
||||
## Implementation
|
||||
|
||||
* [source-controller](https://github.com/fluxcd/source-controller/)
|
||||
|
||||
## Consumers
|
||||
|
||||
* [kustomize-controller](https://github.com/fluxcd/kustomize-controller/)
|
||||
* [helm-controller](https://github.com/fluxcd/helm-controller/)
|
|
@ -0,0 +1,780 @@
|
|||
# Buckets
|
||||
|
||||
The `Bucket` API defines a Source to produce an Artifact for objects from storage
|
||||
solutions like Amazon S3, Google Cloud Storage buckets, or any other solution
|
||||
with an S3 compatible API such as Minio, Alibaba Cloud OSS and others.
|
||||
|
||||
## Example
|
||||
|
||||
The following is an example of a Bucket. It creates a tarball (`.tar.gz`)
|
||||
Artifact with the fetched objects from an object storage with an S3
|
||||
compatible API (e.g. [Minio](https://min.io)):
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: Bucket
|
||||
metadata:
|
||||
name: minio-bucket
|
||||
namespace: default
|
||||
spec:
|
||||
interval: 5m0s
|
||||
endpoint: minio.example.com
|
||||
insecure: true
|
||||
secretRef:
|
||||
name: minio-bucket-secret
|
||||
bucketName: example
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: minio-bucket-secret
|
||||
namespace: default
|
||||
type: Opaque
|
||||
stringData:
|
||||
accesskey: <access key>
|
||||
secretkey: <secret key>
|
||||
```
|
||||
|
||||
In the above example:
|
||||
|
||||
- A Bucket named `minio-bucket` is created, indicated by the
|
||||
`.metadata.name` field.
|
||||
- The source-controller checks the object storage bucket every five minutes,
|
||||
indicated by the `.spec.interval` field.
|
||||
- It authenticates to the `minio.example.com` endpoint with
|
||||
the static credentials from the `minio-bucket-secret` Secret data, indicated by
|
||||
the `.spec.endpoint` and `.spec.secretRef.name` fields.
|
||||
- A list of object keys and their [etags](https://en.wikipedia.org/wiki/HTTP_ETag)
|
||||
in the `.spec.bucketName` bucket is compiled, while filtering the keys using
|
||||
[default ignore rules](#default-exclusions).
|
||||
- The SHA256 sum of the list is used as Artifact revision, reported
|
||||
in-cluster in the `.status.artifact.revision` field.
|
||||
- When the current Bucket revision differs from the latest calculated revision,
|
||||
all objects are fetched and archived.
|
||||
- The new Artifact is reported in the `.status.artifact` field.
|
||||
|
||||
You can run this example by saving the manifest into `bucket.yaml`, and
|
||||
changing the Bucket and Secret values to target a Minio instance you have
|
||||
control over.
|
||||
|
||||
**Note:** For more advanced examples targeting e.g. Amazon S3 or GCP, see
|
||||
[Provider](#provider).
|
||||
|
||||
1. Apply the resource on the cluster:
|
||||
|
||||
```sh
|
||||
kubectl apply -f bucket.yaml
|
||||
```
|
||||
|
||||
2. Run `kubectl get buckets` to see the Bucket:
|
||||
|
||||
```console
|
||||
NAME ENDPOINT READY STATUS AGE
|
||||
minio-bucket minio.example.com True stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' 34s
|
||||
```
|
||||
|
||||
3. Run `kubectl describe bucket minio-bucket` to see the [Artifact](#artifact)
|
||||
and [Conditions](#conditions) in the Bucket's Status:
|
||||
|
||||
```console
|
||||
...
|
||||
Status:
|
||||
Artifact:
|
||||
Checksum: 72aa638abb455ca5f9ef4825b949fd2de4d4be0a74895bf7ed2338622cd12686
|
||||
Last Update Time: 2022-02-01T23:43:38Z
|
||||
Path: bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz
|
||||
Revision: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
|
||||
URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz
|
||||
Conditions:
|
||||
Last Transition Time: 2022-02-01T23:43:38Z
|
||||
Message: stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
|
||||
Observed Generation: 1
|
||||
Reason: Succeeded
|
||||
Status: True
|
||||
Type: Ready
|
||||
Observed Generation: 1
|
||||
URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal BucketOperationSucceed 43s source-controller downloaded 16 files with revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' from 'my-minio-bucket'
|
||||
Normal NewArtifact 43s source-controller stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
|
||||
```
|
||||
|
||||
## Writing a Bucket spec
|
||||
|
||||
As with all other Kubernetes config, a Bucket needs `apiVersion`, `kind`, and
|
||||
`metadata` fields. The name of a Bucket object must be a valid
|
||||
[DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
|
||||
|
||||
A Bucket also needs a
|
||||
[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status).
|
||||
|
||||
### Provider
|
||||
|
||||
The `.spec.provider` field allows for specifying a Provider to enable provider
|
||||
specific configurations, for example to communicate with a non-S3 compatible
|
||||
API endpoint, or to change the authentication method.
|
||||
|
||||
Supported options are:
|
||||
|
||||
- [Generic](#generic)
|
||||
- [AWS](#aws)
|
||||
- [GCP](#gcp)
|
||||
|
||||
If you do not specify `.spec.provider`, it defaults to `generic`.
|
||||
|
||||
#### Generic
|
||||
|
||||
When a Bucket's `.spec.provider` is set to `generic`, the controller will
|
||||
attempt to communicate with the specified [Endpoint](#endpoint) using the
|
||||
[Minio Client SDK](https://github.com/minio/minio-go), which can communicate
|
||||
with any Amazon S3 compatible object storage (including
|
||||
[GCS](https://cloud.google.com/storage/docs/interoperability),
|
||||
[Wasabi](https://wasabi-support.zendesk.com/hc/en-us/articles/360002079671-How-do-I-use-Minio-Client-with-Wasabi-),
|
||||
and many others).
|
||||
|
||||
The `generic` Provider _requires_ a [Secret reference](#secret-reference) to a
|
||||
Secret with `.data.accesskey` and `.data.secretkey` values, used to
|
||||
authenticate with static credentials.
|
||||
|
||||
The Provider allows for specifying a region the bucket is in using the
|
||||
[`.spec.region` field](#region), if required by the [Endpoint](#endpoint).
|
||||
|
||||
##### Generic example
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: Bucket
|
||||
metadata:
|
||||
name: generic-insecure
|
||||
namespace: default
|
||||
spec:
|
||||
provider: generic
|
||||
interval: 5m0s
|
||||
bucketName: podinfo
|
||||
endpoint: minio.minio.svc.cluster.local:9000
|
||||
timeout: 60s
|
||||
insecure: true
|
||||
secretRef:
|
||||
name: minio-credentials
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: minio-credentials
|
||||
namespace: default
|
||||
type: Opaque
|
||||
data:
|
||||
accesskey: <BASE64>
|
||||
secretkey: <BASE64>
|
||||
```
|
||||
|
||||
#### AWS
|
||||
|
||||
When a Bucket's `.spec.provider` field is set to `aws`, the source-controller
|
||||
will attempt to communicate with the specified [Endpoint](#endpoint) using the
|
||||
[Minio Client SDK](https://github.com/minio/minio-go).
|
||||
|
||||
Without a [Secret reference](#secret-reference), authorization using
|
||||
credentials retrieved from the AWS EC2 service is attempted by default. When
|
||||
a reference is specified, it expects a Secret with `.data.accesskey` and
|
||||
`.data.secretkey` values, used to authenticate with static credentials.
|
||||
|
||||
The Provider allows for specifying the
|
||||
[Amazon AWS Region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions)
|
||||
using the [`.spec.region` field](#region).
|
||||
|
||||
##### AWS EC2 example
|
||||
|
||||
**Note:** On EKS you have to create an [IAM role](#aws-iam-role-example) for
|
||||
the source-controller service account that grants access to the bucket.
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: Bucket
|
||||
metadata:
|
||||
name: aws
|
||||
namespace: default
|
||||
spec:
|
||||
interval: 5m0s
|
||||
provider: aws
|
||||
bucketName: podinfo
|
||||
endpoint: s3.amazonaws.com
|
||||
region: us-east-1
|
||||
timeout: 30s
|
||||
```
|
||||
|
||||
##### AWS IAM role example
|
||||
|
||||
Replace `<bucket-name>` with the specified `.spec.bucketName`.
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Sid": "",
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:GetObject",
|
||||
"Resource": "arn:aws:s3:::<bucket-name>/*"
|
||||
},
|
||||
{
|
||||
"Sid": "",
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:ListBucket",
|
||||
"Resource": "arn:aws:s3:::<bucket-name>"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
##### AWS static auth example
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: Bucket
|
||||
metadata:
|
||||
name: aws
|
||||
namespace: default
|
||||
spec:
|
||||
interval: 5m0s
|
||||
provider: aws
|
||||
bucketName: podinfo
|
||||
endpoint: s3.amazonaws.com
|
||||
region: us-east-1
|
||||
secretRef:
|
||||
name: aws-credentials
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: aws-credentials
|
||||
namespace: default
|
||||
type: Opaque
|
||||
data:
|
||||
accesskey: <BASE64>
|
||||
secretkey: <BASE64>
|
||||
```
|
||||
|
||||
#### GCP
|
||||
|
||||
When a Bucket's `.spec.provider` is set to `gcp`, the source-controller will
|
||||
attempt to communicate with the specified [Endpoint](#endpoint) using the
|
||||
[Google Client SDK](https://github.com/googleapis/google-api-go-client).
|
||||
|
||||
Without a [Secret reference](#secret-reference), authorization using a
|
||||
workload identity is attempted by default. The workload identity is obtained
|
||||
using the `GOOGLE_APPLICATION_CREDENTIALS` environment variable, falling back
|
||||
to the Google Application Credential file in the config directory.
|
||||
When a reference is specified, it expects a Secret with a `.data.serviceaccount`
|
||||
value with a GCP service account JSON file.
|
||||
|
||||
The Provider allows for specifying the
|
||||
[Bucket location](https://cloud.google.com/storage/docs/locations) using the
|
||||
[`.spec.region` field](#region).
|
||||
|
||||
##### GCP example
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: Bucket
|
||||
metadata:
|
||||
name: gcp-workload-identity
|
||||
namespace: default
|
||||
spec:
|
||||
interval: 5m0s
|
||||
provider: gcp
|
||||
bucketName: podinfo
|
||||
endpoint: storage.googleapis.com
|
||||
region: us-east-1
|
||||
timeout: 30s
|
||||
```
|
||||
|
||||
##### GCP static auth example
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: Bucket
|
||||
metadata:
|
||||
name: gcp-secret
|
||||
namespace: default
|
||||
spec:
|
||||
interval: 5m0s
|
||||
provider: gcp
|
||||
bucketName: <bucket-name>
|
||||
endpoint: storage.googleapis.com
|
||||
region: <bucket-region>
|
||||
secretRef:
|
||||
name: gcp-service-account
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: gcp-service-account
|
||||
namespace: default
|
||||
type: Opaque
|
||||
data:
|
||||
serviceaccount: <BASE64>
|
||||
```
|
||||
|
||||
Where the (base64 decoded) value of `.data.serviceaccount` looks like this:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "service_account",
|
||||
"project_id": "example",
|
||||
"private_key_id": "28qwgh3gdf5hj3gb5fj3gsu5yfgh34f45324568hy2",
|
||||
"private_key": "-----BEGIN PRIVATE KEY-----\nHwethgy123hugghhhbdcu6356dgyjhsvgvGFDHYgcdjbvcdhbsx63c\n76tgycfehuhVGTFYfw6t7ydgyVgydheyhuggycuhejwy6t35fthyuhegvcetf\nTFUHGTygghubhxe65ygt6tgyedgy326hucyvsuhbhcvcsjhcsjhcsvgdtHFCGi\nHcye6tyyg3gfyuhchcsbhygcijdbhyyTF66tuhcevuhdcbhuhhvftcuhbh3uh7t6y\nggvftUHbh6t5rfthhuGVRtfjhbfcrd5r67yuhuvgFTYjgvtfyghbfcdrhyjhbfctfdfyhvfg\ntgvggtfyghvft6tugvTF5r66tujhgvfrtyhhgfct6y7ytfr5ctvghbhhvtghhjvcttfycf\nffxfghjbvgcgyt67ujbgvctfyhVC7uhvgcyjvhhjvyujc\ncgghgvgcfhgg765454tcfthhgftyhhvvyvvffgfryyu77reredswfthhgfcftycfdrttfhf/\n-----END PRIVATE KEY-----\n",
|
||||
"client_email": "test@example.iam.gserviceaccount.com",
|
||||
"client_id": "32657634678762536746",
|
||||
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
|
||||
"token_uri": "https://oauth2.googleapis.com/token",
|
||||
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
|
||||
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%40podinfo.iam.gserviceaccount.com"
|
||||
}
|
||||
```
|
||||
|
||||
### Interval
|
||||
|
||||
`.spec.interval` is a required field that specifies the interval at which the
|
||||
object storage bucket must be consulted.
|
||||
|
||||
After successfully reconciling a Bucket object, the source-controller requeues
|
||||
the object for inspection after the specified interval. The value must be in a
|
||||
[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration),
|
||||
e.g. `10m0s` to look at the object storage bucket every 10 minutes.
|
||||
|
||||
If the `.metadata.generation` of a resource changes (due to e.g. the apply of a
|
||||
change to the spec), this is handled instantly outside of the interval window.
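
A minimal sketch of the field, with placeholder names and an illustrative
10 minute interval:

```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket
metadata:
  name: <bucket-name>
  namespace: default
spec:
  # Consult the object storage bucket every 10 minutes.
  interval: 10m0s
  endpoint: <endpoint>
  bucketName: <bucket-name>
```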
|
||||
|
||||
### Endpoint
|
||||
|
||||
`.spec.endpoint` is a required field that specifies the HTTP/S object storage
|
||||
endpoint to connect to and fetch objects from. Connecting to an (insecure)
|
||||
HTTP endpoint requires enabling [`.spec.insecure`](#insecure).
|
||||
|
||||
Some endpoints require the specification of a [`.spec.region`](#region),
|
||||
see [Provider](#provider) for more (provider specific) examples.
|
||||
|
||||
### Bucket name
|
||||
|
||||
`.spec.bucketName` is a required field that specifies which object storage
|
||||
bucket on the [Endpoint](#endpoint) objects should be fetched from.
|
||||
|
||||
See [Provider](#provider) for more (provider specific) examples.
|
||||
|
||||
### Region
|
||||
|
||||
`.spec.region` is an optional field to specify the region a
|
||||
[`.spec.bucketName`](#bucket-name) is located in.
|
||||
|
||||
See [Provider](#provider) for more (provider specific) examples.
|
||||
|
||||
### Insecure
|
||||
|
||||
`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP)
|
||||
[endpoint](#endpoint), if set to `true`. The default value is `false`,
|
||||
denying insecure (HTTP) connections.
|
||||
|
||||
### Timeout
|
||||
|
||||
`.spec.timeout` is an optional field to specify a timeout for object storage
|
||||
fetch operations. The value must be in a
|
||||
[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration),
|
||||
e.g. `1m30s` for a timeout of one minute and thirty seconds.
|
||||
The default value is `60s`.
|
||||
|
||||
### Secret reference
|
||||
|
||||
`.spec.secretRef.name` is an optional field to specify a name reference to a
|
||||
Secret in the same namespace as the Bucket, containing authentication
|
||||
credentials for the object storage. For some `.spec.provider` implementations
|
||||
the presence of the field is required, see [Provider](#provider) for more
|
||||
details and examples.
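
As a sketch, a matching Secret for the `generic` provider (reusing the
`minio-credentials` name from the [Generic example](#generic-example) above,
with placeholder values) could be created with `kubectl`:

```sh
# Create a Secret with the accesskey/secretkey data keys expected by the
# generic provider, in the same namespace as the Bucket.
kubectl create secret generic minio-credentials \
  --namespace=default \
  --from-literal=accesskey=<access key> \
  --from-literal=secretkey=<secret key>
```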
|
||||
|
||||
### Ignore
|
||||
|
||||
`.spec.ignore` is an optional field to specify rules in [the `.gitignore`
|
||||
pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Storage
|
||||
objects whose keys match the defined rules are excluded while fetching.
|
||||
|
||||
When specified, `.spec.ignore` overrides the [default exclusion
|
||||
list](#default-exclusions), and may overrule the [`.sourceignore` file
|
||||
exclusions](#sourceignore-file). See [excluding files](#excluding-files)
|
||||
for more information.
|
||||
|
||||
### Suspend
|
||||
|
||||
`.spec.suspend` is an optional field to suspend the reconciliation of a Bucket.
|
||||
When set to `true`, the controller will stop reconciling the Bucket, and changes
|
||||
to the resource or in the object storage bucket will not result in a new
|
||||
Artifact. When the field is set to `false` or removed, it will resume.
|
||||
|
||||
For practical information, see
|
||||
[suspending and resuming](#suspending-and-resuming).
|
||||
|
||||
## Working with Buckets
|
||||
|
||||
### Excluding files
|
||||
|
||||
By default, storage bucket objects which match the [default exclusion
|
||||
rules](#default-exclusions) are excluded while fetching. It is possible to
|
||||
overwrite and/or overrule the default exclusions using a file in the bucket
|
||||
and/or an in-spec set of rules.
|
||||
|
||||
#### `.sourceignore` file
|
||||
|
||||
Excluding files is possible by adding a `.sourceignore` file in the root of the
|
||||
object storage bucket. The `.sourceignore` file follows [the `.gitignore`
|
||||
pattern format](https://git-scm.com/docs/gitignore#_pattern_format), and
|
||||
pattern entries may overrule [default exclusions](#default-exclusions).
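
As an illustration, a `.sourceignore` file at the root of the bucket could
contain patterns like the following (the patterns are examples, not a
prescribed set):

```
# exclude all Markdown files
*.md
# but re-include the top-level README
!/README.md
```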
|
||||
|
||||
#### Ignore spec
|
||||
|
||||
Another option is to define the exclusions within the Bucket spec, using the
|
||||
[`.spec.ignore` field](#ignore). Specified rules override the
|
||||
[default exclusion list](#default-exclusions), and may overrule `.sourceignore`
|
||||
file exclusions.
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: Bucket
|
||||
metadata:
|
||||
name: <bucket-name>
|
||||
spec:
|
||||
ignore: |
|
||||
# exclude all
|
||||
/*
|
||||
# include deploy dir
|
||||
!/deploy
|
||||
# exclude file extensions from deploy dir
|
||||
/deploy/**/*.md
|
||||
/deploy/**/*.txt
|
||||
```
|
||||
|
||||
### Triggering a reconcile
|
||||
|
||||
To manually tell the source-controller to reconcile a Bucket outside of the
|
||||
[specified interval window](#interval), a Bucket can be annotated with
|
||||
`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
|
||||
queues the Bucket for reconciliation if the `<arbitrary value>` differs from
|
||||
the last value the controller acted on, as reported in
|
||||
[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
|
||||
|
||||
Using `kubectl`:
|
||||
|
||||
```sh
|
||||
kubectl annotate --overwrite bucket/<bucket-name> reconcile.fluxcd.io/requestedAt="$(date +%s)"
|
||||
```
|
||||
|
||||
Using `flux`:
|
||||
|
||||
```sh
|
||||
flux reconcile source bucket <bucket-name>
|
||||
```
|
||||
|
||||
### Waiting for `Ready`
|
||||
|
||||
When a change is applied, it is possible to wait for the Bucket to reach a
|
||||
[ready state](#ready-bucket) using `kubectl`:
|
||||
|
||||
```sh
|
||||
kubectl wait bucket/<bucket-name> --for=condition=ready --timeout=1m
|
||||
```
|
||||
|
||||
### Suspending and resuming
|
||||
|
||||
When you find yourself in a situation where you temporarily want to pause the
|
||||
reconciliation of a Bucket, you can suspend it using the [`.spec.suspend`
|
||||
field](#suspend).
|
||||
|
||||
#### Suspend a Bucket
|
||||
|
||||
In your YAML declaration:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: Bucket
|
||||
metadata:
|
||||
name: <bucket-name>
|
||||
spec:
|
||||
suspend: true
|
||||
```
|
||||
|
||||
Using `kubectl`:
|
||||
|
||||
```sh
|
||||
kubectl patch bucket <bucket-name> --type=merge -p '{"spec": {"suspend": true}}'
|
||||
```
|
||||
|
||||
Using `flux`:
|
||||
|
||||
```sh
|
||||
flux suspend source bucket <bucket-name>
|
||||
```
|
||||
|
||||
**Note:** When a Bucket has an Artifact and is suspended, and this Artifact
|
||||
later disappears from the storage due to e.g. the source-controller Pod being
|
||||
evicted from a Node, this will not be reflected in the Bucket's Status until it
|
||||
is resumed.
|
||||
|
||||
#### Resume a Bucket
|
||||
|
||||
In your YAML declaration, comment out (or remove) the field:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: Bucket
|
||||
metadata:
|
||||
name: <bucket-name>
|
||||
spec:
|
||||
# suspend: true
|
||||
```
|
||||
|
||||
**Note:** Setting the field value to `false` has the same effect as removing
|
||||
it, but does not allow for "hot patching" using e.g. `kubectl` while practicing
|
||||
GitOps, as the manually applied patch would be overwritten by the declared
|
||||
state in Git.
|
||||
|
||||
Using `kubectl`:
|
||||
|
||||
```sh
|
||||
kubectl patch bucket <bucket-name> --type=merge -p '{"spec": {"suspend": false}}'
|
||||
```
|
||||
|
||||
Using `flux`:
|
||||
|
||||
```sh
|
||||
flux resume source bucket <bucket-name>
|
||||
```
|
||||
|
||||
### Debugging a Bucket
|
||||
|
||||
There are several ways to gather information about a Bucket for debugging
|
||||
purposes.
|
||||
|
||||
#### Describe the Bucket
|
||||
|
||||
Describing a Bucket using `kubectl describe bucket <bucket-name>` displays the
|
||||
latest recorded information for the resource in the `Status` and `Events`
|
||||
sections:
|
||||
|
||||
```console
|
||||
...
|
||||
Status:
|
||||
...
|
||||
Conditions:
|
||||
Last Transition Time: 2022-02-02T13:26:55Z
|
||||
Message: reconciling new generation 2
|
||||
Observed Generation: 2
|
||||
Reason: NewGeneration
|
||||
Status: True
|
||||
Type: Reconciling
|
||||
Last Transition Time: 2022-02-02T13:26:55Z
|
||||
Message: bucket 'my-new-bucket' does not exist
|
||||
Observed Generation: 2
|
||||
Reason: BucketOperationFailed
|
||||
Status: False
|
||||
Type: Ready
|
||||
Last Transition Time: 2022-02-02T13:26:55Z
|
||||
Message: bucket 'my-new-bucket' does not exist
|
||||
Observed Generation: 2
|
||||
Reason: BucketOperationFailed
|
||||
Status: True
|
||||
Type: FetchFailed
|
||||
Observed Generation: 1
|
||||
URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Warning BucketOperationFailed 37s (x11 over 42s) source-controller bucket 'my-new-bucket' does not exist
|
||||
```
|
||||
|
||||
#### Trace emitted Events
|
||||
|
||||
To view events for specific Bucket(s), `kubectl get events` can be used in
|
||||
combination with `--field-selector` to list the Events for specific objects.
|
||||
For example, running
|
||||
|
||||
```sh
|
||||
kubectl get events --field-selector involvedObject.kind=Bucket,involvedObject.name=<bucket-name>
|
||||
```
|
||||
|
||||
lists
|
||||
|
||||
```console
|
||||
LAST SEEN TYPE REASON OBJECT MESSAGE
|
||||
2m30s Normal BucketOperationSucceed bucket/<bucket-name> downloaded 16 files with revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' from 'my-minio-bucket'
|
||||
2m30s Normal NewArtifact bucket/<bucket-name> stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
|
||||
18s Warning BucketOperationFailed bucket/<bucket-name> bucket 'my-new-bucket' does not exist
|
||||
```
|
||||
|
||||
Besides being reported in Events, the reconciliation errors are also logged by
|
||||
the controller. The Flux CLI offers commands for filtering the logs for a
|
||||
specific Bucket, e.g. `flux logs --level=error --kind=Bucket --name=<bucket-name>`.
|
||||
|
||||
## Bucket Status
|
||||
|
||||
### Artifact
|
||||
|
||||
The Bucket reports the latest synchronized state from the object storage
|
||||
bucket as an Artifact object in the `.status.artifact` of the resource.
|
||||
|
||||
The Artifact file is a gzip compressed TAR archive
|
||||
(`<calculated revision>.tar.gz`), and can be retrieved in-cluster from the
|
||||
`.status.artifact.url` HTTP address.
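
As a sketch, assuming the commands are run from a Pod inside the cluster (the
URL is only resolvable in-cluster) and the placeholders are substituted with
the values reported in `.status.artifact`:

```sh
# Download the Artifact tarball from the address reported in
# .status.artifact.url, then unpack it into a local directory.
curl -sSfL -o artifact.tar.gz \
  "http://source-controller.<namespace>.svc.cluster.local./bucket/<namespace>/<bucket-name>/<revision>.tar.gz"
mkdir -p ./bucket-contents
tar -xzf artifact.tar.gz -C ./bucket-contents
```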
|
||||
|
||||
#### Artifact example
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: source.toolkit.fluxcd.io/v1beta2
|
||||
kind: Bucket
|
||||
metadata:
|
||||
name: <bucket-name>
|
||||
status:
|
||||
artifact:
|
||||
checksum: cbec34947cc2f36dee8adcdd12ee62ca6a8a36699fc6e56f6220385ad5bd421a
|
||||
lastUpdateTime: "2022-01-28T10:30:30Z"
|
||||
path: bucket/<namespace>/<bucket-name>/c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz
|
||||
revision: c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
|
||||
url: http://source-controller.<namespace>.svc.cluster.local./bucket/<namespace>/<bucket-name>/c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz
|
||||
```
|
||||
|
||||
#### Default exclusions
|
||||
|
||||
The following files and extensions are excluded from the Artifact by
|
||||
default:
|
||||
|
||||
- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`)
|
||||
- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`)
|
||||
- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`)
|
||||
- CLI configs (`.goreleaser.yml, .sops.yaml`)
|
||||
- Flux v1 config (`.flux.yaml`)
|
||||
|
||||
To define your own exclusion rules, see [excluding files](#excluding-files).
|
||||
|
||||
### Conditions
|
||||
|
||||
A Bucket enters various states during its lifecycle, reflected as
|
||||
[Kubernetes Conditions][typical-status-properties].
|
||||
It can be [reconciling](#reconciling-bucket) while fetching storage objects,
|
||||
it can be [ready](#ready-bucket), or it can [fail during
|
||||
reconciliation](#failed-bucket).
|
||||
|
||||
The Bucket API is compatible with the [kstatus specification][kstatus-spec],
|
||||
and reports `Reconciling` and `Stalled` conditions where applicable to
|
||||
provide better (timeout) support to solutions polling the Bucket to become
|
||||
`Ready`.
|
||||
|
||||
#### Reconciling Bucket
|
||||
|
||||
The source-controller marks a Bucket as _reconciling_ when one of the following
|
||||
is true:
|
||||
|
||||
- There is no current Artifact for the Bucket, or the reported Artifact is
|
||||
determined to have disappeared from the storage.
|
||||
- The generation of the Bucket is newer than the [Observed Generation](#observed-generation).
|
||||
- The newly calculated Artifact revision differs from the current Artifact.
|
||||
|
||||
When the Bucket is "reconciling", the `Ready` Condition status becomes `False`,
|
||||
and the controller adds a Condition with the following attributes to the
|
||||
Bucket's `.status.conditions`:
|
||||
|
||||
- `type: Reconciling`
|
||||
- `status: "True"`
|
||||
- `reason: NewGeneration` | `reason: NoArtifact` | `reason: NewRevision`
|
||||
|
||||
If the reconciling state is due to a new revision, an additional Condition is
|
||||
added with the following attributes:
|
||||
|
||||
- `type: ArtifactOutdated`
|
||||
- `status: "True"`
|
||||
- `reason: NewRevision`
|
||||
|
||||
Both Conditions have a ["negative polarity"][typical-status-properties],
|
||||
and are only present on the Bucket while their status value is `"True"`.
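
For illustration, the `.status.conditions` of a Bucket reconciling a newly
calculated revision might look roughly like this (the messages and values are
hypothetical):

```yaml
status:
  conditions:
  - type: Reconciling
    status: "True"
    reason: NewRevision
    message: "new upstream revision '<revision>'"
  - type: ArtifactOutdated
    status: "True"
    reason: NewRevision
    message: "new upstream revision '<revision>'"
  - type: Ready
    status: "False"
    reason: NewRevision
    message: "new upstream revision '<revision>'"
```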
|
||||
|
||||
#### Ready Bucket
|
||||
|
||||
The source-controller marks a Bucket as _ready_ when it has the following
|
||||
characteristics:
|
||||
|
||||
- The Bucket reports an [Artifact](#artifact).
|
||||
- The reported Artifact exists in the controller's Artifact storage.
|
||||
- The Bucket was able to communicate with the Bucket's object storage endpoint
|
||||
using the current spec.
|
||||
- The revision of the reported Artifact is up-to-date with the latest
|
||||
calculated revision of the object storage bucket.
|
||||
|
||||
When the Bucket is "ready", the controller sets a Condition with the following
|
||||
attributes in the Bucket's `.status.conditions`:
|
||||
|
||||
- `type: Ready`
|
||||
- `status: "True"`
|
||||
- `reason: Succeeded`
|
||||
|
||||
This `Ready` Condition will retain a status value of `"True"` until the Bucket
|
||||
is marked as [reconciling](#reconciling-bucket), or e.g. a
|
||||
[transient error](#failed-bucket) occurs due to a temporary network issue.
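
As a sketch, the `Ready` Condition of a healthy Bucket could look like this
(values mirror the example output earlier on this page, and are otherwise
illustrative):

```yaml
status:
  conditions:
  - type: Ready
    status: "True"
    reason: Succeeded
    message: "stored artifact for revision '<revision>'"
    observedGeneration: 1
    lastTransitionTime: "2022-02-01T23:43:38Z"
```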
|
||||
|
||||
#### Failed Bucket
|
||||
|
||||
The source-controller may get stuck trying to produce an Artifact for a Bucket
|
||||
without completing. This can occur due to some of the following factors:
|
||||
|
||||
- The object storage [Endpoint](#endpoint) is temporarily unavailable.
|
||||
- The specified object storage bucket does not exist.
|
||||
- The [Secret reference](#secret-reference) contains a reference to a
|
||||
non-existing Secret.
|
||||
- The credentials in the referenced Secret are invalid.
|
||||
- The Bucket spec contains a generic misconfiguration.
|
||||
|
||||
When this happens, the controller sets the `Ready` Condition status to `False`,
|
||||
and adds a Condition with the following attributes to the Bucket's
|
||||
`.status.conditions`:
|
||||
|
||||
- `type: FetchFailed`
|
||||
- `status: "True"`
|
||||
- `reason: AuthenticationFailed` | `reason: BucketOperationFailed`
|
||||
|
||||
This condition has a ["negative polarity"][typical-status-properties],
|
||||
and is only present on the Bucket while the status value is `"True"`.
|
||||
|
||||
While the Bucket has this Condition, the controller will continue to attempt
|
||||
to produce an Artifact for the resource with an exponential backoff, until
|
||||
it succeeds and the Bucket is marked as [ready](#ready-bucket).
|
||||
|
||||
Note that a Bucket can be [reconciling](#reconciling-bucket) while failing at
|
||||
the same time, for example due to a newly introduced configuration issue in the
|
||||
Bucket spec.
|
||||
|
||||
### Observed Generation
|
||||
|
||||
The source-controller reports an
|
||||
[observed generation][typical-status-properties]
|
||||
in the Bucket's `.status.observedGeneration`. The observed generation is the
|
||||
latest `.metadata.generation` which resulted in either a [ready state](#ready-bucket),
|
||||
or a stalled state due to an error it cannot recover from without human
|
||||
intervention.
|
||||
|
||||
### Last Handled Reconcile At
|
||||
|
||||
The source-controller reports the last `reconcile.fluxcd.io/requestedAt`
|
||||
annotation value it acted on in the `.status.lastHandledReconcileAt` field.
|
||||
|
||||
For practical information about this field, see [triggering a
|
||||
reconcile](#triggering-a-reconcile).
|
||||
|
||||
[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties
|
||||
[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus
|