diff --git a/Makefile b/Makefile index 3b0e5b87..bc315f6b 100644 --- a/Makefile +++ b/Makefile @@ -127,7 +127,7 @@ manifests: controller-gen ## Generate manifests, e.g. CRD, RBAC, etc. cd api; $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role paths="./..." output:crd:artifacts:config="../config/crd/bases" api-docs: gen-crd-api-reference-docs ## Generate API reference documentation - $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/source.md + $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta2 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/source.md tidy: ## Run go mod tidy go mod tidy diff --git a/PROJECT b/PROJECT index a807390b..776217e9 100644 --- a/PROJECT +++ b/PROJECT @@ -1,6 +1,18 @@ domain: toolkit.fluxcd.io repo: github.com/fluxcd/source-controller resources: +- group: source + kind: GitRepository + version: v1beta2 +- group: source + kind: HelmRepository + version: v1beta2 +- group: source + kind: HelmChart + version: v1beta2 +- group: source + kind: Bucket + version: v1beta2 - group: source kind: GitRepository version: v1beta1 diff --git a/api/go.mod b/api/go.mod index ce4aef76..46c5284f 100644 --- a/api/go.mod +++ b/api/go.mod @@ -4,8 +4,8 @@ go 1.17 require ( github.com/fluxcd/pkg/apis/acl v0.0.3 - github.com/fluxcd/pkg/apis/meta v0.10.2 - k8s.io/apimachinery v0.23.1 + github.com/fluxcd/pkg/apis/meta v0.12.0 + k8s.io/apimachinery v0.23.2 sigs.k8s.io/controller-runtime v0.11.0 ) @@ -24,5 +24,5 @@ require ( k8s.io/klog/v2 v2.30.0 // indirect k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect ) diff --git a/api/go.sum b/api/go.sum index ffe31a8a..7267da4d 100644 --- a/api/go.sum +++ b/api/go.sum @@ -123,8 +123,8 
@@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc= github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU= -github.com/fluxcd/pkg/apis/meta v0.10.2 h1:pnDBBEvfs4HaKiVAYgz+e/AQ8dLvcgmVfSeBroZ/KKI= -github.com/fluxcd/pkg/apis/meta v0.10.2/go.mod h1:KQ2er9xa6koy7uoPMZjIjNudB5p4tXs+w0GO6fRcy7I= +github.com/fluxcd/pkg/apis/meta v0.12.0 h1:Ssyltj6E9A7y32sZrzjog0m+bIsFM/3lHHfmpxesUAU= +github.com/fluxcd/pkg/apis/meta v0.12.0/go.mod h1:SPrSWMwDK7Ls2/4GadzhjDjPFbKrzzgzuZ0oDO3jzso= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -897,8 +897,8 @@ k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4= k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= -k8s.io/apimachinery v0.23.1 h1:sfBjlDFwj2onG0Ijx5C+SrAoeUscPrmghm7wHP+uXlo= -k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= +k8s.io/apimachinery v0.23.2 h1:dBmjCOeYBdg2ibcQxMuUq+OopZ9fjfLIR5taP/XKeTs= +k8s.io/apimachinery v0.23.2/go.mod h1:zDqeV0AK62LbCI0CI7KbWCAYdLg+E+8UXJ0rIz5gmS8= k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= @@ -924,8 +924,9 @@ sigs.k8s.io/json 
v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87J sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.0 h1:kDvPBbnPk+qYmkHmSo8vKGp438IASWofnbbUKDE/bv0= sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 33e28bb4..0d5f3de8 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -126,7 +126,13 @@ func BucketProgressing(bucket Bucket) Bucket { bucket.Status.ObservedGeneration = bucket.Generation bucket.Status.URL = "" bucket.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionUnknown, + Reason: meta.ProgressingReason, + Message: "reconciliation in progress", + } + apimeta.SetStatusCondition(bucket.GetStatusConditions(), newCondition) return bucket } @@ -136,14 +142,26 @@ func BucketProgressing(bucket Bucket) Bucket { func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { bucket.Status.Artifact = &artifact bucket.Status.URL = url - 
meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(bucket.GetStatusConditions(), newCondition) return bucket } // BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with // the given reason and message. It returns the modified Bucket. func BucketNotReady(bucket Bucket, reason, message string) Bucket { - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(bucket.GetStatusConditions(), newCondition) return bucket } diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index d38a6873..3cdfab6b 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -196,7 +196,13 @@ func GitRepositoryProgressing(repository GitRepository) GitRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionUnknown, + Reason: meta.ProgressingReason, + Message: "reconciliation in progress", + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -207,7 +213,13 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt repository.Status.Artifact = &artifact repository.Status.IncludedArtifacts = includedArtifacts repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, 
metav1.ConditionTrue, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -215,7 +227,13 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt // to 'False', with the given reason and message. It returns the modified // GitRepository. func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index 51c04781..8d4c0a02 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -152,7 +152,13 @@ func HelmChartProgressing(chart HelmChart) HelmChart { chart.Status.ObservedGeneration = chart.Generation chart.Status.URL = "" chart.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionUnknown, + Reason: meta.ProgressingReason, + Message: "reconciliation in progress", + } + apimeta.SetStatusCondition(chart.GetStatusConditions(), newCondition) return chart } @@ -162,7 +168,13 @@ func HelmChartProgressing(chart HelmChart) HelmChart { func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message string) HelmChart { chart.Status.Artifact = &artifact chart.Status.URL = url - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionTrue, reason, 
message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(chart.GetStatusConditions(), newCondition) return chart } @@ -170,7 +182,13 @@ func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message str // 'False', with the given reason and message. It returns the modified // HelmChart. func HelmChartNotReady(chart HelmChart, reason, message string) HelmChart { - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(chart.GetStatusConditions(), newCondition) return chart } diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index 0af0d4cf..62b0e9a6 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -113,7 +113,13 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionUnknown, + Reason: meta.ProgressingReason, + Message: "reconciliation in progress", + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -123,7 +129,13 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reason, message string) HelmRepository { repository.Status.Artifact = &artifact repository.Status.URL = url - meta.SetResourceCondition(&repository, 
meta.ReadyCondition, metav1.ConditionTrue, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionTrue, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } @@ -131,7 +143,13 @@ func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reas // HelmRepository to 'False', with the given reason and message. It returns the // modified HelmRepository. func HelmRepositoryNotReady(repository HelmRepository, reason, message string) HelmRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + newCondition := metav1.Condition{ + Type: meta.ReadyCondition, + Status: metav1.ConditionFalse, + Reason: reason, + Message: message, + } + apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition) return repository } diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index d5e4f489..3fd54793 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -2,7 +2,7 @@ // +build !ignore_autogenerated /* -Copyright 2020 The Flux authors +Copyright 2022 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/api/v1beta2/artifact_types.go b/api/v1beta2/artifact_types.go new file mode 100644 index 00000000..363f79b1 --- /dev/null +++ b/api/v1beta2/artifact_types.go @@ -0,0 +1,76 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "path" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Artifact represents the output of a Source synchronisation. +type Artifact struct { + // Path is the relative file path of this Artifact. + // It can be used to locate the Artifact file in the root of the Artifact + // storage on the local file system of the controller managing the Source. + // +required + Path string `json:"path"` + + // URL is the HTTP address of this artifact. + // It is used by the consumers of the artifacts to fetch and use the + // artifacts. It is expected to be resolvable from within the cluster. + // +required + URL string `json:"url"` + + // Revision is a human readable identifier traceable in the origin source + // system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm + // chart version, etc. + // +optional + Revision string `json:"revision"` + + // Checksum is the SHA256 checksum of the artifact. + // +optional + Checksum string `json:"checksum"` + + // LastUpdateTime is the timestamp corresponding to the last update of this + // artifact. + // +required + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` +} + +// HasRevision returns true if the given revision matches the current Revision +// of the Artifact. +func (in *Artifact) HasRevision(revision string) bool { + if in == nil { + return false + } + return in.Revision == revision +} + +// ArtifactDir returns the artifact dir path in the form of +// //. 
+func ArtifactDir(kind, namespace, name string) string { + kind = strings.ToLower(kind) + return path.Join(kind, namespace, name) +} + +// ArtifactPath returns the artifact path in the form of +// ///. +func ArtifactPath(kind, namespace, name, filename string) string { + return path.Join(ArtifactDir(kind, namespace, name), filename) +} diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go new file mode 100644 index 00000000..42c23b48 --- /dev/null +++ b/api/v1beta2/bucket_types.go @@ -0,0 +1,186 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // BucketKind is the string representation of a Bucket. + BucketKind = "Bucket" +) + +const ( + GenericBucketProvider string = "generic" + AmazonBucketProvider string = "aws" + GoogleBucketProvider string = "gcp" +) + +// BucketSpec defines the desired state of an S3 compatible bucket +type BucketSpec struct { + // The S3 compatible storage provider name, default ('generic'). + // +kubebuilder:validation:Enum=generic;aws;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` + + // The bucket name. + // +required + BucketName string `json:"bucketName"` + + // The bucket endpoint address. 
+ // +required + Endpoint string `json:"endpoint"` + + // Insecure allows connecting to a non-TLS S3 HTTP endpoint. + // +optional + Insecure bool `json:"insecure,omitempty"` + + // The bucket region. + // +optional + Region string `json:"region,omitempty"` + + // The name of the secret containing authentication credentials + // for the Bucket. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // The interval at which to check for bucket updates. + // +required + Interval metav1.Duration `json:"interval"` + + // The timeout for fetch operations, defaults to 60s. + // +kubebuilder:default="60s" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // This flag tells the controller to suspend the reconciliation of this source. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` +} + +// BucketStatus defines the observed state of a bucket +type BucketStatus struct { + // ObservedGeneration is the last observed generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the Bucket. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the fetch link for the artifact output of the last Bucket sync. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful Bucket sync. 
+ // +optional + Artifact *Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // BucketOperationSucceededReason represents the fact that the bucket listing and + // fetch operations succeeded. + BucketOperationSucceededReason string = "BucketOperationSucceeded" + + // BucketOperationFailedReason represents the fact that the bucket listing or + // fetch operations failed. + BucketOperationFailedReason string = "BucketOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in Bucket) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *Bucket) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in Bucket) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. +func (in Bucket) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *Bucket) GetArtifact() *Artifact { + return in.Status.Artifact +} + +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. 
+func (in *Bucket) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + +// +genclient +// +genclient:Namespaced +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// Bucket is the Schema for the buckets API +type Bucket struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BucketSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status BucketStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketList contains a list of Bucket +type BucketList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Bucket `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Bucket{}, &BucketList{}) +} diff --git a/api/v1beta2/condition_types.go b/api/v1beta2/condition_types.go new file mode 100644 index 00000000..1e6ff992 --- /dev/null +++ b/api/v1beta2/condition_types.go @@ -0,0 +1,51 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +const SourceFinalizer = "finalizers.fluxcd.io" + +const ( + // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactOutdatedCondition string = "ArtifactOutdated" + + // SourceVerifiedCondition indicates the integrity of the Source has been verified. If True, the integrity check + // succeeded. If False, it failed. The Condition is only present on the resource if the integrity has been verified. + SourceVerifiedCondition string = "SourceVerified" + + // FetchFailedCondition indicates a transient or persistent fetch failure of an upstream Source. + // If True, observations on the upstream Source revision may be impossible, and the Artifact available for the + // Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + FetchFailedCondition string = "FetchFailed" + + // BuildFailedCondition indicates a transient or persistent build failure of a Source's Artifact. + // If True, the Source can be in an ArtifactOutdatedCondition + BuildFailedCondition string = "BuildFailed" +) + +const ( + // URLInvalidReason represents the fact that a given source has an invalid URL. + URLInvalidReason string = "URLInvalid" + + // StorageOperationFailedReason signals a failure caused by a storage operation. + StorageOperationFailedReason string = "StorageOperationFailed" + + // AuthenticationFailedReason represents the fact that a given secret does not + // have the required fields or the provided credentials do not match. 
+ AuthenticationFailedReason string = "AuthenticationFailed" +) diff --git a/api/v1beta2/doc.go b/api/v1beta2/doc.go new file mode 100644 index 00000000..e9fca165 --- /dev/null +++ b/api/v1beta2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta2 contains API Schema definitions for the source v1beta2 API group +// +kubebuilder:object:generate=true +// +groupName=source.toolkit.fluxcd.io +package v1beta2 diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go new file mode 100644 index 00000000..8910cd17 --- /dev/null +++ b/api/v1beta2/gitrepository_types.go @@ -0,0 +1,257 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta2 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // GitRepositoryKind is the string representation of a GitRepository. + GitRepositoryKind = "GitRepository" + + // GoGitImplementation represents the go-git Git implementation kind. + GoGitImplementation = "go-git" + // LibGit2Implementation represents the git2go Git implementation kind. + LibGit2Implementation = "libgit2" +) + +const ( + // IncludeUnavailableCondition indicates one of the includes is not available. For example, because it does not + // exist, or does not have an Artifact. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + IncludeUnavailableCondition string = "IncludeUnavailable" +) + +// GitRepositorySpec defines the desired state of a Git repository. +type GitRepositorySpec struct { + // The repository URL, can be a HTTP/S or SSH address. + // +kubebuilder:validation:Pattern="^(http|https|ssh)://" + // +required + URL string `json:"url"` + + // The secret name containing the Git credentials. + // For HTTPS repositories the secret must contain username and password fields. + // For SSH repositories the secret must contain 'identity', 'identity.pub' and 'known_hosts' fields. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // The interval at which to check for repository updates. + // +required + Interval metav1.Duration `json:"interval"` + + // The timeout for remote Git operations like cloning, defaults to 60s. + // +kubebuilder:default="60s" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // The Git reference to checkout and monitor for changes, defaults to + // master branch. 
+ // +optional + Reference *GitRepositoryRef `json:"ref,omitempty"` + + // Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to. + // +optional + Verification *GitRepositoryVerification `json:"verify,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). + // If not provided, a default will be used, consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this source. + // This flag tells the controller to suspend the reconciliation of this source. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // Determines which git client library to use. + // Defaults to go-git, valid values are ('go-git', 'libgit2'). + // +kubebuilder:validation:Enum=go-git;libgit2 + // +kubebuilder:default:=go-git + // +optional + GitImplementation string `json:"gitImplementation,omitempty"` + + // When enabled, after the clone is created, initializes all submodules within, using their default settings. + // This option is available only when using the 'go-git' GitImplementation. + // +optional + RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` + + // Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for + // this resource. + Include []GitRepositoryInclude `json:"include,omitempty"` + + // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` +} + +func (in *GitRepositoryInclude) GetFromPath() string { + return in.FromPath +} + +func (in *GitRepositoryInclude) GetToPath() string { + if in.ToPath == "" { + return in.GitRepositoryRef.Name + } + return in.ToPath +} + +// GitRepositoryInclude defines a source with a from and to path. 
+type GitRepositoryInclude struct { + // Reference to a GitRepository to include. + GitRepositoryRef meta.LocalObjectReference `json:"repository"` + + // The path to copy contents from, defaults to the root directory. + // +optional + FromPath string `json:"fromPath"` + + // The path to copy contents to, defaults to the name of the source ref. + // +optional + ToPath string `json:"toPath"` +} + +// GitRepositoryRef defines the Git ref used for pull and checkout operations. +type GitRepositoryRef struct { + // The Git branch to checkout, defaults to master. + // +optional + Branch string `json:"branch,omitempty"` + + // The Git tag to checkout, takes precedence over Branch. + // +optional + Tag string `json:"tag,omitempty"` + + // The Git tag semver expression, takes precedence over Tag. + // +optional + SemVer string `json:"semver,omitempty"` + + // The Git commit SHA to checkout, if specified Tag filters will be ignored. + // +optional + Commit string `json:"commit,omitempty"` +} + +// GitRepositoryVerification defines the OpenPGP signature verification process. +type GitRepositoryVerification struct { + // Mode describes what Git object should be verified, currently ('head'). + // +kubebuilder:validation:Enum=head + Mode string `json:"mode"` + + // SecretRef containing the public keys of all trusted Git authors. + SecretRef meta.LocalObjectReference `json:"secretRef,omitempty"` +} + +// GitRepositoryStatus defines the observed state of a Git repository. +type GitRepositoryStatus struct { + // ObservedGeneration is the last observed generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the GitRepository. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the fetch link for the artifact output of the last repository sync. 
+ // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful repository sync. + // +optional + Artifact *Artifact `json:"artifact,omitempty"` + + // IncludedArtifacts represents the included artifacts from the last successful repository sync. + // +optional + IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // GitOperationSucceedReason represents the fact that the git clone, pull and checkout operations succeeded. + GitOperationSucceedReason string = "GitOperationSucceed" + + // GitOperationFailedReason represents the fact that the git clone, pull or checkout operations failed. + GitOperationFailedReason string = "GitOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in GitRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *GitRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in GitRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. +func (in GitRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *GitRepository) GetArtifact() *Artifact { + return in.Status.Artifact +} + +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. 
+func (in *GitRepository) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + +// +genclient +// +genclient:Namespaced +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=gitrepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// GitRepository is the Schema for the gitrepositories API +type GitRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec GitRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status GitRepositoryStatus `json:"status,omitempty"` +} + +// GitRepositoryList contains a list of GitRepository +// +kubebuilder:object:root=true +type GitRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []GitRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&GitRepository{}, &GitRepositoryList{}) +} diff --git a/api/v1beta2/groupversion_info.go b/api/v1beta2/groupversion_info.go new file mode 100644 index 00000000..797e6c53 --- /dev/null +++ b/api/v1beta2/groupversion_info.go @@ -0,0 +1,33 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "source.toolkit.fluxcd.io", Version: "v1beta2"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/v1beta2/helmchart_types.go b/api/v1beta2/helmchart_types.go new file mode 100644 index 00000000..6abe8ed2 --- /dev/null +++ b/api/v1beta2/helmchart_types.go @@ -0,0 +1,237 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +// HelmChartKind is the string representation of a HelmChart. 
+const HelmChartKind = "HelmChart" + +// HelmChartSpec defines the desired state of a Helm chart. +type HelmChartSpec struct { + // The name or path the Helm chart is available at in the SourceRef. + // +required + Chart string `json:"chart"` + + // The chart version semver expression, ignored for charts from GitRepository + // and Bucket sources. Defaults to latest when omitted. + // +kubebuilder:default:=* + // +optional + Version string `json:"version,omitempty"` + + // The reference to the Source the chart is available at. + // +required + SourceRef LocalHelmChartSourceReference `json:"sourceRef"` + + // The interval at which to check the Source for updates. + // +required + Interval metav1.Duration `json:"interval"` + + // Determines what enables the creation of a new artifact. Valid values are + // ('ChartVersion', 'Revision'). + // See the documentation of the values for an explanation on their behavior. + // Defaults to ChartVersion when omitted. + // +kubebuilder:validation:Enum=ChartVersion;Revision + // +kubebuilder:default:=ChartVersion + // +optional + ReconcileStrategy string `json:"reconcileStrategy,omitempty"` + + // Alternative list of values files to use as the chart values (values.yaml + // is not included by default), expected to be a relative path in the SourceRef. + // Values files are merged in the order of this list with the last file overriding + // the first. Ignored when omitted. + // +optional + ValuesFiles []string `json:"valuesFiles,omitempty"` + + // Alternative values file to use as the default chart values, expected to + // be a relative path in the SourceRef. Deprecated in favor of ValuesFiles, + // for backwards compatibility the file defined here is merged before the + // ValuesFiles items. Ignored when omitted. + // +optional + // +deprecated + ValuesFile string `json:"valuesFile,omitempty"` + + // This flag tells the controller to suspend the reconciliation of this source. 
+ // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` +} + +const ( + // ReconcileStrategyChartVersion reconciles when the version of the Helm chart is different. + ReconcileStrategyChartVersion string = "ChartVersion" + + // ReconcileStrategyRevision reconciles when the Revision of the source is different. + ReconcileStrategyRevision string = "Revision" +) + +// LocalHelmChartSourceReference contains enough information to let you locate +// the typed referenced object at namespace level. +type LocalHelmChartSourceReference struct { + // APIVersion of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + + // Kind of the referent, valid values are ('HelmRepository', 'GitRepository', + // 'Bucket'). + // +kubebuilder:validation:Enum=HelmRepository;GitRepository;Bucket + // +required + Kind string `json:"kind"` + + // Name of the referent. + // +required + Name string `json:"name"` +} + +// HelmChartStatus defines the observed state of the HelmChart. +type HelmChartStatus struct { + // ObservedGeneration is the last observed generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // ObservedSourceArtifactRevision is the last observed Artifact.Revision + // of the Source reference. + // +optional + ObservedSourceArtifactRevision string `json:"observedSourceArtifactRevision,omitempty"` + + // ObservedChartName is the last observed chart name as defined by the + // resolved chart reference. + // +optional + ObservedChartName string `json:"observedChartName,omitempty"` + + // Conditions holds the conditions for the HelmChart. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the fetch link for the last chart pulled. 
+ // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful chart sync. + // +optional + Artifact *Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // ChartPullFailedReason represents the fact that the pull of the Helm chart + // failed. + ChartPullFailedReason string = "ChartPullFailed" + + // ChartPullSucceededReason represents the fact that the pull of the Helm chart + // succeeded. + ChartPullSucceededReason string = "ChartPullSucceeded" + + // ChartPackageFailedReason represent the fact that the package of the Helm + // chart failed. + ChartPackageFailedReason string = "ChartPackageFailed" + + // ChartPackageSucceededReason represents the fact that the package of the Helm + // chart succeeded. + ChartPackageSucceededReason string = "ChartPackageSucceeded" +) + +// GetConditions returns the status conditions of the object. +func (in HelmChart) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmChart) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in HelmChart) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. +func (in HelmChart) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *HelmChart) GetArtifact() *Artifact { + return in.Status.Artifact +} + +// GetValuesFiles returns a merged list of ValuesFiles. 
+func (in *HelmChart) GetValuesFiles() []string { + valuesFiles := in.Spec.ValuesFiles + + // Prepend the deprecated ValuesFile to the list + if in.Spec.ValuesFile != "" { + valuesFiles = append([]string{in.Spec.ValuesFile}, valuesFiles...) + } + return valuesFiles +} + +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. +func (in *HelmChart) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + +// +genclient +// +genclient:Namespaced +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=hc +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart` +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version` +// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind` +// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// HelmChart is the Schema for the helmcharts API +type HelmChart struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmChartSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status HelmChartStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HelmChartList contains a list of HelmChart +type HelmChartList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmChart `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmChart{}, 
&HelmChartList{}) +} diff --git a/api/v1beta2/helmrepository_types.go b/api/v1beta2/helmrepository_types.go new file mode 100644 index 00000000..6a9bea96 --- /dev/null +++ b/api/v1beta2/helmrepository_types.go @@ -0,0 +1,172 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // HelmRepositoryKind is the string representation of a HelmRepository. + HelmRepositoryKind = "HelmRepository" + // HelmRepositoryURLIndexKey is the key to use for indexing HelmRepository + // resources by their HelmRepositorySpec.URL. + HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL" +) + +// HelmRepositorySpec defines the reference to a Helm repository. +type HelmRepositorySpec struct { + // The Helm repository URL, a valid URL contains at least a protocol and host. + // +required + URL string `json:"url"` + + // The name of the secret containing authentication credentials for the Helm + // repository. + // For HTTP/S basic auth the secret must contain username and + // password fields. + // For TLS the secret must contain a certFile and keyFile, and/or + // caCert fields. 
+ // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // PassCredentials allows the credentials from the SecretRef to be passed on to + // a host that does not match the host as defined in URL. + // This may be required if the host of the advertised chart URLs in the index + // differ from the defined URL. + // Enabling this should be done with caution, as it can potentially result in + // credentials getting stolen in a MITM-attack. + // +optional + PassCredentials bool `json:"passCredentials,omitempty"` + + // The interval at which to check the upstream for updates. + // +required + Interval metav1.Duration `json:"interval"` + + // The timeout of index fetching, defaults to 60s. + // +kubebuilder:default:="60s" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // This flag tells the controller to suspend the reconciliation of this source. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` +} + +// HelmRepositoryStatus defines the observed state of the HelmRepository. +type HelmRepositoryStatus struct { + // ObservedGeneration is the last observed generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the HelmRepository. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the fetch link for the last index fetched. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful repository sync. + // +optional + Artifact *Artifact `json:"artifact,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // IndexationFailedReason represents the fact that the indexation of the given + // Helm repository failed. 
+ IndexationFailedReason string = "IndexationFailed" + + // IndexationSucceededReason represents the fact that the indexation of the + // given Helm repository succeeded. + IndexationSucceededReason string = "IndexationSucceed" +) + +// GetConditions returns the status conditions of the object. +func (in HelmRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in HelmRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. +func (in HelmRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *HelmRepository) GetArtifact() *Artifact { + return in.Status.Artifact +} + +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. 
+func (in *HelmRepository) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + +// +genclient +// +genclient:Namespaced +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=helmrepo +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// HelmRepository is the Schema for the helmrepositories API +type HelmRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmRepositorySpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status HelmRepositoryStatus `json:"status,omitempty"` +} + +// HelmRepositoryList contains a list of HelmRepository +// +kubebuilder:object:root=true +type HelmRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmRepository `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmRepository{}, &HelmRepositoryList{}) +} diff --git a/api/v1beta2/source.go b/api/v1beta2/source.go new file mode 100644 index 00000000..a8db640d --- /dev/null +++ b/api/v1beta2/source.go @@ -0,0 +1,44 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // SourceIndexKey is the key used for indexing resources + // resources based on their Source. + SourceIndexKey string = ".metadata.source" +) + +// Source interface must be supported by all API types. +// +k8s:deepcopy-gen=false +type Source interface { + runtime.Object + // GetRequeueAfter returns the duration after which the source must be reconciled again. + GetRequeueAfter() time.Duration + // GetArtifact returns the latest artifact from the source if present in the + // status sub-resource. + GetArtifact() *Artifact + // GetInterval returns the interval at which the source is updated. + // Deprecated: use GetRequeueAfter instead. + GetInterval() metav1.Duration +} diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 00000000..53c86a93 --- /dev/null +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,611 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Artifact) DeepCopyInto(out *Artifact) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact. +func (in *Artifact) DeepCopy() *Artifact { + if in == nil { + return nil + } + out := new(Artifact) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Bucket) DeepCopyInto(out *Bucket) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket. +func (in *Bucket) DeepCopy() *Bucket { + if in == nil { + return nil + } + out := new(Bucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Bucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketList) DeepCopyInto(out *BucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Bucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList. +func (in *BucketList) DeepCopy() *BucketList { + if in == nil { + return nil + } + out := new(BucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec. +func (in *BucketSpec) DeepCopy() *BucketSpec { + if in == nil { + return nil + } + out := new(BucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus. +func (in *BucketStatus) DeepCopy() *BucketStatus { + if in == nil { + return nil + } + out := new(BucketStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepository) DeepCopyInto(out *GitRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepository. +func (in *GitRepository) DeepCopy() *GitRepository { + if in == nil { + return nil + } + out := new(GitRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryInclude) DeepCopyInto(out *GitRepositoryInclude) { + *out = *in + out.GitRepositoryRef = in.GitRepositoryRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInclude. 
+func (in *GitRepositoryInclude) DeepCopy() *GitRepositoryInclude { + if in == nil { + return nil + } + out := new(GitRepositoryInclude) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryList) DeepCopyInto(out *GitRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GitRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryList. +func (in *GitRepositoryList) DeepCopy() *GitRepositoryList { + if in == nil { + return nil + } + out := new(GitRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryRef) DeepCopyInto(out *GitRepositoryRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryRef. +func (in *GitRepositoryRef) DeepCopy() *GitRepositoryRef { + if in == nil { + return nil + } + out := new(GitRepositoryRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = new(GitRepositoryRef) + **out = **in + } + if in.Verification != nil { + in, out := &in.Verification, &out.Verification + *out = new(GitRepositoryVerification) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } + if in.Include != nil { + in, out := &in.Include, &out.Include + *out = make([]GitRepositoryInclude, len(*in)) + copy(*out, *in) + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec. +func (in *GitRepositorySpec) DeepCopy() *GitRepositorySpec { + if in == nil { + return nil + } + out := new(GitRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } + if in.IncludedArtifacts != nil { + in, out := &in.IncludedArtifacts, &out.IncludedArtifacts + *out = make([]*Artifact, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } + } + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryStatus. +func (in *GitRepositoryStatus) DeepCopy() *GitRepositoryStatus { + if in == nil { + return nil + } + out := new(GitRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepositoryVerification) DeepCopyInto(out *GitRepositoryVerification) { + *out = *in + out.SecretRef = in.SecretRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryVerification. +func (in *GitRepositoryVerification) DeepCopy() *GitRepositoryVerification { + if in == nil { + return nil + } + out := new(GitRepositoryVerification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmChart) DeepCopyInto(out *HelmChart) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChart. +func (in *HelmChart) DeepCopy() *HelmChart { + if in == nil { + return nil + } + out := new(HelmChart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChart) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartList) DeepCopyInto(out *HelmChartList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmChart, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartList. +func (in *HelmChartList) DeepCopy() *HelmChartList { + if in == nil { + return nil + } + out := new(HelmChartList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChartList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) { + *out = *in + out.SourceRef = in.SourceRef + out.Interval = in.Interval + if in.ValuesFiles != nil { + in, out := &in.ValuesFiles, &out.ValuesFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec. +func (in *HelmChartSpec) DeepCopy() *HelmChartSpec { + if in == nil { + return nil + } + out := new(HelmChartSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartStatus. +func (in *HelmChartStatus) DeepCopy() *HelmChartStatus { + if in == nil { + return nil + } + out := new(HelmChartStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepository) DeepCopyInto(out *HelmRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepository. 
+func (in *HelmRepository) DeepCopy() *HelmRepository { + if in == nil { + return nil + } + out := new(HelmRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepositoryList) DeepCopyInto(out *HelmRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryList. +func (in *HelmRepositoryList) DeepCopy() *HelmRepositoryList { + if in == nil { + return nil + } + out := new(HelmRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositorySpec. +func (in *HelmRepositorySpec) DeepCopy() *HelmRepositorySpec { + if in == nil { + return nil + } + out := new(HelmRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryStatus. +func (in *HelmRepositoryStatus) DeepCopy() *HelmRepositoryStatus { + if in == nil { + return nil + } + out := new(HelmRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalHelmChartSourceReference) DeepCopyInto(out *LocalHelmChartSourceReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalHelmChartSourceReference. 
+func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReference { + if in == nil { + return nil + } + out := new(LocalHelmChartSourceReference) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index 61bbaf32..2abd9a2b 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -111,7 +111,7 @@ spec: for the Bucket. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -233,7 +233,8 @@ spec: type: array lastHandledReconcileAt: description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. @@ -246,6 +247,245 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.endpoint + name: Endpoint + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: Bucket is the Schema for the buckets API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BucketSpec defines the desired state of an S3 compatible + bucket + properties: + accessFrom: + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. + properties: + namespaceSelectors: + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. + items: + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. + properties: + matchLabels: + additionalProperties: + type: string + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object + bucketName: + description: The bucket name. + type: string + endpoint: + description: The bucket endpoint address. + type: string + ignore: + description: Ignore overrides the set of excluded patterns in the + .sourceignore format (which is the same as .gitignore). If not provided, + a default will be used, consult the documentation for your version + to find out what those are. + type: string + insecure: + description: Insecure allows connecting to a non-TLS S3 HTTP endpoint. 
+ type: boolean + interval: + description: The interval at which to check for bucket updates. + type: string + provider: + default: generic + description: The S3 compatible storage provider name, default ('generic'). + enum: + - generic + - aws + - gcp + type: string + region: + description: The bucket region. + type: string + secretRef: + description: The name of the secret containing authentication credentials + for the Bucket. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + suspend: + description: This flag tells the controller to suspend the reconciliation + of this source. + type: boolean + timeout: + default: 60s + description: The timeout for fetch operations, defaults to 60s. + type: string + required: + - bucketName + - endpoint + - interval + type: object + status: + default: + observedGeneration: -1 + description: BucketStatus defines the observed state of a bucket + properties: + artifact: + description: Artifact represents the output of the last successful + Bucket sync. + properties: + checksum: + description: Checksum is the SHA256 checksum of the artifact. + type: string + lastUpdateTime: + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. + format: date-time + type: string + path: + description: Path is the relative file path of this Artifact. + It can be used to locate the Artifact file in the root of the + Artifact storage on the local file system of the controller + managing the Source. + type: string + revision: + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. + type: string + url: + description: URL is the HTTP address of this artifact. It is used + by the consumers of the artifacts to fetch and use the artifacts. + It is expected to be resolvable from within the cluster. 
+ type: string + required: + - path + - url + type: object + conditions: + description: Conditions holds the conditions for the Bucket. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. 
+ This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value can + be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + url: + description: URL is the fetch link for the artifact output of the + last Bucket sync. + type: string + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index f2ea9662..905f1ae1 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -106,7 +106,7 @@ spec: description: Reference to a GitRepository to include. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -153,7 +153,7 @@ spec: and known_hosts fields. 
properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -186,7 +186,7 @@ spec: trusted Git authors. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -332,7 +332,8 @@ spec: type: array lastHandledReconcileAt: description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. @@ -345,6 +346,350 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: GitRepository is the Schema for the gitrepositories API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: GitRepositorySpec defines the desired state of a Git repository. + properties: + accessFrom: + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. + properties: + namespaceSelectors: + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. + items: + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. + properties: + matchLabels: + additionalProperties: + type: string + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object + gitImplementation: + default: go-git + description: Determines which git client library to use. Defaults + to go-git, valid values are ('go-git', 'libgit2'). + enum: + - go-git + - libgit2 + type: string + ignore: + description: Ignore overrides the set of excluded patterns in the + .sourceignore format (which is the same as .gitignore). If not provided, + a default will be used, consult the documentation for your version + to find out what those are. + type: string + include: + description: Include defines a list of GitRepository resources which + artifacts should be included in the artifact produced for this resource. + items: + description: GitRepositoryInclude defines a source with a from and + to path. 
+ properties: + fromPath: + description: The path to copy contents from, defaults to the + root directory. + type: string + repository: + description: Reference to a GitRepository to include. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + toPath: + description: The path to copy contents to, defaults to the name + of the source ref. + type: string + required: + - repository + type: object + type: array + interval: + description: The interval at which to check for repository updates. + type: string + recurseSubmodules: + description: When enabled, after the clone is created, initializes + all submodules within, using their default settings. This option + is available only when using the 'go-git' GitImplementation. + type: boolean + ref: + description: The Git reference to checkout and monitor for changes, + defaults to master branch. + properties: + branch: + description: The Git branch to checkout, defaults to master. + type: string + commit: + description: The Git commit SHA to checkout, if specified Tag + filters will be ignored. + type: string + semver: + description: The Git tag semver expression, takes precedence over + Tag. + type: string + tag: + description: The Git tag to checkout, takes precedence over Branch. + type: string + type: object + secretRef: + description: The secret name containing the Git credentials. For HTTPS + repositories the secret must contain username and password fields. + For SSH repositories the secret must contain 'identity', 'identity.pub' + and 'known_hosts' fields. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + suspend: + description: Suspend tells the controller to suspend the reconciliation + of this source. This flag tells the controller to suspend the reconciliation + of this source. 
+ type: boolean + timeout: + default: 60s + description: The timeout for remote Git operations like cloning, defaults + to 60s. + type: string + url: + description: The repository URL, can be a HTTP/S or SSH address. + pattern: ^(http|https|ssh):// + type: string + verify: + description: Verification defines the configuration to verify the + OpenPGP signature for the Git commit HEAD points to. + properties: + mode: + description: Mode describes what Git object should be verified, + currently ('head'). + enum: + - head + type: string + secretRef: + description: SecretRef containing the public keys of all trusted + Git authors. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - mode + type: object + required: + - interval + - url + type: object + status: + default: + observedGeneration: -1 + description: GitRepositoryStatus defines the observed state of a Git repository. + properties: + artifact: + description: Artifact represents the output of the last successful + repository sync. + properties: + checksum: + description: Checksum is the SHA256 checksum of the artifact. + type: string + lastUpdateTime: + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. + format: date-time + type: string + path: + description: Path is the relative file path of this Artifact. + It can be used to locate the Artifact file in the root of the + Artifact storage on the local file system of the controller + managing the Source. + type: string + revision: + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. + type: string + url: + description: URL is the HTTP address of this artifact. It is used + by the consumers of the artifacts to fetch and use the artifacts. + It is expected to be resolvable from within the cluster. 
+ type: string + required: + - path + - url + type: object + conditions: + description: Conditions holds the conditions for the GitRepository. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. 
+ This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + includedArtifacts: + description: IncludedArtifacts represents the included artifacts from + the last successful repository sync. + items: + description: Artifact represents the output of a Source synchronisation. + properties: + checksum: + description: Checksum is the SHA256 checksum of the artifact. + type: string + lastUpdateTime: + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. + format: date-time + type: string + path: + description: Path is the relative file path of this Artifact. + It can be used to locate the Artifact file in the root of + the Artifact storage on the local file system of the controller + managing the Source. + type: string + revision: + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. + type: string + url: + description: URL is the HTTP address of this artifact. It is + used by the consumers of the artifacts to fetch and use the + artifacts. 
It is expected to be resolvable from within the + cluster. + type: string + required: + - path + - url + type: object + type: array + lastHandledReconcileAt: + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value can + be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + url: + description: URL is the fetch link for the artifact output of the + last repository sync. + type: string + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index 6594bca5..7dc2ece7 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -258,7 +258,8 @@ spec: type: array lastHandledReconcileAt: description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. 
@@ -270,6 +271,275 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.chart + name: Chart + type: string + - jsonPath: .spec.version + name: Version + type: string + - jsonPath: .spec.sourceRef.kind + name: Source Kind + type: string + - jsonPath: .spec.sourceRef.name + name: Source Name + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: HelmChart is the Schema for the helmcharts API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HelmChartSpec defines the desired state of a Helm chart. + properties: + accessFrom: + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. + properties: + namespaceSelectors: + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. + items: + description: NamespaceSelector selects the namespaces to which + this ACL applies. 
An empty map of MatchLabels matches all + namespaces in a cluster. + properties: + matchLabels: + additionalProperties: + type: string + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object + chart: + description: The name or path the Helm chart is available at in the + SourceRef. + type: string + interval: + description: The interval at which to check the Source for updates. + type: string + reconcileStrategy: + default: ChartVersion + description: Determines what enables the creation of a new artifact. + Valid values are ('ChartVersion', 'Revision'). See the documentation + of the values for an explanation on their behavior. Defaults to + ChartVersion when omitted. + enum: + - ChartVersion + - Revision + type: string + sourceRef: + description: The reference to the Source the chart is available at. + properties: + apiVersion: + description: APIVersion of the referent. + type: string + kind: + description: Kind of the referent, valid values are ('HelmRepository', + 'GitRepository', 'Bucket'). + enum: + - HelmRepository + - GitRepository + - Bucket + type: string + name: + description: Name of the referent. + type: string + required: + - kind + - name + type: object + suspend: + description: This flag tells the controller to suspend the reconciliation + of this source. + type: boolean + valuesFile: + description: Alternative values file to use as the default chart values, + expected to be a relative path in the SourceRef. Deprecated in favor + of ValuesFiles, for backwards compatibility the file defined here + is merged before the ValuesFiles items. Ignored when omitted. 
+ type: string + valuesFiles: + description: Alternative list of values files to use as the chart + values (values.yaml is not included by default), expected to be + a relative path in the SourceRef. Values files are merged in the + order of this list with the last file overriding the first. Ignored + when omitted. + items: + type: string + type: array + version: + default: '*' + description: The chart version semver expression, ignored for charts + from GitRepository and Bucket sources. Defaults to latest when omitted. + type: string + required: + - chart + - interval + - sourceRef + type: object + status: + default: + observedGeneration: -1 + description: HelmChartStatus defines the observed state of the HelmChart. + properties: + artifact: + description: Artifact represents the output of the last successful + chart sync. + properties: + checksum: + description: Checksum is the SHA256 checksum of the artifact. + type: string + lastUpdateTime: + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. + format: date-time + type: string + path: + description: Path is the relative file path of this Artifact. + It can be used to locate the Artifact file in the root of the + Artifact storage on the local file system of the controller + managing the Source. + type: string + revision: + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. + type: string + url: + description: URL is the HTTP address of this artifact. It is used + by the consumers of the artifacts to fetch and use the artifacts. + It is expected to be resolvable from within the cluster. + type: string + required: + - path + - url + type: object + conditions: + description: Conditions holds the conditions for the HelmChart. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. 
--- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value can + be detected. + type: string + observedChartName: + description: ObservedChartName is the last observed chart name as + defined by the resolved chart reference. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + observedSourceArtifactRevision: + description: ObservedSourceArtifactRevision is the last observed Artifact.Revision + of the Source reference. + type: string + url: + description: URL is the fetch link for the last chart pulled. + type: string + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index e9595d5c..e951fbd3 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -96,7 +96,7 @@ spec: certFile and keyFile, and/or caCert fields. properties: name: - description: Name of the referent + description: Name of the referent. 
type: string required: - name @@ -221,7 +221,8 @@ spec: type: array lastHandledReconcileAt: description: LastHandledReconcileAt holds the value of the most recent - reconcile request value, so a change can be detected. + reconcile request value, so a change of the annotation value can + be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. @@ -233,6 +234,230 @@ spec: type: object type: object served: true + storage: false + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta2 + schema: + openAPIV3Schema: + description: HelmRepository is the Schema for the helmrepositories API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HelmRepositorySpec defines the reference to a Helm repository. + properties: + accessFrom: + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. 
+ properties: + namespaceSelectors: + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. + items: + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. + properties: + matchLabels: + additionalProperties: + type: string + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object + interval: + description: The interval at which to check the upstream for updates. + type: string + passCredentials: + description: PassCredentials allows the credentials from the SecretRef + to be passed on to a host that does not match the host as defined + in URL. This may be required if the host of the advertised chart + URLs in the index differ from the defined URL. Enabling this should + be done with caution, as it can potentially result in credentials + getting stolen in a MITM-attack. + type: boolean + secretRef: + description: The name of the secret containing authentication credentials + for the Helm repository. For HTTP/S basic auth the secret must contain + username and password fields. For TLS the secret must contain a + certFile and keyFile, and/or caCert fields. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + suspend: + description: This flag tells the controller to suspend the reconciliation + of this source. + type: boolean + timeout: + default: 60s + description: The timeout of index fetching, defaults to 60s. 
+ type: string + url: + description: The Helm repository URL, a valid URL contains at least + a protocol and host. + type: string + required: + - interval + - url + type: object + status: + default: + observedGeneration: -1 + description: HelmRepositoryStatus defines the observed state of the HelmRepository. + properties: + artifact: + description: Artifact represents the output of the last successful + repository sync. + properties: + checksum: + description: Checksum is the SHA256 checksum of the artifact. + type: string + lastUpdateTime: + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. + format: date-time + type: string + path: + description: Path is the relative file path of this Artifact. + It can be used to locate the Artifact file in the root of the + Artifact storage on the local file system of the controller + managing the Source. + type: string + revision: + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. + type: string + url: + description: URL is the HTTP address of this artifact. It is used + by the consumers of the artifacts to fetch and use the artifacts. + It is expected to be resolvable from within the cluster. + type: string + required: + - path + - url + type: object + conditions: + description: Conditions holds the conditions for the HelmRepository. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value can + be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + url: + description: URL is the fetch link for the last index fetched. + type: string + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/controllers/artifact.go b/controllers/artifact.go index 0e16fd03..8d034f07 100644 --- a/controllers/artifact.go +++ b/controllers/artifact.go @@ -1,9 +1,47 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package controllers -import sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" +import sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + +type artifactSet []*sourcev1.Artifact + +// Diff returns true if any of the revisions in the artifactSet does not match any of the given artifacts. +func (s artifactSet) Diff(set artifactSet) bool { + if len(s) != len(set) { + return true + } + +outer: + for _, j := range s { + for _, k := range set { + if k.HasRevision(j.Revision) { + continue outer + } + } + return true + } + return false +} // hasArtifactUpdated returns true if any of the revisions in the current artifacts // does not match any of the artifacts in the updated artifacts +// NOTE: artifactSet is a replacement for this. Remove this once it's not used +// anywhere. func hasArtifactUpdated(current []*sourcev1.Artifact, updated []*sourcev1.Artifact) bool { if len(current) != len(updated) { return true diff --git a/controllers/artifact_matchers_test.go b/controllers/artifact_matchers_test.go new file mode 100644 index 00000000..06ab529d --- /dev/null +++ b/controllers/artifact_matchers_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "fmt" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +// MatchArtifact returns a custom matcher to check equality of a v1beta1.Artifact, the timestamp and URL are ignored. +func MatchArtifact(expected *sourcev1.Artifact) types.GomegaMatcher { + return &matchArtifact{ + expected: expected, + } +} + +type matchArtifact struct { + expected *sourcev1.Artifact +} + +func (m matchArtifact) Match(actual interface{}) (success bool, err error) { + actualArtifact, ok := actual.(*sourcev1.Artifact) + if !ok { + return false, fmt.Errorf("actual should be a pointer to an Artifact") + } + + if ok, _ := BeNil().Match(m.expected); ok { + return BeNil().Match(actual) + } + + if ok, err = Equal(m.expected.Path).Match(actualArtifact.Path); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Revision).Match(actualArtifact.Revision); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Checksum).Match(actualArtifact.Checksum); !ok { + return ok, err + } + + return ok, err +} + +func (m matchArtifact) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto match\n\t%#v\n", actual, m.expected) +} + +func (m matchArtifact) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto not match\n\t%#v\n", actual, m.expected) +} diff --git a/controllers/artifact_test.go b/controllers/artifact_test.go index 95966161..935c93bf 100644 --- a/controllers/artifact_test.go +++ b/controllers/artifact_test.go @@ -1,26 +1,40 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + package controllers import ( "testing" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) -func TestHasUpdated(t *testing.T) { +func Test_artifactSet_Diff(t *testing.T) { tests := []struct { name string - current []*sourcev1.Artifact - updated []*sourcev1.Artifact + current artifactSet + updated artifactSet expected bool }{ { - name: "not updated single", - current: []*sourcev1.Artifact{ + name: "one artifact, no diff", + current: artifactSet{ { Revision: "foo", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -28,13 +42,13 @@ func TestHasUpdated(t *testing.T) { expected: false, }, { - name: "updated single", - current: []*sourcev1.Artifact{ + name: "one artifact, diff", + current: artifactSet{ { Revision: "foo", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "bar", }, @@ -42,8 +56,8 @@ func TestHasUpdated(t *testing.T) { expected: true, }, { - name: "not updated multiple", - current: []*sourcev1.Artifact{ + name: "multiple artifacts, no diff", + current: artifactSet{ { Revision: "foo", }, @@ -51,7 +65,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -62,8 +76,8 @@ func TestHasUpdated(t *testing.T) { expected: false, }, { - name: "updated multiple", - current: []*sourcev1.Artifact{ + name: "multiple artifacts, diff", + current: artifactSet{ { Revision: "foo", }, @@ -71,7 +85,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -82,8 +96,8 @@ func TestHasUpdated(t *testing.T) { expected: true, }, { - name: "updated different artifact count", - current: []*sourcev1.Artifact{ + name: "different artifact count", + current: artifactSet{ { Revision: "foo", }, @@ -91,7 +105,7 @@ func TestHasUpdated(t 
*testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -101,7 +115,7 @@ func TestHasUpdated(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := hasArtifactUpdated(tt.current, tt.updated) + result := tt.current.Diff(tt.updated) if result != tt.expected { t.Errorf("Archive() result = %v, wantResult %v", result, tt.expected) } diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index a25587d1..522b47c8 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -18,24 +18,28 @@ package controllers import ( "context" - "crypto/sha1" + "crypto/sha256" + "errors" "fmt" "os" "path/filepath" + "sort" "strings" "time" + gcpstorage "cloud.google.com/go/storage" + "github.com/fluxcd/pkg/runtime/events" + "github.com/fluxcd/source-controller/pkg/gcp" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/s3utils" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -43,15 +47,43 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" - "github.com/fluxcd/source-controller/pkg/gcp" - sourcev1 
"github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" "github.com/fluxcd/source-controller/pkg/sourceignore" ) +// bucketReadyConditions contains all the conditions information needed +// for Bucket Ready status conditions summary calculation. +var bucketReadyConditions = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, +} + // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete @@ -60,17 +92,21 @@ import ( // BucketReconciler reconciles a Bucket object type BucketReconciler struct { client.Client - Scheme *runtime.Scheme - Storage *Storage - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder + kuberecorder.EventRecorder + helper.Metrics + + Storage *Storage + ControllerName string } type BucketReconcilerOptions struct { MaxConcurrentReconciles int } +// bucketReconcilerFunc is the function type for all the bucket reconciler +// functions. 
+type bucketReconcilerFunc func(ctx context.Context, obj *sourcev1.Bucket, index etagIndex, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) + func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{}) } @@ -83,244 +119,424 @@ func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts Buc Complete(r) } -func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { + // Fetch the Bucket + obj := &sourcev1.Bucket{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, bucket) + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&bucket, sourcev1.SourceFinalizer) { - patch := client.MergeFrom(bucket.DeepCopy()) - controllerutil.AddFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Patch(ctx, &bucket, patch); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !bucket.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, bucket) - } - - // Return early if the object is suspended. 
- if bucket.Spec.Suspend { - log.Info("Reconciliation is suspended for this object") + // Return early if the object is suspended + if obj.Spec.Suspend { + log.Info("reconciliation is suspended for this object") return ctrl.Result{}, nil } - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) - } - - // set initial status - if resetBucket, ok := r.resetStatus(bucket); ok { - bucket = resetBucket - if err := r.updateStatus(ctx, req, bucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, bucket) - } - - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. - if v, ok := meta.ReconcileAnnotationValue(bucket.GetAnnotations()); ok { - bucket.Status.SetLastHandledReconcileRequest(v) - } - - // purge old artifacts from storage - if err := r.gc(bucket); err != nil { - log.Error(err, "unable to purge old artifacts") - } - - // reconcile bucket by downloading its content - reconciledBucket, reconcileErr := r.reconcile(ctx, *bucket.DeepCopy()) - - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledBucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - - // if reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledBucket, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledBucket) - return ctrl.Result{Requeue: true}, reconcileErr - } - - // emit revision change event - if bucket.Status.Artifact == nil || reconciledBucket.Status.Artifact.Revision != 
bucket.Status.Artifact.Revision { - r.event(ctx, reconciledBucket, events.EventSeverityInfo, sourcev1.BucketReadyMessage(reconciledBucket)) - } - r.recordReadiness(ctx, reconciledBucket) - - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Since(start).String(), - bucket.GetInterval().Duration.String(), - )) - - return ctrl.Result{RequeueAfter: bucket.GetInterval().Duration}, nil -} - -func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) { - log := ctrl.LoggerFrom(ctx) - var err error - var sourceBucket sourcev1.Bucket - - tempDir, err := os.MkdirTemp("", bucket.Name) + // Initialize the patch helper with the current version of the object. + patchHelper, err := patch.NewHelper(obj, r.Client) if err != nil { - err = fmt.Errorf("tmp dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + return ctrl.Result{}, err } + + // recResult stores the abstracted reconcile result. + var recResult sreconcile.Result + + // Always attempt to patch the object and status after each reconciliation + // NOTE: The final runtime result and error are set in this block. defer func() { - if err := os.RemoveAll(tempDir); err != nil { - log.Error(err, "failed to remove working directory", "path", tempDir) + summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(bucketReadyConditions), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.RecordContextualError, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), + summarize.WithPatchFieldOwner(r.ControllerName), } + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
+ + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) }() - if bucket.Spec.Provider == sourcev1.GoogleBucketProvider { - sourceBucket, err = r.reconcileWithGCP(ctx, bucket, tempDir) - if err != nil { - return sourceBucket, err - } - } else { - sourceBucket, err = r.reconcileWithMinio(ctx, bucket, tempDir) - if err != nil { - return sourceBucket, err - } - } - revision, err := r.checksum(tempDir) - if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + // Add finalizer first if not exist to avoid the race condition between init and delete + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue + return } - // return early on unchanged revision - artifact := r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), revision, fmt.Sprintf("%s.tar.gz", revision)) - if apimeta.IsStatusConditionTrue(bucket.Status.Conditions, meta.ReadyCondition) && bucket.GetArtifact().HasRevision(artifact.Revision) { - if artifact.URL != bucket.GetArtifact().URL { - r.Storage.SetArtifactURL(bucket.GetArtifact()) - bucket.Status.URL = r.Storage.SetHostname(bucket.Status.URL) - } - return bucket, nil + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + recResult, retErr = r.reconcileDelete(ctx, obj) + return } - // create artifact dir - err = r.Storage.MkdirAll(artifact) - if err != nil { - err = fmt.Errorf("mkdir dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + // Reconcile actual object + reconcilers := []bucketReconcilerFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileArtifact, } - - // acquire lock - unlock, err := r.Storage.Lock(artifact) - if err != nil { - err = fmt.Errorf("unable to acquire lock: 
%w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer unlock() - - // archive artifact and check integrity - if err := r.Storage.Archive(&artifact, tempDir, nil); err != nil { - err = fmt.Errorf("storage archive error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // update latest symlink - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") - if err != nil { - err = fmt.Errorf("storage symlink error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - return sourcev1.BucketReady(bucket, artifact, url, sourcev1.BucketOperationSucceedReason, message), nil + recResult, retErr = r.reconcile(ctx, obj, reconcilers) + return } -func (r *BucketReconciler) reconcileDelete(ctx context.Context, bucket sourcev1.Bucket) (ctrl.Result, error) { - if err := r.gc(bucket); err != nil { - r.event(ctx, bucket, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err +// reconcile steps iterates through the actual reconciliation tasks for objec, +// it returns early on the first step that returns ResultRequeue or produces an +// error. 
+func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcilerFunc) (sreconcile.Result, error) { + if obj.Generation != obj.Status.ObservedGeneration { + conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) } - // Record deleted status - r.recordReadiness(ctx, bucket) + index := make(etagIndex) + var artifact sourcev1.Artifact - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &bucket); err != nil { - return ctrl.Result{}, err + // Create temp working dir + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) + if err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create temporary directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } + defer os.RemoveAll(tmpDir) - // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil + // Run the sub-reconcilers and build the result of reconciliation. + var res sreconcile.Result + var resErr error + for _, rec := range reconcilers { + recResult, err := rec(ctx, obj, index, &artifact, tmpDir) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil + } + // If an error is received, prioritize the returned results because an + // error also means immediate requeue. + if err != nil { + resErr = err + res = recResult + break + } + // Prioritize requeue request in the result. 
+ res = sreconcile.LowestRequeuingResult(res, recResult) + } + return res, resErr } -// reconcileWithGCP handles getting objects from a Google Cloud Platform bucket -// using a gcp client -func (r *BucketReconciler) reconcileWithGCP(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) { - log := ctrl.LoggerFrom(ctx) - gcpClient, err := r.authGCP(ctx, bucket) - if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err - } - defer gcpClient.Close(log) +// reconcileStorage ensures the current state of the storage matches the desired and previously observed state. +// +// All artifacts for the resource except for the current one are garbage collected from the storage. +// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. +// If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. 
+func (r *BucketReconciler) reconcileStorage(ctx context.Context, + obj *sourcev1.Bucket, _ etagIndex, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) + // Determine if the advertised artifact is still in storage + if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") + return sreconcile.ResultSuccess, nil + } + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return sreconcile.ResultSuccess, nil +} + +// reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the +// result. +// If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret +// fails, it records v1beta1.FetchFailedCondition=True and returns early. 
+func (r *BucketReconciler) reconcileSource(ctx context.Context, + obj *sourcev1.Bucket, index etagIndex, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { + var secret *corev1.Secret + if obj.Spec.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, + } + secret = &corev1.Secret{} + if err := r.Get(ctx, secretName, secret); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to get secret '%s': %w", secretName.String(), err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) + // Return error as the world as observed may change + return sreconcile.ResultEmpty, e + } + } + + switch obj.Spec.Provider { + case sourcev1.GoogleBucketProvider: + return r.reconcileGCPSource(ctx, obj, index, artifact, secret, dir) + default: + return r.reconcileMinioSource(ctx, obj, index, artifact, secret, dir) + } +} + +// reconcileMinioSource ensures the upstream Minio client compatible bucket can be reached and downloaded from using the +// declared configuration, and observes its state. +// +// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into +// account. In case of an error during the download process (including transient errors), it records +// v1beta1.FetchFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.FetchFailedCondition, and compares the current revision of HEAD to +// the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. +// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. 
+func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, + obj *sourcev1.Bucket, index etagIndex, artifact *sourcev1.Artifact, secret *corev1.Secret, dir string) (sreconcile.Result, error) { + // Build the client with the configuration from the object and secret + s3Client, err := r.buildMinioClient(obj, secret) + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to construct S3 client: %w", err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + // Return error as the contents of the secret may change + return sreconcile.ResultEmpty, e + } + + // Confirm bucket exists + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - - exists, err := gcpClient.BucketExists(ctxTimeout, bucket.Spec.BucketName) + exists, err := s3Client.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to verify existence of bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } - // Look for file with ignore rules first. 
- path := filepath.Join(tempDir, sourceignore.IgnoreFile) - if err := gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { - if err == gcp.ErrorObjectDoesNotExist && sourceignore.IgnoreFile != ".sourceignore" { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + // Look for file with ignore rules first + path := filepath.Join(dir, sourceignore.IgnoreFile) + if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { + if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { + e := &serror.Event{ + Err: fmt.Errorf("failed to get '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to read '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // In-spec patterns take precedence - if bucket.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) 
} matcher := sourceignore.NewMatcher(ps) - objects := gcpClient.ListObjects(ctxTimeout, bucket.Spec.BucketName, nil) - // download bucket content + + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + for object := range s3Client.ListObjects(ctxTimeout, obj.Spec.BucketName, minio.ListObjectsOptions{ + Recursive: true, + UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), + }) { + if err = object.Err; err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to list objects from bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + + // Ignore directories and the .sourceignore file + if strings.HasSuffix(object.Key, "/") || object.Key == sourceignore.IgnoreFile { + continue + } + // Ignore matches + if matcher.Match(strings.Split(object.Key, "/"), false) { + continue + } + + index[object.Key] = object.ETag + } + + // Calculate revision checksum from the collected index values + revision, err := index.Revision() + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to calculate revision: %w", err), + Reason: meta.FailedReason, + } + } + + if !obj.GetArtifact().HasRevision(revision) { + // Mark observations about the revision on the object + message := fmt.Sprintf("new upstream revision '%s'", revision) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) + + // Download the files in parallel, but with a limited number of workers + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + const workers 
= 4 + sem := semaphore.NewWeighted(workers) + for key := range index { + k := key + if err := sem.Acquire(groupCtx, 1); err != nil { + return err + } + group.Go(func() error { + defer sem.Release(1) + localPath := filepath.Join(dir, k) + if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath, minio.GetObjectOptions{}); err != nil { + return fmt.Errorf("failed to get '%s' file: %w", k, err) + } + return nil + }) + } + return nil + }) + if err = group.Wait(); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("fetch from bucket '%s' failed: %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + } + conditions.Delete(obj, sourcev1.FetchFailedCondition) + + // Create potential new artifact + *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) + return sreconcile.ResultSuccess, nil +} + +// reconcileGCPSource ensures the upstream Google Cloud Storage bucket can be reached and downloaded from using the +// declared configuration, and observes its state. +// +// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into +// account. In case of an error during the download process (including transient errors), it records +// v1beta1.DownloadFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. +// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. 
+func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, + obj *sourcev1.Bucket, index etagIndex, artifact *sourcev1.Artifact, secret *corev1.Secret, dir string) (sreconcile.Result, error) { + gcpClient, err := r.buildGCPClient(ctx, secret) + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to construct GCP client: %w", err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + // Return error as the contents of the secret may change + return sreconcile.ResultEmpty, e + } + defer gcpClient.Close(ctrl.LoggerFrom(ctx)) + + // Confirm bucket exists + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) + defer cancel() + exists, err := gcpClient.BucketExists(ctxTimeout, obj.Spec.BucketName) + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to verify existence of bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + if !exists { + e := &serror.Event{ + Err: fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + + // Look for file with ignore rules first + path := filepath.Join(dir, sourceignore.IgnoreFile) + if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { + if err != gcpstorage.ErrObjectNotExist { + e := &serror.Event{ + Err: fmt.Errorf("failed to get '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, 
sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + } + ps, err := sourceignore.ReadIgnoreFile(path, nil) + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to read '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + // In-spec patterns take precedence + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) + } + matcher := sourceignore.NewMatcher(ps) + + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + objects := gcpClient.ListObjects(ctxTimeout, obj.Spec.BucketName, nil) for { object, err := objects.Next() - if err == gcp.IteratorDone { - break - } if err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + if err == gcp.IteratorDone { + break + } + e := &serror.Event{ + Err: fmt.Errorf("failed to list objects from bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { @@ -331,98 +547,224 @@ func (r *BucketReconciler) reconcileWithGCP(ctx context.Context, bucket sourcev1 continue } - localPath := filepath.Join(tempDir, object.Name) - if err = gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Name, localPath); err != nil { - err = fmt.Errorf("downloading object 
from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + index[object.Name] = object.Etag + } + + // Calculate revision checksum from the collected index values + revision, err := index.Revision() + if err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to calculate revision: %w", err), + Reason: meta.FailedReason, } } - return sourcev1.Bucket{}, nil + + if !obj.GetArtifact().HasRevision(revision) { + // Mark observations about the revision on the object + message := fmt.Sprintf("new upstream revision '%s'", revision) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) + + // Download the files in parallel, but with a limited number of workers + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + const workers = 4 + sem := semaphore.NewWeighted(workers) + for key := range index { + k := key + if err := sem.Acquire(groupCtx, 1); err != nil { + return err + } + group.Go(func() error { + defer sem.Release(1) + localPath := filepath.Join(dir, k) + if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath); err != nil { + return fmt.Errorf("failed to get '%s' file: %w", k, err) + } + return nil + }) + } + return nil + }) + if err = group.Wait(); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("fetch from bucket '%s' failed: %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + } + conditions.Delete(obj, sourcev1.FetchFailedCondition) + + // Create potential new artifact + *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) + return sreconcile.ResultSuccess, nil } -// 
reconcileWithMinio handles getting objects from an S3 compatible bucket -// using a minio client -func (r *BucketReconciler) reconcileWithMinio(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) { - s3Client, err := r.authMinio(ctx, bucket) +// reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the +// given data. +// +// The inspection of the given data to the object is differed, ensuring any stale observations as +// If the given artifact does not differ from the object's current, it returns early. +// On a successful archive, the artifact in the status of the given object is set, and the symlink in the storage is +// updated to its path. +func (r *BucketReconciler) reconcileArtifact(ctx context.Context, + obj *sourcev1.Bucket, index etagIndex, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { + // Always restore the Ready condition in case it got removed due to a transient error + defer func() { + if obj.GetArtifact().HasRevision(artifact.Revision) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, + "stored artifact for revision '%s'", artifact.Revision) + } + }() + + // The artifact is up-to-date + if obj.GetArtifact().HasRevision(artifact.Revision) { + ctrl.LoggerFrom(ctx).Info("artifact up-to-date", "revision", artifact.Revision) + return sreconcile.ResultSuccess, nil + } + + // Mark reconciling because the artifact and remote source are different. + // and they have to be reconciled. 
+ conditions.MarkReconciling(obj, "NewRevision", "new upstream revision '%s'", artifact.Revision) + + // Ensure target path exists and is a directory + if f, err := os.Stat(dir); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to stat source path: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + } else if !f.IsDir() { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("source path '%s' is not a directory", dir), + Reason: sourcev1.StorageOperationFailedReason, + } + } + + // Ensure artifact directory exists and acquire lock + if err := r.Storage.MkdirAll(*artifact); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create artifact directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + } + unlock, err := r.Storage.Lock(*artifact) if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), + Reason: meta.FailedReason, + } } + defer unlock() - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) - defer cancel() + // Archive directory to storage + if err := r.Storage.Archive(artifact, dir, nil); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("unable to archive artifact to storage: %s", err), + Reason: sourcev1.StorageOperationFailedReason, + } + } + r.annotatedEventLogf(ctx, obj, map[string]string{ + "revision": artifact.Revision, + "checksum": artifact.Checksum, + }, corev1.EventTypeNormal, "NewArtifact", "fetched %d files from '%s'", len(index), obj.Spec.BucketName) - exists, err := s3Client.BucketExists(ctxTimeout, bucket.Spec.BucketName) + // Record it on the object + obj.Status.Artifact = artifact.DeepCopy() + + // Update symlink on a "best effort" basis + url, err := 
r.Storage.Symlink(*artifact, "latest.tar.gz") if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + r.eventLogf(ctx, obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "failed to update status URL symlink: %s", err) } - if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + if url != "" { + obj.Status.URL = url } - - // Look for file with ignore rules first - // NB: S3 has flat filepath keys making it impossible to look - // for files in "subdirectories" without building up a tree first. - path := filepath.Join(tempDir, sourceignore.IgnoreFile) - if err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { - if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err - } - } - ps, err := sourceignore.ReadIgnoreFile(path, nil) - if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err - } - // In-spec patterns take precedence - if bucket.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) 
- } - matcher := sourceignore.NewMatcher(ps) - - // download bucket content - for object := range s3Client.ListObjects(ctxTimeout, bucket.Spec.BucketName, minio.ListObjectsOptions{ - Recursive: true, - UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), - }) { - if object.Err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, object.Err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err - } - - if strings.HasSuffix(object.Key, "/") || object.Key == sourceignore.IgnoreFile { - continue - } - - if matcher.Match(strings.Split(object.Key, "/"), false) { - continue - } - - localPath := filepath.Join(tempDir, object.Key) - err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Key, localPath, minio.GetObjectOptions{}) - if err != nil { - err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err - } - } - return sourcev1.Bucket{}, nil + return sreconcile.ResultSuccess, nil } -// authGCP creates a new Google Cloud Platform storage client -// to interact with the storage service. -func (r *BucketReconciler) authGCP(ctx context.Context, bucket sourcev1.Bucket) (*gcp.GCPClient, error) { +// reconcileDelete handles the deletion of an object. It first garbage collects all artifacts for the object from the +// artifact storage, if successful, the finalizer is removed from the object. 
+func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return sreconcile.ResultEmpty, err + } + + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Stop reconciliation as the object is being deleted + return sreconcile.ResultEmpty, nil +} + +// garbageCollect performs a garbage collection for the given v1beta1.Bucket. It removes all but the current +// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the +// resource. +func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { + if !obj.DeletionTimestamp.IsZero() { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + return &serror.Event{ + Err: fmt.Errorf("garbage collection for deleted resource failed: %s", err), + Reason: "GarbageCollectionFailed", + } + } else if deleted != "" { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") + } + obj.Status.Artifact = nil + return nil + } + if obj.GetArtifact() != nil { + if deleted, err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + return &serror.Event{ + Err: fmt.Errorf("garbage collection of old artifacts failed: %s", err), + Reason: "GarbageCollectionFailed", + } + } else if len(deleted) > 0 { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected old artifacts") + } + } + return nil +} + +// buildMinioClient constructs a minio.Client with the data from the given object and Secret. 
+// It returns an error if the Secret does not have the required fields, or if there is no credential handler +// configured. +func (r *BucketReconciler) buildMinioClient(obj *sourcev1.Bucket, secret *corev1.Secret) (*minio.Client, error) { + opts := minio.Options{ + Region: obj.Spec.Region, + Secure: !obj.Spec.Insecure, + } + if secret != nil { + var accessKey, secretKey string + if k, ok := secret.Data["accesskey"]; ok { + accessKey = string(k) + } + if k, ok := secret.Data["secretkey"]; ok { + secretKey = string(k) + } + if accessKey == "" || secretKey == "" { + return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) + } + opts.Creds = credentials.NewStaticV4(accessKey, secretKey, "") + } else if obj.Spec.Provider == sourcev1.AmazonBucketProvider { + opts.Creds = credentials.NewIAM("") + } + return minio.New(obj.Spec.Endpoint, &opts) +} + +// buildGCPClient constructs a gcp.GCPClient with the data from the given Secret. +// It returns an error if the Secret does not have the required field, or if the client construction fails. +func (r *BucketReconciler) buildGCPClient(ctx context.Context, secret *corev1.Secret) (*gcp.GCPClient, error) { var client *gcp.GCPClient var err error - if bucket.Spec.SecretRef != nil { - secretName := types.NamespacedName{ - Namespace: bucket.GetNamespace(), - Name: bucket.Spec.SecretRef.Name, - } - - var secret corev1.Secret - if err := r.Get(ctx, secretName, &secret); err != nil { - return nil, fmt.Errorf("credentials secret error: %w", err) - } + if secret != nil { if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil { return nil, err } @@ -437,175 +779,47 @@ func (r *BucketReconciler) authGCP(ctx context.Context, bucket sourcev1.Bucket) } } return client, nil - } -// authMinio creates a new Minio client to interact with S3 -// compatible storage services. 
-func (r *BucketReconciler) authMinio(ctx context.Context, bucket sourcev1.Bucket) (*minio.Client, error) { - opt := minio.Options{ - Region: bucket.Spec.Region, - Secure: !bucket.Spec.Insecure, +// etagIndex is an index of bucket keys and their Etag values. +type etagIndex map[string]string + +// Revision calculates the SHA256 checksum of the index. +// The keys are sorted to ensure a stable order, and the SHA256 sum is then calculated for the string representations of +// the key/value pairs, each pair written on a newline +// The sum result is returned as a string. +func (i etagIndex) Revision() (string, error) { + keyIndex := make([]string, 0, len(i)) + for k := range i { + keyIndex = append(keyIndex, k) } - - if bucket.Spec.SecretRef != nil { - secretName := types.NamespacedName{ - Namespace: bucket.GetNamespace(), - Name: bucket.Spec.SecretRef.Name, + sort.Strings(keyIndex) + sum := sha256.New() + for _, k := range keyIndex { + if _, err := sum.Write([]byte(fmt.Sprintf("%s %s\n", k, i[k]))); err != nil { + return "", err } - - var secret corev1.Secret - if err := r.Get(ctx, secretName, &secret); err != nil { - return nil, fmt.Errorf("credentials secret error: %w", err) - } - - accesskey := "" - secretkey := "" - if k, ok := secret.Data["accesskey"]; ok { - accesskey = string(k) - } - if k, ok := secret.Data["secretkey"]; ok { - secretkey = string(k) - } - if accesskey == "" || secretkey == "" { - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) - } - opt.Creds = credentials.NewStaticV4(accesskey, secretkey, "") - } else if bucket.Spec.Provider == sourcev1.AmazonBucketProvider { - opt.Creds = credentials.NewIAM("") - } - - if opt.Creds == nil { - return nil, fmt.Errorf("no bucket credentials found") - } - - return minio.New(bucket.Spec.Endpoint, &opt) -} - -// checksum calculates the SHA1 checksum of the given root directory. 
-// It traverses the given root directory and calculates the checksum for any found file, and returns the SHA1 sum of the -// list with relative file paths and their checksums. -func (r *BucketReconciler) checksum(root string) (string, error) { - sum := sha1.New() - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.Mode().IsRegular() { - return nil - } - data, err := os.ReadFile(path) - if err != nil { - return err - } - relPath, err := filepath.Rel(root, path) - if err != nil { - return err - } - sum.Write([]byte(fmt.Sprintf("%x %s\n", sha1.Sum(data), relPath))) - return nil - }); err != nil { - return "", err } return fmt.Sprintf("%x", sum.Sum(nil)), nil } -// resetStatus returns a modified v1beta1.Bucket and a boolean indicating -// if the status field has been reset. -func (r *BucketReconciler) resetStatus(bucket sourcev1.Bucket) (sourcev1.Bucket, bool) { - // We do not have an artifact, or it does no longer exist - if bucket.GetArtifact() == nil || !r.Storage.ArtifactExist(*bucket.GetArtifact()) { - bucket = sourcev1.BucketProgressing(bucket) - bucket.Status.Artifact = nil - return bucket, true - } - if bucket.Generation != bucket.Status.ObservedGeneration { - return sourcev1.BucketProgressing(bucket), true - } - return bucket, false +// eventLogf records event and logs at the same time. +func (r *BucketReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + r.annotatedEventLogf(ctx, obj, nil, eventType, reason, messageFmt, args...) } -// gc performs a garbage collection for the given v1beta1.Bucket. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. 
-func (r *BucketReconciler) gc(bucket sourcev1.Bucket) error { - if !bucket.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), "", "*")) - } - if bucket.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*bucket.GetArtifact()) - } - return nil -} - -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *BucketReconciler) event(ctx context.Context, bucket sourcev1.Bucket, severity, msg string) { - log := ctrl.LoggerFrom(ctx) - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&bucket, "Normal", severity, msg) - } - if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } - } -} - -func (r *BucketReconciler) recordReadiness(ctx context.Context, bucket sourcev1.Bucket) { - log := ctrl.LoggerFrom(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !bucket.DeletionTimestamp.IsZero()) +// annotatedEventLogf records annotated event and logs at the same time. This +// log is different from the debug log in the event recorder in the sense that +// this is a simple log, the event recorder debug log contains complete details +// about the event. +func (r *BucketReconciler) annotatedEventLogf(ctx context.Context, + obj runtime.Object, annotations map[string]string, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) 
+ // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !bucket.DeletionTimestamp.IsZero()) + ctrl.LoggerFrom(ctx).Info(msg) } -} - -func (r *BucketReconciler) recordSuspension(ctx context.Context, bucket sourcev1.Bucket) { - if r.MetricsRecorder == nil { - return - } - log := ctrl.LoggerFrom(ctx) - - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to record suspended metric") - return - } - - if !bucket.DeletionTimestamp.IsZero() { - r.MetricsRecorder.RecordSuspend(*objRef, false) - } else { - r.MetricsRecorder.RecordSuspend(*objRef, bucket.Spec.Suspend) - } -} - -func (r *BucketReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.BucketStatus) error { - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { - return err - } - - patch := client.MergeFrom(bucket.DeepCopy()) - bucket.Status = newStatus - - return r.Status().Patch(ctx, &bucket, patch) + r.AnnotatedEventf(obj, annotations, eventType, reason, msg) } diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 01ff20d8..39ef7086 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -17,59 +17,1043 @@ limitations under the License. package controllers import ( + "context" + "crypto/md5" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" "os" + "path" "path/filepath" + "strings" "testing" + "time" + + "github.com/darkowlzz/controller-check/status" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + . 
"github.com/onsi/gomega" + raw "google.golang.org/api/storage/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" ) -func TestBucketReconciler_checksum(t *testing.T) { +// Environment variable to set the GCP Storage host for the GCP client. +const ENV_GCP_STORAGE_HOST = "STORAGE_EMULATOR_HOST" + +func TestBucketReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + s3Server := newS3Server("test-bucket") + s3Server.Objects = []*s3MockObject{ + { + Key: "test.yaml", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + } + s3Server.Start() + defer s3Server.Stop() + + g.Expect(s3Server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(s3Server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + }, + } + g.Expect(testEnv.Create(ctx, secret)).To(Succeed()) + defer testEnv.Delete(ctx, secret) + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + Provider: "generic", + BucketName: s3Server.BucketName, + Endpoint: u.Host, + Insecure: true, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + SecretRef: &meta.LocalObjectReference{ + Name: secret.Name, + }, + }, + } + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, 
Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for Bucket to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) || obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + // Check if the object status is valid. + condns := &status.Conditions{NegativePolarity: bucketReadyConditions.NegativePolarity} + checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) + checker.CheckErr(ctx, obj) + + // kstatus client conformance check. + uo, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(uo) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. 
+ patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for Bucket to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestBucketReconciler_reconcileStorage(t *testing.T) { tests := []struct { - name string - beforeFunc func(root string) - want string - wantErr bool + name string + beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error + want sreconcile.Result + wantErr bool + assertArtifact *sourcev1.Artifact + assertConditions []metav1.Condition + assertPaths []string }{ { - name: "empty root", - want: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + name: "garbage collects", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + revisions := []string{"a", "b", "c"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0644); err != nil { + return err + } + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/c.txt", + Revision: "c", + Checksum: "2e7d2c03a9507ae265ecf5b5356885a53393a2029d241394997265a1a25aefc6", + URL: testStorage.Hostname + 
"/reconcile-storage/c.txt", + }, + assertPaths: []string{ + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, }, { - name: "with file", - beforeFunc: func(root string) { - mockFile(root, "a/b/c.txt", "a dummy string") + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/invalid.txt"), + Revision: "d", + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"), }, - want: "309a5e6e96b4a7eea0d1cfaabf1be8ec1c063fa0", }, { - name: "with file in different path", - beforeFunc: func(root string) { - mockFile(root, "a/b.txt", "a dummy string") + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/hostname.txt"), + Revision: "f", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0644); err != nil { + return err + } + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", }, - want: "e28c62b5cc488849950c4355dddc5523712616d4", 
}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - root, err := os.MkdirTemp("", "bucket-checksum-") - if err != nil { - t.Fatal(err) + g := NewWithT(t) + + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, } - defer os.RemoveAll(root) if tt.beforeFunc != nil { - tt.beforeFunc(root) + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) } - got, err := (&BucketReconciler{}).checksum(root) - if (err != nil) != tt.wantErr { - t.Errorf("checksum() error = %v, wantErr %v", err, tt.wantErr) - return + + index := make(etagIndex) + var artifact sourcev1.Artifact + + got, err := r.reconcileStorage(context.TODO(), obj, index, &artifact, "") + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) } - if got != tt.want { - t.Errorf("checksum() got = %v, want %v", got, tt.want) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) } }) } } +func TestBucketReconciler_reconcileMinioSource(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*s3MockObject + middleware http.Handler + secret *corev1.Secret + beforeFunc func(obj *sourcev1.Bucket) + want sreconcile.Result + wantErr bool + assertArtifact sourcev1.Artifact + assertConditions []metav1.Condition + }{ + { + name: "reconciles source", + bucketName: "dummy", + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + 
ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + }, + }, + // TODO(hidde): middleware for mock server + //{ + // name: "authenticates using secretRef", + // bucketName: "dummy", + //}, + { + name: "observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), + }, + }, + { + name: "observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to construct S3 client: invalid 'dummy' secret data: required fields"), + }, + }, + { + name: "observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' does not exist"), + }, + }, + { + name: "transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to verify existence of bucket 'unavailable'"), + }, + }, + { + // TODO(hidde): test the lesser happy paths + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*s3MockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100.tar.gz", + Revision: "94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + }, + }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "included/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*s3MockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), 
+ ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", + Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + }, + }, + { + name: "up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + } + }, + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{}, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") + }, + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: 
[]byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: builder.Build(), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-source-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + var server *s3MockServer + if tt.bucketName != "" { + server = newS3Server(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + obj.Spec.BucketName = tt.bucketName + obj.Spec.Endpoint = u.Host + // TODO(hidde): also test TLS + obj.Spec.Insecure = true + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + artifact := &sourcev1.Artifact{} + index := make(etagIndex) + 
got, err := r.reconcileSource(context.TODO(), obj, index, artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestBucketReconciler_reconcileGCPSource(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*gcpMockObject + secret *corev1.Secret + beforeFunc func(obj *sourcev1.Bucket) + want sreconcile.Result + wantErr bool + assertArtifact sourcev1.Artifact + assertConditions []metav1.Condition + }{ + { + name: "reconciles source", + bucketName: "dummy", + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + "serviceaccount": []byte("testsa"), + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + }, + }, + { + name: "observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: 
"dummy", + } + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), + }, + }, + { + name: "observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to construct GCP client: invalid 'dummy' secret data: required fields"), + }, + }, + { + name: "observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' does not exist"), + }, + }, + { + name: "transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to verify existence of bucket 'unavailable'"), + }, + }, + { + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*gcpMockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "included/file.txt", + Content: 
[]byte("included/file.txt"), + ContentType: "text/plain", + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4.tar.gz", + Revision: "7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + }, + }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "included/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*gcpMockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", + Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + }, + }, + { + name: "up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + 
obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + } + }, + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{}, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") + }, + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + }, + }, + want: sreconcile.ResultSuccess, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + }, + }, + // TODO: Middleware for mock server to test authentication using secret. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: builder.Build(), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-source-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + // Test bucket object. + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + BucketName: tt.bucketName, + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GoogleBucketProvider, + }, + } + + // Set up the mock GCP bucket server. + server := newGCPServer(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + + obj.Spec.Endpoint = server.HTTPAddress() + obj.Spec.Insecure = true + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + // Set the GCP storage host to be used by the GCP client. + g.Expect(os.Setenv(ENV_GCP_STORAGE_HOST, obj.Spec.Endpoint)).ToNot(HaveOccurred()) + defer func() { + g.Expect(os.Unsetenv(ENV_GCP_STORAGE_HOST)).ToNot(HaveOccurred()) + }() + + artifact := &sourcev1.Artifact{} + index := make(etagIndex) + got, err := r.reconcileSource(context.TODO(), obj, index, artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestBucketReconciler_reconcileArtifact(t *testing.T) { + // testChecksum is the checksum value of the artifacts created in this + // test. 
+ const testChecksum = "4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1" + + tests := []struct { + name string + beforeFunc func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Archiving artifact to storage makes Ready=True", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Status.Artifact = artifact.DeepCopy() + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating a new artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + 
*conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, + }, + { + name: "Dir path deleted", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, + }, + { + name: "Dir path is not a directory", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + // Remove the given directory and create a file for the same + // path. 
+ t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + f, err := os.Create(dir) + defer f.Close() + t.Expect(err).ToNot(HaveOccurred()) + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-artifact-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-bucket-", + Generation: 1, + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + index := make(etagIndex) + artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz") + artifact.Checksum = testChecksum + + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj, artifact, tmpDir) + } + + got, err := r.reconcileArtifact(context.TODO(), obj, index, &artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + // On error, artifact is empty. Check artifacts only on successful + // reconcile. 
+ if !tt.wantErr { + g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy())) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj, tmpDir) + } + }) + } +} + +func Test_etagIndex_Revision(t *testing.T) { + tests := []struct { + name string + list etagIndex + want string + wantErr bool + }{ + { + name: "index with items", + list: map[string]string{ + "one": "one", + "two": "two", + "three": "three", + }, + want: "8afaa9c32d7c187e8acaeffe899226011001f67c095519cdd8b4c03487c5b8bc", + }, + { + name: "index with items in different order", + list: map[string]string{ + "three": "three", + "one": "one", + "two": "two", + }, + want: "8afaa9c32d7c187e8acaeffe899226011001f67c095519cdd8b4c03487c5b8bc", + }, + { + name: "empty index", + list: map[string]string{}, + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + { + name: "nil index", + list: nil, + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.list.Revision() + if (err != nil) != tt.wantErr { + t.Errorf("revision() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("revision() got = %v, want %v", got, tt.want) + } + }) + } +} + +// helpers + func mockFile(root, path, content string) error { filePath := filepath.Join(root, path) if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil { @@ -80,3 +1064,264 @@ func mockFile(root, path, content string) error { } return nil } + +type s3MockObject struct { + Key string + LastModified time.Time + ContentType string + Content []byte +} + +type s3MockServer struct { + srv *httptest.Server + mux *http.ServeMux + + BucketName string + Objects []*s3MockObject +} + +func newS3Server(bucketName string) *s3MockServer { + s := &s3MockServer{BucketName: bucketName} + s.mux = http.NewServeMux() + 
s.mux.Handle(fmt.Sprintf("/%s/", s.BucketName), http.HandlerFunc(s.handler)) + + s.srv = httptest.NewUnstartedServer(s.mux) + + return s +} + +func (s *s3MockServer) Start() { + s.srv.Start() +} + +func (s *s3MockServer) Stop() { + s.srv.Close() +} + +func (s *s3MockServer) HTTPAddress() string { + return s.srv.URL +} + +func (s *s3MockServer) handler(w http.ResponseWriter, r *http.Request) { + key := path.Base(r.URL.Path) + + switch key { + case s.BucketName: + w.Header().Add("Content-Type", "application/xml") + + if r.Method == http.MethodHead { + return + } + + q := r.URL.Query() + + if q["location"] != nil { + fmt.Fprint(w, ` + +Europe + `) + return + } + + contents := "" + for _, o := range s.Objects { + etag := md5.Sum(o.Content) + contents += fmt.Sprintf(` + + %s + %s + %d + "%b" + STANDARD + `, o.Key, o.LastModified.UTC().Format(time.RFC3339), len(o.Content), etag) + } + + fmt.Fprintf(w, ` + + + %s + + + %d + 1000 + false + %s + + `, s.BucketName, len(s.Objects), contents) + default: + key, err := filepath.Rel("/"+s.BucketName, r.URL.Path) + if err != nil { + w.WriteHeader(500) + return + } + + var found *s3MockObject + for _, o := range s.Objects { + if key == o.Key { + found = o + } + } + if found == nil { + w.WriteHeader(404) + return + } + + etag := md5.Sum(found.Content) + lastModified := strings.Replace(found.LastModified.UTC().Format(time.RFC1123), "UTC", "GMT", 1) + + w.Header().Add("Content-Type", found.ContentType) + w.Header().Add("Last-Modified", lastModified) + w.Header().Add("ETag", fmt.Sprintf("\"%b\"", etag)) + w.Header().Add("Content-Length", fmt.Sprintf("%d", len(found.Content))) + + if r.Method == http.MethodHead { + return + } + + w.Write(found.Content) + } +} + +type gcpMockObject struct { + Key string + ContentType string + Content []byte +} + +type gcpMockServer struct { + srv *httptest.Server + mux *http.ServeMux + + BucketName string + Etag string + Objects []*gcpMockObject + Close func() +} + +func newGCPServer(bucketName string) 
*gcpMockServer { + s := &gcpMockServer{BucketName: bucketName} + s.mux = http.NewServeMux() + s.mux.Handle("/", http.HandlerFunc(s.handler)) + + s.srv = httptest.NewUnstartedServer(s.mux) + + return s +} + +func (gs *gcpMockServer) Start() { + gs.srv.Start() +} + +func (gs *gcpMockServer) Stop() { + gs.srv.Close() +} + +func (gs *gcpMockServer) HTTPAddress() string { + return gs.srv.URL +} + +func (gs *gcpMockServer) GetAllObjects() *raw.Objects { + objs := &raw.Objects{} + for _, o := range gs.Objects { + objs.Items = append(objs.Items, getGCPObject(gs.BucketName, *o)) + } + return objs +} + +func (gs *gcpMockServer) GetObjectFile(key string) ([]byte, error) { + for _, o := range gs.Objects { + if o.Key == key { + return o.Content, nil + } + } + return nil, fmt.Errorf("not found") +} + +func (gs *gcpMockServer) handler(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.RequestURI, "/b/") { + // Handle the bucket info related queries. + if r.RequestURI == fmt.Sprintf("/b/%s?alt=json&prettyPrint=false&projection=full", gs.BucketName) { + // Return info about the bucket. + response := getGCPBucket(gs.BucketName, gs.Etag) + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } else if strings.Contains(r.RequestURI, "/o/") { + // Return info about object in the bucket. + var obj *gcpMockObject + for _, o := range gs.Objects { + // The object key in the URI is escaped. 
+ // e.g.: /b/dummy/o/included%2Ffile.txt?alt=json&prettyPrint=false&projection=full + if r.RequestURI == fmt.Sprintf("/b/%s/o/%s?alt=json&prettyPrint=false&projection=full", gs.BucketName, url.QueryEscape(o.Key)) { + obj = o + } + } + if obj != nil { + response := getGCPObject(gs.BucketName, *obj) + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } + w.WriteHeader(404) + return + } else if strings.Contains(r.RequestURI, "/o?") { + // Return info about all the objects in the bucket. + response := gs.GetAllObjects() + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } + w.WriteHeader(404) + return + } else { + // Handle object file query. + bucketPrefix := fmt.Sprintf("/%s/", gs.BucketName) + if strings.HasPrefix(r.RequestURI, bucketPrefix) { + // The URL path is of the format /<bucket-name>/included/file.txt. + // Extract the object key by discarding the bucket prefix. + key := strings.TrimPrefix(r.URL.Path, bucketPrefix) + // Handle returning object file in a bucket. 
+ response, err := gs.GetObjectFile(key) + if err != nil { + w.WriteHeader(404) + return + } + w.WriteHeader(200) + w.Write(response) + return + } + w.WriteHeader(404) + return + } +} + +func getGCPObject(bucket string, obj gcpMockObject) *raw.Object { + return &raw.Object{ + Bucket: bucket, + Name: obj.Key, + ContentType: obj.ContentType, + } +} + +func getGCPBucket(name, eTag string) *raw.Bucket { + return &raw.Bucket{ + Name: name, + Location: "loc", + Etag: eTag, + } +} diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 7642a161..53a9da69 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -18,20 +18,18 @@ package controllers import ( "context" + "errors" "fmt" "os" - "path/filepath" "strings" "time" securejoin "github.com/cyphar/filepath-securejoin" + "github.com/fluxcd/pkg/runtime/logger" corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -40,16 +38,52 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" + 
"github.com/fluxcd/source-controller/internal/util" "github.com/fluxcd/source-controller/pkg/git" "github.com/fluxcd/source-controller/pkg/git/strategy" "github.com/fluxcd/source-controller/pkg/sourceignore" ) +// gitRepoReadyConditions contains all the conditions information needed +// for GitRepository Ready status conditions summary calculation. +var gitRepoReadyConditions = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.SourceVerifiedCondition, + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.IncludeUnavailableCondition, + sourcev1.SourceVerifiedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.FetchFailedCondition, + sourcev1.IncludeUnavailableCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, +} + // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/finalizers,verbs=get;create;update;patch;delete @@ -58,12 +92,13 @@ import ( // GitRepositoryReconciler reconciles a GitRepository object type GitRepositoryReconciler struct { client.Client - requeueDependency time.Duration - Scheme *runtime.Scheme - Storage *Storage - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder + kuberecorder.EventRecorder + helper.Metrics + + Storage *Storage + ControllerName string + + requeueDependency time.Duration } type GitRepositoryReconcilerOptions struct { @@ 
-71,6 +106,10 @@ type GitRepositoryReconcilerOptions struct { DependencyRequeueInterval time.Duration } +// gitRepoReconcilerFunc is the function type for all the Git repository +// reconciler functions. +type gitRepoReconcilerFunc func(ctx context.Context, obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) + func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, GitRepositoryReconcilerOptions{}) } @@ -86,409 +125,531 @@ func (r *GitRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, o Complete(r) } -func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) - var repository sourcev1.GitRepository - if err := r.Get(ctx, req.NamespacedName, &repository); err != nil { + // Fetch the GitRepository + obj := &sourcev1.GitRepository{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, repository) + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&repository, sourcev1.SourceFinalizer) { - patch := client.MergeFrom(repository.DeepCopy()) - controllerutil.AddFinalizer(&repository, sourcev1.SourceFinalizer) - if err := r.Patch(ctx, &repository, patch); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !repository.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, repository) - } - - // Return early if the object is suspended. 
- if repository.Spec.Suspend { - log.Info("Reconciliation is suspended for this object") + // Return early if the object is suspended + if obj.Spec.Suspend { + log.Info("reconciliation is suspended for this object") return ctrl.Result{}, nil } - // check dependencies - if len(repository.Spec.Include) > 0 { - if err := r.checkDependencies(repository); err != nil { - repository = sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()) - if err := r.updateStatus(ctx, req, repository.Status); err != nil { - log.Error(err, "unable to update status for dependency not ready") - return ctrl.Result{Requeue: true}, err - } - // we can't rely on exponential backoff because it will prolong the execution too much, - // instead we requeue on a fix interval. - msg := fmt.Sprintf("Dependencies do not meet ready condition, retrying in %s", r.requeueDependency.String()) - log.Info(msg) - r.event(ctx, repository, events.EventSeverityInfo, msg) - r.recordReadiness(ctx, repository) - return ctrl.Result{RequeueAfter: r.requeueDependency}, nil - } - log.Info("All dependencies area ready, proceeding with reconciliation") - } - - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) - } - - // set initial status - if resetRepository, ok := r.resetStatus(repository); ok { - repository = resetRepository - if err := r.updateStatus(ctx, req, repository.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, repository) - } - - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. 
- if v, ok := meta.ReconcileAnnotationValue(repository.GetAnnotations()); ok { - repository.Status.SetLastHandledReconcileRequest(v) - } - - // purge old artifacts from storage - if err := r.gc(repository); err != nil { - log.Error(err, "unable to purge old artifacts") - } - - // reconcile repository by pulling the latest Git commit - reconciledRepository, reconcileErr := r.reconcile(ctx, *repository.DeepCopy()) - - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledRepository.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - - // if reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledRepository, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledRepository) - return ctrl.Result{Requeue: true}, reconcileErr - } - - // emit revision change event - if repository.Status.Artifact == nil || reconciledRepository.Status.Artifact.Revision != repository.Status.Artifact.Revision { - r.event(ctx, reconciledRepository, events.EventSeverityInfo, sourcev1.GitRepositoryReadyMessage(reconciledRepository)) - } - r.recordReadiness(ctx, reconciledRepository) - - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Since(start).String(), - repository.GetInterval().Duration.String(), - )) - - return ctrl.Result{RequeueAfter: repository.GetInterval().Duration}, nil -} - -func (r *GitRepositoryReconciler) checkDependencies(repository sourcev1.GitRepository) error { - for _, d := range repository.Spec.Include { - dName := types.NamespacedName{Name: d.GitRepositoryRef.Name, Namespace: repository.Namespace} - var gr sourcev1.GitRepository - err := r.Get(context.Background(), dName, &gr) - if err != nil { - return fmt.Errorf("unable to get '%s' dependency: %w", dName, err) - } - - if len(gr.Status.Conditions) == 0 || gr.Generation != gr.Status.ObservedGeneration { - 
return fmt.Errorf("dependency '%s' is not ready", dName) - } - - if !apimeta.IsStatusConditionTrue(gr.Status.Conditions, meta.ReadyCondition) { - return fmt.Errorf("dependency '%s' is not ready", dName) - } - } - - return nil -} - -func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sourcev1.GitRepository) (sourcev1.GitRepository, error) { - log := ctrl.LoggerFrom(ctx) - - // create tmp dir for the Git clone - tmpGit, err := os.MkdirTemp("", repository.Name) + // Initialize the patch helper with the current version of the object. + patchHelper, err := patch.NewHelper(obj, r.Client) if err != nil { - err = fmt.Errorf("tmp dir error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err + return ctrl.Result{}, err } + + // recResult stores the abstracted reconcile result. + var recResult sreconcile.Result + + // Always attempt to patch the object and status after each reconciliation + // NOTE: The final runtime result and error are set in this block. defer func() { - if err := os.RemoveAll(tmpGit); err != nil { - log.Error(err, "failed to remove working directory", "path", tmpGit) + summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(gitRepoReadyConditions), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.RecordContextualError, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), + summarize.WithPatchFieldOwner(r.ControllerName), } + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
+ + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) }() - // Configure auth options using secret - var authOpts *git.AuthOptions - if repository.Spec.SecretRef != nil { - name := types.NamespacedName{ - Namespace: repository.GetNamespace(), - Name: repository.Spec.SecretRef.Name, - } + // Add finalizer first if not exist to avoid the race condition + // between init and delete + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue + return + } - secret := &corev1.Secret{} - err = r.Client.Get(ctx, name, secret) - if err != nil { - err = fmt.Errorf("auth secret error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.AuthenticationFailedReason, err.Error()), err - } + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + recResult, retErr = r.reconcileDelete(ctx, obj) + return + } - authOpts, err = git.AuthOptionsFromSecret(repository.Spec.URL, secret) - if err != nil { - return sourcev1.GitRepositoryNotReady(repository, sourcev1.AuthenticationFailedReason, err.Error()), err + // Reconcile actual object + reconcilers := []gitRepoReconcilerFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileInclude, + r.reconcileArtifact, + } + recResult, retErr = r.reconcile(ctx, obj, reconcilers) + return +} + +// reconcile steps iterates through the actual reconciliation tasks for objec, +// it returns early on the first step that returns ResultRequeue or produces an +// error. 
+func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.GitRepository, reconcilers []gitRepoReconcilerFunc) (sreconcile.Result, error) { + if obj.Generation != obj.Status.ObservedGeneration { + conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) + } + + var commit git.Commit + var includes artifactSet + + // Create temp dir for Git clone + tmpDir, err := util.TempDirForObj("", obj) + if err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create temporary directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, } } - checkoutOpts := git.CheckoutOptions{RecurseSubmodules: repository.Spec.RecurseSubmodules} - if ref := repository.Spec.Reference; ref != nil { + defer os.RemoveAll(tmpDir) + + // Run the sub-reconcilers and build the result of reconciliation. + var res sreconcile.Result + var resErr error + for _, rec := range reconcilers { + recResult, err := rec(ctx, obj, &commit, &includes, tmpDir) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil + } + // If an error is received, prioritize the returned results because an + // error also means immediate requeue. + if err != nil { + resErr = err + res = recResult + break + } + // Prioritize requeue request in the result. + res = sreconcile.LowestRequeuingResult(res, recResult) + } + return res, resErr +} + +// reconcileStorage ensures the current state of the storage matches the desired and previously observed state. +// +// All artifacts for the resource except for the current one are garbage collected from the storage. +// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. +// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. 
+// If the hostname of any of the URLs on the object do not match the current storage server hostname, they are updated. +func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, + obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + // Determine if the advertised artifact is still in storage + if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") + return sreconcile.ResultSuccess, nil + } + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return sreconcile.ResultSuccess, nil +} + +// reconcileSource ensures the upstream Git repository can be reached and checked out using the declared configuration, +// and observes its state. +// +// The repository is checked out to the given dir using the defined configuration, and in case of an error during the +// checkout process (including transient errors), it records v1beta1.FetchFailedCondition=True and returns early. +// On a successful checkout it removes v1beta1.FetchFailedCondition, and compares the current revision of HEAD to the +// artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. +// If instructed, the signature of the commit is verified if and recorded as v1beta1.SourceVerifiedCondition. If the +// signature can not be verified or the verification fails, the Condition=False and it returns early. 
+// If both the checkout and signature verification are successful, the given artifact pointer is set to a new artifact +// with the available metadata. +func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, + obj *sourcev1.GitRepository, commit *git.Commit, _ *artifactSet, dir string) (sreconcile.Result, error) { + // Configure authentication strategy to access the source + var authOpts *git.AuthOptions + var err error + if obj.Spec.SecretRef != nil { + // Attempt to retrieve secret + name := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, + } + var secret corev1.Secret + if err := r.Client.Get(ctx, name, &secret); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to get secret '%s': %w", name.String(), err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) + // Return error as the world as observed may change + return sreconcile.ResultEmpty, e + } + + // Configure strategy with secret + authOpts, err = git.AuthOptionsFromSecret(obj.Spec.URL, &secret) + } else { + // Set the minimal auth options for valid transport. 
+ authOpts, err = git.AuthOptionsWithoutSecret(obj.Spec.URL) + } + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to configure auth strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) + // Return error as the contents of the secret may change + return sreconcile.ResultEmpty, e + } + + // Configure checkout strategy + checkoutOpts := git.CheckoutOptions{RecurseSubmodules: obj.Spec.RecurseSubmodules} + if ref := obj.Spec.Reference; ref != nil { checkoutOpts.Branch = ref.Branch checkoutOpts.Commit = ref.Commit checkoutOpts.Tag = ref.Tag checkoutOpts.SemVer = ref.SemVer } checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(ctx, - git.Implementation(repository.Spec.GitImplementation), checkoutOpts) + git.Implementation(obj.Spec.GitImplementation), checkoutOpts) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, sourcev1.GitOperationFailedReason, err.Error()), err + e := &serror.Stalling{ + Err: fmt.Errorf("failed to configure checkout strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err), + Reason: sourcev1.GitOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, e.Err.Error()) + // Do not return err as recovery without changes is impossible + return sreconcile.ResultEmpty, e } - gitCtx, cancel := context.WithTimeout(ctx, repository.Spec.Timeout.Duration) + // Checkout HEAD of reference in object + gitCtx, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - - commit, err := checkoutStrategy.Checkout(gitCtx, tmpGit, repository.Spec.URL, authOpts) + c, err := checkoutStrategy.Checkout(gitCtx, dir, obj.Spec.URL, authOpts) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, sourcev1.GitOperationFailedReason, 
err.Error()), err - } - artifact := r.Storage.NewArtifactFor(repository.Kind, repository.GetObjectMeta(), commit.String(), fmt.Sprintf("%s.tar.gz", commit.Hash.String())) - - // copy all included repository into the artifact - includedArtifacts := []*sourcev1.Artifact{} - for _, incl := range repository.Spec.Include { - dName := types.NamespacedName{Name: incl.GitRepositoryRef.Name, Namespace: repository.Namespace} - var gr sourcev1.GitRepository - err := r.Get(context.Background(), dName, &gr) - if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to checkout and determine revision: %w", err), + Reason: sourcev1.GitOperationFailedReason, } - includedArtifacts = append(includedArtifacts, gr.GetArtifact()) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, e.Err.Error()) + // Coin flip on transient or persistent error, return error and hope for the best + return sreconcile.ResultEmpty, e + } + // Assign the commit to the shared commit reference. 
+ *commit = *c + ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info("git repository checked out", "url", obj.Spec.URL, "revision", commit.String()) + conditions.Delete(obj, sourcev1.FetchFailedCondition) + + // Verify commit signature + if result, err := r.verifyCommitSignature(ctx, obj, *commit); err != nil || result == sreconcile.ResultEmpty { + return result, err } - // return early on unchanged revision and unchanged included repositories - if apimeta.IsStatusConditionTrue(repository.Status.Conditions, meta.ReadyCondition) && repository.GetArtifact().HasRevision(artifact.Revision) && !hasArtifactUpdated(repository.Status.IncludedArtifacts, includedArtifacts) { - if artifact.URL != repository.GetArtifact().URL { - r.Storage.SetArtifactURL(repository.GetArtifact()) - repository.Status.URL = r.Storage.SetHostname(repository.Status.URL) + // Mark observations about the revision on the object + if !obj.GetArtifact().HasRevision(commit.String()) { + message := fmt.Sprintf("new upstream revision '%s'", commit.String()) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) + } + return sreconcile.ResultSuccess, nil +} + +// reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the +// given data. +// +// The inspection of the given data to the object is differed, ensuring any stale observations as +// v1beta1.ArtifactUnavailableCondition and v1beta1.ArtifactOutdatedCondition are always deleted. +// If the given artifact and/or includes do not differ from the object's current, it returns early. +// Source ignore patterns are loaded, and the given directory is archived. +// On a successful archive, the artifact and includes in the status of the given object are set, and the symlink in the +// storage is updated to its path. 
+func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, + obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { + // Create potential new artifact with current available metadata + artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), commit.String(), fmt.Sprintf("%s.tar.gz", commit.Hash.String())) + + // Always restore the Ready condition in case it got removed due to a transient error + defer func() { + if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, + "stored artifact for revision '%s'", artifact.Revision) } - return repository, nil + }() + + // The artifact is up-to-date + if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) { + ctrl.LoggerFrom(ctx).Info("artifact up-to-date", "revision", artifact.Revision) + return sreconcile.ResultSuccess, nil } - // verify PGP signature - if repository.Spec.Verification != nil { - publicKeySecret := types.NamespacedName{ - Namespace: repository.Namespace, - Name: repository.Spec.Verification.SecretRef.Name, - } - secret := &corev1.Secret{} - if err := r.Client.Get(ctx, publicKeySecret, secret); err != nil { - err = fmt.Errorf("PGP public keys secret error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.VerificationFailedReason, err.Error()), err - } + // Mark reconciling because the artifact and remote source are different. + // and they have to be reconciled. 
+ conditions.MarkReconciling(obj, "NewRevision", "new upstream revision '%s'", artifact.Revision) - var keyRings []string - for _, v := range secret.Data { - keyRings = append(keyRings, string(v)) + // Ensure target path exists and is a directory + if f, err := os.Stat(dir); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to stat target path: %w", err), + Reason: sourcev1.StorageOperationFailedReason, } - if _, err = commit.Verify(keyRings...); err != nil { - return sourcev1.GitRepositoryNotReady(repository, sourcev1.VerificationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, e + } else if !f.IsDir() { + e := &serror.Event{ + Err: fmt.Errorf("invalid target path: '%s' is not a directory", dir), + Reason: sourcev1.StorageOperationFailedReason, } + return sreconcile.ResultEmpty, e } - // create artifact dir - err = r.Storage.MkdirAll(artifact) - if err != nil { - err = fmt.Errorf("mkdir dir error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - for i, incl := range repository.Spec.Include { - toPath, err := securejoin.SecureJoin(tmpGit, incl.GetToPath()) - if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err - } - err = r.Storage.CopyToPath(includedArtifacts[i], incl.GetFromPath(), toPath) - if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + // Ensure artifact directory exists and acquire lock + if err := r.Storage.MkdirAll(artifact); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to create artifact directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, } + return sreconcile.ResultEmpty, e } - - // acquire lock unlock, err := r.Storage.Lock(artifact) if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.GitRepositoryNotReady(repository, 
sourcev1.StorageOperationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), + Reason: meta.FailedReason, + } } defer unlock() - // archive artifact and check integrity - ignoreDomain := strings.Split(tmpGit, string(filepath.Separator)) - ps, err := sourceignore.LoadIgnorePatterns(tmpGit, ignoreDomain) + // Load ignore rules for archiving + ps, err := sourceignore.LoadIgnorePatterns(dir, nil) if err != nil { - err = fmt.Errorf(".sourceignore error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to load source ignore patterns from repository: %w", err), + Reason: "SourceIgnoreError", + } } - if repository.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*repository.Spec.Ignore), ignoreDomain)...) - } - if err := r.Storage.Archive(&artifact, tmpGit, SourceIgnoreFilter(ps, ignoreDomain)); err != nil { - err = fmt.Errorf("storage archive error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) 
} - // update latest symlink + // Archive directory to storage + if err := r.Storage.Archive(&artifact, dir, SourceIgnoreFilter(ps, nil)); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("unable to archive artifact to storage: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + } + r.AnnotatedEventf(obj, map[string]string{ + "revision": artifact.Revision, + "checksum": artifact.Checksum, + }, corev1.EventTypeNormal, "NewArtifact", "stored artifact for commit '%s'", commit.ShortMessage()) + + // Record it on the object + obj.Status.Artifact = artifact.DeepCopy() + obj.Status.IncludedArtifacts = *includes + + // Update symlink on a "best effort" basis url, err := r.Storage.Symlink(artifact, "latest.tar.gz") if err != nil { - err = fmt.Errorf("storage symlink error: %w", err) - return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err + r.eventLogf(ctx, obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "failed to update status URL symlink: %s", err) } - - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - return sourcev1.GitRepositoryReady(repository, artifact, includedArtifacts, url, sourcev1.GitOperationSucceedReason, message), nil + if url != "" { + obj.Status.URL = url + } + return sreconcile.ResultSuccess, nil } -func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, repository sourcev1.GitRepository) (ctrl.Result, error) { - if err := r.gc(repository); err != nil { - r.event(ctx, repository, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) +// reconcileInclude reconciles the declared includes from the object by copying their artifact (sub)contents to the +// declared paths in the given directory. +// +// If an include is unavailable, it marks the object with v1beta1.IncludeUnavailableCondition and returns early. 
+// If the copy operations are successful, it deletes the v1beta1.IncludeUnavailableCondition from the object. +// If the artifactSet differs from the current set, it marks the object with v1beta1.ArtifactOutdatedCondition. +func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context, + obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) { + artifacts := make(artifactSet, len(obj.Spec.Include)) + for i, incl := range obj.Spec.Include { + // Do this first as it is much cheaper than copy operations + toPath, err := securejoin.SecureJoin(dir, incl.GetToPath()) + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("path calculation for include '%s' failed: %w", incl.GitRepositoryRef.Name, err), + Reason: "IllegalPath", + } + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "IllegalPath", e.Err.Error()) + return sreconcile.ResultEmpty, e + } + + // Retrieve the included GitRepository + dep := &sourcev1.GitRepository{} + if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("could not get resource for include '%s': %w", incl.GitRepositoryRef.Name, err), + Reason: "NotFound", + } + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NotFound", e.Err.Error()) + return sreconcile.ResultEmpty, err + } + + // Confirm include has an artifact + if dep.GetArtifact() == nil { + e := &serror.Stalling{ + Err: fmt.Errorf("no artifact available for include '%s'", incl.GitRepositoryRef.Name), + Reason: "NoArtifact", + } + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", e.Err.Error()) + return sreconcile.ResultEmpty, e + } + + // Copy artifact (sub)contents to configured directory + if err := r.Storage.CopyToPath(dep.GetArtifact(), incl.GetFromPath(), toPath); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to copy '%s' include from %s to %s: 
%w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err), + Reason: "CopyFailure", + } + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "CopyFailure", e.Err.Error()) + return sreconcile.ResultEmpty, e + } + artifacts[i] = dep.GetArtifact().DeepCopy() + } + + // We now know all includes are available + conditions.Delete(obj, sourcev1.IncludeUnavailableCondition) + + // Observe if the artifacts still match the previous included ones + if artifacts.Diff(obj.Status.IncludedArtifacts) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", + "included artifacts differ from last observed includes") + } + + // Persist the artifactSet. + *includes = artifacts + return sreconcile.ResultSuccess, nil +} + +// reconcileDelete handles the delete of an object. It first garbage collects all artifacts for the object from the +// artifact storage, if successful, the finalizer is removed from the object. +func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.GitRepository) (sreconcile.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } - // Record deleted status - r.recordReadiness(ctx, repository) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&repository, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &repository); err != nil { - return ctrl.Result{}, err - } + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil + return sreconcile.ResultEmpty, nil } -// resetStatus returns a modified v1beta1.GitRepository and a boolean indicating -// if the status field has been reset. 
-func (r *GitRepositoryReconciler) resetStatus(repository sourcev1.GitRepository) (sourcev1.GitRepository, bool) { - // We do not have an artifact, or it does no longer exist - if repository.GetArtifact() == nil || !r.Storage.ArtifactExist(*repository.GetArtifact()) { - repository = sourcev1.GitRepositoryProgressing(repository) - repository.Status.Artifact = nil - return repository, true +// verifyCommitSignature verifies the signature of the given commit if a verification mode is configured on the object. +func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj *sourcev1.GitRepository, commit git.Commit) (sreconcile.Result, error) { + // Check if there is a commit verification is configured and remove any old observations if there is none + if obj.Spec.Verification == nil || obj.Spec.Verification.Mode == "" { + conditions.Delete(obj, sourcev1.SourceVerifiedCondition) + return sreconcile.ResultSuccess, nil } - if repository.Generation != repository.Status.ObservedGeneration { - return sourcev1.GitRepositoryProgressing(repository), true + + // Get secret with GPG data + publicKeySecret := types.NamespacedName{ + Namespace: obj.Namespace, + Name: obj.Spec.Verification.SecretRef.Name, } - return repository, false + secret := &corev1.Secret{} + if err := r.Client.Get(ctx, publicKeySecret, secret); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("PGP public keys secret error: %w", err), + Reason: "VerificationError", + } + conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, meta.FailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + + var keyRings []string + for _, v := range secret.Data { + keyRings = append(keyRings, string(v)) + } + // Verify commit with GPG data from secret + if _, err := commit.Verify(keyRings...); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("signature verification of commit '%s' failed: %w", commit.Hash.String(), err), + Reason: "InvalidCommitSignature", + } + conditions.MarkFalse(obj, 
sourcev1.SourceVerifiedCondition, meta.FailedReason, e.Err.Error()) + // Return error in the hope the secret changes + return sreconcile.ResultEmpty, e + } + + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, + "verified signature of commit '%s'", commit.Hash.String()) + r.eventLogf(ctx, obj, events.EventTypeTrace, "VerifiedCommit", + "verified signature of commit '%s'", commit.Hash.String()) + return sreconcile.ResultSuccess, nil } -// gc performs a garbage collection for the given v1beta1.GitRepository. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. -func (r *GitRepositoryReconciler) gc(repository sourcev1.GitRepository) error { - if !repository.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(repository.Kind, repository.GetObjectMeta(), "", "*")) +// garbageCollect performs a garbage collection for the given v1beta1.GitRepository. It removes all but the current +// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the +// resource. 
+func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.GitRepository) error { + if !obj.DeletionTimestamp.IsZero() { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + return &serror.Event{ + Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err), + Reason: "GarbageCollectionFailed", + } + } else if deleted != "" { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") + } + obj.Status.Artifact = nil + return nil } - if repository.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*repository.GetArtifact()) + if obj.GetArtifact() != nil { + if deleted, err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + return &serror.Event{ + Err: fmt.Errorf("garbage collection of old artifacts failed: %w", err), + } + } else if len(deleted) > 0 { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected old artifacts") + } } return nil } -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *GitRepositoryReconciler) event(ctx context.Context, repository sourcev1.GitRepository, severity, msg string) { - log := ctrl.LoggerFrom(ctx) - - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&repository, "Normal", severity, msg) - } - if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } - } -} - -func (r *GitRepositoryReconciler) recordReadiness(ctx context.Context, repository sourcev1.GitRepository) { - log := ctrl.LoggerFrom(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := 
reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !repository.DeletionTimestamp.IsZero()) +// eventLog records event and logs at the same time. This log is different from +// the debug log in the event recorder in the sense that this is a simple log, +// the event recorder debug log contains complete details about the event. +func (r *GitRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !repository.DeletionTimestamp.IsZero()) + ctrl.LoggerFrom(ctx).Info(msg) } -} - -func (r *GitRepositoryReconciler) recordSuspension(ctx context.Context, gitrepository sourcev1.GitRepository) { - if r.MetricsRecorder == nil { - return - } - log := ctrl.LoggerFrom(ctx) - - objRef, err := reference.GetReference(r.Scheme, &gitrepository) - if err != nil { - log.Error(err, "unable to record suspended metric") - return - } - - if !gitrepository.DeletionTimestamp.IsZero() { - r.MetricsRecorder.RecordSuspend(*objRef, false) - } else { - r.MetricsRecorder.RecordSuspend(*objRef, gitrepository.Spec.Suspend) - } -} - -func (r *GitRepositoryReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.GitRepositoryStatus) error { - var repository sourcev1.GitRepository - if err := r.Get(ctx, req.NamespacedName, &repository); err != nil { - return err - } - - patch := client.MergeFrom(repository.DeepCopy()) - repository.Status = newStatus - - return 
r.Status().Patch(ctx, &repository, patch) + r.Eventf(obj, eventType, reason, msg) } diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go index a8691c26..1e7028c7 100644 --- a/controllers/gitrepository_controller_test.go +++ b/controllers/gitrepository_controller_test.go @@ -18,753 +18,1490 @@ package controllers import ( "context" - "crypto/tls" "fmt" - "net/http" "net/url" "os" - "os/exec" - "path" "path/filepath" "strings" + "testing" "time" - "github.com/go-git/go-billy/v5/memfs" - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/config" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/transport/client" - httptransport "github.com/go-git/go-git/v5/plumbing/transport/http" - "github.com/go-git/go-git/v5/storage/memory" - . "github.com/onsi/ginkgo" - - . "github.com/onsi/ginkgo/extensions/table" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - + "github.com/darkowlzz/controller-check/status" "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/gittestserver" - "github.com/fluxcd/pkg/untar" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/ssh" + "github.com/fluxcd/pkg/testserver" + "github.com/go-git/go-billy/v5/memfs" + gogit "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/storage/memory" + . 
"github.com/onsi/gomega" + sshtestdata "golang.org/x/crypto/ssh/testdata" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "k8s.io/utils/pointer" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/pkg/git" ) -var _ = Describe("GitRepositoryReconciler", func() { +const ( + encodedCommitFixture = `tree f0c522d8cc4c90b73e2bc719305a896e7e3c108a +parent eb167bc68d0a11530923b1f24b4978535d10b879 +author Stefan Prodan 1633681364 +0300 +committer Stefan Prodan 1633681364 +0300 - const ( - timeout = time.Second * 30 - interval = time.Second * 1 - indexInterval = time.Second * 1 - ) +Update containerd and runc to fix CVEs - Context("GitRepository", func() { - var ( - namespace *corev1.Namespace - gitServer *gittestserver.GitServer - err error - ) +Signed-off-by: Stefan Prodan +` + malformedEncodedCommitFixture = `parent eb167bc68d0a11530923b1f24b4978535d10b879 +author Stefan Prodan 1633681364 +0300 +committer Stefan Prodan 1633681364 +0300 - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "git-repository-test" + randStringRunes(5)}, - } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") +Update containerd and runc to fix CVEs - cert := corev1.Secret{ +Signed-off-by: Stefan Prodan +` + signatureCommitFixture = `-----BEGIN PGP SIGNATURE----- + 
+iHUEABEIAB0WIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCYV//1AAKCRAyma6w5Ahb +r7nJAQCQU4zEJu04/Q0ac/UaL6htjhq/wTDNMeUM+aWG/LcBogEAqFUea1oR2BJQ +JCJmEtERFh39zNWSazQmxPAFhEE0kbc= +=+Wlj +-----END PGP SIGNATURE-----` + armoredKeyRingFixture = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQSuBF9+HgMRDADKT8UBcSzpTi4JXt/ohhVW3x81AGFPrQvs6MYrcnNJfIkPTJD8 +mY5T7j1fkaN5wcf1wnxM9qTcW8BodkWNGEoEYOtVuigLSxPFqIncxK0PHvdU8ths +TEInBrgZv9t6xIVa4QngOEUd2D/aYni7M+75z7ntgj6eU1xLZ60upRFn05862OvJ +rZFUvzjsZXMAO3enCu2VhG/2axCY/5uI8PgWjyiKV2TH4LBJgzlb0v6SyI+fYf5K +Bg2WzDuLKvQBi9tFSwnUbQoFFlOeiGW8G/bdkoJDWeS1oYgSD3nkmvXvrVESCrbT +C05OtQOiDXjSpkLim81vNVPtI2XEug+9fEA+jeJakyGwwB+K8xqV3QILKCoWHKGx +yWcMHSR6cP9tdXCk2JHZBm1PLSJ8hIgMH/YwBJLYg90u8lLAs9WtpVBKkLplzzgm +B4Z4VxCC+xI1kt+3ZgYvYC+oUXJXrjyAzy+J1f+aWl2+S/79glWgl/xz2VibWMz6 +nZUE+wLMxOQqyOsBALsoE6z81y/7gfn4R/BziBASi1jq/r/wdboFYowmqd39DACX ++i+V0OplP2TN/F5JajzRgkrlq5cwZHinnw+IFwj9RTfOkdGb3YwhBt/h2PP38969 +ZG+y8muNtaIqih1pXj1fz9HRtsiCABN0j+JYpvV2D2xuLL7P1O0dt5BpJ3KqNCRw +mGgO2GLxbwvlulsLidCPxdK/M8g9Eeb/xwA5LVwvjVchHkzHuUT7durn7AT0RWiK +BT8iDfeBB9RKienAbWyybEqRaR6/Tv+mghFIalsDiBPbfm4rsNzsq3ohfByqECiy +yUvs2O3NDwkoaBDkA3GFyKv8/SVpcuL5OkVxAHNCIMhNzSgotQ3KLcQc0IREfFCa +3CsBAC7CsE2bJZ9IA9sbBa3jimVhWUQVudRWiLFeYHUF/hjhqS8IHyFwprjEOLaV +EG0kBO6ELypD/bOsmN9XZLPYyI3y9DM6Vo0KMomE+yK/By/ZMxVfex8/TZreUdhP +VdCLL95Rc4w9io8qFb2qGtYBij2wm0RWLcM0IhXWAtjI3B17IN+6hmv+JpiZccsM +AMNR5/RVdXIl0hzr8LROD0Xe4sTyZ+fm3mvpczoDPQNRrWpmI/9OT58itnVmZ5jM +7djV5y/NjBk63mlqYYfkfWto97wkhg0MnTnOhzdtzSiZQRzj+vf+ilLfIlLnuRr1 +JRV9Skv6xQltcFArx4JyfZCo7JB1ZXcbdFAvIXXS11RTErO0XVrXNm2RenpW/yZA +9f+ESQ/uUB6XNuyqVUnJDAFJFLdzx8sO3DXo7dhIlgpFqgQobUl+APpbU5LT95sm +89UrV0Lt9vh7k6zQtKOjEUhm+dErmuBnJo8MvchAuXLagHjvb58vYBCUxVxzt1KG +2IePwJ/oXIfawNEGad9Lmdo1FYG1u53AKWZmpYOTouu92O50FG2+7dBh0V2vO253 +aIGFRT1r14B1pkCIun7z7B/JELqOkmwmlRrUnxlADZEcQT3z/S8/4+2P7P6kXO7X +/TAX5xBhSqUbKe3DhJSOvf05/RVL5ULc2U2JFGLAtmBOFmnD/u0qoo5UvWliI+v/ +47QnU3RlZmFuIFByb2RhbiA8c3RlZmFuLnByb2RhbkBnbWFpbC5jb20+iJAEExEI 
+ADgWIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCX34eAwIbAwULCQgHAgYVCgkICwIE +FgIDAQIeAQIXgAAKCRAyma6w5Ahbrzu/AP9l2YpRaWZr6wSQuEn0gMN8DRzsWJPx +pn0akdY7SRP3ngD9GoKgu41FAItnHAJ2KiHv/fHFyHMndNP3kPGPNW4BF+65Aw0E +X34eAxAMAMdYFCHmVA8TZxSTMBDpKYave8RiDCMMMjk26Gl0EPN9f2Y+s5++DhiQ +hojNH9VmJkFwZX1xppxe1y1aLa/U6fBAqMP/IdNH8270iv+A9YIxdsWLmpm99BDO +3suRfsHcOe9T0x/CwRfDNdGM/enGMhYGTgF4VD58DRDE6WntaBhl4JJa300NG6X0 +GM4Gh59DKWDnez/Shulj8demlWmakP5imCVoY+omOEc2k3nH02U+foqaGG5WxZZ+ +GwEPswm2sBxvn8nwjy9gbQwEtzNI7lWYiz36wCj2VS56Udqt+0eNg8WzocUT0XyI +moe1qm8YJQ6fxIzaC431DYi/mCDzgx4EV9ww33SXX3Yp2NL6PsdWJWw2QnoqSMpM +z5otw2KlMgUHkkXEKs0apmK4Hu2b6KD7/ydoQRFUqR38Gb0IZL1tOL6PnbCRUcig +Aypy016W/WMCjBfQ8qxIGTaj5agX2t28hbiURbxZkCkz+Z3OWkO0Rq3Y2hNAYM5s +eTn94JIGGwADBgv/dbSZ9LrBvdMwg8pAtdlLtQdjPiT1i9w5NZuQd7OuKhOxYTEB +NRDTgy4/DgeNThCeOkMB/UQQPtJ3Et45S2YRtnnuvfxgnlz7xlUn765/grtnRk4t +ONjMmb6tZos1FjIJecB/6h4RsvUd2egvtlpD/Z3YKr6MpNjWg4ji7m27e9pcJfP6 +YpTDrq9GamiHy9FS2F2pZlQxriPpVhjCLVn9tFGBIsXNxxn7SP4so6rJBmyHEAlq +iym9wl933e0FIgAw5C1vvprYu2amk+jmVBsJjjCmInW5q/kWAFnFaHBvk+v+/7tX +hywWUI7BqseikgUlkgJ6eU7E9z1DEyuS08x/cViDoNh2ntVUhpnluDu48pdqBvvY +a4uL/D+KI84THUAJ/vZy+q6G3BEb4hI9pFjgrdJpUKubxyZolmkCFZHjV34uOcTc +LQr28P8xW8vQbg5DpIsivxYLqDGXt3OyiItxvLMtw/ypt6PkoeP9A4KDST4StITE +1hrOrPtJ/VRmS2o0iHgEGBEIACAWIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCX34e +AwIbDAAKCRAyma6w5Ahbr6QWAP9/pl2R6r1nuCnXzewSbnH1OLsXf32hFQAjaQ5o +Oomb3gD/TRf/nAdVED+k81GdLzciYdUGtI71/qI47G0nMBluLRE= +=/4e+ +-----END PGP PUBLIC KEY BLOCK----- +` +) + +var ( + testGitImplementations = []string{sourcev1.GoGitImplementation, sourcev1.LibGit2Implementation} +) + +func TestGitRepositoryReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + + repoPath := "/test.git" + _, err = initGitRepo(server, "testdata/git/repository", 
git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "gitrepository-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: server.HTTPAddress() + repoPath, + }, + } + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for GitRepository to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) || obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + // Check if the object status is valid. + condns := &status.Conditions{NegativePolarity: gitRepoReadyConditions.NegativePolarity} + checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) + checker.CheckErr(ctx, obj) + + // kstatus client conformance check. + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. 
+ patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for GitRepository to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) { + type options struct { + username string + password string + publicKey []byte + privateKey []byte + ca []byte + } + + tests := []struct { + name string + skipForImplementation string + protocol string + server options + secret *corev1.Secret + beforeFunc func(obj *sourcev1.GitRepository) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "HTTP without secretRef makes ArtifactOutdated=True", + protocol: "http", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), + }, + }, + { + name: "HTTP with Basic Auth secret makes ArtifactOutdated=True", + protocol: "http", + server: options{ + username: "git", + password: "1234", + }, + secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: "cert", - Namespace: namespace.Name, + Name: "basic-auth", }, Data: map[string][]byte{ - "caFile": exampleCA, + "username": []byte("git"), + "password": []byte("1234"), }, - } - err = 
k8sClient.Create(context.Background(), &cert) - Expect(err).NotTo(HaveOccurred()) + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), + }, + }, + { + name: "HTTPS with CAFile secret makes ArtifactOutdated=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "caFile": tlsCA, + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), + }, + }, + { + name: "HTTPS with invalid CAFile secret makes CheckoutFailed=True and returns error", + skipForImplementation: sourcev1.LibGit2Implementation, + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-ca", + }, + Data: map[string][]byte{ + "caFile": []byte("invalid"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "x509: certificate signed by unknown 
authority"), + }, + }, + { + name: "HTTPS with invalid CAFile secret makes CheckoutFailed=True and returns error", + skipForImplementation: sourcev1.GoGitImplementation, + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-ca", + }, + Data: map[string][]byte{ + "caFile": []byte("invalid"), + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "failed to checkout and determine revision: unable to clone '': PEM CA bundle could not be appended to x509 certificate pool"), + }, + }, + { + name: "SSH with private key secret makes ArtifactOutdated=True", + protocol: "ssh", + server: options{ + username: "git", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "private-key", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "identity": sshtestdata.PEMBytes["rsa"], + }, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), + }, + }, + { + name: "SSH with password protected private key secret makes ArtifactOutdated=True", + protocol: "ssh", + server: options{ + username: "git", + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "private-key", + }, + Data: map[string][]byte{ + "username": []byte("git"), + "identity": sshtestdata.PEMEncryptedKeys[2].PEMBytes, + "password": []byte("password"), + 
}, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"), + }, + }, + { + name: "Include get failure makes CheckoutFailed=True and returns error", + protocol: "http", + server: options{ + username: "git", + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"} + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/non-existing': secrets \"non-existing\" not found"), + }, + }, + } - gitServer, err = gittestserver.NewTempGitServer() - Expect(err).NotTo(HaveOccurred()) - gitServer.AutoCreate() - }) - - AfterEach(func() { - os.RemoveAll(gitServer.Root()) - - err = k8sClient.Delete(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace") - }) - - type refTestCase struct { - reference *sourcev1.GitRepositoryRef - createRefs []string - - waitForReason string - - expectStatus metav1.ConditionStatus - expectMessage string - expectRevision string - - secretRef *meta.LocalObjectReference - gitImplementation string + for _, tt := range tests { + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: interval}, + }, } - DescribeTable("Git references tests", func(t refTestCase) { - err = gitServer.StartHTTP() - defer gitServer.StopHTTP() - Expect(err).NotTo(HaveOccurred()) + t.Run(tt.name, func(t 
*testing.T) { + g := NewWithT(t) - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) + repoPath := "/test.git" + localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - ff, _ := fs.Create("fixture") - _ = ff.Close() - _, err = wt.Add(fs.Join("fixture")) - Expect(err).NotTo(HaveOccurred()) - - commit, err := wt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - for _, ref := range t.createRefs { - hRef := plumbing.NewHashReference(plumbing.ReferenceName(ref), commit) - err = gitrepo.Storer.SetReference(hRef) - Expect(err).NotTo(HaveOccurred()) + if len(tt.server.username+tt.server.password) > 0 { + server.Auth(tt.server.username, tt.server.password) } - remote, err := gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) + secret := tt.secret.DeepCopy() + switch tt.protocol { + case "http": + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + obj.Spec.URL = server.HTTPAddress() + repoPath + case "https": + g.Expect(server.StartHTTPS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed()) + obj.Spec.URL = server.HTTPAddress() + repoPath + case "ssh": + server.KeyDir(filepath.Join(server.Root(), "keys")) - err = remote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", 
"refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) + g.Expect(server.ListenSSH()).To(Succeed()) + obj.Spec.URL = server.SSHAddress() + repoPath - t.reference.Commit = strings.Replace(t.reference.Commit, "", commit.String(), 1) + go func() { + server.StartSSH() + }() + defer server.StopSSH() - key := types.NamespacedName{ - Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)), - Namespace: namespace.Name, + if secret != nil && len(secret.Data["known_hosts"]) == 0 { + u, err := url.Parse(obj.Spec.URL) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(u.Host).ToNot(BeEmpty()) + knownHosts, err := ssh.ScanHostKey(u.Host, timeout) + g.Expect(err).NotTo(HaveOccurred()) + secret.Data["known_hosts"] = knownHosts + } + default: + t.Fatalf("unsupported protocol %q", tt.protocol) } - created := &sourcev1.GitRepository{ + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) + if secret != nil { + builder.WithObjects(secret.DeepCopy()) + } + + r := &GitRepositoryReconciler{ + Client: builder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + for _, i := range testGitImplementations { + t.Run(i, func(t *testing.T) { + g := NewWithT(t) + + if tt.skipForImplementation == i { + t.Skipf("Skipped for Git implementation %q", i) + } + + tmpDir, err := os.MkdirTemp("", "auth-strategy-") + g.Expect(err).To(BeNil()) + defer os.RemoveAll(tmpDir) + + obj := obj.DeepCopy() + obj.Spec.GitImplementation = i + + head, _ := localRepo.Head() + assertConditions := tt.assertConditions + for k := range assertConditions { + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", head.Hash().String()) + assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", obj.Spec.URL) + } + + var commit git.Commit + var includes artifactSet + + got, err := r.reconcileSource(context.TODO(), obj, &commit, &includes, tmpDir) + 
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + g.Expect(commit).ToNot(BeNil()) + }) + } + }) + } +} + +func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) { + g := NewWithT(t) + + branches := []string{"staging"} + tags := []string{"non-semver-tag", "v0.1.0", "0.2.0", "v0.2.1", "v1.0.0-alpha", "v1.1.0", "v2.0.0"} + + tests := []struct { + name string + skipForImplementation string + reference *sourcev1.GitRepositoryRef + want sreconcile.Result + wantErr bool + wantRevision string + }{ + { + name: "Nil reference (default branch)", + want: sreconcile.ResultSuccess, + wantRevision: "master/", + }, + { + name: "Branch", + reference: &sourcev1.GitRepositoryRef{ + Branch: "staging", + }, + want: sreconcile.ResultSuccess, + wantRevision: "staging/", + }, + { + name: "Tag", + reference: &sourcev1.GitRepositoryRef{ + Tag: "v0.1.0", + }, + want: sreconcile.ResultSuccess, + wantRevision: "v0.1.0/", + }, + { + name: "Branch commit", + skipForImplementation: sourcev1.LibGit2Implementation, + reference: &sourcev1.GitRepositoryRef{ + Branch: "staging", + Commit: "", + }, + want: sreconcile.ResultSuccess, + wantRevision: "staging/", + }, + { + name: "Branch commit", + skipForImplementation: sourcev1.GoGitImplementation, + reference: &sourcev1.GitRepositoryRef{ + Branch: "staging", + Commit: "", + }, + want: sreconcile.ResultSuccess, + wantRevision: "HEAD/", + }, + { + name: "SemVer", + reference: &sourcev1.GitRepositoryRef{ + SemVer: "*", + }, + want: sreconcile.ResultSuccess, + wantRevision: "v2.0.0/", + }, + { + name: "SemVer range", + reference: &sourcev1.GitRepositoryRef{ + SemVer: "", + }, + { + name: "SemVer prerelease", + reference: &sourcev1.GitRepositoryRef{ + SemVer: ">=1.0.0-0 <1.1.0-0", + }, + wantRevision: "v1.0.0-alpha/", + want: sreconcile.ResultSuccess, + }, + } + + server, err := gittestserver.NewTempGitServer() + 
g.Expect(err).To(BeNil()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + + repoPath := "/test.git" + localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) + + headRef, err := localRepo.Head() + g.Expect(err).NotTo(HaveOccurred()) + + for _, branch := range branches { + g.Expect(remoteBranchForHead(localRepo, headRef, branch)).To(Succeed()) + } + for _, tag := range tags { + g.Expect(remoteTagForHead(localRepo, headRef, tag)).To(Succeed()) + } + + r := &GitRepositoryReconciler{ + Client: fakeclient.NewClientBuilder().WithScheme(runtime.NewScheme()).Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + obj := &sourcev1.GitRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + GenerateName: "checkout-strategy-", }, Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, - Reference: t.reference, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: interval}, + URL: server.HTTPAddress() + repoPath, + Reference: tt.reference, }, } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - got := &sourcev1.GitRepository{} - var cond metav1.Condition - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == t.waitForReason { - cond = c - return true + if obj.Spec.Reference != nil && obj.Spec.Reference.Commit == "" { + obj.Spec.Reference.Commit = headRef.Hash().String() + } + + for _, i := range testGitImplementations { + t.Run(i, func(t *testing.T) { + g := NewWithT(t) + + if tt.skipForImplementation == i { + t.Skipf("Skipped for Git 
implementation %q", i) } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(cond.Status).To(Equal(t.expectStatus)) - Expect(cond.Message).To(ContainSubstring(t.expectMessage)) - Expect(got.Status.Artifact == nil).To(Equal(t.expectRevision == "")) - if t.expectRevision != "" { - Expect(got.Status.Artifact.Revision).To(Equal(t.expectRevision + "/" + commit.String())) + tmpDir, err := os.MkdirTemp("", "checkout-strategy-") + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := obj.DeepCopy() + obj.Spec.GitImplementation = i + + var commit git.Commit + var includes artifactSet + got, err := r.reconcileSource(ctx, obj, &commit, &includes, tmpDir) + if err != nil { + println(err.Error()) + } + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + if tt.wantRevision != "" { + revision := strings.ReplaceAll(tt.wantRevision, "", headRef.Hash().String()) + g.Expect(commit.String()).To(Equal(revision)) + g.Expect(conditions.IsTrue(obj, sourcev1.ArtifactOutdatedCondition)).To(BeTrue()) + } + }) } + }) + } +} + +func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + dir string + includes artifactSet + beforeFunc func(obj *sourcev1.GitRepository) + afterFunc func(t *WithT, obj *sourcev1.GitRepository) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Archiving artifact to storage makes Ready=True", + dir: "testdata/git/repository", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), + 
*conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), + }, }, - Entry("branch", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "some-branch"}, - createRefs: []string{"refs/heads/some-branch"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-branch", - }), - Entry("branch non existing", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "invalid-branch"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "couldn't find remote ref", - }), - Entry("tag", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Tag: "some-tag"}, - createRefs: []string{"refs/tags/some-tag"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-tag", - }), - Entry("tag non existing", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Tag: "invalid-tag"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "couldn't find remote ref", - }), - Entry("semver", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: "1.0.0"}, - createRefs: []string{"refs/tags/v1.0.0"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "v1.0.0", - }), - Entry("semver range", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: ">=0.1.0 <1.0.0"}, - createRefs: []string{"refs/tags/0.1.0", "refs/tags/0.1.1", "refs/tags/0.2.0", "refs/tags/1.0.0"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "0.2.0", - }), - Entry("mixed semver range", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: ">=0.1.0 <1.0.0"}, - createRefs: []string{"refs/tags/0.1.0", "refs/tags/v0.1.1", "refs/tags/v0.2.0", "refs/tags/1.0.0"}, - waitForReason: sourcev1.GitOperationSucceedReason, 
- expectStatus: metav1.ConditionTrue, - expectRevision: "v0.2.0", - }), - Entry("semver invalid", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: "1.2.3.4"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "semver parse error: improper constraint: 1.2.3.4", - }), - Entry("semver no match", refTestCase{ - reference: &sourcev1.GitRepositoryRef{SemVer: "1.0.0"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "no match found for semver: 1.0.0", - }), - Entry("commit", refTestCase{ - reference: &sourcev1.GitRepositoryRef{ - Commit: "", - }, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "HEAD", - }), - Entry("commit in branch", refTestCase{ - reference: &sourcev1.GitRepositoryRef{ - Branch: "some-branch", - Commit: "", - }, - createRefs: []string{"refs/heads/some-branch"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-branch", - }), - Entry("invalid commit", refTestCase{ - reference: &sourcev1.GitRepositoryRef{ - Branch: "master", - Commit: "invalid", - }, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "failed to resolve commit object for 'invalid': object not found", - }), - ) + { + name: "Archiving artifact to storage with includes makes Ready=True", + dir: "testdata/git/repository", + includes: artifactSet{&sourcev1.Artifact{Revision: "main/revision"}}, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Checksum).To(Equal("60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172")) + t.Expect(obj.Status.IncludedArtifacts).ToNot(BeEmpty()) + 
t.Expect(obj.Status.URL).ToNot(BeEmpty()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + dir: "testdata/git/repository", + includes: artifactSet{&sourcev1.Artifact{Revision: "main/revision"}}, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Status.Artifact = &sourcev1.Artifact{Revision: "main/revision"} + obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main/revision"}} + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), + }, + }, + { + name: "Spec ignore overwrite is taken into account", + dir: "testdata/git/repository", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Ignore = pointer.StringPtr("!**.txt\n") + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Checksum).To(Equal("11f7f007dce5619bd79e6c57688261058d09f5271e802463ac39f2b9ead7cabd")) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating new artifact", + dir: 
"testdata/git/repository", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Checksum).To(Equal("60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + dir: "testdata/git/repository", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + afterFunc: func(t *WithT, obj *sourcev1.GitRepository) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) - DescribeTable("Git self signed cert tests", func(t refTestCase) { - err = gitServer.StartHTTPS(examplePublicKey, examplePrivateKey, exampleCA, "example.com") - defer gitServer.StopHTTP() - Expect(err).NotTo(HaveOccurred()) + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), + }, + }, + { + name: "Target path does not exists", + dir: "testdata/git/foo", + wantErr: 
true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), + }, + }, + { + name: "Target path is not a directory", + dir: "testdata/git/repository/foo.txt", + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'main/revision'"), + }, + }, + } - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) - var transport = httptransport.NewClient(&http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - }, - }) - client.InstallProtocol("https", transport) - - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - ff, _ := fs.Create("fixture") - _ = ff.Close() - _, err = wt.Add(fs.Join("fixture")) - Expect(err).NotTo(HaveOccurred()) - - commit, err := wt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - for _, ref := range t.createRefs { - hRef := plumbing.NewHashReference(plumbing.ReferenceName(ref), commit) - err = gitrepo.Storer.SetReference(hRef) - Expect(err).NotTo(HaveOccurred()) + r := &GitRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, } - remote, err := gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - err = remote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - 
Expect(err).NotTo(HaveOccurred()) - - t.reference.Commit = strings.Replace(t.reference.Commit, "", commit.String(), 1) - - client.InstallProtocol("https", httptransport.DefaultClient) - - key := types.NamespacedName{ - Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)), - Namespace: namespace.Name, - } - created := &sourcev1.GitRepository{ + obj := &sourcev1.GitRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, - Reference: t.reference, - GitImplementation: t.gitImplementation, - SecretRef: t.secretRef, + GenerateName: "reconcile-artifact-", + Generation: 1, }, + Status: sourcev1.GitRepositoryStatus{}, } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - got := &sourcev1.GitRepository{} - var cond metav1.Condition - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == t.waitForReason { - cond = c - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } - Expect(cond.Status).To(Equal(t.expectStatus)) - Expect(cond.Message).To(ContainSubstring(t.expectMessage)) - Expect(got.Status.Artifact == nil).To(Equal(t.expectRevision == "")) + commit := git.Commit{ + Hash: []byte("revision"), + Reference: "refs/heads/main", + } + + got, err := r.reconcileArtifact(ctx, obj, &commit, &tt.includes, tt.dir) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + }) + } +} + +func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) { + g := NewWithT(t) + + server, err := testserver.NewTempArtifactServer() + 
g.Expect(err).NotTo(HaveOccurred()) + storage, err := newTestStorage(server.HTTPServer) + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(storage.BasePath) + + dependencyInterval := 5 * time.Second + + type dependency struct { + name string + withArtifact bool + conditions []metav1.Condition + } + + type include struct { + name string + fromPath string + toPath string + shouldExist bool + } + + tests := []struct { + name string + dependencies []dependency + includes []include + beforeFunc func(obj *sourcev1.GitRepository) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "New includes make ArtifactOutdated=True", + dependencies: []dependency{ + { + name: "a", + withArtifact: true, + conditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Foo", "foo ready"), + }, + }, + { + name: "b", + withArtifact: true, + conditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Bar", "bar ready"), + }, + }, + }, + includes: []include{ + {name: "a", toPath: "a/", shouldExist: true}, + {name: "b", toPath: "b/", shouldExist: true}, + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "IncludeChange", "included artifacts differ from last observed includes"), + }, }, - Entry("self signed libgit2 without CA", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "main"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "user rejected certificate", - gitImplementation: sourcev1.LibGit2Implementation, - }), - Entry("self signed libgit2 with CA", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "some-branch"}, - createRefs: []string{"refs/heads/some-branch"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-branch", - secretRef: 
&meta.LocalObjectReference{Name: "cert"}, - gitImplementation: sourcev1.LibGit2Implementation, - }), - Entry("self signed go-git without CA", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "main"}, - waitForReason: sourcev1.GitOperationFailedReason, - expectStatus: metav1.ConditionFalse, - expectMessage: "x509: certificate signed by unknown authority", - }), - Entry("self signed go-git with CA", refTestCase{ - reference: &sourcev1.GitRepositoryRef{Branch: "some-branch"}, - createRefs: []string{"refs/heads/some-branch"}, - waitForReason: sourcev1.GitOperationSucceedReason, - expectStatus: metav1.ConditionTrue, - expectRevision: "some-branch", - secretRef: &meta.LocalObjectReference{Name: "cert"}, - gitImplementation: sourcev1.GoGitImplementation, - }), - ) + { + name: "Include get failure makes IncludeUnavailable=True and returns error", + includes: []include{ + {name: "a", toPath: "a/"}, + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "could not get resource for include 'a': gitrepositories.source.toolkit.fluxcd.io \"a\" not found"), + }, + }, + { + name: "Include without an artifact makes IncludeUnavailable=True", + dependencies: []dependency{ + { + name: "a", + withArtifact: false, + conditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "Foo", "foo unavailable"), + }, + }, + }, + includes: []include{ + {name: "a", toPath: "a/"}, + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "no artifact available for include 'a'"), + }, + }, + { + name: "Invalid FromPath makes IncludeUnavailable=True and returns error", + dependencies: []dependency{ + { + name: "a", + withArtifact: true, + }, + }, + includes: []include{ + {name: "a", fromPath: "../../../path", shouldExist: false}, + }, + wantErr: true, + assertConditions: 
[]metav1.Condition{ + *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "CopyFailure", "unpack/path: no such file or directory"), + }, + }, + { + name: "Outdated IncludeUnavailable is removed", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", "") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) - Context("recurse submodules", func() { - It("downloads submodules when asked", func() { - Expect(gitServer.StartHTTP()).To(Succeed()) - defer gitServer.StopHTTP() - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - - subRepoURL := *u - subRepoURL.Path = path.Join(u.Path, fmt.Sprintf("subrepository-%s.git", randStringRunes(5))) - - // create the git repo to use as a submodule - fs := memfs.New() - subRepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := subRepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - ff, _ := fs.Create("fixture") - _ = ff.Close() - _, err = wt.Add(fs.Join("fixture")) - Expect(err).NotTo(HaveOccurred()) - - _, err = wt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - remote, err := subRepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{subRepoURL.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - err = remote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) - - // this one is linked to a real directory, so that I can - // exec `git submodule add` later - tmp, err := os.MkdirTemp("", "flux-test") - Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(tmp) - - repoDir := filepath.Join(tmp, "git") - 
repo, err := git.PlainInit(repoDir, false) - Expect(err).NotTo(HaveOccurred()) - - wt, err = repo.Worktree() - Expect(err).NotTo(HaveOccurred()) - _, err = wt.Commit("Initial revision", &git.CommitOptions{ - Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - submodAdd := exec.Command("git", "submodule", "add", "-b", "master", subRepoURL.String(), "sub") - submodAdd.Dir = repoDir - out, err := submodAdd.CombinedOutput() - os.Stdout.Write(out) - Expect(err).NotTo(HaveOccurred()) - - _, err = wt.Commit("Add submodule", &git.CommitOptions{ - Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - mainRepoURL := *u - mainRepoURL.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - remote, err = repo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{mainRepoURL.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - err = remote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) - - key := types.NamespacedName{ - Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)), - Namespace: namespace.Name, - } - created := &sourcev1.GitRepository{ + var depObjs []client.Object + for _, d := range tt.dependencies { + obj := &sourcev1.GitRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + Name: d.name, }, - Spec: sourcev1.GitRepositorySpec{ - URL: mainRepoURL.String(), - Interval: metav1.Duration{Duration: indexInterval}, - Reference: &sourcev1.GitRepositoryRef{Branch: "master"}, - GitImplementation: sourcev1.GoGitImplementation, // only works with go-git - RecurseSubmodules: true, + Status: sourcev1.GitRepositoryStatus{ + Conditions: d.conditions, }, } - Expect(k8sClient.Create(context.Background(), 
created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - - got := &sourcev1.GitRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.GitOperationSucceedReason { - return true - } + if d.withArtifact { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: d.name + ".tar.gz", + Revision: d.name, + LastUpdateTime: metav1.Now(), } - return false - }, timeout, interval).Should(BeTrue()) + g.Expect(storage.Archive(obj.GetArtifact(), "testdata/git/repository", nil)).To(Succeed()) + } + depObjs = append(depObjs, obj) + } - // check that the downloaded artifact includes the - // file from the submodule - res, err := http.Get(got.Status.URL) - Expect(err).NotTo(HaveOccurred()) - Expect(res.StatusCode).To(Equal(http.StatusOK)) + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) + if len(tt.dependencies) > 0 { + builder.WithObjects(depObjs...) + } - _, err = untar.Untar(res.Body, filepath.Join(tmp, "tar")) - Expect(err).NotTo(HaveOccurred()) - Expect(filepath.Join(tmp, "tar", "sub", "fixture")).To(BeAnExistingFile()) - }) + r := &GitRepositoryReconciler{ + Client: builder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: storage, + requeueDependency: dependencyInterval, + } + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-include", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + }, + } + + for i, incl := range tt.includes { + incl := sourcev1.GitRepositoryInclude{ + GitRepositoryRef: meta.LocalObjectReference{Name: incl.name}, + FromPath: incl.fromPath, + ToPath: incl.toPath, + } + tt.includes[i].fromPath = incl.GetFromPath() + tt.includes[i].toPath = incl.GetToPath() + obj.Spec.Include = append(obj.Spec.Include, incl) + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + tmpDir, err := os.MkdirTemp("", "include-") + 
g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + var commit git.Commit + var includes artifactSet + + got, err := r.reconcileInclude(ctx, obj, &commit, &includes, tmpDir) + g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + if err == nil { + g.Expect(len(includes)).To(Equal(len(tt.includes))) + } + g.Expect(got).To(Equal(tt.want)) + for _, i := range tt.includes { + if i.toPath != "" { + expect := g.Expect(filepath.Join(tmpDir, i.toPath)) + if i.shouldExist { + expect.To(BeADirectory()) + } else { + expect.NotTo(BeADirectory()) + } + } + if i.shouldExist { + g.Expect(filepath.Join(tmpDir, i.toPath)).Should(BeADirectory()) + } else { + g.Expect(filepath.Join(tmpDir, i.toPath)).ShouldNot(BeADirectory()) + } + } }) + } +} - type includeTestCase struct { - fromPath string - toPath string - createFiles []string - checkFiles []string +func TestGitRepositoryReconciler_reconcileDelete(t *testing.T) { + g := NewWithT(t) + + r := &GitRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-delete-", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{ + sourcev1.SourceFinalizer, + }, + }, + Status: sourcev1.GitRepositoryStatus{}, + } + + artifact := testStorage.NewArtifactFor(sourcev1.GitRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt") + obj.Status.Artifact = &artifact + + got, err := r.reconcileDelete(ctx, obj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(got).To(Equal(sreconcile.ResultEmpty)) + g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) + g.Expect(obj.Status.Artifact).To(BeNil()) +} + +func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) { + tests := []struct { + name string + secret *corev1.Secret + commit git.Commit + beforeFunc func(obj 
*sourcev1.GitRepository) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Valid commit makes SourceVerifiedCondition=True", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + Data: map[string][]byte{ + "foo": []byte(armoredKeyRingFixture), + }, + }, + commit: git.Commit{ + Hash: []byte("shasum"), + Encoded: []byte(encodedCommitFixture), + Signature: signatureCommitFixture, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: "head", + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit 'shasum'"), + }, + }, + { + name: "Invalid commit makes SourceVerifiedCondition=False and returns error", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing", + }, + }, + commit: git.Commit{ + Hash: []byte("shasum"), + Encoded: []byte(malformedEncodedCommitFixture), + Signature: signatureCommitFixture, + }, + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{ + Mode: "head", + SecretRef: meta.LocalObjectReference{ + Name: "existing", + }, + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, meta.FailedReason, "signature verification of commit 'shasum' failed: failed to verify commit with any of the given key rings"), + }, + }, + { + name: "Secret get failure makes SourceVerified=False and returns error", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = 
&sourcev1.GitRepositoryVerification{ + Mode: "head", + SecretRef: meta.LocalObjectReference{ + Name: "none-existing", + }, + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, meta.FailedReason, "PGP public keys secret error: secrets \"none-existing\" not found"), + }, + }, + { + name: "Nil verification in spec deletes SourceVerified condition", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{}, + }, + { + name: "Empty verification mode in spec deletes SourceVerified condition", + beforeFunc: func(obj *sourcev1.GitRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Spec.Verification = &sourcev1.GitRepositoryVerification{} + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + + r := &GitRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: builder.Build(), + } + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "verify-commit-", + Generation: 1, + }, + Status: sourcev1.GitRepositoryStatus{}, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + got, err := r.verifyCommitSignature(context.TODO(), obj, tt.commit) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + }) + } +} + +func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) { + g := 
NewWithT(t) + + server, err := gittestserver.NewTempGitServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + server.AutoCreate() + g.Expect(server.StartHTTP()).To(Succeed()) + defer server.StopHTTP() + + repoPath := "/test.git" + _, err = initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath) + g.Expect(err).NotTo(HaveOccurred()) + + tests := []struct { + name string + beforeFunc func(obj *sourcev1.GitRepository) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "no condition", + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), + }, + }, + { + name: "reconciling condition", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), + }, + }, + { + name: "stalled condition", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), + }, + }, + { + name: "mixed failed conditions", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact 
for revision"), + }, + }, + { + name: "reconciling and failed conditions", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), + }, + }, + { + name: "stalled and failed conditions", + beforeFunc: func(obj *sourcev1.GitRepository) { + conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "condition-update", + Namespace: "default", + Finalizers: []string{sourcev1.SourceFinalizer}, + }, + Spec: sourcev1.GitRepositorySpec{ + URL: server.HTTPAddress() + repoPath, + GitImplementation: sourcev1.GoGitImplementation, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: interval}, + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()).WithObjects(obj) + + r := &GitRepositoryReconciler{ + Client: builder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + key := client.ObjectKeyFromObject(obj) + res, err := r.Reconcile(context.TODO(), ctrl.Request{NamespacedName: key}) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(res).To(Equal(tt.want)) + + updatedObj := &sourcev1.GitRepository{} + err = r.Get(ctx, key, updatedObj) + g.Expect(err).NotTo(HaveOccurred()) + 
g.Expect(updatedObj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +// helpers + +func initGitRepo(server *gittestserver.GitServer, fixture, branch, repositoryPath string) (*gogit.Repository, error) { + fs := memfs.New() + repo, err := gogit.Init(memory.NewStorage(), fs) + if err != nil { + return nil, err + } + + branchRef := plumbing.NewBranchReferenceName(branch) + if err = repo.CreateBranch(&config.Branch{ + Name: branch, + Remote: gogit.DefaultRemoteName, + Merge: branchRef, + }); err != nil { + return nil, err + } + + err = commitFromFixture(repo, fixture) + if err != nil { + return nil, err + } + + if server.HTTPAddress() == "" { + if err = server.StartHTTP(); err != nil { + return nil, err + } + defer server.StopHTTP() + } + if _, err = repo.CreateRemote(&config.RemoteConfig{ + Name: gogit.DefaultRemoteName, + URLs: []string{server.HTTPAddressWithCredentials() + repositoryPath}, + }); err != nil { + return nil, err + } + + if err = repo.Push(&gogit.PushOptions{ + RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*"}, + }); err != nil { + return nil, err + } + + return repo, nil +} + +func Test_commitFromFixture(t *testing.T) { + g := NewWithT(t) + + repo, err := gogit.Init(memory.NewStorage(), memfs.New()) + g.Expect(err).ToNot(HaveOccurred()) + + err = commitFromFixture(repo, "testdata/git/repository") + g.Expect(err).ToNot(HaveOccurred()) +} + +func commitFromFixture(repo *gogit.Repository, fixture string) error { + working, err := repo.Worktree() + if err != nil { + return err + } + fs := working.Filesystem + + if err = filepath.Walk(fixture, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return fs.MkdirAll(fs.Join(path[len(fixture):]), info.Mode()) } - DescribeTable("Include git repositories", func(t includeTestCase) { - Expect(gitServer.StartHTTP()).To(Succeed()) - defer gitServer.StopHTTP() + fileBytes, err := os.ReadFile(path) + if err != nil { + 
return err + } - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) + ff, err := fs.Create(path[len(fixture):]) + if err != nil { + return err + } + defer ff.Close() - // create the main git repository - mainRepoURL := *u - mainRepoURL.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) + _, err = ff.Write(fileBytes) + return err + }); err != nil { + return err + } - mainFs := memfs.New() - mainRepo, err := git.Init(memory.NewStorage(), mainFs) - Expect(err).NotTo(HaveOccurred()) + _, err = working.Add(".") + if err != nil { + return err + } - mainWt, err := mainRepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - ff, _ := mainFs.Create("fixture") - _ = ff.Close() - _, err = mainWt.Add(mainFs.Join("fixture")) - Expect(err).NotTo(HaveOccurred()) - - _, err = mainWt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - mainRemote, err := mainRepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{mainRepoURL.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - err = mainRemote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) - - // create the sub git repository - subRepoURL := *u - subRepoURL.Path = path.Join(u.Path, fmt.Sprintf("subrepository-%s.git", randStringRunes(5))) - - subFs := memfs.New() - subRepo, err := git.Init(memory.NewStorage(), subFs) - Expect(err).NotTo(HaveOccurred()) - - subWt, err := subRepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - for _, v := range t.createFiles { - if dir := filepath.Base(v); dir != v { - err := subFs.MkdirAll(dir, 0700) - Expect(err).NotTo(HaveOccurred()) - } - ff, err := subFs.Create(v) - Expect(err).NotTo(HaveOccurred()) - _ = ff.Close() - _, err = subWt.Add(subFs.Join(v)) - Expect(err).NotTo(HaveOccurred()) - } - 
- _, err = subWt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - subRemote, err := subRepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{subRepoURL.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - err = subRemote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) - - // create main and sub resetRepositories - subKey := types.NamespacedName{ - Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)), - Namespace: namespace.Name, - } - subCreated := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: subKey.Name, - Namespace: subKey.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: subRepoURL.String(), - Interval: metav1.Duration{Duration: indexInterval}, - Reference: &sourcev1.GitRepositoryRef{Branch: "master"}, - }, - } - Expect(k8sClient.Create(context.Background(), subCreated)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), subCreated) - - mainKey := types.NamespacedName{ - Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)), - Namespace: namespace.Name, - } - mainCreated := &sourcev1.GitRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: mainKey.Name, - Namespace: mainKey.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: mainRepoURL.String(), - Interval: metav1.Duration{Duration: indexInterval}, - Reference: &sourcev1.GitRepositoryRef{Branch: "master"}, - Include: []sourcev1.GitRepositoryInclude{ - { - GitRepositoryRef: meta.LocalObjectReference{ - Name: subKey.Name, - }, - FromPath: t.fromPath, - ToPath: t.toPath, - }, - }, - }, - } - Expect(k8sClient.Create(context.Background(), mainCreated)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), mainCreated) - - got := &sourcev1.GitRepository{} - Eventually(func() bool { - _ = 
k8sClient.Get(context.Background(), mainKey, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.GitOperationSucceedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - // check the contents of the repository - res, err := http.Get(got.Status.URL) - Expect(err).NotTo(HaveOccurred()) - Expect(res.StatusCode).To(Equal(http.StatusOK)) - tmp, err := os.MkdirTemp("", "flux-test") - Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(tmp) - _, err = untar.Untar(res.Body, filepath.Join(tmp, "tar")) - Expect(err).NotTo(HaveOccurred()) - for _, v := range t.checkFiles { - Expect(filepath.Join(tmp, "tar", v)).To(BeAnExistingFile()) - } - - // add new file to check that the change is reconciled - ff, err = subFs.Create(subFs.Join(t.fromPath, "test")) - Expect(err).NotTo(HaveOccurred()) - err = ff.Close() - Expect(err).NotTo(HaveOccurred()) - _, err = subWt.Add(subFs.Join(t.fromPath, "test")) - Expect(err).NotTo(HaveOccurred()) - - hash, err := subWt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - err = subRemote.Push(&git.PushOptions{ - RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"}, - }) - Expect(err).NotTo(HaveOccurred()) - - got = &sourcev1.GitRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), mainKey, got) - if got.Status.IncludedArtifacts[0].Revision == fmt.Sprintf("master/%s", hash.String()) { - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.GitOperationSucceedReason { - return true - } - } - } - return false - }, timeout, interval).Should(BeTrue()) - - // get the main repository artifact - res, err = http.Get(got.Status.URL) - Expect(err).NotTo(HaveOccurred()) - Expect(res.StatusCode).To(Equal(http.StatusOK)) - tmp, err = os.MkdirTemp("", "flux-test") - Expect(err).NotTo(HaveOccurred()) - 
defer os.RemoveAll(tmp) - _, err = untar.Untar(res.Body, filepath.Join(tmp, "tar")) - Expect(err).NotTo(HaveOccurred()) - Expect(filepath.Join(tmp, "tar", t.toPath, "test")).To(BeAnExistingFile()) + if _, err = working.Commit("Fixtures from "+fixture, &gogit.CommitOptions{ + Author: &object.Signature{ + Name: "Jane Doe", + Email: "jane@example.com", + When: time.Now(), }, - Entry("only to path", includeTestCase{ - fromPath: "", - toPath: "sub", - createFiles: []string{"dir1", "dir2"}, - checkFiles: []string{"sub/dir1", "sub/dir2"}, - }), - Entry("to nested path", includeTestCase{ - fromPath: "", - toPath: "sub/nested", - createFiles: []string{"dir1", "dir2"}, - checkFiles: []string{"sub/nested/dir1", "sub/nested/dir2"}, - }), - Entry("from and to path", includeTestCase{ - fromPath: "nested", - toPath: "sub", - createFiles: []string{"dir1", "nested/dir2", "nested/dir3", "nested/foo/bar"}, - checkFiles: []string{"sub/dir2", "sub/dir3", "sub/foo/bar"}, - }), - ) + }); err != nil { + return err + } + + return nil +} + +func remoteBranchForHead(repo *gogit.Repository, head *plumbing.Reference, branch string) error { + refSpec := fmt.Sprintf("%s:refs/heads/%s", head.Name(), branch) + return repo.Push(&gogit.PushOptions{ + RemoteName: "origin", + RefSpecs: []config.RefSpec{config.RefSpec(refSpec)}, + Force: true, }) -}) +} + +func remoteTagForHead(repo *gogit.Repository, head *plumbing.Reference, tag string) error { + if _, err := repo.CreateTag(tag, head.Hash(), &gogit.CreateTagOptions{ + // Not setting this seems to make things flaky + // Expected success, but got an error: + // <*errors.errorString | 0xc0000f6350>: { + // s: "tagger field is required", + // } + // tagger field is required + Tagger: &object.Signature{ + Name: "Jane Doe", + Email: "jane@example.com", + When: time.Now(), + }, + Message: tag, + }); err != nil { + return err + } + refSpec := fmt.Sprintf("refs/tags/%[1]s:refs/tags/%[1]s", tag) + return repo.Push(&gogit.PushOptions{ + RefSpecs: 
[]config.RefSpec{config.RefSpec(refSpec)}, + }) +} diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index e63f8e45..5bbe56cd 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -31,12 +31,10 @@ import ( helmgetter "helm.sh/helm/v3/pkg/getter" corev1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -48,17 +46,51 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" "github.com/fluxcd/pkg/untar" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/chart" "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" + "github.com/fluxcd/source-controller/internal/util" ) +// helmChartReadyConditions contains all the conditions information +// needed for HelmChart Ready status conditions summary calculation. 
+var helmChartReadyConditions = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.BuildFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.BuildFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.BuildFailedCondition, + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, +} + // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts/finalizers,verbs=get;create;update;patch;delete @@ -67,18 +99,24 @@ import ( // HelmChartReconciler reconciles a HelmChart object type HelmChartReconciler struct { client.Client - Scheme *runtime.Scheme - Storage *Storage - Getters helmgetter.Providers - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder + kuberecorder.EventRecorder + helper.Metrics + + Storage *Storage + Getters helmgetter.Providers + ControllerName string } func (r *HelmChartReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, HelmChartReconcilerOptions{}) } +type HelmChartReconcilerOptions struct { + MaxConcurrentReconciles int +} + +type helmChartReconcilerFunc func(ctx context.Context, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error) + func (r *HelmChartReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmChartReconcilerOptions) error { if err := mgr.GetCache().IndexField(context.TODO(), 
&sourcev1.HelmRepository{}, sourcev1.HelmRepositoryURLIndexKey, r.indexHelmRepositoryByURL); err != nil { @@ -112,211 +150,264 @@ func (r *HelmChartReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts Complete(r) } -func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) - var chart sourcev1.HelmChart - if err := r.Get(ctx, req.NamespacedName, &chart); err != nil { - return ctrl.Result{Requeue: true}, client.IgnoreNotFound(err) + // Fetch the HelmChart + obj := &sourcev1.HelmChart{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, chart) + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&chart, sourcev1.SourceFinalizer) { - patch := client.MergeFrom(chart.DeepCopy()) - controllerutil.AddFinalizer(&chart, sourcev1.SourceFinalizer) - if err := r.Patch(ctx, &chart, patch); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !chart.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, chart) - } - - // Return early if the object is suspended. 
- if chart.Spec.Suspend { + // Return early if the object is suspended + if obj.Spec.Suspend { log.Info("Reconciliation is suspended for this object") return ctrl.Result{}, nil } - // Record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &chart) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) - } - - // Conditionally set progressing condition in status - resetChart, changed := r.resetStatus(chart) - if changed { - chart = resetChart - if err := r.updateStatus(ctx, req, chart.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, chart) - } - - // Record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. - if v, ok := meta.ReconcileAnnotationValue(chart.GetAnnotations()); ok { - chart.Status.SetLastHandledReconcileRequest(v) - } - - // Purge all but current artifact from storage - if err := r.gc(chart); err != nil { - log.Error(err, "unable to purge old artifacts") - } - - // Retrieve the source - source, err := r.getSource(ctx, chart) + // Initialize the patch helper with the current version of the object. 
+ patchHelper, err := patch.NewHelper(obj, r.Client) if err != nil { - chart = sourcev1.HelmChartNotReady(*chart.DeepCopy(), sourcev1.ChartPullFailedReason, err.Error()) - if err := r.updateStatus(ctx, req, chart.Status); err != nil { - log.Error(err, "unable to update status") - } - return ctrl.Result{Requeue: true}, err + return ctrl.Result{}, err } - // Assert source is ready - if source.GetArtifact() == nil { - err = fmt.Errorf("no artifact found for source `%s` kind '%s'", - chart.Spec.SourceRef.Name, chart.Spec.SourceRef.Kind) - chart = sourcev1.HelmChartNotReady(*chart.DeepCopy(), sourcev1.ChartPullFailedReason, err.Error()) - if err := r.updateStatus(ctx, req, chart.Status); err != nil { - log.Error(err, "unable to update status") - } - r.recordReadiness(ctx, chart) - return ctrl.Result{Requeue: true}, err - } + // recResult stores the abstracted reconcile result. + var recResult sreconcile.Result - // Create working directory - workDir, err := os.MkdirTemp("", chart.Kind+"-"+chart.Namespace+"-"+chart.Name+"-") - if err != nil { - err = fmt.Errorf("failed to create temporary working directory: %w", err) - chart = sourcev1.HelmChartNotReady(*chart.DeepCopy(), sourcev1.ChartPullFailedReason, err.Error()) - if err := r.updateStatus(ctx, req, chart.Status); err != nil { - log.Error(err, "unable to update status") - } - r.recordReadiness(ctx, chart) - return ctrl.Result{Requeue: true}, err - } + // Always attempt to patch the object after each reconciliation. + // NOTE: The final runtime result and error are set in this block. 
defer func() { - if err := os.RemoveAll(workDir); err != nil { - log.Error(err, "failed to remove working directory", "path", workDir) + summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(helmChartReadyConditions), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.RecordContextualError, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), + summarize.WithPatchFieldOwner(r.ControllerName), + } + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) + + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) + }() + + // Add finalizer first if not exist to avoid the race condition + // between init and delete + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue + return + } + + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + recResult, retErr = r.reconcileDelete(ctx, obj) + return + } + + // Reconcile actual object + reconcilers := []helmChartReconcilerFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileArtifact, + } + recResult, retErr = r.reconcile(ctx, obj, reconcilers) + return +} + +// reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that +// produces an error. 
+func (r *HelmChartReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmChart, reconcilers []helmChartReconcilerFunc) (sreconcile.Result, error) { + if obj.Generation != obj.Status.ObservedGeneration { + conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) + } + + // Run the sub-reconcilers and build the result of reconciliation. + var ( + build chart.Build + res sreconcile.Result + resErr error + ) + for _, rec := range reconcilers { + recResult, err := rec(ctx, obj, &build) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil + } + // If an error is received, prioritize the returned results because an + // error also means immediate requeue. + if err != nil { + resErr = err + res = recResult + break + } + // Prioritize requeue request in the result. + res = sreconcile.LowestRequeuingResult(res, recResult) + } + return res, resErr +} + +// reconcileStorage ensures the current state of the storage matches the desired and previously observed state. +// +// All artifacts for the resource except for the current one are garbage collected from the storage. +// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. +// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. +// If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. +// +// The caller should assume a failure if an error is returned, or the BuildResult is zero. 
+func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + // Determine if the advertised artifact is still in storage + if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") + return sreconcile.ResultSuccess, nil + } + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return sreconcile.ResultSuccess, nil +} + +func (r *HelmChartReconciler) reconcileSource(ctx context.Context, obj *sourcev1.HelmChart, build *chart.Build) (_ sreconcile.Result, retErr error) { + // Retrieve the source + s, err := r.getSource(ctx, obj) + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to get source: %w", err), + Reason: "SourceUnavailable", + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "SourceUnavailable", e.Err.Error()) + + // Return Kubernetes client errors, but ignore others which can only be + // solved by a change in generation + if apierrs.ReasonForError(err) == metav1.StatusReasonUnknown { + return sreconcile.ResultEmpty, &serror.Stalling{ + Err: fmt.Errorf("failed to get source: %w", err), + Reason: "UnsupportedSourceKind", + } + } + return sreconcile.ResultEmpty, e + } + + // Assert source has an artifact + if s.GetArtifact() == nil || !r.Storage.ArtifactExist(*s.GetArtifact()) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "NoSourceArtifact", + "no artifact available for %s source '%s'", 
obj.Spec.SourceRef.Kind, obj.Spec.SourceRef.Name) + r.eventLogf(ctx, obj, events.EventTypeTrace, "NoSourceArtifact", + "no artifact available for %s source '%s'", obj.Spec.SourceRef.Kind, obj.Spec.SourceRef.Name) + return sreconcile.ResultRequeue, nil + } + + // Record current artifact revision as last observed + obj.Status.ObservedSourceArtifactRevision = s.GetArtifact().Revision + + // Defer observation of build result + defer func() { + // Record both success and error observations on the object + observeChartBuild(obj, build, retErr) + + // If we actually build a chart, take a historical note of any dependencies we resolved. + // The reason this is a done conditionally, is because if we have a cached one in storage, + // we can not recover this information (and put it in a condition). Which would result in + // a sudden (partial) disappearance of observed state. + // TODO(hidde): include specific name/version information? + if depNum := build.ResolvedDependencies; build.Complete() && depNum > 0 { + r.Eventf(obj, events.EventTypeTrace, "ResolvedDependencies", "resolved %d chart dependencies", depNum) + } + + // Handle any build error + if retErr != nil { + if buildErr := new(chart.BuildError); errors.As(retErr, &buildErr) { + retErr = &serror.Event{ + Err: buildErr, + Reason: buildErr.Reason.Reason, + } + if chart.IsPersistentBuildErrorReason(buildErr.Reason) { + retErr = &serror.Stalling{ + Err: buildErr, + Reason: buildErr.Reason.Reason, + } + } + } } }() - // Perform the reconciliation for the chart source type - var reconciledChart sourcev1.HelmChart - var reconcileErr error - switch typedSource := source.(type) { + // Perform the build for the chart source type + switch typedSource := s.(type) { case *sourcev1.HelmRepository: - reconciledChart, reconcileErr = r.fromHelmRepository(ctx, *typedSource, *chart.DeepCopy(), workDir, changed) + return r.buildFromHelmRepository(ctx, obj, typedSource, build) case *sourcev1.GitRepository, *sourcev1.Bucket: - 
reconciledChart, reconcileErr = r.fromTarballArtifact(ctx, *typedSource.GetArtifact(), *chart.DeepCopy(), - workDir, changed) + return r.buildFromTarballArtifact(ctx, obj, *typedSource.GetArtifact(), build) default: - err := fmt.Errorf("unable to reconcile unsupported source reference kind '%s'", chart.Spec.SourceRef.Kind) - return ctrl.Result{Requeue: false}, err + // Ending up here should generally not be possible + // as getSource already validates + return sreconcile.ResultEmpty, nil } - - // Update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledChart.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - - // If reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledChart, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledChart) - return ctrl.Result{Requeue: true}, reconcileErr - } - - // Emit an event if we did not have an artifact before, or the revision has changed - if (chart.GetArtifact() == nil && reconciledChart.GetArtifact() != nil) || - (chart.GetArtifact() != nil && reconciledChart.GetArtifact() != nil && reconciledChart.GetArtifact().Revision != chart.GetArtifact().Revision) { - r.event(ctx, reconciledChart, events.EventSeverityInfo, sourcev1.HelmChartReadyMessage(reconciledChart)) - } - r.recordReadiness(ctx, reconciledChart) - - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Since(start).String(), - chart.GetInterval().Duration.String(), - )) - return ctrl.Result{RequeueAfter: chart.GetInterval().Duration}, nil } -type HelmChartReconcilerOptions struct { - MaxConcurrentReconciles int -} +func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj *sourcev1.HelmChart, + repo *sourcev1.HelmRepository, b *chart.Build) (sreconcile.Result, error) { -func (r *HelmChartReconciler) getSource(ctx context.Context, chart 
sourcev1.HelmChart) (sourcev1.Source, error) { - var source sourcev1.Source - namespacedName := types.NamespacedName{ - Namespace: chart.GetNamespace(), - Name: chart.Spec.SourceRef.Name, - } - switch chart.Spec.SourceRef.Kind { - case sourcev1.HelmRepositoryKind: - var repository sourcev1.HelmRepository - err := r.Client.Get(ctx, namespacedName, &repository) - if err != nil { - return source, fmt.Errorf("failed to retrieve source: %w", err) - } - source = &repository - case sourcev1.GitRepositoryKind: - var repository sourcev1.GitRepository - err := r.Client.Get(ctx, namespacedName, &repository) - if err != nil { - return source, fmt.Errorf("failed to retrieve source: %w", err) - } - source = &repository - case sourcev1.BucketKind: - var bucket sourcev1.Bucket - err := r.Client.Get(ctx, namespacedName, &bucket) - if err != nil { - return source, fmt.Errorf("failed to retrieve source: %w", err) - } - source = &bucket - default: - return source, fmt.Errorf("source `%s` kind '%s' not supported", - chart.Spec.SourceRef.Name, chart.Spec.SourceRef.Kind) - } - return source, nil -} - -func (r *HelmChartReconciler) fromHelmRepository(ctx context.Context, repo sourcev1.HelmRepository, c sourcev1.HelmChart, - workDir string, force bool) (sourcev1.HelmChart, error) { - // Configure Index getter options + // Construct the Getter options from the HelmRepository data clientOpts := []helmgetter.Option{ helmgetter.WithURL(repo.Spec.URL), helmgetter.WithTimeout(repo.Spec.Timeout.Duration), helmgetter.WithPassCredentialsAll(repo.Spec.PassCredentials), } - if secret, err := r.getHelmRepositorySecret(ctx, &repo); err != nil { - return sourcev1.HelmChartNotReady(c, sourcev1.AuthenticationFailedReason, err.Error()), err - } else if secret != nil { - // Create temporary working directory for credentials - authDir := filepath.Join(workDir, "creds") - if err := os.Mkdir(authDir, 0700); err != nil { - err = fmt.Errorf("failed to create temporary directory for repository credentials: %w", 
err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + if secret, err := r.getHelmRepositorySecret(ctx, repo); secret != nil || err != nil { + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to get secret '%s': %w", repo.Spec.SecretRef.Name, err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) + // Return error as the world as observed may change + return sreconcile.ResultEmpty, e } + + // Create temporary working directory for credentials + authDir, err := util.TempDirForObj("", obj) + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to create temporary working directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.StorageOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + defer os.RemoveAll(authDir) + + // Build client options from secret opts, err := getter.ClientOptionsFromSecret(authDir, *secret) if err != nil { - err = fmt.Errorf("failed to create client options for HelmRepository '%s': %w", repo.Name, err) - return sourcev1.HelmChartNotReady(c, sourcev1.AuthenticationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to configure Helm client with secret data: %w", err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) + // Requeue as content of secret might change + return sreconcile.ResultEmpty, e } clientOpts = append(clientOpts, opts...) 
} @@ -324,139 +415,145 @@ func (r *HelmChartReconciler) fromHelmRepository(ctx context.Context, repo sourc // Initialize the chart repository chartRepo, err := repository.NewChartRepository(repo.Spec.URL, r.Storage.LocalPath(*repo.GetArtifact()), r.Getters, clientOpts) if err != nil { + // Any error requires a change in generation, + // which we should be informed about by the watcher switch err.(type) { case *url.Error: - return sourcev1.HelmChartNotReady(c, sourcev1.URLInvalidReason, err.Error()), err + e := &serror.Stalling{ + Err: fmt.Errorf("invalid Helm repository URL: %w", err), + Reason: sourcev1.URLInvalidReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, e.Err.Error()) + return sreconcile.ResultEmpty, e default: - return sourcev1.HelmChartNotReady(c, sourcev1.ChartPullFailedReason, err.Error()), err + e := &serror.Stalling{ + Err: fmt.Errorf("failed to construct Helm client: %w", err), + Reason: meta.FailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } } - // Build the chart + // Construct the chart builder with scoped configuration cb := chart.NewRemoteBuilder(chartRepo) - ref := chart.RemoteReference{Name: c.Spec.Chart, Version: c.Spec.Version} opts := chart.BuildOptions{ - ValuesFiles: c.GetValuesFiles(), - Force: force, + ValuesFiles: obj.GetValuesFiles(), + Force: obj.Generation != obj.Status.ObservedGeneration, } - if artifact := c.GetArtifact(); artifact != nil { + if artifact := obj.GetArtifact(); artifact != nil { opts.CachedChart = r.Storage.LocalPath(*artifact) } // Set the VersionMetadata to the object's Generation if ValuesFiles is defined // This ensures changes can be noticed by the Artifact consumer if len(opts.GetValuesFiles()) > 0 { - opts.VersionMetadata = strconv.FormatInt(c.Generation, 10) + opts.VersionMetadata = strconv.FormatInt(obj.Generation, 10) } - b, err := cb.Build(ctx, ref, 
filepath.Join(workDir, "chart.tgz"), opts) + + // Build the chart + ref := chart.RemoteReference{Name: obj.Spec.Chart, Version: obj.Spec.Version} + build, err := cb.Build(ctx, ref, util.TempPathForObj("", ".tgz", obj), opts) if err != nil { - return sourcev1.HelmChartNotReady(c, sourcev1.ChartPullFailedReason, err.Error()), err + return sreconcile.ResultEmpty, err } - newArtifact := r.Storage.NewArtifactFor(c.Kind, c.GetObjectMeta(), b.Version, - fmt.Sprintf("%s-%s.tgz", b.Name, b.Version)) - - // If the path of the returned build equals the cache path, - // there are no changes to the chart - if b.Path == opts.CachedChart { - // Ensure hostname is updated - if c.GetArtifact().URL != newArtifact.URL { - r.Storage.SetArtifactURL(c.GetArtifact()) - c.Status.URL = r.Storage.SetHostname(c.Status.URL) - } - return c, nil - } - - // Ensure artifact directory exists - err = r.Storage.MkdirAll(newArtifact) - if err != nil { - err = fmt.Errorf("unable to create chart directory: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // Acquire a lock for the artifact - unlock, err := r.Storage.Lock(newArtifact) - if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer unlock() - - // Copy the packaged chart to the artifact path - if err = r.Storage.CopyFromPath(&newArtifact, b.Path); err != nil { - err = fmt.Errorf("failed to write chart package to storage: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err - } - - // Update symlink - cUrl, err := r.Storage.Symlink(newArtifact, fmt.Sprintf("%s-latest.tgz", b.Name)) - if err != nil { - err = fmt.Errorf("storage error: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err - } - return sourcev1.HelmChartReady(c, newArtifact, cUrl, 
sourcev1.ChartPullSucceededReason, b.Summary()), nil + *b = *build + return sreconcile.ResultSuccess, nil } -func (r *HelmChartReconciler) fromTarballArtifact(ctx context.Context, source sourcev1.Artifact, c sourcev1.HelmChart, - workDir string, force bool) (sourcev1.HelmChart, error) { - // Create temporary working directory to untar into - sourceDir := filepath.Join(workDir, "source") +func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj *sourcev1.HelmChart, source sourcev1.Artifact, b *chart.Build) (sreconcile.Result, error) { + // Create temporary working directory + tmpDir, err := util.TempDirForObj("", obj) + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to create temporary working directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.StorageOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + defer os.RemoveAll(tmpDir) + + // Create directory to untar source into + sourceDir := filepath.Join(tmpDir, "source") if err := os.Mkdir(sourceDir, 0700); err != nil { - err = fmt.Errorf("failed to create temporary directory to untar source into: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to create directory to untar source into: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.StorageOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // Open the tarball artifact file and untar files into working directory f, err := os.Open(r.Storage.LocalPath(source)) if err != nil { - err = fmt.Errorf("artifact open error: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to open source artifact: %w", err), + 
Reason: sourcev1.StorageOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.StorageOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } if _, err = untar.Untar(f, sourceDir); err != nil { _ = f.Close() - err = fmt.Errorf("artifact untar error: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("artifact untar error: %w", err), + Reason: meta.FailedReason, + } } if err = f.Close(); err != nil { - err = fmt.Errorf("artifact close error: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("artifact close error: %w", err), + Reason: meta.FailedReason, + } } - chartPath, err := securejoin.SecureJoin(sourceDir, c.Spec.Chart) + // Calculate (secure) absolute chart path + chartPath, err := securejoin.SecureJoin(sourceDir, obj.Spec.Chart) if err != nil { - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + e := &serror.Stalling{ + Err: fmt.Errorf("path calculation for chart '%s' failed: %w", obj.Spec.Chart, err), + Reason: "IllegalPath", + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "IllegalPath", e.Err.Error()) + // We are unable to recover from this change without a change in generation + return sreconcile.ResultEmpty, e } // Setup dependency manager - authDir := filepath.Join(workDir, "creds") + authDir := filepath.Join(tmpDir, "creds") if err = os.Mkdir(authDir, 0700); err != nil { - err = fmt.Errorf("failed to create temporaRy directory for dependency credentials: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create temporary directory for dependency credentials: %w", err), + 
Reason: meta.FailedReason, + } } dm := chart.NewDependencyManager( - chart.WithRepositoryCallback(r.namespacedChartRepositoryCallback(ctx, authDir, c.GetNamespace())), + chart.WithRepositoryCallback(r.namespacedChartRepositoryCallback(ctx, authDir, obj.GetNamespace())), ) defer dm.Clear() // Configure builder options, including any previously cached chart opts := chart.BuildOptions{ - ValuesFiles: c.GetValuesFiles(), - Force: force, + ValuesFiles: obj.GetValuesFiles(), + Force: obj.Generation != obj.Status.ObservedGeneration, } - if artifact := c.Status.Artifact; artifact != nil { - opts.CachedChart = artifact.Path + if artifact := obj.Status.Artifact; artifact != nil { + opts.CachedChart = r.Storage.LocalPath(*artifact) } // Configure revision metadata for chart build if we should react to revision changes - if c.Spec.ReconcileStrategy == sourcev1.ReconcileStrategyRevision { + if obj.Spec.ReconcileStrategy == sourcev1.ReconcileStrategyRevision { rev := source.Revision - if c.Spec.SourceRef.Kind == sourcev1.GitRepositoryKind { + if obj.Spec.SourceRef.Kind == sourcev1.GitRepositoryKind { // Split the reference by the `/` delimiter which may be present, // and take the last entry which contains the SHA. split := strings.Split(source.Revision, "/") rev = split[len(split)-1] } - if kind := c.Spec.SourceRef.Kind; kind == sourcev1.GitRepositoryKind || kind == sourcev1.BucketKind { + if kind := obj.Spec.SourceRef.Kind; kind == sourcev1.GitRepositoryKind || kind == sourcev1.BucketKind { // The SemVer from the metadata is at times used in e.g. the label metadata for a resource // in a chart, which has a limited length of 63 characters. // To not fill most of this space with a full length SHA hex (40 characters for SHA-1, and @@ -477,68 +574,182 @@ func (r *HelmChartReconciler) fromTarballArtifact(ctx context.Context, source so if opts.VersionMetadata != "" { opts.VersionMetadata += "." 
} - opts.VersionMetadata += strconv.FormatInt(c.Generation, 10) + opts.VersionMetadata += strconv.FormatInt(obj.Generation, 10) } // Build chart cb := chart.NewLocalBuilder(dm) - b, err := cb.Build(ctx, chart.LocalReference{WorkDir: sourceDir, Path: chartPath}, filepath.Join(workDir, "chart.tgz"), opts) + build, err := cb.Build(ctx, chart.LocalReference{ + WorkDir: sourceDir, + Path: chartPath, + }, util.TempPathForObj("", ".tgz", obj), opts) if err != nil { - return sourcev1.HelmChartNotReady(c, reasonForBuildError(err), err.Error()), err + return sreconcile.ResultEmpty, err } - newArtifact := r.Storage.NewArtifactFor(c.Kind, c.GetObjectMeta(), b.Version, - fmt.Sprintf("%s-%s.tgz", b.Name, b.Version)) + *b = *build + return sreconcile.ResultSuccess, nil +} - // If the path of the returned build equals the cache path, - // there are no changes to the chart - if apimeta.IsStatusConditionTrue(c.Status.Conditions, meta.ReadyCondition) && - b.Path == opts.CachedChart { - // Ensure hostname is updated - if c.GetArtifact().URL != newArtifact.URL { - r.Storage.SetArtifactURL(c.GetArtifact()) - c.Status.URL = r.Storage.SetHostname(c.Status.URL) +// reconcileArtifact reconciles the given chart.Build to an v1beta1.Artifact in the Storage, and records it +// on the object. 
+func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.HelmChart, b *chart.Build) (sreconcile.Result, error) { + // Without a complete chart build, there is little to reconcile + if !b.Complete() { + return sreconcile.ResultRequeue, nil + } + + // Always restore the conditions in case they got overwritten by transient errors + defer func() { + if obj.Status.ObservedChartName == b.Name && obj.GetArtifact().HasRevision(b.Version) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, meta.ReadyCondition, reasonForBuild(b), b.Summary()) } - return c, nil + }() + + // Create artifact from build data + artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), b.Version, fmt.Sprintf("%s-%s.tgz", b.Name, b.Version)) + + // Return early if the build path equals the current artifact path + if curArtifact := obj.GetArtifact(); curArtifact != nil && r.Storage.LocalPath(*curArtifact) == b.Path { + ctrl.LoggerFrom(ctx).Info("artifact up-to-date", "revision", artifact.Revision) + return sreconcile.ResultSuccess, nil } - // Ensure artifact directory exists - err = r.Storage.MkdirAll(newArtifact) - if err != nil { - err = fmt.Errorf("unable to create chart directory: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err - } + // Garbage collect chart build once persisted to storage + defer os.Remove(b.Path) - // Acquire a lock for the artifact - unlock, err := r.Storage.Lock(newArtifact) + // Ensure artifact directory exists and acquire lock + if err := r.Storage.MkdirAll(artifact); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create artifact directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + } + unlock, err := r.Storage.Lock(artifact) if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, 
err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } defer unlock() // Copy the packaged chart to the artifact path - if err = r.Storage.CopyFromPath(&newArtifact, b.Path); err != nil { - err = fmt.Errorf("failed to write chart package to storage: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + if err = r.Storage.CopyFromPath(&artifact, b.Path); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("unable to copy Helm chart to storage: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } - // Update symlink - cUrl, err := r.Storage.Symlink(newArtifact, fmt.Sprintf("%s-latest.tgz", b.Name)) + // Record it on the object + obj.Status.Artifact = artifact.DeepCopy() + obj.Status.ObservedChartName = b.Name + + // Publish an event + r.AnnotatedEventf(obj, map[string]string{ + "revision": artifact.Revision, + "checksum": artifact.Checksum, + }, corev1.EventTypeNormal, reasonForBuild(b), b.Summary()) + + // Update symlink on a "best effort" basis + symURL, err := r.Storage.Symlink(artifact, "latest.tar.gz") if err != nil { - err = fmt.Errorf("storage error: %w", err) - return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err + r.eventLogf(ctx, obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "failed to update status URL symlink: %s", err) } - - return sourcev1.HelmChartReady(c, newArtifact, cUrl, reasonForBuildSuccess(b), b.Summary()), nil + if symURL != "" { + obj.Status.URL = symURL + } + return sreconcile.ResultSuccess, nil } -// namespacedChartRepositoryCallback returns a chart.GetChartRepositoryCallback -// scoped to the given namespace. Credentials for retrieved v1beta1.HelmRepository -// objects are stored in the given directory. 
-// The returned callback returns a repository.ChartRepository configured with the -// retrieved v1beta1.HelmRepository, or a shim with defaults if no object could -// be found. +// getSource returns the v1beta1.Source for the given object, or an error describing why the source could not be +// returned. +func (r *HelmChartReconciler) getSource(ctx context.Context, obj *sourcev1.HelmChart) (sourcev1.Source, error) { + namespacedName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SourceRef.Name, + } + var s sourcev1.Source + switch obj.Spec.SourceRef.Kind { + case sourcev1.HelmRepositoryKind: + var repo sourcev1.HelmRepository + if err := r.Client.Get(ctx, namespacedName, &repo); err != nil { + return nil, err + } + s = &repo + case sourcev1.GitRepositoryKind: + var repo sourcev1.GitRepository + if err := r.Client.Get(ctx, namespacedName, &repo); err != nil { + return nil, err + } + s = &repo + case sourcev1.BucketKind: + var bucket sourcev1.Bucket + if err := r.Client.Get(ctx, namespacedName, &bucket); err != nil { + return nil, err + } + s = &bucket + default: + return nil, fmt.Errorf("unsupported source kind '%s', must be one of: %v", obj.Spec.SourceRef.Kind, []string{ + sourcev1.HelmRepositoryKind, sourcev1.GitRepositoryKind, sourcev1.BucketKind}) + } + return s, nil +} + +// reconcileDelete handles the delete of an object. It first garbage collects all artifacts for the object from the +// artifact storage, if successful, the finalizer is removed from the object. 
+func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmChart) (sreconcile.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return sreconcile.ResultEmpty, err + } + + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Stop reconciliation as the object is being deleted + return sreconcile.ResultEmpty, nil +} + +// garbageCollect performs a garbage collection for the given v1beta1.HelmChart. It removes all but the current +// artifact, unless the deletion timestamp is set. Which will result in the removal of all artifacts for the +// resource. +func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmChart) error { + if !obj.DeletionTimestamp.IsZero() { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + return &serror.Event{ + Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err), + Reason: "GarbageCollectionFailed", + } + } else if deleted != "" { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") + } + obj.Status.Artifact = nil + return nil + } + if obj.GetArtifact() != nil { + if deleted, err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + return &serror.Event{ + Err: fmt.Errorf("garbage collection of old artifacts failed: %w", err), + Reason: "GarbageCollectionFailed", + } + } else if len(deleted) > 0 { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected old artifacts") + } + } + return nil +} + +// namespacedChartRepositoryCallback returns a chart.GetChartRepositoryCallback scoped to the given namespace. 
+// Credentials for retrieved v1beta1.HelmRepository objects are stored in the given directory. +// The returned callback returns a repository.ChartRepository configured with the retrieved v1beta1.HelmRepository, +// or a shim with defaults if no object could be found. func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Context, dir, namespace string) chart.GetChartRepositoryCallback { return func(url string) (*repository.ChartRepository, error) { repo, err := r.resolveDependencyRepository(ctx, url, namespace) @@ -559,9 +770,10 @@ func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Cont helmgetter.WithTimeout(repo.Spec.Timeout.Duration), helmgetter.WithPassCredentialsAll(repo.Spec.PassCredentials), } - if secret, err := r.getHelmRepositorySecret(ctx, repo); err != nil { - return nil, err - } else if secret != nil { + if secret, err := r.getHelmRepositorySecret(ctx, repo); secret != nil || err != nil { + if err != nil { + return nil, err + } opts, err := getter.ClientOptionsFromSecret(dir, *secret) if err != nil { return nil, err @@ -579,109 +791,37 @@ func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Cont } } -func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, chart sourcev1.HelmChart) (ctrl.Result, error) { - // Our finalizer is still present, so lets handle garbage collection - if err := r.gc(chart); err != nil { - r.event(ctx, chart, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err +func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, url string, namespace string) (*sourcev1.HelmRepository, error) { + listOpts := []client.ListOption{ + client.InNamespace(namespace), + client.MatchingFields{sourcev1.HelmRepositoryURLIndexKey: url}, + client.Limit(1), } - - // Record deleted status - 
r.recordReadiness(ctx, chart) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&chart, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &chart); err != nil { - return ctrl.Result{}, err - } - - // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil -} - -// resetStatus returns a modified v1beta1.HelmChart and a boolean indicating -// if the status field has been reset. -func (r *HelmChartReconciler) resetStatus(chart sourcev1.HelmChart) (sourcev1.HelmChart, bool) { - // We do not have an artifact, or it does no longer exist - if chart.GetArtifact() == nil || !r.Storage.ArtifactExist(*chart.GetArtifact()) { - chart = sourcev1.HelmChartProgressing(chart) - chart.Status.Artifact = nil - return chart, true - } - // The chart specification has changed - if chart.Generation != chart.Status.ObservedGeneration { - return sourcev1.HelmChartProgressing(chart), true - } - return chart, false -} - -// gc performs a garbage collection for the given v1beta1.HelmChart. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. -func (r *HelmChartReconciler) gc(chart sourcev1.HelmChart) error { - if !chart.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(chart.Kind, chart.GetObjectMeta(), "", "*")) - } - if chart.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*chart.GetArtifact()) - } - return nil -} - -// event emits a Kubernetes event and forwards the event to notification -// controller if configured. 
-func (r *HelmChartReconciler) event(ctx context.Context, chart sourcev1.HelmChart, severity, msg string) { - log := ctrl.LoggerFrom(ctx) - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&chart, "Normal", severity, msg) - } - if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &chart) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } - } -} - -func (r *HelmChartReconciler) recordReadiness(ctx context.Context, chart sourcev1.HelmChart) { - log := ctrl.LoggerFrom(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &chart) + var list sourcev1.HelmRepositoryList + err := r.Client.List(ctx, &list, listOpts...) if err != nil { - log.Error(err, "unable to record readiness metric") - return + return nil, fmt.Errorf("unable to retrieve HelmRepositoryList: %w", err) } - if rc := apimeta.FindStatusCondition(chart.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !chart.DeletionTimestamp.IsZero()) - } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !chart.DeletionTimestamp.IsZero()) + if len(list.Items) > 0 { + return &list.Items[0], nil } + return nil, fmt.Errorf("no HelmRepository found for '%s' in '%s' namespace", url, namespace) } -func (r *HelmChartReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.HelmChartStatus) error { - var chart sourcev1.HelmChart - if err := r.Get(ctx, req.NamespacedName, &chart); err != nil { - return err +func (r *HelmChartReconciler) getHelmRepositorySecret(ctx context.Context, repository *sourcev1.HelmRepository) (*corev1.Secret, error) { + if repository.Spec.SecretRef == nil { + return nil, nil } - - patch := 
client.MergeFrom(chart.DeepCopy()) - chart.Status = newStatus - - return r.Status().Patch(ctx, &chart, patch) + name := types.NamespacedName{ + Namespace: repository.GetNamespace(), + Name: repository.Spec.SecretRef.Name, + } + var secret corev1.Secret + err := r.Client.Get(ctx, name, &secret) + if err != nil { + return nil, err + } + return &secret, nil } func (r *HelmChartReconciler) indexHelmRepositoryByURL(o client.Object) []string { @@ -704,40 +844,6 @@ func (r *HelmChartReconciler) indexHelmChartBySource(o client.Object) []string { return []string{fmt.Sprintf("%s/%s", hc.Spec.SourceRef.Kind, hc.Spec.SourceRef.Name)} } -func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, url string, namespace string) (*sourcev1.HelmRepository, error) { - listOpts := []client.ListOption{ - client.InNamespace(namespace), - client.MatchingFields{sourcev1.HelmRepositoryURLIndexKey: url}, - } - var list sourcev1.HelmRepositoryList - err := r.Client.List(ctx, &list, listOpts...) 
- if err != nil { - return nil, fmt.Errorf("unable to retrieve HelmRepositoryList: %w", err) - } - if len(list.Items) > 0 { - return &list.Items[0], nil - } - return nil, fmt.Errorf("no HelmRepository found for '%s' in '%s' namespace", url, namespace) -} - -func (r *HelmChartReconciler) getHelmRepositorySecret(ctx context.Context, repository *sourcev1.HelmRepository) (*corev1.Secret, error) { - if repository.Spec.SecretRef != nil { - name := types.NamespacedName{ - Namespace: repository.GetNamespace(), - Name: repository.Spec.SecretRef.Name, - } - - var secret corev1.Secret - err := r.Client.Get(ctx, name, &secret) - if err != nil { - err = fmt.Errorf("auth secret error: %w", err) - return nil, err - } - return &secret, nil - } - return nil, nil -} - func (r *HelmChartReconciler) requestsForHelmRepositoryChange(o client.Object) []reconcile.Request { repo, ok := o.(*sourcev1.HelmRepository) if !ok { @@ -756,13 +862,11 @@ func (r *HelmChartReconciler) requestsForHelmRepositoryChange(o client.Object) [ return nil } - // TODO(hidde): unlike other places (e.g. the helm-controller), - // we have no reference here to determine if the request is coming - // from the _old_ or _new_ update event, and resources are thus - // enqueued twice. var reqs []reconcile.Request for _, i := range list.Items { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + if i.Status.ObservedSourceArtifactRevision != repo.GetArtifact().Revision { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + } } return reqs } @@ -785,13 +889,11 @@ func (r *HelmChartReconciler) requestsForGitRepositoryChange(o client.Object) [] return nil } - // TODO(hidde): unlike other places (e.g. the helm-controller), - // we have no reference here to determine if the request is coming - // from the _old_ or _new_ update event, and resources are thus - // enqueued twice. 
var reqs []reconcile.Request for _, i := range list.Items { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + if i.Status.ObservedSourceArtifactRevision != repo.GetArtifact().Revision { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + } } return reqs } @@ -814,51 +916,68 @@ func (r *HelmChartReconciler) requestsForBucketChange(o client.Object) []reconci return nil } - // TODO(hidde): unlike other places (e.g. the helm-controller), - // we have no reference here to determine if the request is coming - // from the _old_ or _new_ update event, and resources are thus - // enqueued twice. var reqs []reconcile.Request for _, i := range list.Items { - reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + if i.Status.ObservedSourceArtifactRevision != bucket.GetArtifact().Revision { + reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)}) + } } return reqs } -func (r *HelmChartReconciler) recordSuspension(ctx context.Context, chart sourcev1.HelmChart) { - if r.MetricsRecorder == nil { - return - } - log := ctrl.LoggerFrom(ctx) - - objRef, err := reference.GetReference(r.Scheme, &chart) - if err != nil { - log.Error(err, "unable to record suspended metric") - return - } - - if !chart.DeletionTimestamp.IsZero() { - r.MetricsRecorder.RecordSuspend(*objRef, false) +// eventLogf records event and logs at the same time. This log is different from +// the debug log in the event recorder in the sense that this is a simple log, +// the event recorder debug log contains complete details about the event. +func (r *HelmChartReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. 
+ if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) } else { - r.MetricsRecorder.RecordSuspend(*objRef, chart.Spec.Suspend) + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} + +// observeChartBuild records the observation on the given given build and error on the object. +func observeChartBuild(obj *sourcev1.HelmChart, build *chart.Build, err error) { + if build.HasMetadata() { + if build.Name != obj.Status.ObservedChartName || !obj.GetArtifact().HasRevision(build.Version) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewChart", build.Summary()) + } + } + + if build.Complete() { + conditions.Delete(obj, sourcev1.FetchFailedCondition) + conditions.Delete(obj, sourcev1.BuildFailedCondition) + } + + if err != nil { + var buildErr *chart.BuildError + if ok := errors.As(err, &buildErr); !ok { + buildErr = &chart.BuildError{ + Reason: chart.ErrUnknown, + Err: err, + } + } + + switch buildErr.Reason { + case chart.ErrChartMetadataPatch, chart.ErrValuesFilesMerge, chart.ErrDependencyBuild, chart.ErrChartPackage: + conditions.Delete(obj, sourcev1.FetchFailedCondition) + conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, buildErr.Reason.Reason, buildErr.Error()) + default: + conditions.Delete(obj, sourcev1.BuildFailedCondition) + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, buildErr.Reason.Reason, buildErr.Error()) + } + return } } -func reasonForBuildError(err error) string { - var buildErr *chart.BuildError - if ok := errors.As(err, &buildErr); !ok { - return sourcev1.ChartPullFailedReason +func reasonForBuild(build *chart.Build) string { + if !build.Complete() { + return "" } - switch buildErr.Reason { - case chart.ErrChartMetadataPatch, chart.ErrValuesFilesMerge, chart.ErrDependencyBuild, chart.ErrChartPackage: - return sourcev1.ChartPackageFailedReason - default: - return sourcev1.ChartPullFailedReason - } -} - -func reasonForBuildSuccess(result *chart.Build) 
string { - if result.Packaged { + if build.Packaged { return sourcev1.ChartPackageSucceededReason } return sourcev1.ChartPullSucceededReason diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index cb9838b1..b031e9d5 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -18,1320 +18,1466 @@ package controllers import ( "context" + "errors" "fmt" + "io" "net/http" - "net/url" "os" - "path" "path/filepath" + "reflect" "strings" + "testing" "time" - "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/gittestserver" - "github.com/fluxcd/pkg/helmtestserver" - "github.com/go-git/go-billy/v5/memfs" - "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/storage/memory" - . "github.com/onsi/ginkgo" + "github.com/darkowlzz/controller-check/status" . "github.com/onsi/gomega" - helmchart "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chart/loader" - "helm.sh/helm/v3/pkg/chartutil" corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/yaml" + "k8s.io/client-go/tools/record" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/helmtestserver" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + "github.com/fluxcd/pkg/testserver" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" + 
"github.com/fluxcd/source-controller/internal/helm/chart" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" ) -var _ = Describe("HelmChartReconciler", func() { +func TestHelmChartReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) const ( - timeout = time.Second * 30 - interval = time.Second * 1 - indexInterval = time.Second * 2 - pullInterval = time.Second * 3 + chartName = "helmchart" + chartVersion = "0.2.0" + chartPath = "testdata/charts/helmchart" ) - Context("HelmChart from HelmRepository", func() { - var ( - namespace *corev1.Namespace - helmServer *helmtestserver.HelmServer - err error - ) + server, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "helm-chart-test-" + randStringRunes(5)}, - } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") + g.Expect(server.PackageChartWithVersion(chartPath, chartVersion)).To(Succeed()) + g.Expect(server.GenerateIndex()).To(Succeed()) - helmServer, err = helmtestserver.NewTempHelmServer() - Expect(err).To(Succeed()) - helmServer.Start() - }) + server.Start() + defer server.Stop() - AfterEach(func() { - helmServer.Stop() - os.RemoveAll(helmServer.Root()) + ns, err := testEnv.CreateNamespace(ctx, "helmchart") + g.Expect(err).ToNot(HaveOccurred()) + defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }() - err = k8sClient.Delete(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace") - }) + repository := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-", + Namespace: ns.Name, + }, + Spec: sourcev1.HelmRepositorySpec{ + URL: server.URL(), + }, + } + g.Expect(testEnv.CreateAndWait(ctx, repository)).To(Succeed()) - It("Creates artifacts for", func() { - 
Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "helmrepository-reconcile-", + Namespace: ns.Name, + }, + Spec: sourcev1.HelmChartSpec{ + Chart: chartName, + Version: chartVersion, + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Kind: sourcev1.HelmRepositoryKind, + Name: repository.Name, + }, + }, + } + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) - repositoryKey := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - Expect(k8sClient.Create(context.Background(), &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - Interval: metav1.Duration{Duration: indexInterval}, - }, - })).Should(Succeed()) + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - created := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmChartSpec{ - Chart: "helmchart", - Version: "", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.HelmRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) - By("Expecting artifact") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && 
storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeFalse()) + // Wait for HelmChart to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) || obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) - By("Packaging a new chart version and regenerating the index") - Expect(helmServer.PackageChartWithVersion(path.Join("testdata/charts/helmchart"), "0.2.0")).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) + // Check if the object status is valid. + condns := &status.Conditions{NegativePolarity: helmChartReadyConditions.NegativePolarity} + checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) + checker.CheckErr(ctx, obj) - By("Expecting new artifact revision and GC") - Eventually(func() bool { - now := &sourcev1.HelmChart{} - _ = k8sClient.Get(context.Background(), key, now) - // Test revision change and garbage collection - return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) + // kstatus client conformance check. 
+ u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) - When("Setting valid valuesFiles attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFiles = []string{ - "values.yaml", - "override.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) + // Patch the object with reconcile request annotation. 
+ patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) - When("Setting invalid valuesFiles attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFiles = []string{ - "values.yaml", - "invalid.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) - When("Setting valid valuesFiles and valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "values.yaml" - updated.Spec.ValuesFiles = []string{ - "override.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != 
updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) + // Wait for HelmChart to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} - When("Setting valid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "override.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - _, exists := helmChart.Values["testDefault"] - Expect(exists).To(BeFalse()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting identical valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "duplicate.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), 
updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeFalse()) - }) - - When("Setting invalid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "invalid.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeFalse()) - }) - - By("Expecting missing HelmRepository error") - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed()) - updated.Spec.SourceRef.Name = "invalid" - updated.Spec.ValuesFile = "" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed()) - 
Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, updated) - for _, c := range updated.Status.Conditions { - if c.Reason == sourcev1.ChartPullFailedReason && - strings.Contains(c.Message, "failed to retrieve source") { - return true +func TestHelmChartReconciler_reconcileStorage(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmChart, storage *Storage) error + want sreconcile.Result + wantErr bool + assertArtifact *sourcev1.Artifact + assertConditions []metav1.Condition + assertPaths []string + }{ + { + name: "garbage collects", + beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { + revisions := []string{"a", "b", "c"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0644); err != nil { + return err } } - return false - }, timeout, interval).Should(BeTrue()) - Expect(updated.Status.Artifact).ToNot(BeNil()) + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/c.txt", + Revision: "c", + Checksum: "2e7d2c03a9507ae265ecf5b5356885a53393a2029d241394997265a1a25aefc6", + URL: testStorage.Hostname + "/reconcile-storage/c.txt", + }, + assertPaths: []string{ + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + want: sreconcile.ResultSuccess, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: "/reconcile-storage/invalid.txt", + Revision: "d", + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + 
"!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0644); err != nil { + return err + } + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) - By("Expecting to delete successfully") - got = &sourcev1.HelmChart{} - Eventually(func() error { - _ = k8sClient.Get(context.Background(), key, got) - return k8sClient.Delete(context.Background(), got) - }, timeout, interval).Should(Succeed()) - - By("Expecting delete to finish") - Eventually(func() error { - c := &sourcev1.HelmChart{} - return k8sClient.Get(context.Background(), key, c) - }, timeout, interval).ShouldNot(Succeed()) - - exists := func(path string) bool { - // wait for tmp sync on macOS - time.Sleep(time.Second) - _, err := os.Stat(path) - return err == nil + r := &HelmChartReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, } - By("Expecting GC on delete") - 
Eventually(exists(got.Status.Artifact.Path), timeout, interval).ShouldNot(BeTrue()) + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + got, err := r.reconcileStorage(context.TODO(), obj, nil) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } }) + } +} - It("Filters versions", func() { - versions := []string{"0.1.0", "0.1.1", "0.2.0", "0.3.0-rc.1", "1.0.0-alpha.1", "1.0.0"} - for k := range versions { - Expect(helmServer.PackageChartWithVersion(path.Join("testdata/charts/helmchart"), versions[k])).Should(Succeed()) - } +func TestHelmChartReconciler_reconcileSource(t *testing.T) { + g := NewWithT(t) - Expect(helmServer.GenerateIndex()).Should(Succeed()) + tmpDir, err := os.MkdirTemp("", "reconcile-tarball-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) - repositoryKey := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - repository := &sourcev1.HelmRepository{ + storage, err := NewStorage(tmpDir, "example.com", timeout) + g.Expect(err).ToNot(HaveOccurred()) + + gitArtifact := &sourcev1.Artifact{ + Revision: "mock-ref/abcdefg12345678", + Path: "mock.tgz", + } + g.Expect(storage.Archive(gitArtifact, "testdata/charts", nil)).To(Succeed()) + + tests := []struct { + name string + source sourcev1.Source 
+ beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, build chart.Build, obj sourcev1.HelmChart) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Observes Artifact revision and build result", + source: &sourcev1.GitRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, + Name: "gitrepository", + Namespace: "default", }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - Interval: metav1.Duration{Duration: 1 * time.Hour}, + Status: sourcev1.GitRepositoryStatus{ + Artifact: gitArtifact, }, - } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, + } + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeTrue()) + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(BeARegularFile()) - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - chart := &sourcev1.HelmChart{ + g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal(gitArtifact.Revision)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "pulled 'helmchart' chart with version '0.1.0'"), + })) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Error on unavailable source", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.SourceRef = 
sourcev1.LocalHelmChartSourceReference{ + Name: "unavailable", + Kind: sourcev1.GitRepositoryKind, + } + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Event{Err: errors.New("gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found")}, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "SourceUnavailable", "failed to get source: gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found"), + })) + }, + }, + { + name: "Stalling on unsupported source kind", + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "unavailable", + Kind: "Unsupported", + } + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("unsupported source kind 'Unsupported'")}, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "SourceUnavailable", "failed to get source: unsupported source kind"), + })) + }, + }, + //{ + // name: "Error on transient build error", + //}, + { + name: "Stalling on persistent build error", + source: &sourcev1.GitRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + Name: "gitrepository", + Namespace: "default", }, - Spec: sourcev1.HelmChartSpec{ - Chart: "helmchart", - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.HelmRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: 1 * time.Hour}, + Status: sourcev1.GitRepositoryStatus{ + Artifact: gitArtifact, }, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = 
"testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, + } + obj.Spec.ValuesFiles = []string{"invalid.yaml"} + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("values files merge error: no values file found at path")}, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ValuesFilesError", "values files merge error: no values file found at path"), + })) + }, + }, + { + name: "ResultRequeue when source artifact is unavailable", + source: &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "default", + }, + Status: sourcev1.GitRepositoryStatus{}, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{ + Name: "gitrepository", + Kind: sourcev1.GitRepositoryKind, + } + obj.Status.ObservedSourceArtifactRevision = "foo" + }, + want: sreconcile.ResultRequeue, + assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal("foo")) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, "NoSourceArtifact", "no artifact available"), + })) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + clientBuilder := fake.NewClientBuilder() + if tt.source != nil { + clientBuilder.WithRuntimeObjects(tt.source) } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) 
- Eventually(func() string { - _ = k8sClient.Get(context.Background(), key, chart) - if chart.Status.Artifact != nil { - return chart.Status.Artifact.Revision - } - return "" - }, timeout, interval).Should(Equal("1.0.0")) + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: storage, + } - chart.Spec.Version = "<0.2.0" - Expect(k8sClient.Update(context.Background(), chart)).Should(Succeed()) - Eventually(func() string { - _ = k8sClient.Get(context.Background(), key, chart) - if chart.Status.Artifact != nil { - return chart.Status.Artifact.Revision - } - return "" - }, timeout, interval).Should(Equal("0.1.1")) + obj := sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Name: "chart", + Namespace: "default", + }, + Spec: sourcev1.HelmChartSpec{}, + } + if tt.beforeFunc != nil { + tt.beforeFunc(&obj) + } - chart.Spec.Version = "invalid" - Expect(k8sClient.Update(context.Background(), chart)).Should(Succeed()) - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, chart) - for _, c := range chart.Status.Conditions { - if c.Reason == sourcev1.ChartPullFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(chart.GetArtifact()).NotTo(BeNil()) - Expect(chart.Status.Artifact.Revision).Should(Equal("0.1.1")) + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) + } + + got, err := r.reconcileSource(context.TODO(), &obj, &b) + + g.Expect(err != nil).To(Equal(tt.wantErr != nil)) + if tt.wantErr != nil { + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } + g.Expect(got).To(Equal(tt.want)) + + if tt.assertFunc != nil { + tt.assertFunc(g, b, obj) + } }) + } +} - It("Authenticates when credentials are provided", func() { - helmServer.Stop() - var username, password = "john", "doe" - helmServer.WithMiddleware(func(handler 
http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - if !ok || username != u || password != p { - w.WriteHeader(401) - return - } - handler.ServeHTTP(w, r) - }) - }) - helmServer.Start() +func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) { + g := NewWithT(t) - Expect(helmServer.PackageChartWithVersion(path.Join("testdata/charts/helmchart"), "0.1.0")).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) + const ( + chartName = "helmchart" + chartVersion = "0.2.0" + higherChartVersion = "0.3.0" + chartPath = "testdata/charts/helmchart" + ) - secretKey := types.NamespacedName{ - Name: "helmrepository-auth-" + randStringRunes(5), - Namespace: namespace.Name, - } - secret := &corev1.Secret{ + serverFactory, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(serverFactory.Root()) + + for _, ver := range []string{chartVersion, higherChartVersion} { + g.Expect(serverFactory.PackageChartWithVersion(chartPath, ver)).To(Succeed()) + } + g.Expect(serverFactory.GenerateIndex()).To(Succeed()) + + type options struct { + username string + password string + } + + tests := []struct { + name string + server options + secret *corev1.Secret + beforeFunc func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Reconciles chart build", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = "helmchart" + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(higherChartVersion)) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + 
}, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Reconciles chart build with repository credentials", + server: options{ + username: "foo", + password: "bar", + }, + secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: secretKey.Name, - Namespace: secretKey.Namespace, + Name: "auth", }, Data: map[string][]byte{ - "username": []byte(username), - "password": []byte(password), + "username": []byte("foo"), + "password": []byte("bar"), }, - } - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) + }, + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Uses artifact as build cache", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion + obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).To(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) + g.Expect(build.Path).To(BeARegularFile()) + }, + }, + { + name: "Sets Generation as VersionMetadata with values files", + beforeFunc: func(obj *sourcev1.HelmChart, repository 
*sourcev1.HelmRepository) { + obj.Spec.Chart = chartName + obj.Generation = 3 + obj.Spec.ValuesFiles = []string{"values.yaml", "override.yaml"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(higherChartVersion + "+3")) + g.Expect(build.Path).ToNot(BeEmpty()) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Forces build on generation change", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + obj.Generation = 3 + obj.Spec.Chart = chartName + obj.Spec.Version = chartVersion - By("Creating repository and waiting for artifact") - repositoryKey := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, + obj.Status.ObservedGeneration = 2 + obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"} + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Name).To(Equal(chartName)) + g.Expect(build.Version).To(Equal(chartVersion)) + g.Expect(build.Path).ToNot(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path))) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Event on unsuccessful secret retrieval", + beforeFunc: func(_ *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "invalid", + } + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Event{Err: errors.New("failed to get secret 'invalid'")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + 
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret 'invalid'"), + })) + }, + }, + { + name: "Stalling on invalid client options", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.URL = "file://unsupported" // Unsupported protocol + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("scheme \"file\" not supported")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "failed to construct Helm client"), + })) + }, + }, + { + name: "Stalling on invalid repository URL", + beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) { + repository.Spec.URL = "://unsupported" // Invalid URL + }, + want: sreconcile.ResultEmpty, + wantErr: &serror.Stalling{Err: errors.New("missing protocol scheme")}, + assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "invalid Helm repository URL"), + })) + }, + }, + { + name: "BuildError on temporary build error", + beforeFunc: func(obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) { + obj.Spec.Chart = "invalid" + }, + want: sreconcile.ResultEmpty, + wantErr: &chart.BuildError{Err: errors.New("failed to get chart version for remote reference")}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + server := testserver.NewHTTPServer(serverFactory.Root()) + server.Start() + defer 
server.Stop() + + if len(tt.server.username+tt.server.password) > 0 { + server.WithMiddleware(func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + if !ok || u != tt.server.username || p != tt.server.password { + w.WriteHeader(401) + return + } + handler.ServeHTTP(w, r) + }) + }) } + + clientBuilder := fake.NewClientBuilder() + if tt.secret != nil { + clientBuilder.WithObjects(tt.secret.DeepCopy()) + } + + storage, err := newTestStorage(server) + g.Expect(err).ToNot(HaveOccurred()) + + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + EventRecorder: record.NewFakeRecorder(32), + Getters: testGetters, + Storage: storage, + } + repository := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, + GenerateName: "helmrepository-", }, Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - SecretRef: &meta.LocalObjectReference{ - Name: secretKey.Name, + URL: server.URL(), + Timeout: &metav1.Duration{Duration: timeout}, + }, + Status: sourcev1.HelmRepositoryStatus{ + Artifact: &sourcev1.Artifact{ + Path: "index.yaml", }, - Interval: metav1.Duration{Duration: pullInterval}, }, } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), repositoryKey, repository) - return repository.Status.Artifact != nil - }, timeout, interval).Should(BeTrue()) - - By("Deleting secret before applying HelmChart") - Expect(k8sClient.Delete(context.Background(), secret)).Should(Succeed()) - - By("Applying HelmChart") - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - chart := &sourcev1.HelmChart{ + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - 
}, - Spec: sourcev1.HelmChartSpec{ - Chart: "helmchart", - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.HelmRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, + GenerateName: "helmrepository-", }, + Spec: sourcev1.HelmChartSpec{}, } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - By("Expecting missing secret error") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason && - strings.Contains(c.Message, "auth secret error") { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Applying secret with missing keys") - secret.ResourceVersion = "" - secret.Data["username"] = []byte{} - secret.Data["password"] = []byte{} - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - - By("Expecting 401") - Eventually(func() bool { - got := &sourcev1.HelmChart{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.ChartPullFailedReason && - strings.Contains(c.Message, "401 Unauthorized") { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Adding username key") - secret.Data["username"] = []byte(username) - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - - By("Expecting missing field error") - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Adding password key") - secret.Data["password"] = []byte(password) - Expect(k8sClient.Update(context.Background(), 
secret)).Should(Succeed()) - - By("Expecting artifact") - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return apimeta.IsStatusConditionTrue(got.Status.Conditions, meta.ReadyCondition) - }, timeout, interval).Should(BeTrue()) - Expect(got.Status.Artifact).ToNot(BeNil()) - }) - }) - - Context("HelmChart from GitRepository", func() { - var ( - namespace *corev1.Namespace - gitServer *gittestserver.GitServer - err error - ) - - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "test-git-repository-" + randStringRunes(5)}, + if tt.beforeFunc != nil { + tt.beforeFunc(obj, repository) } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") - gitServer, err = gittestserver.NewTempGitServer() - Expect(err).NotTo(HaveOccurred()) - gitServer.AutoCreate() - Expect(gitServer.StartHTTP()).To(Succeed()) - }) - - AfterEach(func() { - gitServer.StopHTTP() - os.RemoveAll(gitServer.Root()) - - err = k8sClient.Delete(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace") - }) - - It("Creates artifacts for", func() { - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - _, err = gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - chartDir := "testdata/charts" - Expect(filepath.Walk(chartDir, func(p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - switch { - case fi.Mode().IsDir(): - return fs.MkdirAll(p, os.ModeDir) - case !fi.Mode().IsRegular(): - return nil - } - - b, err := 
os.ReadFile(p) - if err != nil { - return err - } - - ff, err := fs.Create(p) - if err != nil { - return err - } - if _, err := ff.Write(b); err != nil { - return err - } - _ = ff.Close() - _, err = wt.Add(p) - - return err - })).To(Succeed()) - - _, err = wt.Commit("Helm charts", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - err = gitrepo.Push(&git.PushOptions{}) - Expect(err).NotTo(HaveOccurred()) - - repositoryKey := types.NamespacedName{ - Name: fmt.Sprintf("git-repository-sample-%s", randStringRunes(5)), - Namespace: namespace.Name, + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) } - repository := &sourcev1.GitRepository{ + got, err := r.buildFromHelmRepository(context.TODO(), obj, repository, &b) + + g.Expect(err != nil).To(Equal(tt.wantErr != nil)) + if tt.wantErr != nil { + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } + g.Expect(got).To(Equal(tt.want)) + + if tt.assertFunc != nil { + tt.assertFunc(g, obj, b) + } + }) + } +} + +func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) { + g := NewWithT(t) + + tmpDir, err := os.MkdirTemp("", "reconcile-tarball-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + storage, err := NewStorage(tmpDir, "example.com", timeout) + g.Expect(err).ToNot(HaveOccurred()) + + chartsArtifact := &sourcev1.Artifact{ + Revision: "mock-ref/abcdefg12345678", + Path: "mock.tgz", + } + g.Expect(storage.Archive(chartsArtifact, "testdata/charts", nil)).To(Succeed()) + yamlArtifact := &sourcev1.Artifact{ + Revision: "9876abcd", + Path: "values.yaml", + } + g.Expect(storage.CopyFromPath(yamlArtifact, "testdata/charts/helmchart/values.yaml")).To(Succeed()) + cachedArtifact := &sourcev1.Artifact{ + Revision: "0.1.0", + Path: "cached.tgz", + } + 
g.Expect(storage.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed()) + + tests := []struct { + name string + source sourcev1.Artifact + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr error + assertFunc func(g *WithT, build chart.Build) + cleanFunc func(g *WithT, build *chart.Build) + }{ + { + name: "Resolves chart dependencies and builds", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchartwithdeps" + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchartwithdeps")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.ResolvedDependencies).To(Equal(3)) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "ReconcileStrategyRevision sets VersionMetadata", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart" + obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind + obj.Spec.ReconcileStrategy = sourcev1.ReconcileStrategyRevision + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0+abcdefg12345")) + g.Expect(build.ResolvedDependencies).To(Equal(0)) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "ValuesFiles sets Generation as VersionMetadata", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Generation = 3 + obj.Spec.Chart = "testdata/charts/helmchart" + obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind + obj.Spec.ValuesFiles = []string{ + filepath.Join(obj.Spec.Chart, 
"values.yaml"), + filepath.Join(obj.Spec.Chart, "override.yaml"), + } + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0+3")) + g.Expect(build.ResolvedDependencies).To(Equal(0)) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Chart from storage cache", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Status.Artifact = cachedArtifact.DeepCopy() + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).To(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + }, + }, + { + name: "Generation change forces rebuild", + source: *chartsArtifact.DeepCopy(), + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Generation = 2 + obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz" + obj.Status.Artifact = cachedArtifact.DeepCopy() + obj.Status.ObservedGeneration = 1 + }, + want: sreconcile.ResultSuccess, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Name).To(Equal("helmchart")) + g.Expect(build.Version).To(Equal("0.1.0")) + g.Expect(build.Path).ToNot(Equal(storage.LocalPath(*cachedArtifact.DeepCopy()))) + g.Expect(build.Path).To(BeARegularFile()) + }, + cleanFunc: func(g *WithT, build *chart.Build) { + g.Expect(os.Remove(build.Path)).To(Succeed()) + }, + }, + { + name: "Empty source artifact", + source: sourcev1.Artifact{}, + want: sreconcile.ResultEmpty, + wantErr: &serror.Event{Err: errors.New("no such file or directory")}, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + }, + }, + 
{ + name: "Invalid artifact type", + source: *yamlArtifact, + want: sreconcile.ResultEmpty, + wantErr: &serror.Event{Err: errors.New("artifact untar error: requires gzip-compressed body")}, + assertFunc: func(g *WithT, build chart.Build) { + g.Expect(build.Complete()).To(BeFalse()) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + Client: fake.NewClientBuilder().Build(), + EventRecorder: record.NewFakeRecorder(32), + Storage: storage, + Getters: testGetters, + } + + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, + Name: "artifact", + Namespace: "default", }, + Spec: sourcev1.HelmChartSpec{}, } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, + if tt.beforeFunc != nil { + tt.beforeFunc(obj) } - chart := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmChartSpec{ - Chart: "testdata/charts/helmchartwithdeps", - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.GitRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, - }, + + var b chart.Build + if tt.cleanFunc != nil { + defer tt.cleanFunc(g, &b) } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - By("Expecting artifact") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) - 
}, timeout, interval).Should(BeTrue()) + got, err := r.buildFromTarballArtifact(context.TODO(), obj, tt.source, &b) + g.Expect(err != nil).To(Equal(tt.wantErr != nil)) + if tt.wantErr != nil { + g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String())) + g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error())) + } + g.Expect(got).To(Equal(tt.want)) - By("Committing a new version in the chart metadata") - f, err := fs.OpenFile(fs.Join(chartDir, "helmchartwithdeps", chartutil.ChartfileName), os.O_RDWR, os.FileMode(0600)) - Expect(err).NotTo(HaveOccurred()) + if tt.assertFunc != nil { + tt.assertFunc(g, b) + } + }) + } +} - b := make([]byte, 2048) - n, err := f.Read(b) - Expect(err).NotTo(HaveOccurred()) - b = b[0:n] - - y := new(helmchart.Metadata) - err = yaml.Unmarshal(b, y) - Expect(err).NotTo(HaveOccurred()) - - y.Version = "0.2.0" - b, err = yaml.Marshal(y) - Expect(err).NotTo(HaveOccurred()) - - _, err = f.Write(b) - Expect(err).NotTo(HaveOccurred()) - - err = f.Close() - Expect(err).NotTo(HaveOccurred()) - - commit, err := wt.Commit("Chart version bump", &git.CommitOptions{ - Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }, - All: true, - }) - Expect(err).NotTo(HaveOccurred()) - - err = gitrepo.Push(&git.PushOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("Expecting new artifact revision and GC") - now := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, now) - // Test revision change and garbage collection - return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*now.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values).ToNot(BeNil()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeFalse()) - 
- When("Setting reconcileStrategy to Revision", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ReconcileStrategy = sourcev1.ReconcileStrategyRevision - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Revision != updated.Status.Artifact.Revision && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - Expect(got.Status.Artifact.Revision).To(ContainSubstring(updated.Status.Artifact.Revision)) - Expect(got.Status.Artifact.Revision).To(ContainSubstring(commit.String()[0:12])) - }) - - When("Setting valid valuesFiles attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFiles = []string{ - "./testdata/charts/helmchart/values.yaml", - "./testdata/charts/helmchart/override.yaml", +func TestHelmChartReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + build *chart.Build + beforeFunc func(obj *sourcev1.HelmChart) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + afterFunc func(t *WithT, obj *sourcev1.HelmChart) + }{ + { + name: "Incomplete build requeues and does not update status", + build: &chart.Build{}, + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: sreconcile.ResultRequeue, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "Foo", ""), + }, + }, + { + name: "Copying artifact to storage from build makes Ready=True", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, 
sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Checksum).To(Equal("bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Up-to-date chart build does not persist artifact to storage", + build: &chart.Build{ + Name: "helmchart", + Version: "0.1.0", + Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: "testdata/charts/helmchart-0.1.0.tgz", } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting invalid valuesFiles attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFiles = []string{ - "./testdata/charts/helmchart/values.yaml", - 
"./testdata/charts/helmchart/invalid.yaml", + }, + want: sreconcile.ResultSuccess, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz")) + t.Expect(obj.Status.ObservedChartName).To(BeEmpty()) + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + }, + { + name: "Restores conditions in case artifact matches current chart build", + build: &chart.Build{ + Name: "helmchart", + Version: "0.1.0", + Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"), + Packaged: true, + }, + beforeFunc: func(obj *sourcev1.HelmChart) { + obj.Status.ObservedChartName = "helmchart" + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "0.1.0", + Path: "testdata/charts/helmchart-0.1.0.tgz", } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) + }, + want: sreconcile.ResultSuccess, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz")) + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPackageSucceededReason, "packaged 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating new artifact", + 
build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"), + beforeFunc: func(obj *sourcev1.HelmChart) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) + t.Expect(obj.GetArtifact().Checksum).To(Equal("bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a")) + t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0")) + t.Expect(obj.Status.URL).ToNot(BeEmpty()) + t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart")) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"), + afterFunc: func(t *WithT, obj *sourcev1.HelmChart) { + t.Expect(obj.GetArtifact()).ToNot(BeNil()) - When("Setting valid valuesFiles and valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "./testdata/charts/helmchart/values.yaml" - updated.Spec.ValuesFiles = []string{ - "./testdata/charts/helmchart/override.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) 
- Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"), + }, + }, + } - When("Setting valid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "./testdata/charts/helmchart/override.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - // Since a lot of chart updates took place above, checking - // artifact checksum isn't the most reliable way to find out - // if the artifact was changed due to the current update. - // Use status condition to be sure. 
- for _, condn := range got.Status.Conditions { - if strings.Contains(condn.Message, "with merged values files [./testdata/charts/helmchart/override.yaml]") && - storage.ArtifactExist(*got.Status.Artifact) { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - _, exists := helmChart.Values["testDefault"] - Expect(exists).To(BeFalse()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) - When("Setting invalid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "./testdata/charts/helmchart/invalid.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - _, exists := helmChart.Values["testDefault"] - Expect(exists).To(BeFalse()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - }) - - It("Creates artifacts with .tgz file", func() { - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - 
Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - _, err = gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - chartDir := "testdata/charts/helmchart" - helmChart, err := loader.LoadDir(chartDir) - Expect(err).NotTo(HaveOccurred()) - - chartPackagePath, err := os.MkdirTemp("", fmt.Sprintf("chartpackage-%s-%s", helmChart.Name(), randStringRunes(5))) - Expect(err).NotTo(HaveOccurred()) - defer os.RemoveAll(chartPackagePath) - - pkg, err := chartutil.Save(helmChart, chartPackagePath) - Expect(err).NotTo(HaveOccurred()) - - b, err := os.ReadFile(pkg) - Expect(err).NotTo(HaveOccurred()) - - tgz := filepath.Base(pkg) - ff, err := fs.Create(tgz) - Expect(err).NotTo(HaveOccurred()) - - _, err = ff.Write(b) - Expect(err).NotTo(HaveOccurred()) - - ff.Close() - _, err = wt.Add(tgz) - Expect(err).NotTo(HaveOccurred()) - - _, err = wt.Commit("Helm chart", &git.CommitOptions{Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }}) - Expect(err).NotTo(HaveOccurred()) - - err = gitrepo.Push(&git.PushOptions{}) - Expect(err).NotTo(HaveOccurred()) - - repositoryKey := types.NamespacedName{ - Name: fmt.Sprintf("git-repository-sample-%s", randStringRunes(5)), - Namespace: namespace.Name, + r := &HelmChartReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, } - repository := &sourcev1.GitRepository{ + + obj := &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, - }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, + GenerateName: "reconcile-artifact-", + Generation: 1, }, + Status: sourcev1.HelmChartStatus{}, + } + if tt.beforeFunc != nil { + 
tt.beforeFunc(obj) } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, + got, err := r.reconcileArtifact(ctx, obj, tt.build) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + if tt.afterFunc != nil { + tt.afterFunc(g, obj) } - chart := &sourcev1.HelmChart{ - ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmChartSpec{ - Chart: tgz, - Version: "*", - SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.GitRepositoryKind, - Name: repositoryKey.Name, - }, - Interval: metav1.Duration{Duration: pullInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - - By("Expecting artifact") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) }) - }) + } +} - Context("HelmChart from GitRepository with HelmRepository dependency", func() { - var ( - namespace *corev1.Namespace - gitServer *gittestserver.GitServer - helmServer *helmtestserver.HelmServer - err error - ) +func TestHelmChartReconciler_getHelmRepositorySecret(t *testing.T) { + mock := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "secret", + Namespace: "foo", + }, + Data: map[string][]byte{ + "key": []byte("bar"), + }, + } + clientBuilder := fake.NewClientBuilder() + clientBuilder.WithObjects(mock) - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: 
"test-git-repository-" + randStringRunes(5)}, - } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + } - gitServer, err = gittestserver.NewTempGitServer() - Expect(err).NotTo(HaveOccurred()) - gitServer.AutoCreate() - Expect(gitServer.StartHTTP()).To(Succeed()) - - helmServer, err = helmtestserver.NewTempHelmServer() - Expect(err).To(Succeed()) - helmServer.Start() - }) - - AfterEach(func() { - gitServer.StopHTTP() - os.RemoveAll(gitServer.Root()) - - helmServer.Stop() - os.RemoveAll(helmServer.Root()) - - err = k8sClient.Delete(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace") - }) - - It("Creates artifacts for", func() { - helmServer.Stop() - var username, password = "john", "doe" - helmServer.WithMiddleware(func(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - if !ok || username != u || password != p { - w.WriteHeader(401) - return - } - handler.ServeHTTP(w, r) - }) - }) - helmServer.Start() - - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - secretKey := types.NamespacedName{ - Name: "helmrepository-auth-" + randStringRunes(5), - Namespace: namespace.Name, - } - secret := &corev1.Secret{ + tests := []struct { + name string + repository *sourcev1.HelmRepository + want *corev1.Secret + wantErr bool + }{ + { + name: "Existing secret reference", + repository: &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: secretKey.Name, - Namespace: secretKey.Namespace, - }, - StringData: map[string]string{ - "username": username, - "password": password, - }, - } - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - - By("Creating repository and waiting for 
artifact") - helmRepositoryKey := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - helmRepository := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - Name: helmRepositoryKey.Name, - Namespace: helmRepositoryKey.Namespace, + Namespace: mock.Namespace, }, Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), SecretRef: &meta.LocalObjectReference{ - Name: secretKey.Name, + Name: mock.Name, }, - Interval: metav1.Duration{Duration: pullInterval}, }, - } - Expect(k8sClient.Create(context.Background(), helmRepository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), helmRepository) - - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), helmRepositoryKey, helmRepository) - return helmRepository.Status.Artifact != nil - }, timeout, interval).Should(BeTrue()) - - fs := memfs.New() - gitrepo, err := git.Init(memory.NewStorage(), fs) - Expect(err).NotTo(HaveOccurred()) - - wt, err := gitrepo.Worktree() - Expect(err).NotTo(HaveOccurred()) - - u, err := url.Parse(gitServer.HTTPAddress()) - Expect(err).NotTo(HaveOccurred()) - u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5))) - - _, err = gitrepo.CreateRemote(&config.RemoteConfig{ - Name: "origin", - URLs: []string{u.String()}, - }) - Expect(err).NotTo(HaveOccurred()) - - chartDir := "testdata/charts/helmchartwithdeps" - Expect(filepath.Walk(chartDir, func(p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - switch { - case fi.Mode().IsDir(): - return fs.MkdirAll(p, os.ModeDir) - case !fi.Mode().IsRegular(): - return nil - } - - b, err := os.ReadFile(p) - if err != nil { - return err - } - - ff, err := fs.Create(p) - if err != nil { - return err - } - if _, err := ff.Write(b); err != nil { - return err - } - _ = ff.Close() - _, err = wt.Add(p) - - return err - })).To(Succeed()) - - By("Configuring the chart dependency") - filePath := fs.Join(chartDir, 
chartutil.ChartfileName) - f, err := fs.OpenFile(filePath, os.O_RDWR, os.FileMode(0600)) - Expect(err).NotTo(HaveOccurred()) - - b := make([]byte, 2048) - n, err := f.Read(b) - Expect(err).NotTo(HaveOccurred()) - b = b[0:n] - - err = f.Close() - Expect(err).NotTo(HaveOccurred()) - - y := new(helmchart.Metadata) - err = yaml.Unmarshal(b, y) - Expect(err).NotTo(HaveOccurred()) - - y.Dependencies = []*helmchart.Dependency{ - { - Name: "helmchart", - Version: ">=0.1.0", - Repository: helmRepository.Spec.URL, + }, + want: mock, + }, + { + name: "Empty secret reference", + repository: &sourcev1.HelmRepository{ + Spec: sourcev1.HelmRepositorySpec{ + SecretRef: nil, }, - } - - b, err = yaml.Marshal(y) - Expect(err).NotTo(HaveOccurred()) - - ff, err := fs.Create(filePath) - Expect(err).NotTo(HaveOccurred()) - - _, err = ff.Write(b) - Expect(err).NotTo(HaveOccurred()) - - err = ff.Close() - Expect(err).NotTo(HaveOccurred()) - - _, err = wt.Commit("Helm charts", &git.CommitOptions{ - Author: &object.Signature{ - Name: "John Doe", - Email: "john@example.com", - When: time.Now(), - }, - All: true, - }) - Expect(err).NotTo(HaveOccurred()) - - err = gitrepo.Push(&git.PushOptions{}) - Expect(err).NotTo(HaveOccurred()) - - repositoryKey := types.NamespacedName{ - Name: fmt.Sprintf("git-repository-sample-%s", randStringRunes(5)), - Namespace: namespace.Name, - } - repository := &sourcev1.GitRepository{ + }, + want: nil, + }, + { + name: "Error on client error", + repository: &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: repositoryKey.Name, - Namespace: repositoryKey.Namespace, + Namespace: "different", }, - Spec: sourcev1.GitRepositorySpec{ - URL: u.String(), - Interval: metav1.Duration{Duration: indexInterval}, + Spec: sourcev1.HelmRepositorySpec{ + SecretRef: &meta.LocalObjectReference{ + Name: mock.Name, + }, }, - } - Expect(k8sClient.Create(context.Background(), repository)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), repository) + }, + 
wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) - key := types.NamespacedName{ - Name: "helmchart-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - chart := &sourcev1.HelmChart{ + got, err := r.getHelmRepositorySecret(context.TODO(), tt.repository) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + }) + } +} + +func TestHelmChartReconciler_getSource(t *testing.T) { + mocks := []client.Object{ + &sourcev1.HelmRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.HelmRepositoryKind, + APIVersion: "source.toolkit.fluxcd.io/v1beta2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "helmrepository", + Namespace: "foo", + }, + }, + &sourcev1.GitRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.GitRepositoryKind, + APIVersion: "source.toolkit.fluxcd.io/v1beta2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "gitrepository", + Namespace: "foo", + }, + }, + &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + APIVersion: "source.toolkit.fluxcd.io/v1beta2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + Namespace: "foo", + }, + }, + } + clientBuilder := fake.NewClientBuilder() + clientBuilder.WithObjects(mocks...) 
+ + r := &HelmChartReconciler{ + Client: clientBuilder.Build(), + } + + tests := []struct { + name string + obj *sourcev1.HelmChart + want sourcev1.Source + wantErr bool + }{ + { + name: "Get HelmRepository source for reference", + obj: &sourcev1.HelmChart{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + Namespace: mocks[0].GetNamespace(), }, Spec: sourcev1.HelmChartSpec{ - Chart: "testdata/charts/helmchartwithdeps", - Version: "*", SourceRef: sourcev1.LocalHelmChartSourceReference{ - Kind: sourcev1.GitRepositoryKind, - Name: repositoryKey.Name, + Name: mocks[0].GetName(), + Kind: mocks[0].GetObjectKind().GroupVersionKind().Kind, }, - Interval: metav1.Duration{Duration: pullInterval}, + }, + }, + want: mocks[0].(sourcev1.Source), + }, + { + name: "Get GitRepository source for reference", + obj: &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mocks[1].GetNamespace(), + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[1].GetName(), + Kind: mocks[1].GetObjectKind().GroupVersionKind().Kind, + }, + }, + }, + want: mocks[1].(sourcev1.Source), + }, + { + name: "Get Bucket source for reference", + obj: &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mocks[2].GetNamespace(), + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[2].GetName(), + Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind, + }, + }, + }, + want: mocks[2].(sourcev1.Source), + }, + { + name: "Error on client error", + obj: &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: mocks[2].GetNamespace(), + }, + Spec: sourcev1.HelmChartSpec{ + SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: mocks[1].GetName(), + Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind, + }, + }, + }, + wantErr: true, + }, + { + name: "Error on unsupported source kind", + obj: &sourcev1.HelmChart{ + Spec: sourcev1.HelmChartSpec{ + 
SourceRef: sourcev1.LocalHelmChartSourceReference{ + Name: "unsupported", + Kind: "Unsupported", + }, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := r.getSource(context.TODO(), tt.obj) + + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + g.Expect(got).To(BeNil()) + return + } + + g.Expect(got).To(Equal(tt.want)) + g.Expect(err).ToNot(HaveOccurred()) + }) + } +} + +func TestHelmChartReconciler_reconcileDelete(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + Name: "reconcile-delete-", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{ + sourcev1.SourceFinalizer, + }, + }, + Status: sourcev1.HelmChartStatus{}, + } + + artifact := testStorage.NewArtifactFor(sourcev1.HelmChartKind, obj.GetObjectMeta(), "revision", "foo.txt") + obj.Status.Artifact = &artifact + + got, err := r.reconcileDelete(ctx, obj) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(got).To(Equal(sreconcile.ResultEmpty)) + g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse()) + g.Expect(obj.Status.Artifact).To(BeNil()) +} + +func TestHelmChartReconciler_reconcileSubRecs(t *testing.T) { + // Helper to build simple helmChartReconcilerFunc with result and error. 
+ buildReconcileFuncs := func(r sreconcile.Result, e error) helmChartReconcilerFunc { + return func(_ context.Context, _ *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + return r, e + } + } + + tests := []struct { + name string + generation int64 + observedGeneration int64 + reconcileFuncs []helmChartReconcilerFunc + wantResult sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "successful reconciliations", + reconcileFuncs: []helmChartReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + }, + { + name: "successful reconciliation with generation difference", + generation: 3, + observedGeneration: 2, + reconcileFuncs: []helmChartReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new object generation (3)"), + }, + }, + { + name: "failed reconciliation", + reconcileFuncs: []helmChartReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + }, + { + name: "multiple object status conditions mutations", + reconcileFuncs: []helmChartReconcilerFunc{ + func(_ context.Context, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + return sreconcile.ResultSuccess, nil + }, + func(_ context.Context, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, "Progressing", "creating artifact") + return sreconcile.ResultSuccess, nil + }, + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "Progressing", "creating artifact"), + }, + }, + { + name: "subrecs with one result=Requeue, no error", + reconcileFuncs: []helmChartReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultRequeue, + wantErr: false, + }, + { + name: "subrecs with error before result=Requeue", + reconcileFuncs: []helmChartReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmChartReconciler{} + obj := &sourcev1.HelmChart{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: tt.generation, + }, + Status: sourcev1.HelmChartStatus{ + ObservedGeneration: tt.observedGeneration, }, } - Expect(k8sClient.Create(context.Background(), chart)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), chart) - By("Expecting artifact") - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeFalse()) + got, err := r.reconcile(context.TODO(), obj, tt.reconcileFuncs) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.wantResult)) - When("Setting valid 
valuesFiles attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFiles = []string{ - "./testdata/charts/helmchartwithdeps/values.yaml", - "./testdata/charts/helmchartwithdeps/override.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting invalid valuesFiles attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFiles = []string{ - "./testdata/charts/helmchartwithdeps/values.yaml", - "./testdata/charts/helmchartwithdeps/invalid.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - 
Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting valid valuesFiles and valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "./testdata/charts/helmchartwithdeps/values.yaml" - updated.Spec.ValuesFiles = []string{ - "./testdata/charts/helmchartwithdeps/override.yaml", - } - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(helmChart.Values["testDefault"]).To(BeTrue()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting valid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "./testdata/charts/helmchartwithdeps/override.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - 
Expect(err).NotTo(HaveOccurred()) - _, exists := helmChart.Values["testDefault"] - Expect(exists).To(BeFalse()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) - - When("Setting invalid valuesFile attribute", func() { - updated := &sourcev1.HelmChart{} - Expect(k8sClient.Get(context.Background(), key, updated)).To(Succeed()) - updated.Spec.ValuesFile = "./testdata/charts/helmchartwithdeps/invalid.yaml" - updated.Spec.ValuesFiles = []string{} - Expect(k8sClient.Update(context.Background(), updated)).To(Succeed()) - got := &sourcev1.HelmChart{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) - Expect(err).NotTo(HaveOccurred()) - _, exists := helmChart.Values["testDefault"] - Expect(exists).To(BeFalse()) - Expect(helmChart.Values["testOverride"]).To(BeTrue()) - }) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) }) - }) -}) + } +} + +func mockChartBuild(name, version, path string) *chart.Build { + var copyP string + if path != "" { + f, err := os.Open(path) + if err == nil { + defer f.Close() + ff, err := os.CreateTemp("", "chart-mock-*.tgz") + if err == nil { + defer ff.Close() + if _, err = io.Copy(ff, f); err == nil { + copyP = ff.Name() + } + } + } + } + return &chart.Build{ + Name: name, + Version: version, + Path: copyP, + } +} diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index d82bdad6..bfdce295 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -18,19 +18,18 @@ package controllers import ( "context" + 
"errors" "fmt" "net/url" "os" "time" + "github.com/docker/go-units" helmgetter "helm.sh/helm/v3/pkg/getter" corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -38,15 +37,45 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/repository" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" + "github.com/fluxcd/source-controller/internal/reconcile/summarize" ) +// helmRepoReadyConditions contains all the conditions information needed +// for HelmRepository Ready status conditions summary calculation. 
+var helmRepoReadyConditions = summarize.Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, +} + // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete @@ -55,18 +84,23 @@ import ( // HelmRepositoryReconciler reconciles a HelmRepository object type HelmRepositoryReconciler struct { client.Client - Scheme *runtime.Scheme - Storage *Storage - Getters helmgetter.Providers - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder + kuberecorder.EventRecorder + helper.Metrics + + Getters helmgetter.Providers + Storage *Storage + ControllerName string } type HelmRepositoryReconcilerOptions struct { MaxConcurrentReconciles int } +// helmRepoReconcilerFunc is the function type for all the helm repository +// reconciler functions. The reconciler functions are grouped together and +// executed serially to perform the main operation of the reconciler. 
+type helmRepoReconcilerFunc func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) + func (r *HelmRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{}) } @@ -79,327 +113,407 @@ func (r *HelmRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, Complete(r) } -func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) - var repository sourcev1.HelmRepository - if err := r.Get(ctx, req.NamespacedName, &repository); err != nil { + // Fetch the HelmRepository + obj := &sourcev1.HelmRepository{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, repository) + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&repository, sourcev1.SourceFinalizer) { - patch := client.MergeFrom(repository.DeepCopy()) - controllerutil.AddFinalizer(&repository, sourcev1.SourceFinalizer) - if err := r.Patch(ctx, &repository, patch); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !repository.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, repository) - } - - // Return early if the object is suspended. 
- if repository.Spec.Suspend { - log.Info("Reconciliation is suspended for this object") + // Return early if the object is suspended + if obj.Spec.Suspend { + log.Info("reconciliation is suspended for this object") return ctrl.Result{}, nil } - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - return ctrl.Result{}, err + // Initialize the patch helper with the current version of the object. + patchHelper, err := patch.NewHelper(obj, r.Client) + if err != nil { + return ctrl.Result{}, err + } + + // recResult stores the abstracted reconcile result. + var recResult sreconcile.Result + + // Always attempt to patch the object after each reconciliation. + // NOTE: The final runtime result and error are set in this block. + defer func() { + summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper) + summarizeOpts := []summarize.Option{ + summarize.WithConditions(helmRepoReadyConditions), + summarize.WithReconcileResult(recResult), + summarize.WithReconcileError(retErr), + summarize.WithIgnoreNotFound(), + summarize.WithProcessors( + summarize.RecordContextualError, + summarize.RecordReconcileReq, + ), + summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetInterval().Duration}), + summarize.WithPatchFieldOwner(r.ControllerName), } - defer r.MetricsRecorder.RecordDuration(*objRef, start) + result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) 
+ + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) + }() + + // Add finalizer first if not exist to avoid the race condition + // between init and delete + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue + return } - // set initial status - if resetRepository, ok := r.resetStatus(repository); ok { - repository = resetRepository - if err := r.updateStatus(ctx, req, repository.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, repository) + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + recResult, retErr = r.reconcileDelete(ctx, obj) + return } - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. 
- if v, ok := meta.ReconcileAnnotationValue(repository.GetAnnotations()); ok { - repository.Status.SetLastHandledReconcileRequest(v) + // Reconcile actual object + reconcilers := []helmRepoReconcilerFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileArtifact, } - - // purge old artifacts from storage - if err := r.gc(repository); err != nil { - log.Error(err, "unable to purge old artifacts") - } - - // reconcile repository by downloading the index.yaml file - reconciledRepository, reconcileErr := r.reconcile(ctx, *repository.DeepCopy()) - - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledRepository.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - - // if reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledRepository, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledRepository) - return ctrl.Result{Requeue: true}, reconcileErr - } - - // emit revision change event - if repository.Status.Artifact == nil || reconciledRepository.Status.Artifact.Revision != repository.Status.Artifact.Revision { - r.event(ctx, reconciledRepository, events.EventSeverityInfo, sourcev1.HelmRepositoryReadyMessage(reconciledRepository)) - } - r.recordReadiness(ctx, reconciledRepository) - - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Since(start).String(), - repository.GetInterval().Duration.String(), - )) - - return ctrl.Result{RequeueAfter: repository.GetInterval().Duration}, nil + recResult, retErr = r.reconcile(ctx, obj, reconcilers) + return } -func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, repo sourcev1.HelmRepository) (sourcev1.HelmRepository, error) { - log := ctrl.LoggerFrom(ctx) - clientOpts := []helmgetter.Option{ - helmgetter.WithURL(repo.Spec.URL), - helmgetter.WithTimeout(repo.Spec.Timeout.Duration), - 
helmgetter.WithPassCredentialsAll(repo.Spec.PassCredentials), +// reconcile iterates through the sub-reconcilers and processes the source +// object. The sub-reconcilers are run sequentially. The result and error of +// the sub-reconciliation are collected and returned. For multiple results +// from different sub-reconcilers, the results are combined to return the +// result with the shortest requeue period. +func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmRepository, reconcilers []helmRepoReconcilerFunc) (sreconcile.Result, error) { + if obj.Generation != obj.Status.ObservedGeneration { + conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation) } - if repo.Spec.SecretRef != nil { + + var chartRepo repository.ChartRepository + var artifact sourcev1.Artifact + + // Run the sub-reconcilers and build the result of reconciliation. + var res sreconcile.Result + var resErr error + for _, rec := range reconcilers { + recResult, err := rec(ctx, obj, &artifact, &chartRepo) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil + } + // If an error is received, prioritize the returned results because an + // error also means immediate requeue. + if err != nil { + resErr = err + res = recResult + break + } + // Prioritize requeue request in the result for successful results. + res = sreconcile.LowestRequeuingResult(res, recResult) + } + return res, resErr +} + +// reconcileStorage ensures the current state of the storage matches the desired and previously observed state. +// +// All artifacts for the resource except for the current one are garbage collected from the storage. +// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. +// If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. 
+func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + // Determine if the advertised artifact is still in storage + if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage") + return sreconcile.ResultSuccess, nil + } + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return sreconcile.ResultSuccess, nil +} + +// reconcileSource ensures the upstream Helm repository can be reached and downloaded out using the declared +// configuration, and stores a new artifact in the storage. +// +// The Helm repository index is downloaded using the defined configuration, and in case of an error during this process +// (including transient errors), it records v1beta1.FetchFailedCondition=True and returns early. +// If the download is successful, the given artifact pointer is set to a new artifact with the available metadata, and +// the index pointer is set to the newly downloaded index. 
+func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { + // Configure Helm client to access repository + clientOpts := []helmgetter.Option{ + helmgetter.WithTimeout(obj.Spec.Timeout.Duration), + helmgetter.WithURL(obj.Spec.URL), + helmgetter.WithPassCredentialsAll(obj.Spec.PassCredentials), + } + + // Configure any authentication related options + if obj.Spec.SecretRef != nil { + // Attempt to retrieve secret name := types.NamespacedName{ - Namespace: repo.GetNamespace(), - Name: repo.Spec.SecretRef.Name, + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, } - var secret corev1.Secret - err := r.Client.Get(ctx, name, &secret) - if err != nil { - err = fmt.Errorf("auth secret error: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.AuthenticationFailedReason, err.Error()), err - } - - authDir, err := os.MkdirTemp("", repo.Kind+"-"+repo.Namespace+"-"+repo.Name+"-") - if err != nil { - err = fmt.Errorf("failed to create temporary working directory for credentials: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.AuthenticationFailedReason, err.Error()), err - } - defer func() { - if err := os.RemoveAll(authDir); err != nil { - log.Error(err, "failed to remove working directory", "path", authDir) + if err := r.Client.Get(ctx, name, &secret); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to get secret '%s': %w", name.String(), err), + Reason: sourcev1.AuthenticationFailedReason, } - }() + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } - opts, err := getter.ClientOptionsFromSecret(authDir, secret) + // Get client options from secret + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-auth-", obj.Name, obj.Namespace)) if err != nil { - err = fmt.Errorf("auth options 
error: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.AuthenticationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create temporary directory for credentials: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + } + defer os.RemoveAll(tmpDir) + + // Construct actual options + opts, err := getter.ClientOptionsFromSecret(tmpDir, secret) + if err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to configure Helm client with secret data: %w", err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) + // Return err as the content of the secret may change. + return sreconcile.ResultEmpty, e } clientOpts = append(clientOpts, opts...) } - chartRepo, err := repository.NewChartRepository(repo.Spec.URL, "", r.Getters, clientOpts) + // Construct Helm chart repository with options and download index + newChartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", r.Getters, clientOpts) if err != nil { switch err.(type) { case *url.Error: - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.URLInvalidReason, err.Error()), err + e := &serror.Stalling{ + Err: fmt.Errorf("invalid Helm repository URL: %w", err), + Reason: sourcev1.URLInvalidReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, e.Err.Error()) + return sreconcile.ResultEmpty, e default: - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.IndexationFailedReason, err.Error()), err + e := &serror.Stalling{ + Err: fmt.Errorf("failed to construct Helm client: %w", err), + Reason: meta.FailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } } - checksum, err := chartRepo.CacheIndex() + checksum, err := newChartRepo.CacheIndex() if err != nil { - err = fmt.Errorf("failed 
to download repository index: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.IndexationFailedReason, err.Error()), err + e := &serror.Event{ + Err: fmt.Errorf("failed to fetch Helm repository index: %w", err), + Reason: meta.FailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, meta.FailedReason, e.Err.Error()) + // Coin flip on transient or persistent error, return error and hope for the best + return sreconcile.ResultEmpty, e } - defer chartRepo.RemoveCache() + *chartRepo = *newChartRepo - artifact := r.Storage.NewArtifactFor(repo.Kind, - repo.ObjectMeta.GetObjectMeta(), - "", + // Load the cached repository index to ensure it passes validation. + if err := chartRepo.LoadFromCache(); err != nil { + e := &serror.Event{ + Err: fmt.Errorf("failed to load Helm repository from cache: %w", err), + Reason: sourcev1.FetchFailedCondition, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.IndexationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e + } + defer chartRepo.Unload() + + // Mark observations about the revision on the object. + if !obj.GetArtifact().HasRevision(checksum) { + message := fmt.Sprintf("new index revision '%s'", checksum) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) + } + + conditions.Delete(obj, sourcev1.FetchFailedCondition) + + // Create potential new artifact. 
+ *artifact = r.Storage.NewArtifactFor(obj.Kind, + obj.ObjectMeta.GetObjectMeta(), + chartRepo.Checksum, fmt.Sprintf("index-%s.yaml", checksum)) - // Return early on unchanged index - if apimeta.IsStatusConditionTrue(repo.Status.Conditions, meta.ReadyCondition) && - (repo.GetArtifact() != nil && repo.GetArtifact().Checksum == checksum) { - if artifact.URL != repo.GetArtifact().URL { - r.Storage.SetArtifactURL(repo.GetArtifact()) - repo.Status.URL = r.Storage.SetHostname(repo.Status.URL) + return sreconcile.ResultSuccess, nil +} + +// reconcileArtifact stores a new artifact in the storage, if the current observation on the object does not match the +// given data. +// +// The inspection of the given data to the object is differed, ensuring any stale observations as +// v1beta1.ArtifactUnavailableCondition and v1beta1.ArtifactOutdatedCondition are always deleted. +// If the given artifact does not differ from the object's current, it returns early. +// On a successful write of a new artifact, the artifact in the status of the given object is set, and the symlink in +// the storage is updated to its path. +func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { + // Always restore the Ready condition in case it got removed due to a transient error. 
+ defer func() { + if obj.GetArtifact().HasRevision(artifact.Revision) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, + "stored artifact for revision '%s'", artifact.Revision) } - return repo, nil + + if err := chartRepo.RemoveCache(); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary cached index file") + } + }() + + if obj.GetArtifact().HasRevision(artifact.Revision) { + ctrl.LoggerFrom(ctx).Info("artifact up-to-date", "revision", artifact.Revision) + return sreconcile.ResultSuccess, nil } - // Load the cached repository index to ensure it passes validation - if err := chartRepo.LoadFromCache(); err != nil { - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.IndexationFailedReason, err.Error()), err - } - // The repository checksum is the SHA256 of the loaded bytes, after sorting - artifact.Revision = chartRepo.Checksum - chartRepo.Unload() + // Mark reconciling because the artifact and remote source are different. + // and they have to be reconciled. + conditions.MarkReconciling(obj, "NewRevision", "new index revision '%s'", artifact.Revision) // Create artifact dir - err = r.Storage.MkdirAll(artifact) - if err != nil { - err = fmt.Errorf("unable to create repository index directory: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.StorageOperationFailedReason, err.Error()), err + if err := r.Storage.MkdirAll(*artifact); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create artifact directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } - // Acquire lock - unlock, err := r.Storage.Lock(artifact) + // Acquire lock. 
+ unlock, err := r.Storage.Lock(*artifact) if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.StorageOperationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), + Reason: meta.FailedReason, + } } defer unlock() - // Save artifact to storage - if err = r.Storage.CopyFromPath(&artifact, chartRepo.CachePath); err != nil { - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.StorageOperationFailedReason, err.Error()), err + // Save artifact to storage. + if err = r.Storage.CopyFromPath(artifact, chartRepo.CachePath); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("unable to save artifact to storage: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } - // Update index symlink - indexURL, err := r.Storage.Symlink(artifact, "index.yaml") + // Calculate the artifact size to be included in the NewArtifact event. + fi, err := os.Stat(chartRepo.CachePath) if err != nil { - err = fmt.Errorf("storage error: %w", err) - return sourcev1.HelmRepositoryNotReady(repo, sourcev1.StorageOperationFailedReason, err.Error()), err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("unable to read the artifact: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } + } + size := units.HumanSize(float64(fi.Size())) + + r.AnnotatedEventf(obj, map[string]string{ + "revision": artifact.Revision, + "checksum": artifact.Checksum, + }, corev1.EventTypeNormal, "NewArtifact", "fetched index of size %s from '%s'", size, chartRepo.URL) + + // Record it on the object. + obj.Status.Artifact = artifact.DeepCopy() + + // Update index symlink. 
+ indexURL, err := r.Storage.Symlink(*artifact, "index.yaml") + if err != nil { + r.eventLogf(ctx, obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "failed to update status URL symlink: %s", err) } - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - return sourcev1.HelmRepositoryReady(repo, artifact, indexURL, sourcev1.IndexationSucceededReason, message), nil + if indexURL != "" { + obj.Status.URL = indexURL + } + return sreconcile.ResultSuccess, nil } -func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, repository sourcev1.HelmRepository) (ctrl.Result, error) { - // Our finalizer is still present, so lets handle garbage collection - if err := r.gc(repository); err != nil { - r.event(ctx, repository, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) +// reconcileDelete handles the delete of an object. It first garbage collects all artifacts for the object from the +// artifact storage, if successful, the finalizer is removed from the object. 
+func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmRepository) (sreconcile.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } - // Record deleted status - r.recordReadiness(ctx, repository) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&repository, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &repository); err != nil { - return ctrl.Result{}, err - } + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil + return sreconcile.ResultEmpty, nil } -// resetStatus returns a modified v1beta1.HelmRepository and a boolean indicating -// if the status field has been reset. -func (r *HelmRepositoryReconciler) resetStatus(repository sourcev1.HelmRepository) (sourcev1.HelmRepository, bool) { - // We do not have an artifact, or it does no longer exist - if repository.GetArtifact() == nil || !r.Storage.ArtifactExist(*repository.GetArtifact()) { - repository = sourcev1.HelmRepositoryProgressing(repository) - repository.Status.Artifact = nil - return repository, true +// garbageCollect performs a garbage collection for the given v1beta1.HelmRepository. It removes all but the current +// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the +// resource. 
+func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmRepository) error { + if !obj.DeletionTimestamp.IsZero() { + if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + return &serror.Event{ + Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err), + Reason: "GarbageCollectionFailed", + } + } else if deleted != "" { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") + } + obj.Status.Artifact = nil + return nil } - if repository.Generation != repository.Status.ObservedGeneration { - return sourcev1.HelmRepositoryProgressing(repository), true - } - return repository, false -} - -// gc performs a garbage collection for the given v1beta1.HelmRepository. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. 
-func (r *HelmRepositoryReconciler) gc(repository sourcev1.HelmRepository) error { - if !repository.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(repository.Kind, repository.GetObjectMeta(), "", "*")) - } - if repository.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*repository.GetArtifact()) + if obj.GetArtifact() != nil { + if deleted, err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + return &serror.Event{ + Err: fmt.Errorf("garbage collection of old artifacts failed: %w", err), + Reason: "GarbageCollectionFailed", + } + } else if len(deleted) > 0 { + r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded", + "garbage collected old artifacts") + } } return nil } -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *HelmRepositoryReconciler) event(ctx context.Context, repository sourcev1.HelmRepository, severity, msg string) { - log := ctrl.LoggerFrom(ctx) - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&repository, "Normal", severity, msg) - } - if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } - } -} - -func (r *HelmRepositoryReconciler) recordReadiness(ctx context.Context, repository sourcev1.HelmRepository) { - log := ctrl.LoggerFrom(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !repository.DeletionTimestamp.IsZero()) +// eventLog records 
event and logs at the same time. This log is different from +// the debug log in the event recorder in the sense that this is a simple log, +// the event recorder debug log contains complete details about the event. +func (r *HelmRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !repository.DeletionTimestamp.IsZero()) - } -} - -func (r *HelmRepositoryReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.HelmRepositoryStatus) error { - var repository sourcev1.HelmRepository - if err := r.Get(ctx, req.NamespacedName, &repository); err != nil { - return err - } - - patch := client.MergeFrom(repository.DeepCopy()) - repository.Status = newStatus - - return r.Status().Patch(ctx, &repository, patch) -} - -func (r *HelmRepositoryReconciler) recordSuspension(ctx context.Context, hr sourcev1.HelmRepository) { - if r.MetricsRecorder == nil { - return - } - log := ctrl.LoggerFrom(ctx) - - objRef, err := reference.GetReference(r.Scheme, &hr) - if err != nil { - log.Error(err, "unable to record suspended metric") - return - } - - if !hr.DeletionTimestamp.IsZero() { - r.MetricsRecorder.RecordSuspend(*objRef, false) - } else { - r.MetricsRecorder.RecordSuspend(*objRef, hr.Spec.Suspend) + ctrl.LoggerFrom(ctx).Info(msg) } + r.Eventf(obj, eventType, reason, msg) } diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index e7d945a6..137df58f 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -18,389 +18,730 @@ package controllers import ( 
"context" + "fmt" "net/http" "os" - "path" + "path/filepath" "strings" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" + "testing" + "github.com/darkowlzz/controller-check/status" "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/helmtestserver" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + "github.com/fluxcd/source-controller/internal/helm/repository" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" ) -var _ = Describe("HelmRepositoryReconciler", func() { +func TestHelmRepositoryReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) - const ( - timeout = time.Second * 30 - interval = time.Second * 1 - indexInterval = time.Second * 2 - repositoryTimeout = time.Second * 5 - ) + testServer, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(testServer.Root()) - Context("HelmRepository", func() { - var ( - namespace *corev1.Namespace - helmServer *helmtestserver.HelmServer - err error - ) + g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed()) + g.Expect(testServer.GenerateIndex()).To(Succeed()) - BeforeEach(func() { - namespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{Name: "helm-repository-" + randStringRunes(5)}, + testServer.Start() + defer testServer.Stop() + + obj := &sourcev1.HelmRepository{ + ObjectMeta: 
metav1.ObjectMeta{ + GenerateName: "helmrepository-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + URL: testServer.URL(), + }, + } + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for HelmRepository to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) && obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return readyCondition.Status == metav1.ConditionTrue && + obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + // Check if the object status is valid. + condns := &status.Conditions{NegativePolarity: helmRepoReadyConditions.NegativePolarity} + checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) + checker.CheckErr(ctx, obj) + + // kstatus client conformance check. + u, err := patch.ToUnstructured(obj) + g.Expect(err).ToNot(HaveOccurred()) + res, err := kstatus.Compute(u) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(res.Status).To(Equal(kstatus.CurrentStatus)) + + // Patch the object with reconcile request annotation. 
+ patchHelper, err := patch.NewHelper(obj, testEnv.Client) + g.Expect(err).ToNot(HaveOccurred()) + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred()) + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return obj.Status.LastHandledReconcileAt == "now" + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for HelmRepository to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj *sourcev1.HelmRepository, storage *Storage) error + want sreconcile.Result + wantErr bool + assertArtifact *sourcev1.Artifact + assertConditions []metav1.Condition + assertPaths []string + }{ + { + name: "garbage collects", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error { + revisions := []string{"a", "b", "c"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0644); err != nil { + return err + } + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/c.txt", + Revision: "c", + Checksum: "2e7d2c03a9507ae265ecf5b5356885a53393a2029d241394997265a1a25aefc6", + URL: testStorage.Hostname + "/reconcile-storage/c.txt", + }, + assertPaths: []string{ + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + want: 
sreconcile.ResultSuccess, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: "/reconcile-storage/invalid.txt", + Revision: "d", + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0644); err != nil { + return err + } + return nil + }, + want: sreconcile.ResultSuccess, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, } - err = k8sClient.Create(context.Background(), namespace) - Expect(err).NotTo(HaveOccurred(), "failed to create test namespace") - helmServer, err = helmtestserver.NewTempHelmServer() - Expect(err).To(Succeed()) - }) - - AfterEach(func() { - 
helmServer.Stop() - os.RemoveAll(helmServer.Root()) - - Eventually(func() error { - return k8sClient.Delete(context.Background(), namespace) - }, timeout, interval).Should(Succeed(), "failed to delete test namespace") - }) - - It("Creates artifacts for", func() { - helmServer.Start() - - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - key := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - created := &sourcev1.HelmRepository{ + obj := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - Interval: metav1.Duration{Duration: indexInterval}, - Timeout: &metav1.Duration{Duration: repositoryTimeout}, + GenerateName: "test-", }, } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } - By("Expecting artifact") - got := &sourcev1.HelmRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) + var chartRepo repository.ChartRepository + var artifact sourcev1.Artifact - By("Updating the chart index") - // Regenerating the index is sufficient to make the revision change - Expect(helmServer.GenerateIndex()).Should(Succeed()) + got, err := r.reconcileStorage(context.TODO(), obj, &artifact, &chartRepo) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) - By("Expecting revision change and GC") - Eventually(func() bool { - now := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, now) - // Test revision change and garbage collection - return now.Status.Artifact.Revision != 
got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) - updated := &sourcev1.HelmRepository{} - Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed()) - updated.Spec.URL = "invalid#url?" - Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed()) - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, updated) - for _, c := range updated.Status.Conditions { - if c.Reason == sourcev1.IndexationFailedReason { - return true - } + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue } - return false - }, timeout, interval).Should(BeTrue()) - Expect(updated.Status.Artifact).ToNot(BeNil()) - - By("Expecting to delete successfully") - got = &sourcev1.HelmRepository{} - Eventually(func() error { - _ = k8sClient.Get(context.Background(), key, got) - return k8sClient.Delete(context.Background(), got) - }, timeout, interval).Should(Succeed()) - - By("Expecting delete to finish") - Eventually(func() error { - r := &sourcev1.HelmRepository{} - return k8sClient.Get(context.Background(), key, r) - }, timeout, interval).ShouldNot(Succeed()) - - exists := func(path string) bool { - // wait for tmp sync on macOS - time.Sleep(time.Second) - _, err := os.Stat(path) - return err == nil + g.Expect(absoluteP).NotTo(BeAnExistingFile()) } - - By("Expecting GC after delete") - Eventually(exists(got.Status.Artifact.Path), timeout, interval).ShouldNot(BeTrue()) }) + } +} - It("Handles timeout", func() { - helmServer.Start() +func 
TestHelmRepositoryReconciler_reconcileSource(t *testing.T) { + type options struct { + username string + password string + publicKey []byte + privateKey []byte + ca []byte + } - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - key := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - created := &sourcev1.HelmRepository{ + tests := []struct { + name string + protocol string + server options + secret *corev1.Secret + beforeFunc func(t *WithT, obj *sourcev1.HelmRepository) + afterFunc func(t *WithT, obj *sourcev1.HelmRepository) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "HTTP without secretRef makes ArtifactOutdated=True", + protocol: "http", + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), + }, + }, + { + name: "HTTP with Basic Auth secret makes ArtifactOutdated=True", + protocol: "http", + server: options{ + username: "git", + password: "1234", + }, + secret: &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + Name: "basic-auth", }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - Interval: metav1.Duration{Duration: indexInterval}, + Data: map[string][]byte{ + "username": []byte("git"), + "password": []byte("1234"), }, - } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), + }, + }, + { + name: "HTTPS with CAFile secret makes ArtifactOutdated=True", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ca-file", + }, + Data: map[string][]byte{ + "caFile": tlsCA, + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), + }, + }, + { + name: "HTTPS with invalid CAFile secret makes FetchFailed=True and returns error", + protocol: "https", + server: options{ + publicKey: tlsPublicKey, + privateKey: tlsPrivateKey, + ca: tlsCA, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-ca", + }, + Data: map[string][]byte{ + "caFile": []byte("invalid"), + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"} + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "can't create TLS config for client: failed to append certificates from file"), + }, + }, + { + name: "Invalid URL makes FetchFailed=True and returns stalling error", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "first path segment in URL cannot contain colon"), + }, + }, + { + name: "Unsupported scheme makes FetchFailed=True and returns stalling error", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "ftp://") + }, + want: sreconcile.ResultEmpty, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "scheme \"ftp\" not supported"), + }, + }, + { + name: "Missing secret returns FetchFailed=True and returns error", + protocol: "http", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"} + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "secrets \"non-existing\" not found"), + }, + }, + { + name: "Malformed secret returns FetchFailed=True and returns error", + protocol: "http", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "malformed-basic-auth", + }, + Data: map[string][]byte{ + "username": []byte("git"), + }, + }, + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "malformed-basic-auth"} + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "required fields 'username' and 'password"), + }, + }, + } - By("Expecting index download to succeed") - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, condition := range got.Status.Conditions { - if condition.Reason == sourcev1.IndexationSucceededReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) + 
for _, tt := range tests { + obj := &sourcev1.HelmRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "auth-strategy-", + }, + Spec: sourcev1.HelmRepositorySpec{ + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: interval}, + }, + } - By("Expecting index download to timeout") - updated := &sourcev1.HelmRepository{} - Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed()) - updated.Spec.Timeout = &metav1.Duration{Duration: time.Microsecond} - Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed()) - Eventually(func() string { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, condition := range got.Status.Conditions { - if condition.Reason == sourcev1.IndexationFailedReason { - return condition.Message - } - } - return "" - }, timeout, interval).Should(MatchRegexp("(?i)timeout")) - }) + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) - It("Authenticates when basic auth credentials are provided", func() { - var username, password = "john", "doe" - helmServer.WithMiddleware(func(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - u, p, ok := r.BasicAuth() - if !ok || username != u || password != p { - w.WriteHeader(401) - return - } - handler.ServeHTTP(w, r) + server, err := helmtestserver.NewTempHelmServer() + g.Expect(err).NotTo(HaveOccurred()) + defer os.RemoveAll(server.Root()) + + g.Expect(server.PackageChart("testdata/charts/helmchart")).To(Succeed()) + g.Expect(server.GenerateIndex()).To(Succeed()) + + if len(tt.server.username+tt.server.password) > 0 { + server.WithMiddleware(func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + if !ok || u != tt.server.username || p != tt.server.password { + w.WriteHeader(401) + return + } + handler.ServeHTTP(w, r) + }) }) - }) - 
helmServer.Start() - - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) - - secretKey := types.NamespacedName{ - Name: "helmrepository-auth-" + randStringRunes(5), - Namespace: namespace.Name, } - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretKey.Name, - Namespace: secretKey.Namespace, + + secret := tt.secret.DeepCopy() + switch tt.protocol { + case "http": + server.Start() + defer server.Stop() + obj.Spec.URL = server.URL() + case "https": + g.Expect(server.StartTLS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed()) + defer server.Stop() + obj.Spec.URL = server.URL() + default: + t.Fatalf("unsupported protocol %q", tt.protocol) + } + + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj) + } + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()) + if secret != nil { + builder.WithObjects(secret.DeepCopy()) + } + + r := &HelmRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: builder.Build(), + Storage: testStorage, + Getters: testGetters, + } + + var chartRepo repository.ChartRepository + var artifact sourcev1.Artifact + got, err := r.reconcileSource(context.TODO(), obj, &artifact, &chartRepo) + defer os.Remove(chartRepo.CachePath) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + }) + } +} + +func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) + afterFunc func(t *WithT, obj *sourcev1.HelmRepository) + want sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "Archiving artifact to 
storage makes Ready=True", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision 'existing'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Status.Artifact = artifact.DeepCopy() + }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating a new artifact", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision 'existing'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) { + obj.Spec.Interval = metav1.Duration{Duration: interval} 
+ }, + afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "index.yaml") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) + }, + want: sreconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision 'existing'"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmRepositoryReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + obj := &sourcev1.HelmRepository{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.HelmRepositoryKind, }, - } - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - - key := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - created := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: key.Name, - Namespace: key.Namespace, + GenerateName: "test-bucket-", + Generation: 1, + Namespace: "default", }, Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - SecretRef: &meta.LocalObjectReference{ - Name: secretKey.Name, - }, - Interval: metav1.Duration{Duration: indexInterval}, + Timeout: &metav1.Duration{Duration: timeout}, + URL: "https://example.com/index.yaml", }, } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) - By("Expecting 401") - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.IndexationFailedReason && - strings.Contains(c.Message, 
"401 Unauthorized") { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) + tmpDir, err := os.MkdirTemp("", "test-reconcile-artifact-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) - By("Expecting missing field error") - secret.Data = map[string][]byte{ - "username": []byte(username), + // Create an empty cache file. + cachePath := filepath.Join(tmpDir, "index.yaml") + cacheFile, err := os.Create(cachePath) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(cacheFile.Close()).ToNot(HaveOccurred()) + + chartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", testGetters, nil) + g.Expect(err).ToNot(HaveOccurred()) + chartRepo.CachePath = cachePath + + artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz") + // Checksum of the index file calculated by the ChartRepository. + artifact.Checksum = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj, artifact, chartRepo) } - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - By("Expecting artifact") - secret.Data["password"] = []byte(password) - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) + got, err := r.reconcileArtifact(context.TODO(), obj, &artifact, chartRepo) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) - By("Expecting missing secret error") - 
Expect(k8sClient.Delete(context.Background(), secret)).Should(Succeed()) - got := &sourcev1.HelmRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(got.Status.Artifact).ShouldNot(BeNil()) + // On error, artifact is empty. Check artifacts only on successful + // reconcile. + if !tt.wantErr { + g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy())) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } }) + } +} - It("Authenticates when TLS credentials are provided", func() { - err = helmServer.StartTLS(examplePublicKey, examplePrivateKey, exampleCA, "example.com") - Expect(err).NotTo(HaveOccurred()) +func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) { + // Helper to build simple helmRepoReconcilerFunc with result and error. 
+ buildReconcileFuncs := func(r sreconcile.Result, e error) helmRepoReconcilerFunc { + return func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + return r, e + } + } - Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed()) - Expect(helmServer.GenerateIndex()).Should(Succeed()) + tests := []struct { + name string + generation int64 + observedGeneration int64 + reconcileFuncs []helmRepoReconcilerFunc + wantResult sreconcile.Result + wantErr bool + assertConditions []metav1.Condition + }{ + { + name: "successful reconciliations", + reconcileFuncs: []helmRepoReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + }, + { + name: "successful reconciliation with generation difference", + generation: 3, + observedGeneration: 2, + reconcileFuncs: []helmRepoReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new object generation (3)"), + }, + }, + { + name: "failed reconciliation", + reconcileFuncs: []helmRepoReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + }, + { + name: "multiple object status conditions mutations", + reconcileFuncs: []helmRepoReconcilerFunc{ + func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + return sreconcile.ResultSuccess, nil + }, + func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo 
*repository.ChartRepository) (sreconcile.Result, error) { + conditions.MarkTrue(obj, meta.ReconcilingCondition, "Progressing", "creating artifact") + return sreconcile.ResultSuccess, nil + }, + }, + wantResult: sreconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "Progressing", "creating artifact"), + }, + }, + { + name: "subrecs with one result=Requeue, no error", + reconcileFuncs: []helmRepoReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + }, + wantResult: sreconcile.ResultRequeue, + wantErr: false, + }, + { + name: "subrecs with error before result=Requeue", + reconcileFuncs: []helmRepoReconcilerFunc{ + buildReconcileFuncs(sreconcile.ResultSuccess, nil), + buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")), + buildReconcileFuncs(sreconcile.ResultRequeue, nil), + }, + wantResult: sreconcile.ResultEmpty, + wantErr: true, + }, + } - secretKey := types.NamespacedName{ - Name: "helmrepository-auth-" + randStringRunes(5), - Namespace: namespace.Name, - } - secret := &corev1.Secret{ + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &HelmRepositoryReconciler{} + obj := &sourcev1.HelmRepository{ ObjectMeta: metav1.ObjectMeta{ - Name: secretKey.Name, - Namespace: secretKey.Namespace, + GenerateName: "test-", + Generation: tt.generation, + }, + Status: sourcev1.HelmRepositoryStatus{ + ObservedGeneration: tt.observedGeneration, }, } - Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed()) - key := types.NamespacedName{ - Name: "helmrepository-sample-" + randStringRunes(5), - Namespace: namespace.Name, - } - created := &sourcev1.HelmRepository{ - ObjectMeta: metav1.ObjectMeta{ - 
Name: key.Name, - Namespace: key.Namespace, - }, - Spec: sourcev1.HelmRepositorySpec{ - URL: helmServer.URL(), - SecretRef: &meta.LocalObjectReference{ - Name: secretKey.Name, - }, - Interval: metav1.Duration{Duration: indexInterval}, - }, - } - Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) - defer k8sClient.Delete(context.Background(), created) + ctx := context.TODO() - By("Expecting unknown authority error") - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.IndexationFailedReason && - strings.Contains(c.Message, "certificate signed by unknown authority") { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) + gotRes, gotErr := r.reconcile(ctx, obj, tt.reconcileFuncs) + g.Expect(gotErr != nil).To(Equal(tt.wantErr)) + g.Expect(gotRes).To(Equal(tt.wantResult)) - By("Expecting missing field error") - secret.Data = map[string][]byte{ - "certFile": examplePublicKey, - } - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - - By("Expecting artifact") - secret.Data["keyFile"] = examplePrivateKey - secret.Data["caFile"] = exampleCA - Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed()) - Eventually(func() bool { - got := &sourcev1.HelmRepository{} - _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) - }, timeout, interval).Should(BeTrue()) - - By("Expecting missing secret error") - Expect(k8sClient.Delete(context.Background(), secret)).Should(Succeed()) - got := 
&sourcev1.HelmRepository{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, got) - for _, c := range got.Status.Conditions { - if c.Reason == sourcev1.AuthenticationFailedReason { - return true - } - } - return false - }, timeout, interval).Should(BeTrue()) - Expect(got.Status.Artifact).ShouldNot(BeNil()) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) }) - }) -}) + } +} diff --git a/controllers/source_predicate.go b/controllers/source_predicate.go index 47dc73c2..60786b87 100644 --- a/controllers/source_predicate.go +++ b/controllers/source_predicate.go @@ -20,7 +20,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" ) type SourceRevisionChangePredicate struct { diff --git a/controllers/storage.go b/controllers/storage.go index a7015051..0e9e5fe8 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -36,7 +36,7 @@ import ( "github.com/fluxcd/pkg/lockedfile" "github.com/fluxcd/pkg/untar" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/internal/fs" "github.com/fluxcd/source-controller/pkg/sourceignore" ) @@ -53,7 +53,7 @@ type Storage struct { Timeout time.Duration `json:"timeout"` } -// NewStorage creates the storage helper for a given path and hostname +// NewStorage creates the storage helper for a given path and hostname. 
func NewStorage(basePath string, hostname string, timeout time.Duration) (*Storage, error) { if f, err := os.Stat(basePath); os.IsNotExist(err) || !f.IsDir() { return nil, fmt.Errorf("invalid dir path: %s", basePath) @@ -81,7 +81,11 @@ func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) { if artifact.Path == "" { return } - artifact.URL = fmt.Sprintf("http://%s/%s", s.Hostname, artifact.Path) + format := "http://%s/%s" + if strings.HasPrefix(s.Hostname, "http://") || strings.HasPrefix(s.Hostname, "https://") { + format = "%s/%s" + } + artifact.URL = fmt.Sprintf(format, s.Hostname, strings.TrimLeft(artifact.Path, "/")) } // SetHostname sets the hostname of the given URL string to the current Storage.Hostname and returns the result. @@ -101,13 +105,20 @@ func (s *Storage) MkdirAll(artifact sourcev1.Artifact) error { } // RemoveAll calls os.RemoveAll for the given v1beta1.Artifact base dir. -func (s *Storage) RemoveAll(artifact sourcev1.Artifact) error { +func (s *Storage) RemoveAll(artifact sourcev1.Artifact) (string, error) { + var deletedDir string dir := filepath.Dir(s.LocalPath(artifact)) - return os.RemoveAll(dir) + // Check if the dir exists. + _, err := os.Stat(dir) + if err == nil { + deletedDir = dir + } + return deletedDir, os.RemoveAll(dir) } // RemoveAllButCurrent removes all files for the given v1beta1.Artifact base dir, excluding the current one. -func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) error { +func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) ([]string, error) { + deletedFiles := []string{} localPath := s.LocalPath(artifact) dir := filepath.Dir(localPath) var errors []string @@ -120,15 +131,18 @@ func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) error { if path != localPath && !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink { if err := os.Remove(path); err != nil { errors = append(errors, info.Name()) + } else { + // Collect the successfully deleted file paths. 
+ deletedFiles = append(deletedFiles, path) } } return nil }) if len(errors) > 0 { - return fmt.Errorf("failed to remove files: %s", strings.Join(errors, " ")) + return deletedFiles, fmt.Errorf("failed to remove files: %s", strings.Join(errors, " ")) } - return nil + return deletedFiles, nil } // ArtifactExist returns a boolean indicating whether the v1beta1.Artifact exists in storage and is a regular file. diff --git a/controllers/storage_test.go b/controllers/storage_test.go index 4af3a341..7da575c6 100644 --- a/controllers/storage_test.go +++ b/controllers/storage_test.go @@ -28,8 +28,9 @@ import ( "time" "github.com/go-git/go-git/v5/plumbing/format/gitignore" + . "github.com/onsi/gomega" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" ) func createStoragePath() (string, error) { @@ -293,10 +294,96 @@ func TestStorageRemoveAllButCurrent(t *testing.T) { t.Fatalf("Valid path did not successfully return: %v", err) } - if err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: path.Join(dir, "really", "nonexistent")}); err == nil { + if _, err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: path.Join(dir, "really", "nonexistent")}); err == nil { t.Fatal("Did not error while pruning non-existent path") } }) + + t.Run("collect names of deleted items", func(t *testing.T) { + g := NewWithT(t) + dir, err := os.MkdirTemp("", "") + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { os.RemoveAll(dir) }) + + s, err := NewStorage(dir, "hostname", time.Minute) + g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") + + artifact := sourcev1.Artifact{ + Path: path.Join("foo", "bar", "artifact1.tar.gz"), + } + + // Create artifact dir and artifacts. 
+ artifactDir := path.Join(dir, "foo", "bar") + g.Expect(os.MkdirAll(artifactDir, 0755)).NotTo(HaveOccurred()) + current := []string{ + path.Join(artifactDir, "artifact1.tar.gz"), + } + wantDeleted := []string{ + path.Join(artifactDir, "file1.txt"), + path.Join(artifactDir, "file2.txt"), + } + createFile := func(files []string) { + for _, c := range files { + f, err := os.Create(c) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(f.Close()).ToNot(HaveOccurred()) + } + } + createFile(current) + createFile(wantDeleted) + _, err = s.Symlink(artifact, "latest.tar.gz") + g.Expect(err).ToNot(HaveOccurred(), "failed to create symlink") + + deleted, err := s.RemoveAllButCurrent(artifact) + g.Expect(err).ToNot(HaveOccurred(), "failed to remove all but current") + g.Expect(deleted).To(Equal(wantDeleted)) + }) +} + +func TestStorageRemoveAll(t *testing.T) { + tests := []struct { + name string + artifactPath string + createArtifactPath bool + wantDeleted string + }{ + { + name: "delete non-existent path", + artifactPath: path.Join("foo", "bar", "artifact1.tar.gz"), + createArtifactPath: false, + wantDeleted: "", + }, + { + name: "delete existing path", + artifactPath: path.Join("foo", "bar", "artifact1.tar.gz"), + createArtifactPath: true, + wantDeleted: path.Join("foo", "bar"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + dir, err := os.MkdirTemp("", "") + g.Expect(err).ToNot(HaveOccurred()) + t.Cleanup(func() { os.RemoveAll(dir) }) + + s, err := NewStorage(dir, "hostname", time.Minute) + g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") + + artifact := sourcev1.Artifact{ + Path: tt.artifactPath, + } + + if tt.createArtifactPath { + g.Expect(os.MkdirAll(path.Join(dir, tt.artifactPath), 0755)).ToNot(HaveOccurred()) + } + + deleted, err := s.RemoveAll(artifact) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(deleted).To(ContainSubstring(tt.wantDeleted), "unexpected deleted path") + }) + } } func 
TestStorageCopyFromPath(t *testing.T) { @@ -317,15 +404,11 @@ func TestStorageCopyFromPath(t *testing.T) { } createFile := func(file *File) (absPath string, err error) { - defer func() { - if err != nil && dir != "" { - os.RemoveAll(dir) - } - }() dir, err = os.MkdirTemp("", "test-files-") if err != nil { return } + t.Cleanup(cleanupStoragePath(dir)) absPath = filepath.Join(dir, file.Name) if err = os.MkdirAll(filepath.Dir(absPath), 0755); err != nil { return diff --git a/controllers/suite_test.go b/controllers/suite_test.go index fae4c3b0..b4a6ca69 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -17,178 +17,171 @@ limitations under the License. package controllers import ( - "context" + "fmt" "math/rand" - "net/http" "os" "path/filepath" "testing" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" "helm.sh/helm/v3/pkg/getter" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/testenv" + "github.com/fluxcd/pkg/testserver" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" // +kubebuilder:scaffold:imports ) -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. +// These tests make use of plain Go using Gomega for assertions. +// At the beginning of every (sub)test Gomega can be initialized +// using gomega.NewWithT. +// Refer to http://onsi.github.io/gomega/ to learn more about +// Gomega. 
-var cfg *rest.Config -var k8sClient client.Client -var k8sManager ctrl.Manager -var testEnv *envtest.Environment -var storage *Storage +const ( + timeout = 10 * time.Second + interval = 1 * time.Second +) -var examplePublicKey []byte -var examplePrivateKey []byte -var exampleCA []byte -var ctx context.Context -var cancel context.CancelFunc +var ( + testEnv *testenv.Environment + testStorage *Storage + testServer *testserver.ArtifactServer + testMetricsH controller.Metrics + ctx = ctrl.SetupSignalHandler() +) -const timeout = time.Second * 60 - -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) -} - -var _ = BeforeSuite(func() { - done := make(chan interface{}) - go func() { - close(done) - }() - - logf.SetLogger( - zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)), - ) - ctx, cancel = context.WithCancel(context.TODO()) - - By("bootstrapping test environment") - t := true - if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { - testEnv = &envtest.Environment{ - UseExistingCluster: &t, - } - } else { - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, - } - } - - var err error - cfg, err = testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme - - Expect(loadExampleKeys()).To(Succeed()) - - tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir") - - storage, err = NewStorage(tmpStoragePath, "localhost:5050", time.Second*30) - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage") - // 
serve artifacts from the filesystem, as done in main.go - fs := http.FileServer(http.Dir(tmpStoragePath)) - http.Handle("/", fs) - go http.ListenAndServe(":5050", nil) - - k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - }) - Expect(err).ToNot(HaveOccurred()) - - err = (&GitRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup GtRepositoryReconciler") - - err = (&HelmRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - Getters: getter.Providers{getter.Provider{ +var ( + testGetters = getter.Providers{ + getter.Provider{ Schemes: []string{"http", "https"}, New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") - - err = (&HelmChartReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler") - - go func() { - defer GinkgoRecover() - err = k8sManager.Start(ctx) - Expect(err).ToNot(HaveOccurred()) - }() - - k8sClient = k8sManager.GetClient() - Expect(k8sClient).ToNot(BeNil()) - - Eventually(done, timeout).Should(BeClosed()) -}, timeout.Seconds()) - -var _ = AfterSuite(func() { - cancel() - By("tearing down the test environment") - if storage != nil { - err := os.RemoveAll(storage.BasePath) - Expect(err).NotTo(HaveOccurred()) + }, } - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) +) + +var ( + tlsPublicKey []byte + tlsPrivateKey []byte + tlsCA []byte +) func init() { rand.Seed(time.Now().UnixNano()) } -func loadExampleKeys() (err error) { - examplePublicKey, err = 
os.ReadFile("testdata/certs/server.pem") +func TestMain(m *testing.M) { + initTestTLS() + + utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme)) + + testEnv = testenv.New(testenv.WithCRDPath(filepath.Join("..", "config", "crd", "bases"))) + + var err error + testServer, err = testserver.NewTempArtifactServer() if err != nil { - return err + panic(fmt.Sprintf("Failed to create a temporary storage server: %v", err)) } - examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") + fmt.Println("Starting the test storage server") + testServer.Start() + + testStorage, err = newTestStorage(testServer.HTTPServer) if err != nil { - return err + panic(fmt.Sprintf("Failed to create a test storage: %v", err)) } - exampleCA, err = os.ReadFile("testdata/certs/ca.pem") - return err + + testMetricsH = controller.MustMakeMetrics(testEnv) + + if err := (&GitRepositoryReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Storage: testStorage, + }).SetupWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) + } + + if err := (&BucketReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Storage: testStorage, + }).SetupWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Failed to start BucketReconciler: %v", err)) + } + + if err := (&HelmRepositoryReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Getters: testGetters, + Storage: testStorage, + }).SetupWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Failed to start HelmRepositoryReconciler: %v", err)) + } + + if err := (&HelmChartReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Getters: testGetters, + Storage: testStorage, + }).SetupWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Failed to start HelmChartReconciler: %v", err)) + } + + go func() { + 
fmt.Println("Starting the test environment") + if err := testEnv.Start(ctx); err != nil { + panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) + } + }() + <-testEnv.Manager.Elected() + + code := m.Run() + + fmt.Println("Stopping the test environment") + if err := testEnv.Stop(); err != nil { + panic(fmt.Sprintf("Failed to stop the test environment: %v", err)) + } + + fmt.Println("Stopping the storage server") + testServer.Stop() + if err := os.RemoveAll(testServer.Root()); err != nil { + panic(fmt.Sprintf("Failed to remove storage server dir: %v", err)) + } + + os.Exit(code) +} + +func initTestTLS() { + var err error + tlsPublicKey, err = os.ReadFile("testdata/certs/server.pem") + if err != nil { + panic(err) + } + tlsPrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") + if err != nil { + panic(err) + } + tlsCA, err = os.ReadFile("testdata/certs/ca.pem") + if err != nil { + panic(err) + } +} + +func newTestStorage(s *testserver.HTTPServer) (*Storage, error) { + storage, err := NewStorage(s.Root(), s.URL(), timeout) + if err != nil { + return nil, err + } + return storage, nil } var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") diff --git a/controllers/testdata/git/repository/.sourceignore b/controllers/testdata/git/repository/.sourceignore new file mode 100644 index 00000000..989478d1 --- /dev/null +++ b/controllers/testdata/git/repository/.sourceignore @@ -0,0 +1 @@ +**.txt diff --git a/controllers/testdata/git/repository/foo.txt b/controllers/testdata/git/repository/foo.txt new file mode 100644 index 00000000..e69de29b diff --git a/controllers/testdata/git/repository/manifest.yaml b/controllers/testdata/git/repository/manifest.yaml new file mode 100644 index 00000000..220e1b33 --- /dev/null +++ b/controllers/testdata/git/repository/manifest.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: dummy diff --git a/docs/api/source.md b/docs/api/source.md index 9aabeee2..91ac4e94 100644 --- 
a/docs/api/source.md +++ b/docs/api/source.md @@ -2,22 +2,22 @@

Packages:

-

source.toolkit.fluxcd.io/v1beta1

-

Package v1beta1 contains API Schema definitions for the source v1beta1 API group

+

source.toolkit.fluxcd.io/v1beta2

+

Package v1beta2 contains API Schema definitions for the source v1beta2 API group

Resource Types: -

Bucket +

Bucket

Bucket is the Schema for the buckets API

@@ -35,7 +35,7 @@ Resource Types: apiVersion
string -source.toolkit.fluxcd.io/v1beta1 +source.toolkit.fluxcd.io/v1beta2 @@ -65,7 +65,7 @@ Refer to the Kubernetes API documentation for the fields of the spec
- + BucketSpec @@ -171,7 +171,7 @@ Kubernetes meta/v1.Duration (Optional) -

The timeout for download operations, defaults to 60s.

+

The timeout for fetch operations, defaults to 60s.

@@ -221,7 +221,7 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom status
- + BucketStatus @@ -233,7 +233,7 @@ BucketStatus
-

GitRepository +

GitRepository

GitRepository is the Schema for the gitrepositories API

@@ -251,7 +251,7 @@ BucketStatus apiVersion
string -source.toolkit.fluxcd.io/v1beta1 +source.toolkit.fluxcd.io/v1beta2 @@ -281,7 +281,7 @@ Refer to the Kubernetes API documentation for the fields of the spec
- + GitRepositorySpec @@ -313,10 +313,8 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference (Optional)

The secret name containing the Git credentials. -For HTTPS repositories the secret must contain username and password -fields. -For SSH repositories the secret must contain identity, identity.pub and -known_hosts fields.

+For HTTPS repositories the secret must contain username and password fields. +For SSH repositories the secret must contain ‘identity’, ‘identity.pub’ and ‘known_hosts’ fields.

@@ -350,7 +348,7 @@ Kubernetes meta/v1.Duration ref
- + GitRepositoryRef @@ -365,14 +363,14 @@ master branch.

verify
- + GitRepositoryVerification (Optional) -

Verify OpenPGP signature for the Git commit HEAD points to.

+

Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to.

@@ -384,9 +382,8 @@ string (Optional) -

Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.

+

Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). +If not provided, a default will be used, consult the documentation for your version to find out what those are.

@@ -398,7 +395,8 @@ bool (Optional) -

This flag tells the controller to suspend the reconciliation of this source.

+

Suspend tells the controller to suspend the reconciliation of this source.

@@ -423,8 +421,7 @@ bool (Optional) -

When enabled, after the clone is created, initializes all submodules within, -using their default settings. +

When enabled, after the clone is created, initializes all submodules within, using their default settings. This option is available only when using the ‘go-git’ GitImplementation.

@@ -432,13 +429,14 @@ This option is available only when using the ‘go-git’ GitImplementat include
- + []GitRepositoryInclude -

Extra git repositories to map into the repository

+

Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for +this resource.

@@ -462,7 +460,7 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom status
- + GitRepositoryStatus @@ -474,7 +472,7 @@ GitRepositoryStatus
-

HelmChart +

HelmChart

HelmChart is the Schema for the helmcharts API

@@ -492,7 +490,7 @@ GitRepositoryStatus apiVersion
string -source.toolkit.fluxcd.io/v1beta1 +source.toolkit.fluxcd.io/v1beta2 @@ -522,7 +520,7 @@ Refer to the Kubernetes API documentation for the fields of the spec
- + HelmChartSpec @@ -559,7 +557,7 @@ and Bucket sources. Defaults to latest when omitted.

sourceRef
- + LocalHelmChartSourceReference @@ -659,7 +657,7 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom status
- + HelmChartStatus @@ -671,7 +669,7 @@ HelmChartStatus
-

HelmRepository +

HelmRepository

HelmRepository is the Schema for the helmrepositories API

@@ -689,7 +687,7 @@ HelmChartStatus apiVersion
string -source.toolkit.fluxcd.io/v1beta1 +source.toolkit.fluxcd.io/v1beta2 @@ -719,7 +717,7 @@ Refer to the Kubernetes API documentation for the fields of the spec
- + HelmRepositorySpec @@ -799,7 +797,7 @@ Kubernetes meta/v1.Duration (Optional) -

The timeout of index downloading, defaults to 60s.

+

The timeout of index fetching, defaults to 60s.

@@ -835,7 +833,7 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom status
- + HelmRepositoryStatus @@ -847,16 +845,16 @@ HelmRepositoryStatus
-

Artifact +

Artifact

(Appears on: -BucketStatus, -GitRepositoryStatus, -HelmChartStatus, -HelmRepositoryStatus) +BucketStatus, +GitRepositoryStatus, +HelmChartStatus, +HelmRepositoryStatus)

-

Artifact represents the output of a source synchronisation.

+

Artifact represents the output of a Source synchronisation.

@@ -875,7 +873,9 @@ string @@ -886,7 +886,9 @@ string @@ -933,11 +935,11 @@ artifact.

-

Path is the relative file path of this artifact.

+

Path is the relative file path of this Artifact. +It can be used to locate the Artifact file in the root of the Artifact +storage on the local file system of the controller managing the Source.

-

URL is the HTTP address of this artifact.

+

URL is the HTTP address of this artifact. +It is used by the consumers of the artifacts to fetch and use the +artifacts. It is expected to be resolvable from within the cluster.

-

BucketSpec +

BucketSpec

(Appears on: -Bucket) +Bucket)

BucketSpec defines the desired state of an S3 compatible bucket

@@ -1047,7 +1049,7 @@ Kubernetes meta/v1.Duration (Optional) -

The timeout for download operations, defaults to 60s.

+

The timeout for fetch operations, defaults to 60s.

@@ -1094,11 +1096,11 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
-

BucketStatus +

BucketStatus

(Appears on: -Bucket) +Bucket)

BucketStatus defines the observed state of a bucket

@@ -1146,14 +1148,14 @@ string (Optional) -

URL is the download link for the artifact output of the last Bucket sync.

+

URL is the fetch link for the artifact output of the last Bucket sync.

artifact
- + Artifact @@ -1182,11 +1184,11 @@ github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
-

GitRepositoryInclude +

GitRepositoryInclude

(Appears on: -GitRepositorySpec) +GitRepositorySpec)

GitRepositoryInclude defines a source with a from and to path.

@@ -1240,11 +1242,11 @@ string
-

GitRepositoryRef +

GitRepositoryRef

(Appears on: -GitRepositorySpec) +GitRepositorySpec)

GitRepositoryRef defines the Git ref used for pull and checkout operations.

@@ -1309,11 +1311,11 @@ string
-

GitRepositorySpec +

GitRepositorySpec

(Appears on: -GitRepository) +GitRepository)

GitRepositorySpec defines the desired state of a Git repository.

@@ -1349,10 +1351,8 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference (Optional)

The secret name containing the Git credentials. -For HTTPS repositories the secret must contain username and password -fields. -For SSH repositories the secret must contain identity, identity.pub and -known_hosts fields.

+For HTTPS repositories the secret must contain username and password fields. +For SSH repositories the secret must contain ‘identity’, ‘identity.pub’ and ‘known_hosts’ fields.

@@ -1386,7 +1386,7 @@ Kubernetes meta/v1.Duration ref
- + GitRepositoryRef @@ -1401,14 +1401,14 @@ master branch.

verify
- + GitRepositoryVerification (Optional) -

Verify OpenPGP signature for the Git commit HEAD points to.

+

Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to.

@@ -1420,9 +1420,8 @@ string (Optional) -

Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.

+

Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). +If not provided, a default will be used, consult the documentation for your version to find out what those are.

@@ -1434,7 +1433,8 @@ bool (Optional) -

This flag tells the controller to suspend the reconciliation of this source.

+

Suspend tells the controller to suspend the reconciliation of this source. +This flag tells the controller to suspend the reconciliation of this source.

@@ -1459,8 +1459,7 @@ bool (Optional) -

When enabled, after the clone is created, initializes all submodules within, -using their default settings. +

When enabled, after the clone is created, initializes all submodules within, using their default settings. This option is available only when using the ‘go-git’ GitImplementation.

@@ -1468,13 +1467,14 @@ This option is available only when using the ‘go-git’ GitImplementat include
- + []GitRepositoryInclude -

Extra git repositories to map into the repository

+

Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for +this resource.

@@ -1495,11 +1495,11 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
-

GitRepositoryStatus +

GitRepositoryStatus

(Appears on: -GitRepository) +GitRepository)

GitRepositoryStatus defines the observed state of a Git repository.

@@ -1547,15 +1547,14 @@ string (Optional) -

URL is the download link for the artifact output of the last repository -sync.

+

URL is the fetch link for the artifact output of the last repository sync.

artifact
- + Artifact @@ -1569,8 +1568,8 @@ Artifact includedArtifacts
- -[]*github.com/fluxcd/source-controller/api/v1beta1.Artifact + +[]*github.com/fluxcd/source-controller/api/v1beta2.Artifact @@ -1598,11 +1597,11 @@ github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
-

GitRepositoryVerification +

GitRepositoryVerification

(Appears on: -GitRepositorySpec) +GitRepositorySpec)

GitRepositoryVerification defines the OpenPGP signature verification process.

@@ -1623,7 +1622,7 @@ string -

Mode describes what git object should be verified, currently (‘head’).

+

Mode describes what Git object should be verified, currently (‘head’).

@@ -1636,18 +1635,18 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference -

The secret name containing the public keys of all trusted Git authors.

+

SecretRef containing the public keys of all trusted Git authors.

-

HelmChartSpec +

HelmChartSpec

(Appears on: -HelmChart) +HelmChart)

HelmChartSpec defines the desired state of a Helm chart.

@@ -1688,7 +1687,7 @@ and Bucket sources. Defaults to latest when omitted.

sourceRef
- + LocalHelmChartSourceReference @@ -1785,11 +1784,11 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
-

HelmChartStatus +

HelmChartStatus

(Appears on: -HelmChart) +HelmChart)

HelmChartStatus defines the observed state of the HelmChart.

@@ -1816,6 +1815,32 @@ int64 +observedSourceArtifactRevision
+ +string + + + +(Optional) +

ObservedSourceArtifactRevision is the last observed Artifact.Revision +of the Source reference.

+ + + + +observedChartName
+ +string + + + +(Optional) +

ObservedChartName is the last observed chart name as defined by the +resolved chart reference.

+ + + + conditions
@@ -1837,14 +1862,14 @@ string (Optional) -

URL is the download link for the last chart pulled.

+

URL is the fetch link for the last chart pulled.

artifact
-
+ Artifact @@ -1873,11 +1898,11 @@ github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
-

HelmRepositorySpec +

HelmRepositorySpec

(Appears on: -HelmRepository) +HelmRepository)

HelmRepositorySpec defines the reference to a Helm repository.

@@ -1961,7 +1986,7 @@ Kubernetes meta/v1.Duration (Optional) -

The timeout of index downloading, defaults to 60s.

+

The timeout of index fetching, defaults to 60s.

@@ -1994,11 +2019,11 @@ github.com/fluxcd/pkg/apis/acl.AccessFrom
-

HelmRepositoryStatus +

HelmRepositoryStatus

(Appears on: -HelmRepository) +HelmRepository)

HelmRepositoryStatus defines the observed state of the HelmRepository.

@@ -2046,14 +2071,14 @@ string (Optional) -

URL is the download link for the last index fetched.

+

URL is the fetch link for the last index fetched.

artifact
- + Artifact @@ -2082,11 +2107,11 @@ github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
-

LocalHelmChartSourceReference +

LocalHelmChartSourceReference

(Appears on: -HelmChartSpec) +HelmChartSpec)

LocalHelmChartSourceReference contains enough information to let you locate the typed referenced object at namespace level.

@@ -2139,7 +2164,7 @@ string -

Source +

Source

Source interface must be supported by all API types.

diff --git a/go.mod b/go.mod index 16b1fd8e..5ea8d80f 100644 --- a/go.mod +++ b/go.mod @@ -9,14 +9,17 @@ require ( github.com/Masterminds/semver/v3 v3.1.1 github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 github.com/cyphar/filepath-securejoin v0.2.2 + github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c + github.com/docker/go-units v0.4.0 github.com/elazarl/goproxy v0.0.0-20211114080932-d06c3be7c11b - github.com/fluxcd/pkg/apis/meta v0.10.2 + github.com/fluxcd/pkg/apis/meta v0.12.0 github.com/fluxcd/pkg/gittestserver v0.5.0 github.com/fluxcd/pkg/gitutil v0.1.0 github.com/fluxcd/pkg/helmtestserver v0.4.0 github.com/fluxcd/pkg/lockedfile v0.1.0 - github.com/fluxcd/pkg/runtime v0.12.3 + github.com/fluxcd/pkg/runtime v0.13.1 github.com/fluxcd/pkg/ssh v0.2.0 + github.com/fluxcd/pkg/testserver v0.2.0 github.com/fluxcd/pkg/untar v0.1.0 github.com/fluxcd/pkg/version v0.1.0 github.com/fluxcd/source-controller/api v0.21.2 @@ -25,24 +28,25 @@ require ( github.com/go-logr/logr v1.2.2 github.com/libgit2/git2go/v33 v33.0.6 github.com/minio/minio-go/v7 v7.0.15 - github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.17.0 github.com/otiai10/copy v1.7.0 github.com/spf13/pflag v1.0.5 golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - google.golang.org/api v0.54.0 + google.golang.org/api v0.62.0 gotest.tools v2.2.0+incompatible helm.sh/helm/v3 v3.7.2 k8s.io/api v0.23.3 k8s.io/apimachinery v0.23.3 k8s.io/client-go v0.23.3 + k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 + sigs.k8s.io/cli-utils v0.28.0 sigs.k8s.io/controller-runtime v0.11.1 sigs.k8s.io/yaml v1.3.0 ) require ( - cloud.google.com/go v0.90.0 // indirect + cloud.google.com/go v0.99.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/BurntSushi/toml v0.3.1 // indirect github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect @@ -60,6 +64,7 @@ require ( 
github.com/bugsnag/bugsnag-go v2.1.2+incompatible // indirect github.com/bugsnag/panicwrap v1.3.4 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect github.com/containerd/containerd v1.5.7 // indirect github.com/containerd/continuity v0.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -69,15 +74,13 @@ require ( github.com/docker/docker-credential-helpers v0.6.3 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect - github.com/docker/go-units v0.4.0 // indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/emirpasic/gods v1.12.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect - github.com/fatih/color v1.7.0 // indirect + github.com/fatih/color v1.13.0 // indirect github.com/fluxcd/pkg/apis/acl v0.0.3 // indirect - github.com/fluxcd/pkg/testserver v0.1.0 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-errors/errors v1.0.1 // indirect github.com/go-git/gcfg v1.5.0 // indirect @@ -95,12 +98,12 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/gax-go/v2 v2.1.0 // indirect + github.com/googleapis/gax-go/v2 v2.1.1 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect - github.com/hashicorp/go-cleanhttp v0.5.1 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.6.8 // indirect github.com/huandu/xstrings v1.3.2 // indirect 
github.com/imdario/mergo v0.3.12 // indirect @@ -109,18 +112,18 @@ require ( github.com/jmoiron/sqlx v1.3.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/jstemmer/go-junit-report v0.9.1 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect github.com/klauspost/compress v1.13.5 // indirect github.com/klauspost/cpuid v1.3.1 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lib/pq v1.10.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.6 // indirect - github.com/mattn/go-colorable v0.0.9 // indirect - github.com/mattn/go-isatty v0.0.4 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/minio/md5-simd v1.1.0 // indirect @@ -136,7 +139,6 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/morikuni/aec v1.0.0 // indirect - github.com/nxadm/tail v1.4.8 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect github.com/opencontainers/runc v1.0.2 // indirect @@ -155,8 +157,8 @@ require ( github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/sosedoff/gitkit v0.2.1-0.20200818155723-72ebbcf5056d // indirect - github.com/spf13/cast v1.3.1 // indirect - github.com/spf13/cobra v1.2.1 // indirect + github.com/spf13/cast v1.4.1 // 
indirect + github.com/spf13/cobra v1.3.0 // indirect github.com/stretchr/testify v1.7.0 // indirect github.com/xanzy/ssh-agent v0.3.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect @@ -171,36 +173,31 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.19.1 // indirect - golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect - golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect - google.golang.org/grpc v1.40.0 // indirect + google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect + google.golang.org/grpc v1.42.0 // indirect google.golang.org/protobuf v1.27.1 // indirect gopkg.in/gorp.v1 v1.7.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.62.0 // indirect - gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect k8s.io/apiextensions-apiserver v0.23.3 // indirect k8s.io/apiserver v0.23.3 // indirect - k8s.io/cli-runtime v0.23.0 // indirect + k8s.io/cli-runtime v0.23.2 // indirect k8s.io/component-base v0.23.3 // indirect k8s.io/klog/v2 v2.40.1 // indirect k8s.io/kube-openapi 
v0.0.0-20220124234850-424119656bbf // indirect - k8s.io/kubectl v0.22.4 // indirect - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect + k8s.io/kubectl v0.23.2 // indirect oras.land/oras-go v0.4.0 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect sigs.k8s.io/kustomize/api v0.10.1 // indirect diff --git a/go.sum b/go.sum index 0b78f866..b6023992 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,13 @@ cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0 h1:MjvSkUq8RuAb+2JLDi5VQmmExRJPUQ3JLCWpRB6fmdw= cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0 h1:y/cM2iqGgGi5D5DQZl6D9STN/3dR/Vx5Mp8s752oJTY= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -33,6 +38,7 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= 
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -68,6 +74,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= @@ -122,7 +129,9 @@ github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.m github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -156,6 +165,7 @@ github.com/bugsnag/panicwrap v1.3.4 h1:A6sXFtDGsgU/4BLf5JT0o5uYg3EeKgGx3Sfs+/uk3 github.com/bugsnag/panicwrap v1.3.4/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -163,6 +173,7 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod 
h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -171,11 +182,19 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= 
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= @@ -222,6 +241,7 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= @@ -232,6 +252,8 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c h1:pyp/Dvd1gYP/D3z1zs46h0YhYzFp0hjxw0XVIO9+vh4= +github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c/go.mod h1:haYO9UW76kUUKpIBbv3ydaU5wZ/7r0yqp61PGzVRSYU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -285,7 +307,10 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod 
h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -295,14 +320,17 @@ github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod 
h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc= github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU= -github.com/fluxcd/pkg/apis/meta v0.10.2 h1:pnDBBEvfs4HaKiVAYgz+e/AQ8dLvcgmVfSeBroZ/KKI= -github.com/fluxcd/pkg/apis/meta v0.10.2/go.mod h1:KQ2er9xa6koy7uoPMZjIjNudB5p4tXs+w0GO6fRcy7I= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3/go.mod h1:ki5wJE4nuFOZt78q0RSYkrKwINgIBPynuswZhnTOSoI= +github.com/fluxcd/pkg/apis/meta v0.12.0 h1:Ssyltj6E9A7y32sZrzjog0m+bIsFM/3lHHfmpxesUAU= +github.com/fluxcd/pkg/apis/meta v0.12.0/go.mod h1:SPrSWMwDK7Ls2/4GadzhjDjPFbKrzzgzuZ0oDO3jzso= github.com/fluxcd/pkg/gittestserver v0.5.0 h1:pPdaz7pUsukt4eQ+xQeNwoypOXGGOHFHnPjIHQAv0tE= github.com/fluxcd/pkg/gittestserver v0.5.0/go.mod h1:mFEF/Xrg+CjQH4VFCRCou2qZmhWKo7EYcjr7MIoX6+s= github.com/fluxcd/pkg/gitutil v0.1.0 h1:VO3kJY/CKOCO4ysDNqfdpTg04icAKBOSb3lbR5uE/IE= @@ -311,12 +339,14 @@ github.com/fluxcd/pkg/helmtestserver v0.4.0 h1:RT0G5buw5qrzEfIIH0fklppIvPAaQF//p github.com/fluxcd/pkg/helmtestserver v0.4.0/go.mod h1:JOI9f3oXUFIWmMKWMBan7FjglAU+fRTO/sPPV/Kj3gQ= github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo= github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8= -github.com/fluxcd/pkg/runtime v0.12.3 h1:h21AZ3YG5MAP7DxFF9hfKrP+vFzys2L7CkUbPFjbP/0= -github.com/fluxcd/pkg/runtime v0.12.3/go.mod h1:imJ2xYy/d4PbSinX2IefmZk+iS2c1P5fY0js8mCE4SM= +github.com/fluxcd/pkg/runtime v0.13.0-rc.6/go.mod h1:4oKUO19TeudXrnCRnxCfMSS7EQTYpYlgfXwlQuDJ/Eg= +github.com/fluxcd/pkg/runtime v0.13.1 h1:/MVSjP/pySd7tNP5FaYMkWerfxf8NZmO7SlDkTUjkjU= +github.com/fluxcd/pkg/runtime v0.13.1/go.mod h1:G0EuJZJi/ZOjrWiclF4bBmkbzKhWssKuzSsmz3kVCMg= 
github.com/fluxcd/pkg/ssh v0.2.0 h1:e9V+HReOL7czm7edVzYS1e+CnFKz1/kHiUNfLRpBdH8= github.com/fluxcd/pkg/ssh v0.2.0/go.mod h1:EpQC7Ztdlbi8S/dlYXqVDZtHtLpN3FNl3N6zWujVzbA= -github.com/fluxcd/pkg/testserver v0.1.0 h1:nOYgM1HYFZNNSUFykuWDmrsxj4jQxUCvmLHWOQeqmyA= github.com/fluxcd/pkg/testserver v0.1.0/go.mod h1:fvt8BHhXw6c1+CLw1QFZxcQprlcXzsrL4rzXaiGM+Iw= +github.com/fluxcd/pkg/testserver v0.2.0 h1:Mj0TapmKaywI6Fi5wvt1LAZpakUHmtzWQpJNKQ0Krt4= +github.com/fluxcd/pkg/testserver v0.2.0/go.mod h1:bgjjydkXsZTeFzjz9Cr4heGANr41uTB1Aj1Q5qzuYVk= github.com/fluxcd/pkg/untar v0.1.0 h1:k97V/xV5hFrAkIkVPuv5AVhyxh1ZzzAKba/lbDfGo6o= github.com/fluxcd/pkg/untar v0.1.0/go.mod h1:aGswNyzB1mlz/T/kpOS58mITBMxMKc9tlJBH037A2HY= github.com/fluxcd/pkg/version v0.1.0 h1:v+SmCanmCB5Tj2Cx9TXlj+kNRfPGbAvirkeqsp7ZEAQ= @@ -500,13 +530,13 @@ github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 
h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= @@ -527,18 +557,28 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0 h1:bkKf0BeBXcSYa7f5Fyi9gMuQ8gNsxeiNpZjR6VxNZeo= +github.com/hashicorp/go-hclog v1.0.0/go.mod 
h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-retryablehttp v0.6.8 h1:92lWxgpa+fF3FozM4B3UZtHZMJX8T5XT+TFdCxsPyWs= github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -546,15 +586,23 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns 
v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -578,14 +626,13 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -620,6 +667,8 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= @@ -632,6 +681,7 @@ github.com/libgit2/git2go/v33 
v33.0.6/go.mod h1:KdpqkU+6+++4oHna/MIOgx4GCQ92IPCd github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -649,11 +699,20 @@ github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kN github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= 
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -667,6 +726,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= @@ -676,6 +737,7 @@ github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKU github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod 
h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.1.1 h1:Bp6x9R1Wn16SIz3OfeDr0b7RnCG2OB66Y7PQyC/cvq4= @@ -691,6 +753,7 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= @@ -766,9 +829,11 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.3 h1:7JgpsBaN0uMkyju4tbYHu0mnM55hNKVYLsXmwr15NQI= github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pelletier/go-toml 
v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= @@ -781,11 +846,13 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= @@ -799,6 +866,7 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= @@ -809,6 +877,7 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -827,8 +896,10 @@ github.com/rubenv/sql-migrate v0.0.0-20210614095031-55d5740dbbcc/go.mod h1:HFLT6 github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool 
v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -846,9 +917,7 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= @@ -857,17 +926,20 @@ github.com/sosedoff/gitkit v0.2.1-0.20200818155723-72ebbcf5056d/go.mod h1:A+o6Za github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= 
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -879,6 +951,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/spyzhov/ajson v0.4.2/go.mod 
h1:63V+CGM6f1Bu/p4nLIN8885ojBdt88TbLoSFzyqMuVA= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -898,6 +972,7 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= @@ -941,8 +1016,11 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod 
h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= @@ -995,6 +1073,7 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1033,7 +1112,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod 
h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1045,8 +1123,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1068,6 +1146,7 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1095,11 +1174,13 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod 
h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1122,6 +1203,7 @@ golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= 
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1144,6 +1226,7 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1158,9 +1241,12 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1168,6 +1254,7 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1204,6 +1291,7 @@ golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1222,8 +1310,15 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c= @@ -1271,6 +1366,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod 
h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1316,7 +1412,6 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff h1:VX/uD7MK0AHXGiScH3fsieUQUcpmRERPDYtqZdJnA+Q= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1353,8 +1448,14 @@ google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtuk google.golang.org/api v0.49.0/go.mod h1:BECiH72wsfwUvOVn3+btPD5WHi0LzavZReBndi42L18= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0 
h1:ECJUVngj71QI6XEm7b1sAf8BljU5inEhMbKPR8Lxhhk= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0 h1:PhGymJMXfGBzc4lBRmrx9+1w4w2wEzURHNGF/sD/xGc= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1420,9 +1521,21 @@ google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 h1:NHN4wOCScVzKhPenJ2dt+BTs3X/XkBVI/Rh4iDt55T8= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1449,8 +1562,10 @@ google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= 
-google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1485,8 +1600,9 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -1526,6 +1642,7 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= +k8s.io/api 
v0.23.2/go.mod h1:sYuDb3flCtRPI8ghn6qFrcK5ZBu2mhbElxRE95qpwlI= k8s.io/api v0.23.3 h1:KNrME8KHGr12Ozjf8ytOewKzZh6hl/hHUZeHddT3a38= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/apiextensions-apiserver v0.22.4/go.mod h1:kH9lxD8dbJ+k0ZizGET55lFgdGjO8t45fgZnCVdZEpw= @@ -1535,7 +1652,7 @@ k8s.io/apiextensions-apiserver v0.23.3/go.mod h1:/ZpRXdgKZA6DvIVPEmXDCZJN53YIQEU k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0= k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= -k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= +k8s.io/apimachinery v0.23.2/go.mod h1:zDqeV0AK62LbCI0CI7KbWCAYdLg+E+8UXJ0rIz5gmS8= k8s.io/apimachinery v0.23.3 h1:7IW6jxNzrXTsP0c8yXz2E5Yx/WTzVPTsHIx/2Vm0cIk= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= @@ -1544,22 +1661,26 @@ k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= k8s.io/apiserver v0.23.3 h1:gWY1DmA0AdAGR/H+Q/1FtyGkFq8xqSaZOw7oLopmO8k= k8s.io/apiserver v0.23.3/go.mod h1:3HhsTmC+Pn+Jctw+Ow0LHA4dQ4oXrQ4XJDzrVDG64T4= k8s.io/cli-runtime v0.22.4/go.mod h1:x35r0ERHXr/MrbR1C6MPJxQ3xKG6+hXi9m2xLzlMPZA= -k8s.io/cli-runtime v0.23.0 h1:UONt0BV2+edjUVAXuR1nnOAL2CB9r+Gl9yk4UBQpKfs= -k8s.io/cli-runtime v0.23.0/go.mod h1:B5N3YH0KP1iKr6gEuJ/RRmGjO0mJQ/f/JrsmEiPQAlU= +k8s.io/cli-runtime v0.23.2 h1:4zOZX78mFSakwe4gef81XDBu94Yu0th6bfveTOx8ZQk= +k8s.io/cli-runtime v0.23.2/go.mod h1:Ag70akCDvwux4HxY+nH2J3UqE2e6iwSSdG1HE6p1VTU= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA= k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= +k8s.io/client-go v0.23.2/go.mod 
h1:k3YbsWg6GWdHF1THHTQP88X9RhB1DWPo3Dq7KfU/D1c= k8s.io/client-go v0.23.3 h1:23QYUmCQ/W6hW78xIwm3XqZrrKZM+LWDqW2zfo+szJs= k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE= k8s.io/code-generator v0.22.4/go.mod h1:qjYl54pQ/emhkT0UxbufbREYJMWsHNNV/jSVwhYZQGw= k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= +k8s.io/code-generator v0.23.2/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/component-base v0.22.4/go.mod h1:MrSaQy4a3tFVViff8TZL6JHYSewNCLshZCwHYM58v5A= k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= +k8s.io/component-base v0.23.2/go.mod h1:wS9Z03MO3oJ0RU8bB/dbXTiluGju+SC/F5i660gxB8c= k8s.io/component-base v0.23.3 h1:q+epprVdylgecijVGVdf4MbizEL2feW4ssd7cdo6LVY= k8s.io/component-base v0.23.3/go.mod h1:1Smc4C60rWG7d3HjSYpIwEbySQ3YWg0uzH5a2AtaTLg= k8s.io/component-helpers v0.22.4/go.mod h1:A50qTyczDFbhZDifIfS2zFrHuPk9UNOWPpvNZ+3RSIs= +k8s.io/component-helpers v0.23.2/go.mod h1:J6CMwiaf0izLoNwiLl2OymB4+rGTsTpWp6PL/AqOM4U= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1577,9 +1698,11 @@ k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2R k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf h1:M9XBsiMslw2lb2ZzglC0TOkBPK5NQi0/noUrdnoFwUg= k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kubectl v0.22.4 
h1:ECUO1QWyZ70DiIKEfgBx+8i9D98uspVOwgc1APs/07w= k8s.io/kubectl v0.22.4/go.mod h1:ok2qRT6y2Gy4+y+mniJVyUMKeBHP4OWS9Rdtf/QTM5I= +k8s.io/kubectl v0.23.2 h1:YakGzFN1csIOW/Us5VsLxjcu5Q6Vh5rqcvukcNuBwFk= +k8s.io/kubectl v0.23.2/go.mod h1:zWm5wt8PdRmHiVhE9a7q7XYW4WFX9StkZGnC18+1v3M= k8s.io/metrics v0.22.4/go.mod h1:6F/iwuYb1w2QDCoHkeMFLf4pwHBcYKLm4mPtVHKYrIw= +k8s.io/metrics v0.23.2/go.mod h1:idJHc+lLK5teHUC6Z2+d6qTKA12d5FLDxmC/DHiUYKc= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1599,6 +1722,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyz sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27/go.mod h1:tq2nT0Kx7W+/f2JVE+zxYtUhdjuELJkVpNz+x/QN5R4= +sigs.k8s.io/cli-utils v0.28.0 h1:gsvwqygoXlW2y8CmKdflQJNZp1Yhi4geATW3/Ei7oYc= +sigs.k8s.io/cli-utils v0.28.0/go.mod h1:WDVRa5/eQBKntG++uyKdyT+xU7MLdCR4XsgseqL5uX4= sigs.k8s.io/controller-runtime v0.11.0/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA= sigs.k8s.io/controller-runtime v0.11.1 h1:7YIHT2QnHJArj/dk9aUkYhfqfK5cIxPOX5gPECfdZLU= sigs.k8s.io/controller-runtime v0.11.1/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA= @@ -1609,7 +1734,9 @@ sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH8 sigs.k8s.io/kustomize/api v0.10.1 h1:KgU7hfYoscuqag84kxtzKdEC3mKMb99DPI3a0eaV1d0= sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8= sigs.k8s.io/kustomize/cmd/config v0.9.13/go.mod 
h1:7547FLF8W/lTaDf0BDqFTbZxM9zqwEJqCKN9sSR0xSs= +sigs.k8s.io/kustomize/cmd/config v0.10.2/go.mod h1:K2aW7nXJ0AaT+VA/eO0/dzFLxmpFcTzudmAgDwPY1HQ= sigs.k8s.io/kustomize/kustomize/v4 v4.2.0/go.mod h1:MOkR6fmhwG7hEDRXBYELTi5GSFcLwfqwzTRHW3kv5go= +sigs.k8s.io/kustomize/kustomize/v4 v4.4.1/go.mod h1:qOKJMMz2mBP+vcS7vK+mNz4HBLjaQSWRY22EF6Tb7Io= sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM= sigs.k8s.io/kustomize/kyaml v0.13.0 h1:9c+ETyNfSrVhxvphs+K2dzT3dh5oVPPEqPOE/cUpScY= sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index ae514112..74dbebc3 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,5 +1,5 @@ /* -Copyright 2020 The Flux authors +Copyright 2022 The Flux authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/internal/error/error.go b/internal/error/error.go new file mode 100644 index 00000000..4333c460 --- /dev/null +++ b/internal/error/error.go @@ -0,0 +1,79 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package error + +import "time" + +// Stalling is the reconciliation stalled state error. It contains an error +// and a reason for the stalled condition. +type Stalling struct { + // Reason is the stalled condition reason string. 
+ Reason string + // Err is the error that caused stalling. This can be used as the message in + // stalled condition. + Err error +} + +// Error implements error interface. +func (se *Stalling) Error() string { + return se.Err.Error() +} + +// Unwrap returns the underlying error. +func (se *Stalling) Unwrap() error { + return se.Err +} + +// Event is an error event. It can be used to construct an event to be +// recorded. +type Event struct { + // Reason is the reason for the event error. + Reason string + // Error is the actual error for the event. + Err error +} + +// Error implements error interface. +func (ee *Event) Error() string { + return ee.Err.Error() +} + +// Unwrap returns the underlying error. +func (ee *Event) Unwrap() error { + return ee.Err +} + +// Waiting is the reconciliation wait state error. It contains an error, wait +// duration and a reason for the wait. +type Waiting struct { + // RequeueAfter is the wait duration after which to requeue. + RequeueAfter time.Duration + // Reason is the reason for the wait. + Reason string + // Err is the error that caused the wait. + Err error +} + +// Error implement error interface. +func (we *Waiting) Error() string { + return we.Err.Error() +} + +// Unwrap returns the underlying error. +func (we *Waiting) Unwrap() error { + return we.Err +} diff --git a/internal/helm/chart/builder.go b/internal/helm/chart/builder.go index 9aa2a17e..e3ce2207 100644 --- a/internal/helm/chart/builder.go +++ b/internal/helm/chart/builder.go @@ -115,15 +115,16 @@ func (o BuildOptions) GetValuesFiles() []string { return o.ValuesFiles } -// Build contains the Builder.Build result, including specific +// Build contains the (partial) Builder.Build result, including specific // information about the built chart like ResolvedDependencies. type Build struct { - // Path is the absolute path to the packaged chart. - Path string - // Name of the packaged chart. + // Name of the chart. Name string - // Version of the packaged chart. 
+ // Version of the chart. Version string + // Path is the absolute path to the packaged chart. + // Can be empty, in which case a failure should be assumed. + Path string // ValuesFiles is the list of files used to compose the chart's // default "values.yaml". ValuesFiles []string @@ -138,30 +139,45 @@ type Build struct { // Summary returns a human-readable summary of the Build. func (b *Build) Summary() string { - if b == nil || b.Name == "" || b.Version == "" { - return "No chart build." + if !b.HasMetadata() { + return "no chart build" } var s strings.Builder - var action = "Pulled" - if b.Packaged { - action = "Packaged" + var action = "new" + if b.Path != "" { + action = "pulled" + if b.Packaged { + action = "packaged" + } } s.WriteString(fmt.Sprintf("%s '%s' chart with version '%s'", action, b.Name, b.Version)) - if b.Packaged && len(b.ValuesFiles) > 0 { - s.WriteString(fmt.Sprintf(", with merged values files %v", b.ValuesFiles)) + if len(b.ValuesFiles) > 0 { + s.WriteString(fmt.Sprintf(" and merged values files %v", b.ValuesFiles)) } - if b.Packaged && b.ResolvedDependencies > 0 { - s.WriteString(fmt.Sprintf(", resolving %d dependencies before packaging", b.ResolvedDependencies)) - } - - s.WriteString(".") return s.String() } +// HasMetadata returns if the Build contains chart metadata. +// +// NOTE: This may return True while the build did not Complete successfully. +// Which means it was able to successfully collect the metadata from the chart, +// but failed further into the process. +func (b *Build) HasMetadata() bool { + if b == nil { + return false + } + return b.Name != "" && b.Version != "" +} + +// Complete returns if the Build completed successfully. +func (b *Build) Complete() bool { + return b.HasMetadata() && b.Path != "" +} + // String returns the Path of the Build. 
func (b *Build) String() string { if b == nil { diff --git a/internal/helm/chart/builder_local.go b/internal/helm/chart/builder_local.go index 721238fe..2710e41a 100644 --- a/internal/helm/chart/builder_local.go +++ b/internal/helm/chart/builder_local.go @@ -77,10 +77,10 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, // to a chart curMeta, err := LoadChartMetadata(localRef.Path) if err != nil { - return nil, &BuildError{Reason: ErrChartPull, Err: err} + return nil, &BuildError{Reason: ErrChartReference, Err: err} } if err = curMeta.Validate(); err != nil { - return nil, &BuildError{Reason: ErrChartPull, Err: err} + return nil, &BuildError{Reason: ErrChartReference, Err: err} } result := &Build{} @@ -101,6 +101,9 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, result.Version = ver.String() } + isChartDir := pathIsDir(localRef.Path) + requiresPackaging := isChartDir || opts.VersionMetadata != "" || len(opts.GetValuesFiles()) != 0 + // If all the following is true, we do not need to package the chart: // - Chart name from cached chart matches resolved name // - Chart version from cached chart matches calculated version @@ -112,7 +115,9 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, if err = curMeta.Validate(); err == nil { if result.Name == curMeta.Name && result.Version == curMeta.Version { result.Path = opts.CachedChart - result.ValuesFiles = opts.ValuesFiles + result.ValuesFiles = opts.GetValuesFiles() + result.Packaged = requiresPackaging + return result, nil } } @@ -121,10 +126,9 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, // If the chart at the path is already packaged and no custom values files // options are set, we can copy the chart without making modifications - isChartDir := pathIsDir(localRef.Path) - if !isChartDir && len(opts.GetValuesFiles()) == 0 { + if !requiresPackaging { if err = 
copyFileToPath(localRef.Path, p); err != nil { - return nil, &BuildError{Reason: ErrChartPull, Err: err} + return result, &BuildError{Reason: ErrChartPull, Err: err} } result.Path = p return result, nil @@ -134,7 +138,7 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, var mergedValues map[string]interface{} if len(opts.GetValuesFiles()) > 0 { if mergedValues, err = mergeFileValues(localRef.WorkDir, opts.ValuesFiles); err != nil { - return nil, &BuildError{Reason: ErrValuesFilesMerge, Err: err} + return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} } } @@ -143,7 +147,7 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, // or because we have merged values and need to repackage chart, err := loader.Load(localRef.Path) if err != nil { - return nil, &BuildError{Reason: ErrChartPackage, Err: err} + return result, &BuildError{Reason: ErrChartPackage, Err: err} } // Set earlier resolved version (with metadata) chart.Metadata.Version = result.Version @@ -151,7 +155,7 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, // Overwrite default values with merged values, if any if ok, err = OverwriteChartDefaultValues(chart, mergedValues); ok || err != nil { if err != nil { - return nil, &BuildError{Reason: ErrValuesFilesMerge, Err: err} + return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} } result.ValuesFiles = opts.GetValuesFiles() } @@ -160,19 +164,19 @@ func (b *localChartBuilder) Build(ctx context.Context, ref Reference, p string, if isChartDir { if b.dm == nil { err = fmt.Errorf("local chart builder requires dependency manager for unpackaged charts") - return nil, &BuildError{Reason: ErrDependencyBuild, Err: err} + return result, &BuildError{Reason: ErrDependencyBuild, Err: err} } if result.ResolvedDependencies, err = b.dm.Build(ctx, ref, chart); err != nil { - return nil, &BuildError{Reason: ErrDependencyBuild, Err: err} + return result, 
&BuildError{Reason: ErrDependencyBuild, Err: err} } } // Package the chart if err = packageToPath(chart, p); err != nil { - return nil, &BuildError{Reason: ErrChartPackage, Err: err} + return result, &BuildError{Reason: ErrChartPackage, Err: err} } result.Path = p - result.Packaged = true + result.Packaged = requiresPackaging return result, nil } @@ -186,7 +190,7 @@ func mergeFileValues(baseDir string, paths []string) (map[string]interface{}, er if err != nil { return nil, err } - if f, err := os.Stat(secureP); os.IsNotExist(err) || !f.Mode().IsRegular() { + if f, err := os.Stat(secureP); err != nil || !f.Mode().IsRegular() { return nil, fmt.Errorf("no values file found at path '%s' (reference '%s')", strings.TrimPrefix(secureP, baseDir), p) } diff --git a/internal/helm/chart/builder_remote.go b/internal/helm/chart/builder_remote.go index 3252ff22..778efd25 100644 --- a/internal/helm/chart/builder_remote.go +++ b/internal/helm/chart/builder_remote.go @@ -82,12 +82,13 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o cv, err := b.remote.Get(remoteRef.Name, remoteRef.Version) if err != nil { err = fmt.Errorf("failed to get chart version for remote reference: %w", err) - return nil, &BuildError{Reason: ErrChartPull, Err: err} + return nil, &BuildError{Reason: ErrChartReference, Err: err} } result := &Build{} result.Name = cv.Name result.Version = cv.Version + // Set build specific metadata if instructed if opts.VersionMetadata != "" { ver, err := semver.NewVersion(result.Version) @@ -102,6 +103,8 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o result.Version = ver.String() } + requiresPackaging := len(opts.GetValuesFiles()) != 0 || opts.VersionMetadata != "" + // If all the following is true, we do not need to download and/or build the chart: // - Chart name from cached chart matches resolved name // - Chart version from cached chart matches calculated version @@ -114,6 +117,7 @@ func (b 
*remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o if result.Name == curMeta.Name && result.Version == curMeta.Version { result.Path = opts.CachedChart result.ValuesFiles = opts.GetValuesFiles() + result.Packaged = requiresPackaging return result, nil } } @@ -124,12 +128,12 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o res, err := b.remote.DownloadChart(cv) if err != nil { err = fmt.Errorf("failed to download chart for remote reference: %w", err) - return nil, &BuildError{Reason: ErrChartPull, Err: err} + return result, &BuildError{Reason: ErrChartPull, Err: err} } // Use literal chart copy from remote if no custom values files options are - // set or build option version metadata isn't set. - if len(opts.GetValuesFiles()) == 0 && opts.VersionMetadata == "" { + // set or version metadata isn't set. + if !requiresPackaging { if err = validatePackageAndWriteToPath(res, p); err != nil { return nil, &BuildError{Reason: ErrChartPull, Err: err} } @@ -141,14 +145,14 @@ func (b *remoteChartBuilder) Build(_ context.Context, ref Reference, p string, o var chart *helmchart.Chart if chart, err = loader.LoadArchive(res); err != nil { err = fmt.Errorf("failed to load downloaded chart: %w", err) - return nil, &BuildError{Reason: ErrChartPackage, Err: err} + return result, &BuildError{Reason: ErrChartPackage, Err: err} } chart.Metadata.Version = result.Version mergedValues, err := mergeChartValues(chart, opts.ValuesFiles) if err != nil { err = fmt.Errorf("failed to merge chart values: %w", err) - return nil, &BuildError{Reason: ErrValuesFilesMerge, Err: err} + return result, &BuildError{Reason: ErrValuesFilesMerge, Err: err} } // Overwrite default values with merged values, if any if ok, err = OverwriteChartDefaultValues(chart, mergedValues); ok || err != nil { diff --git a/internal/helm/chart/builder_test.go b/internal/helm/chart/builder_test.go index d797a209..4d081229 100644 --- a/internal/helm/chart/builder_test.go 
+++ b/internal/helm/chart/builder_test.go @@ -138,12 +138,32 @@ func TestChartBuildResult_Summary(t *testing.T) { want string }{ { - name: "Simple", + name: "Build with metadata", build: &Build{ Name: "chart", Version: "1.2.3-rc.1+bd6bf40", }, - want: "Pulled 'chart' chart with version '1.2.3-rc.1+bd6bf40'.", + want: "new 'chart' chart with version '1.2.3-rc.1+bd6bf40'", + }, + { + name: "Pulled chart", + build: &Build{ + Name: "chart", + Version: "1.2.3-rc.1+bd6bf40", + Path: "chart.tgz", + }, + want: "pulled 'chart' chart with version '1.2.3-rc.1+bd6bf40'", + }, + { + name: "Packaged chart", + build: &Build{ + Name: "chart", + Version: "arbitrary-version", + Packaged: true, + ValuesFiles: []string{"a.yaml", "b.yaml"}, + Path: "chart.tgz", + }, + want: "packaged 'chart' chart with version 'arbitrary-version' and merged values files [a.yaml b.yaml]", }, { name: "With values files", @@ -152,28 +172,19 @@ func TestChartBuildResult_Summary(t *testing.T) { Version: "arbitrary-version", Packaged: true, ValuesFiles: []string{"a.yaml", "b.yaml"}, + Path: "chart.tgz", }, - want: "Packaged 'chart' chart with version 'arbitrary-version', with merged values files [a.yaml b.yaml].", - }, - { - name: "With dependencies", - build: &Build{ - Name: "chart", - Version: "arbitrary-version", - Packaged: true, - ResolvedDependencies: 5, - }, - want: "Packaged 'chart' chart with version 'arbitrary-version', resolving 5 dependencies before packaging.", + want: "packaged 'chart' chart with version 'arbitrary-version' and merged values files [a.yaml b.yaml]", }, { name: "Empty build", build: &Build{}, - want: "No chart build.", + want: "no chart build", }, { name: "Nil build", build: nil, - want: "No chart build.", + want: "no chart build", }, } for _, tt := range tests { diff --git a/internal/helm/chart/errors.go b/internal/helm/chart/errors.go index dddd2e29..5b3a5bec 100644 --- a/internal/helm/chart/errors.go +++ b/internal/helm/chart/errors.go @@ -22,22 +22,29 @@ import ( ) // 
BuildErrorReason is the descriptive reason for a BuildError. -type BuildErrorReason string +type BuildErrorReason struct { + // Reason is the programmatic build error reason in CamelCase. + Reason string + + // Summary is the human build error reason, used to provide + // the Error string, and further context to the BuildError. + Summary string +} // Error returns the string representation of BuildErrorReason. func (e BuildErrorReason) Error() string { - return string(e) + return e.Summary } // BuildError contains a wrapped Err and a Reason indicating why it occurred. type BuildError struct { - Reason error + Reason BuildErrorReason Err error } // Error returns Err as a string, prefixed with the Reason to provide context. func (e *BuildError) Error() string { - if e.Reason == nil { + if e.Reason.Error() == "" { return e.Err.Error() } return fmt.Sprintf("%s: %s", e.Reason.Error(), e.Err.Error()) @@ -49,7 +56,7 @@ func (e *BuildError) Error() string { // err := &BuildError{Reason: ErrChartPull, Err: errors.New("arbitrary transport error")} // errors.Is(err, ErrChartPull) func (e *BuildError) Is(target error) bool { - if e.Reason != nil && e.Reason == target { + if e.Reason == target { return true } return errors.Is(e.Err, target) @@ -60,11 +67,21 @@ func (e *BuildError) Unwrap() error { return e.Err } +func IsPersistentBuildErrorReason(err error) bool { + switch err { + case ErrChartReference, ErrChartMetadataPatch, ErrValuesFilesMerge: + return true + default: + return false + } +} + var ( - ErrChartReference = BuildErrorReason("chart reference error") - ErrChartPull = BuildErrorReason("chart pull error") - ErrChartMetadataPatch = BuildErrorReason("chart metadata patch error") - ErrValuesFilesMerge = BuildErrorReason("values files merge error") - ErrDependencyBuild = BuildErrorReason("dependency build error") - ErrChartPackage = BuildErrorReason("chart package error") + ErrChartReference = BuildErrorReason{Reason: "InvalidChartReference", Summary: "invalid chart 
reference"} + ErrChartPull = BuildErrorReason{Reason: "ChartPullError", Summary: "chart pull error"} + ErrChartMetadataPatch = BuildErrorReason{Reason: "MetadataPatchError", Summary: "chart metadata patch error"} + ErrValuesFilesMerge = BuildErrorReason{Reason: "ValuesFilesError", Summary: "values files merge error"} + ErrDependencyBuild = BuildErrorReason{Reason: "DependencyBuildError", Summary: "dependency build error"} + ErrChartPackage = BuildErrorReason{Reason: "ChartPackageError", Summary: "chart package error"} + ErrUnknown = BuildErrorReason{Reason: "Unknown", Summary: "unknown build error"} ) diff --git a/internal/helm/chart/errors_test.go b/internal/helm/chart/errors_test.go index f006f336..13428e6c 100644 --- a/internal/helm/chart/errors_test.go +++ b/internal/helm/chart/errors_test.go @@ -26,7 +26,7 @@ import ( func TestBuildErrorReason_Error(t *testing.T) { g := NewWithT(t) - err := BuildErrorReason("reason") + err := BuildErrorReason{"Reason", "reason"} g.Expect(err.Error()).To(Equal("reason")) } @@ -39,7 +39,7 @@ func TestBuildError_Error(t *testing.T) { { name: "with reason", err: &BuildError{ - Reason: BuildErrorReason("reason"), + Reason: BuildErrorReason{"Reason", "reason"}, Err: errors.New("error"), }, want: "reason: error", diff --git a/internal/object/object.go b/internal/object/object.go new file mode 100644 index 00000000..c4bd32c2 --- /dev/null +++ b/internal/object/object.go @@ -0,0 +1,114 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "errors" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + ErrObservedGenerationNotFound = errors.New("observed generation not found") + ErrLastHandledReconcileAtNotFound = errors.New("last handled reconcile at not found") + ErrRequeueIntervalNotFound = errors.New("requeue interval not found") +) + +// toUnstructured converts a runtime object into Unstructured. +// Based on https://github.com/fluxcd/pkg/blob/b4a14854c75753ea9431693b39c4be672f246552/runtime/patch/utils.go#L55. +func toUnstructured(obj runtime.Object) (*unstructured.Unstructured, error) { + // If the incoming object is already unstructured, perform a deep copy first + // otherwise DefaultUnstructuredConverter ends up returning the inner map without + // making a copy. + if _, ok := obj.(runtime.Unstructured); ok { + obj = obj.DeepCopyObject() + } + rawMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: rawMap}, nil +} + +// GetStatusLastHandledReconcileAt returns the status.lastHandledReconcileAt +// value of a given runtime object, if present. +func GetStatusLastHandledReconcileAt(obj runtime.Object) (string, error) { + u, err := toUnstructured(obj) + if err != nil { + return "", err + } + ra, found, err := unstructured.NestedString(u.Object, "status", "lastHandledReconcileAt") + if err != nil { + return "", err + } + if !found { + return "", ErrLastHandledReconcileAtNotFound + } + return ra, nil +} + +// SetStatusLastHandledReconcileAt sets the status.lastHandledReconcileAt value +// of a given runtime object. 
+func SetStatusLastHandledReconcileAt(obj runtime.Object, val string) error { + content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return err + } + u := unstructured.Unstructured{} + u.SetUnstructuredContent(content) + if err := unstructured.SetNestedField(u.Object, val, "status", "lastHandledReconcileAt"); err != nil { + return err + } + return runtime.DefaultUnstructuredConverter.FromUnstructured(u.Object, obj) +} + +// GetStatusObservedGeneration returns the status.observedGeneration of a given +// runtime object. +func GetStatusObservedGeneration(obj runtime.Object) (int64, error) { + u, err := toUnstructured(obj) + if err != nil { + return 0, err + } + og, found, err := unstructured.NestedInt64(u.Object, "status", "observedGeneration") + if err != nil { + return 0, err + } + if !found { + return 0, ErrObservedGenerationNotFound + } + return og, nil +} + +// GetRequeueInterval returns the spec.interval of a given runtime object, if +// present. +func GetRequeueInterval(obj runtime.Object) (time.Duration, error) { + period := time.Second + u, err := toUnstructured(obj) + if err != nil { + return period, err + } + interval, found, err := unstructured.NestedString(u.Object, "spec", "interval") + if err != nil { + return period, err + } + if !found { + return period, ErrRequeueIntervalNotFound + } + return time.ParseDuration(interval) +} diff --git a/internal/object/object_test.go b/internal/object/object_test.go new file mode 100644 index 00000000..9f0d80bb --- /dev/null +++ b/internal/object/object_test.go @@ -0,0 +1,88 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package object + +import ( + "testing" + "time" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" +) + +func TestGetStatusLastHandledReconcileAt(t *testing.T) { + g := NewWithT(t) + + // Get unset status lastHandledReconcileAt. + obj := &sourcev1.GitRepository{} + _, err := GetStatusLastHandledReconcileAt(obj) + g.Expect(err).To(Equal(ErrLastHandledReconcileAtNotFound)) + + // Get set status lastHandledReconcileAt. + obj.Status.LastHandledReconcileAt = "foo" + ra, err := GetStatusLastHandledReconcileAt(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(ra).To(Equal("foo")) +} + +func TestSetStatusLastHandledReconcileAt(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{} + err := SetStatusLastHandledReconcileAt(obj, "now") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(obj.Status.LastHandledReconcileAt).To(Equal("now")) +} + +func TestGetStatusObservedGeneration(t *testing.T) { + g := NewWithT(t) + + // Get unset status observedGeneration. + obj := &sourcev1.GitRepository{} + _, err := GetStatusObservedGeneration(obj) + g.Expect(err).To(Equal(ErrObservedGenerationNotFound)) + + // Get set status observedGeneration. + obj.Status.ObservedGeneration = 7 + og, err := GetStatusObservedGeneration(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(og).To(Equal(int64(7))) +} + +func TestGetRequeueInterval(t *testing.T) { + g := NewWithT(t) + + // Get empty requeue interval value. 
+ obj := &sourcev1.GitRepository{} + pd, err := GetRequeueInterval(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(pd).To(Equal(time.Duration(0))) + + // Get set requeue interval value. + obj.Spec.Interval = metav1.Duration{Duration: 3 * time.Second} + pd, err = GetRequeueInterval(obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(pd).To(Equal(3 * time.Second)) + + // Get non-existent requeue interval value. + obj2 := &corev1.Secret{} + _, err = GetRequeueInterval(obj2) + g.Expect(err).To(Equal(ErrRequeueIntervalNotFound)) +} diff --git a/internal/reconcile/reconcile.go b/internal/reconcile/reconcile.go new file mode 100644 index 00000000..af0c71b9 --- /dev/null +++ b/internal/reconcile/reconcile.go @@ -0,0 +1,160 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile + +import ( + "time" + + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + serror "github.com/fluxcd/source-controller/internal/error" +) + +// Result is a type for creating an abstraction for the controller-runtime +// reconcile Result to simplify the Result values. +type Result int + +const ( + // ResultEmpty indicates a reconcile result which does not requeue. It is + // also used when returning an error, since the error overshadows result. 
+ ResultEmpty Result = iota + // ResultRequeue indicates a reconcile result which should immediately + // requeue. + ResultRequeue + // ResultSuccess indicates a reconcile success result. + // For a reconciler that requeues regularly at a fixed interval, runtime + // result with a fixed RequeueAfter is success result. + // For a reconciler that doesn't requeue on successful reconciliation, + // an empty runtime result is success result. + // It is usually returned at the end of a reconciler/sub-reconciler. + ResultSuccess +) + +// RuntimeResultBuilder defines an interface for runtime result builders. This +// can be implemented to build custom results based on the context of the +// reconciler. +type RuntimeResultBuilder interface { + BuildRuntimeResult(rr Result, err error) ctrl.Result +} + +// AlwaysRequeueResultBuilder implements a RuntimeResultBuilder for always +// requeuing reconcilers. A successful reconciliation result for such +// reconcilers contains a fixed RequeueAfter value. +type AlwaysRequeueResultBuilder struct { + // RequeueAfter is the fixed period at which the reconciler requeues on + // successful execution. + RequeueAfter time.Duration +} + +// BuildRuntimeResult converts a given Result and error into the +// return values of a controller's Reconcile function. +func (r AlwaysRequeueResultBuilder) BuildRuntimeResult(rr Result, err error) ctrl.Result { + // Handle special errors that contribute to expressing the result. + if e, ok := err.(*serror.Waiting); ok { + return ctrl.Result{RequeueAfter: e.RequeueAfter} + } + + switch rr { + case ResultRequeue: + return ctrl.Result{Requeue: true} + case ResultSuccess: + return ctrl.Result{RequeueAfter: r.RequeueAfter} + default: + return ctrl.Result{} + } +} + +// ComputeReconcileResult analyzes the reconcile results (result + error), +// updates the status conditions of the object with any corrections and returns +// object patch configuration, runtime result and runtime error. 
The caller is +// responsible for using the patch configuration while patching the object in +// the API server. +// The RuntimeResultBuilder is used to define how the ctrl.Result is computed. +func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, rb RuntimeResultBuilder) ([]patch.Option, ctrl.Result, error) { + var pOpts []patch.Option + + // Compute the runtime result. + var result ctrl.Result + if rb != nil { + result = rb.BuildRuntimeResult(res, recErr) + } + + // Remove reconciling condition on successful reconciliation. + if recErr == nil && res == ResultSuccess { + conditions.Delete(obj, meta.ReconcilingCondition) + } + + // Analyze the reconcile error. + switch t := recErr.(type) { + case *serror.Stalling: + if res == ResultEmpty { + // The current generation has been reconciled successfully and it + // has resulted in a stalled state. Return no error to stop further + // requeuing. + pOpts = append(pOpts, patch.WithStatusObservedGeneration{}) + conditions.MarkStalled(obj, t.Reason, t.Error()) + return pOpts, result, nil + } + // NOTE: Non-empty result with stalling error indicates that the + // returned result is incorrect. + case *serror.Waiting: + // The reconcile resulted in waiting error, remove stalled condition if + // present. + conditions.Delete(obj, meta.StalledCondition) + // The reconciler needs to wait and retry. Return no error. + return pOpts, result, nil + case nil: + // The reconcile didn't result in any error, we are not in stalled + // state. If a requeue is requested, the current generation has not been + // reconciled successfully. + if res != ResultRequeue { + pOpts = append(pOpts, patch.WithStatusObservedGeneration{}) + } + conditions.Delete(obj, meta.StalledCondition) + default: + // The reconcile resulted in some error, but we are not in stalled + // state. 
+ conditions.Delete(obj, meta.StalledCondition) + } + + return pOpts, result, recErr +} + +// LowestRequeuingResult returns the ReconcileResult with the lowest requeue +// period. +// Weightage: +// ResultRequeue - immediate requeue (lowest) +// ResultSuccess - requeue at an interval +// ResultEmpty - no requeue +func LowestRequeuingResult(i, j Result) Result { + switch { + case i == ResultEmpty: + return j + case j == ResultEmpty: + return i + case i == ResultRequeue: + return i + case j == ResultRequeue: + return j + default: + return j + } +} diff --git a/internal/reconcile/reconcile_test.go b/internal/reconcile/reconcile_test.go new file mode 100644 index 00000000..127e3c18 --- /dev/null +++ b/internal/reconcile/reconcile_test.go @@ -0,0 +1,204 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile + +import ( + "fmt" + "testing" + "time" + + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" +) + +func TestLowestRequeuingResult(t *testing.T) { + tests := []struct { + name string + i Result + j Result + wantResult Result + }{ + {"bail,requeue", ResultEmpty, ResultRequeue, ResultRequeue}, + {"bail,requeueInterval", ResultEmpty, ResultSuccess, ResultSuccess}, + {"requeue,bail", ResultRequeue, ResultEmpty, ResultRequeue}, + {"requeue,requeueInterval", ResultRequeue, ResultSuccess, ResultRequeue}, + {"requeueInterval,requeue", ResultSuccess, ResultRequeue, ResultRequeue}, + {"requeueInterval,requeueInterval", ResultSuccess, ResultSuccess, ResultSuccess}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(LowestRequeuingResult(tt.i, tt.j)).To(Equal(tt.wantResult)) + }) + } +} + +// This test uses AlwaysRequeueResultBuilder as the RuntimeResultBuilder. 
+func TestComputeReconcileResult(t *testing.T) { + testSuccessInterval := time.Minute + tests := []struct { + name string + result Result + beforeFunc func(obj conditions.Setter) + recErr error + wantResult ctrl.Result + wantErr bool + assertConditions []metav1.Condition + afterFunc func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) + }{ + { + name: "successful result", + result: ResultSuccess, + recErr: nil, + wantResult: ctrl.Result{RequeueAfter: testSuccessInterval}, + wantErr: false, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue()) + }, + }, + { + name: "successful result, Reconciling=True, remove Reconciling", + result: ResultSuccess, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkReconciling(obj, "NewRevision", "new revision") + }, + recErr: nil, + wantResult: ctrl.Result{RequeueAfter: testSuccessInterval}, + wantErr: false, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue()) + t.Expect(conditions.IsUnknown(obj, meta.ReconcilingCondition)).To(BeTrue()) + }, + }, + { + name: "successful result, Stalled=True, remove Stalled", + result: ResultSuccess, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkStalled(obj, "SomeReason", "some message") + }, + recErr: nil, + wantResult: ctrl.Result{RequeueAfter: testSuccessInterval}, + wantErr: false, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue()) + t.Expect(conditions.IsUnknown(obj, meta.StalledCondition)).To(BeTrue()) + }, + }, + { + name: "requeue result", + result: ResultRequeue, + recErr: nil, + wantResult: ctrl.Result{Requeue: true}, + wantErr: false, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + 
t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse()) + }, + }, + { + name: "requeue result", + result: ResultRequeue, + recErr: nil, + wantResult: ctrl.Result{Requeue: true}, + wantErr: false, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse()) + }, + }, + { + name: "stalling error", + result: ResultEmpty, + recErr: &serror.Stalling{Err: fmt.Errorf("some error"), Reason: "some reason"}, + wantResult: ctrl.Result{}, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.StalledCondition, "some reason", "some error"), + }, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeTrue()) + }, + }, + { + name: "waiting error", + result: ResultEmpty, + recErr: &serror.Waiting{Err: fmt.Errorf("some error"), Reason: "some reason"}, + wantResult: ctrl.Result{}, + wantErr: false, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse()) + }, + }, + { + name: "random error", + result: ResultEmpty, + recErr: fmt.Errorf("some error"), + wantResult: ctrl.Result{}, + wantErr: true, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse()) + }, + }, + { + name: "random error, Stalled=True, remove Stalled", + result: ResultEmpty, + recErr: fmt.Errorf("some error"), + wantResult: ctrl.Result{}, + wantErr: true, + afterFunc: func(t *WithT, obj conditions.Setter, patchOpts *patch.HelperOptions) { + t.Expect(patchOpts.IncludeStatusObservedGeneration).To(BeFalse()) + t.Expect(conditions.IsUnknown(obj, meta.StalledCondition)).To(BeTrue()) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := 
&sourcev1.GitRepository{} + obj.Name = "test-git-repo" + obj.Namespace = "default" + obj.Spec.Interval = metav1.Duration{Duration: testSuccessInterval} + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + rb := AlwaysRequeueResultBuilder{RequeueAfter: obj.Spec.Interval.Duration} + pOpts, result, err := ComputeReconcileResult(obj, tt.result, tt.recErr, rb) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(result).To(Equal(tt.wantResult)) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + opts := &patch.HelperOptions{} + for _, o := range pOpts { + o.ApplyToHelper(opts) + } + tt.afterFunc(g, obj, opts) + }) + } +} diff --git a/internal/reconcile/summarize/matchers_test.go b/internal/reconcile/summarize/matchers_test.go new file mode 100644 index 00000000..b71aa99c --- /dev/null +++ b/internal/reconcile/summarize/matchers_test.go @@ -0,0 +1,99 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package summarize + +import ( + "fmt" + + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/fluxcd/source-controller/internal/object" +) + +// HaveStatusObservedGeneration returns a custom matcher to check if a +// runtime.Object has a given status observedGeneration value. 
+func HaveStatusObservedGeneration(expected int64) types.GomegaMatcher { + return &haveStatusObservedGeneration{ + expected: expected, + } +} + +type haveStatusObservedGeneration struct { + expected int64 + actual int64 +} + +func (m *haveStatusObservedGeneration) Match(actual interface{}) (success bool, err error) { + obj, ok := actual.(runtime.Object) + if !ok { + return false, fmt.Errorf("actual should be a runtime object") + } + + og, err := object.GetStatusObservedGeneration(obj) + if err != nil && err != object.ErrObservedGenerationNotFound { + return false, err + } + m.actual = og + + return Equal(m.expected).Match(og) +} + +func (m *haveStatusObservedGeneration) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%d\nto match\n\t%d\n", m.actual, m.expected) +} + +func (m *haveStatusObservedGeneration) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%d\nto not match\n\t%d\n", m.actual, m.expected) +} + +// HaveStatusLastHandledReconcileAt returns a custom matcher to check if a +// runtime.Object has a given status lastHandledReconcileAt value. 
+func HaveStatusLastHandledReconcileAt(expected string) types.GomegaMatcher { + return &haveStatusLastHandledReconcileAt{ + expected: expected, + } +} + +type haveStatusLastHandledReconcileAt struct { + expected string + actual string +} + +func (m *haveStatusLastHandledReconcileAt) Match(actual interface{}) (success bool, err error) { + obj, ok := actual.(runtime.Object) + if !ok { + return false, fmt.Errorf("actual should be a runtime object") + } + + ra, err := object.GetStatusLastHandledReconcileAt(obj) + if err != nil && err != object.ErrLastHandledReconcileAtNotFound { + return false, err + } + m.actual = ra + + return Equal(m.expected).Match(ra) +} + +func (m *haveStatusLastHandledReconcileAt) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%s\nto match\n\t%s\n", m.actual, m.expected) +} + +func (m *haveStatusLastHandledReconcileAt) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%s\nto not match\n\t%s\n", m.actual, m.expected) +} diff --git a/internal/reconcile/summarize/processor.go b/internal/reconcile/summarize/processor.go new file mode 100644 index 00000000..54e135e4 --- /dev/null +++ b/internal/reconcile/summarize/processor.go @@ -0,0 +1,66 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package summarize + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + kuberecorder "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/fluxcd/pkg/apis/meta" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/object" + "github.com/fluxcd/source-controller/internal/reconcile" +) + +// ResultProcessor processes the results of reconciliation (the object, result +// and error). Any errors during processing need not result in the +// reconciliation failure. The errors can be recorded as logs and events. +type ResultProcessor func(context.Context, kuberecorder.EventRecorder, client.Object, reconcile.Result, error) + +// RecordContextualError is a ResultProcessor that records the contextual errors +// based on their types. +// An event is recorded for the errors that are returned to the runtime. The +// runtime handles the logging of the error. +// An event is recorded and an error is logged for errors that are known to be +// swallowed, not returned to the runtime. +func RecordContextualError(ctx context.Context, recorder kuberecorder.EventRecorder, obj client.Object, _ reconcile.Result, err error) { + switch e := err.(type) { + case *serror.Event: + recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) + case *serror.Waiting: + // Waiting errors are not returned to the runtime. Log it explicitly. + ctrl.LoggerFrom(ctx).Info("reconciliation waiting", "reason", e.Err, "duration", e.RequeueAfter) + recorder.Event(obj, corev1.EventTypeNormal, e.Reason, e.Error()) + case *serror.Stalling: + // Stalling errors are not returned to the runtime. Log it explicitly. 
+ ctrl.LoggerFrom(ctx).Error(e, "reconciliation stalled") + recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) + } +} + +// RecordReconcileReq is a ResultProcessor that checks the reconcile +// annotation value and sets it in the object status as +// status.lastHandledReconcileAt. +func RecordReconcileReq(ctx context.Context, recorder kuberecorder.EventRecorder, obj client.Object, _ reconcile.Result, _ error) { + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + object.SetStatusLastHandledReconcileAt(obj, v) + } +} diff --git a/internal/reconcile/summarize/processor_test.go b/internal/reconcile/summarize/processor_test.go new file mode 100644 index 00000000..dc6765d8 --- /dev/null +++ b/internal/reconcile/summarize/processor_test.go @@ -0,0 +1,91 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package summarize + +import ( + "context" + "testing" + + . 
"github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/fluxcd/pkg/apis/meta" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + "github.com/fluxcd/source-controller/internal/object" + "github.com/fluxcd/source-controller/internal/reconcile" +) + +func TestRecordReconcileReq(t *testing.T) { + tests := []struct { + name string + beforeFunc func(obj client.Object) + afterFunc func(t *WithT, obj client.Object) + }{ + { + name: "no reconcile req", + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("")) + }, + }, + { + name: "no reconcile req, noop on existing value", + beforeFunc: func(obj client.Object) { + object.SetStatusLastHandledReconcileAt(obj, "zzz") + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("zzz")) + }, + }, + { + name: "with reconcile req", + beforeFunc: func(obj client.Object) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("now")) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-obj", + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + RecordReconcileReq(ctx, record.NewFakeRecorder(32), obj, reconcile.ResultEmpty, nil) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + }) + } +} diff --git a/internal/reconcile/summarize/summary.go b/internal/reconcile/summarize/summary.go new file mode 100644 index 00000000..1c2f97aa --- /dev/null +++ b/internal/reconcile/summarize/summary.go @@ -0,0 +1,217 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache 
License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package summarize + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + kerrors "k8s.io/apimachinery/pkg/util/errors" + kuberecorder "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + "github.com/fluxcd/source-controller/internal/reconcile" +) + +// Conditions contains all the conditions information needed to summarize the +// target condition. +type Conditions struct { + // Target is the target condition, e.g.: Ready. + Target string + // Owned conditions are the conditions owned by the reconciler for this + // target condition. + Owned []string + // Summarize conditions are the conditions that the target condition depends + // on. + Summarize []string + // NegativePolarity conditions are the conditions in Summarize with negative + // polarity. + NegativePolarity []string +} + +// Helper is SummarizeAndPatch helper. +type Helper struct { + recorder kuberecorder.EventRecorder + patchHelper *patch.Helper +} + +// NewHelper returns an initialized Helper. +func NewHelper(recorder kuberecorder.EventRecorder, patchHelper *patch.Helper) *Helper { + return &Helper{ + recorder: recorder, + patchHelper: patchHelper, + } +} + +// HelperOptions contains options for SummarizeAndPatch. +// Summarizing and patching at the very end of a reconciliation involves +// computing the result of the reconciler. 
This requires providing the +// ReconcileResult, ReconcileError and a ResultBuilder in the context of the +// reconciliation. +// For using this to perform intermediate patching in the middle of a +// reconciliation, no ReconcileResult, ReconcileError or ResultBuilder should +// be provided. Only Conditions summary would be calculated and patched. +type HelperOptions struct { + // Conditions are conditions that need to be summarized and persisted on + // the object. + Conditions []Conditions + // Processors are a chain of ResultProcessors for processing the results. This + // can be used to analyze and modify the results. This enables injecting + // custom middlewares in the SummarizeAndPatch operation. + Processors []ResultProcessor + // IgnoreNotFound can be used to ignore any resource not found error during + // patching. + IgnoreNotFound bool + // ReconcileResult is the abstracted result of reconciliation. + ReconcileResult reconcile.Result + // ReconcileError is the reconciliation error. + ReconcileError error + // ResultBuilder defines how the reconciliation result is computed. + ResultBuilder reconcile.RuntimeResultBuilder + // PatchFieldOwner defines the field owner configuration for the Kubernetes + // patch operation. + PatchFieldOwner string +} + +// Option is configuration that modifies SummarizeAndPatch. +type Option func(*HelperOptions) + +// WithConditions sets the Conditions for which summary is calculated in +// SummarizeAndPatch. +func WithConditions(condns ...Conditions) Option { + return func(s *HelperOptions) { + s.Conditions = append(s.Conditions, condns...) + } +} + +// WithProcessors can be used to inject middlewares in the SummarizeAndPatch +// process, to be executed before the result calculation and patching. +func WithProcessors(rps ...ResultProcessor) Option { + return func(s *HelperOptions) { + s.Processors = append(s.Processors, rps...) + } +} + +// WithIgnoreNotFound skips any resource not found error during patching. 
+func WithIgnoreNotFound() Option { + return func(s *HelperOptions) { + s.IgnoreNotFound = true + } +} + +// WithResultBuilder sets the strategy for result computation in +// SummarizeAndPatch. +func WithResultBuilder(rb reconcile.RuntimeResultBuilder) Option { + return func(s *HelperOptions) { + s.ResultBuilder = rb + } +} + +// WithReconcileResult sets the value of input result used to calculate the +// results of reconciliation in SummarizeAndPatch. +func WithReconcileResult(rr reconcile.Result) Option { + return func(s *HelperOptions) { + s.ReconcileResult = rr + } +} + +// WithReconcileError sets the value of input error used to calculate the +// results of reconciliation in SummarizeAndPatch. +func WithReconcileError(re error) Option { + return func(s *HelperOptions) { + s.ReconcileError = re + } +} + +// WithPatchFieldOwner sets the FieldOwner in the patch helper. +func WithPatchFieldOwner(fieldOwner string) Option { + return func(s *HelperOptions) { + s.PatchFieldOwner = fieldOwner + } +} + +// SummarizeAndPatch summarizes and patches the result to the target object. +// When used at the very end of a reconciliation, the result builder must be +// specified using the Option WithResultBuilder(). The returned result and error +// can be returned as the return values of the reconciliation. +// When used in the middle of a reconciliation, no result builder should be set +// and the result can be ignored. +func (h *Helper) SummarizeAndPatch(ctx context.Context, obj conditions.Setter, options ...Option) (ctrl.Result, error) { + // Calculate the options. + opts := &HelperOptions{} + for _, o := range options { + o(opts) + } + // Combine the owned conditions of all the conditions for the patcher. + ownedConditions := []string{} + for _, c := range opts.Conditions { + ownedConditions = append(ownedConditions, c.Owned...) + } + // Patch the object, prioritizing the conditions owned by the controller in + // case of any conflicts. 
+ patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: ownedConditions, + }, + } + if opts.PatchFieldOwner != "" { + patchOpts = append(patchOpts, patch.WithFieldOwner(opts.PatchFieldOwner)) + } + + // Process the results of reconciliation. + for _, processor := range opts.Processors { + processor(ctx, h.recorder, obj, opts.ReconcileResult, opts.ReconcileError) + } + + var result ctrl.Result + var recErr error + if opts.ResultBuilder != nil { + // Compute the reconcile results, obtain patch options and reconcile error. + var pOpts []patch.Option + pOpts, result, recErr = reconcile.ComputeReconcileResult(obj, opts.ReconcileResult, opts.ReconcileError, opts.ResultBuilder) + patchOpts = append(patchOpts, pOpts...) + } + + // Summarize conditions. This must be performed only after computing the + // reconcile result, since the object status is adjusted based on the + // reconcile result and error. + for _, c := range opts.Conditions { + conditions.SetSummary(obj, + c.Target, + conditions.WithConditions( + c.Summarize..., + ), + conditions.WithNegativePolarityConditions( + c.NegativePolarity..., + ), + ) + } + + // Finally, patch the resource. + if err := h.patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. + if opts.IgnoreNotFound && !obj.GetDeletionTimestamp().IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } + recErr = kerrors.NewAggregate([]error{recErr, err}) + } + + return result, recErr +} diff --git a/internal/reconcile/summarize/summary_test.go b/internal/reconcile/summarize/summary_test.go new file mode 100644 index 00000000..7d48ff49 --- /dev/null +++ b/internal/reconcile/summarize/summary_test.go @@ -0,0 +1,396 @@ +/* +Copyright 2022 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package summarize + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/darkowlzz/controller-check/status" + . "github.com/onsi/gomega" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + serror "github.com/fluxcd/source-controller/internal/error" + "github.com/fluxcd/source-controller/internal/reconcile" +) + +// This tests the scenario where SummarizeAndPatch is used at the very end of a +// reconciliation. 
+func TestSummarizeAndPatch(t *testing.T) { + var testReadyConditions = Conditions{ + Target: meta.ReadyCondition, + Owned: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + Summarize: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + NegativePolarity: []string{ + sourcev1.FetchFailedCondition, + sourcev1.ArtifactOutdatedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, + }, + } + var testFooConditions = Conditions{ + Target: "Foo", + Owned: []string{ + "Foo", + "AAA", + "BBB", + }, + Summarize: []string{ + "AAA", + "BBB", + }, + NegativePolarity: []string{ + "BBB", + }, + } + + tests := []struct { + name string + generation int64 + beforeFunc func(obj conditions.Setter) + result reconcile.Result + reconcileErr error + conditions []Conditions + wantErr bool + afterFunc func(t *WithT, obj client.Object) + assertConditions []metav1.Condition + }{ + // Success/Fail indicates if a reconciliation succeeded or failed. On + // a successful reconciliation, the object generation is expected to + // match the observed generation in the object status. + // All the cases have some Ready condition set, even if a test case is + // unrelated to the conditions, because it's necessary for a valid + // status. 
+ { + name: "Success, no extra conditions", + generation: 4, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + }, + conditions: []Conditions{testReadyConditions}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusObservedGeneration(4)) + }, + }, + { + name: "Success, Ready=True", + generation: 5, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "created") + }, + conditions: []Conditions{testReadyConditions}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "created"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusObservedGeneration(5)) + }, + }, + { + name: "Success, removes reconciling for successful result", + generation: 2, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkReconciling(obj, "NewRevision", "new index version") + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "stored artifact") + }, + conditions: []Conditions{testReadyConditions}, + result: reconcile.ResultSuccess, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusObservedGeneration(2)) + }, + }, + { + name: "Success, record reconciliation request", + beforeFunc: func(obj conditions.Setter) { + annotations := map[string]string{ + meta.ReconcileRequestAnnotation: "now", + } + obj.SetAnnotations(annotations) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + }, + generation: 3, + conditions: []Conditions{testReadyConditions}, + result: reconcile.ResultSuccess, + 
wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).To(HaveStatusLastHandledReconcileAt("now")) + t.Expect(obj).To(HaveStatusObservedGeneration(3)) + }, + }, + { + name: "Fail, with multiple conditions ArtifactOutdated=True,Reconciling=True", + generation: 7, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision") + conditions.MarkReconciling(obj, "NewRevision", "new index revision") + }, + conditions: []Conditions{testReadyConditions}, + reconcileErr: fmt.Errorf("failed to create dir"), + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).ToNot(HaveStatusObservedGeneration(7)) + }, + }, + { + name: "Success, with subreconciler stalled error", + generation: 9, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct client") + }, + conditions: []Conditions{testReadyConditions}, + reconcileErr: &serror.Stalling{Err: fmt.Errorf("some error"), Reason: "some reason"}, + wantErr: false, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, sourcev1.FetchFailedCondition, "failed to construct client"), + *conditions.TrueCondition(meta.StalledCondition, "some reason", "some error"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.FetchFailedCondition, "failed to construct client"), + }, + afterFunc: func(t *WithT, obj 
client.Object) { + t.Expect(obj).To(HaveStatusObservedGeneration(9)) + }, + }, + { + name: "Fail, no error but requeue requested", + generation: 3, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "test-msg") + }, + conditions: []Conditions{testReadyConditions}, + result: reconcile.ResultRequeue, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition(meta.ReadyCondition, meta.FailedReason, "test-msg"), + }, + afterFunc: func(t *WithT, obj client.Object) { + t.Expect(obj).ToNot(HaveStatusObservedGeneration(3)) + }, + }, + { + name: "Success, multiple conditions summary", + generation: 3, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "test-msg") + conditions.MarkTrue(obj, "AAA", "ZZZ", "zzz") // Positive polarity True. + conditions.MarkTrue(obj, "BBB", "YYY", "yyy") // Negative polarity True. + }, + conditions: []Conditions{testReadyConditions, testFooConditions}, + result: reconcile.ResultSuccess, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "test-msg"), + *conditions.FalseCondition("Foo", "YYY", "yyy"), // False summary. 
+ *conditions.TrueCondition("BBB", "YYY", "yyy"), + *conditions.TrueCondition("AAA", "ZZZ", "zzz"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + scheme := runtime.NewScheme() + g.Expect(sourcev1.AddToScheme(scheme)) + + builder := fakeclient.NewClientBuilder().WithScheme(scheme) + client := builder.Build() + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + Generation: tt.generation, + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: 5 * time.Second}, + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + g.Expect(client.Create(ctx, obj)).To(Succeed()) + patchHelper, err := patch.NewHelper(obj, client) + g.Expect(err).ToNot(HaveOccurred()) + + summaryHelper := NewHelper(record.NewFakeRecorder(32), patchHelper) + summaryOpts := []Option{ + WithReconcileResult(tt.result), + WithReconcileError(tt.reconcileErr), + WithConditions(tt.conditions...), + WithIgnoreNotFound(), + WithProcessors(RecordContextualError, RecordReconcileReq), + WithResultBuilder(reconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.Spec.Interval.Duration}), + } + _, gotErr := summaryHelper.SummarizeAndPatch(ctx, obj, summaryOpts...) + g.Expect(gotErr != nil).To(Equal(tt.wantErr)) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj) + } + + // Check if the object status is valid as per kstatus. + condns := &status.Conditions{NegativePolarity: testReadyConditions.NegativePolarity} + checker := status.NewChecker(client, scheme, condns) + checker.CheckErr(ctx, obj) + }) + } +} + +// This tests the scenario where SummarizeAndPatch is used in the middle of +// reconciliation. 
+func TestSummarizeAndPatch_Intermediate(t *testing.T) { + var testStageAConditions = Conditions{ + Target: "StageA", + Owned: []string{"StageA", "A1", "A2", "A3"}, + Summarize: []string{"A1", "A2", "A3"}, + NegativePolarity: []string{"A3"}, + } + var testStageBConditions = Conditions{ + Target: "StageB", + Owned: []string{"StageB", "B1", "B2"}, + Summarize: []string{"B1", "B2"}, + NegativePolarity: []string{"B1"}, + } + + tests := []struct { + name string + conditions []Conditions + beforeFunc func(obj conditions.Setter) + assertConditions []metav1.Condition + }{ + { + name: "single Conditions, True summary", + conditions: []Conditions{testStageAConditions}, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, "A1", "ZZZ", "zzz") // Positive polarity True. + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition("StageA", "ZZZ", "zzz"), // True summary. + *conditions.TrueCondition("A1", "ZZZ", "zzz"), + }, + }, + { + name: "single Conditions, False summary", + conditions: []Conditions{testStageAConditions}, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, "A1", "ZZZ", "zzz") // Positive polarity True. + conditions.MarkTrue(obj, "A3", "OOO", "ooo") // Negative polarity True. + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition("StageA", "OOO", "ooo"), // False summary. + *conditions.TrueCondition("A3", "OOO", "ooo"), + *conditions.TrueCondition("A1", "ZZZ", "zzz"), + }, + }, + { + name: "multiple Conditions", + conditions: []Conditions{testStageAConditions, testStageBConditions}, + beforeFunc: func(obj conditions.Setter) { + conditions.MarkTrue(obj, "A3", "ZZZ", "zzz") // Negative polarity True. + conditions.MarkTrue(obj, "B2", "RRR", "rrr") // Positive polarity True. + }, + assertConditions: []metav1.Condition{ + *conditions.FalseCondition("StageA", "ZZZ", "zzz"), // False summary. 
+ *conditions.TrueCondition("A3", "ZZZ", "zzz"), + *conditions.TrueCondition("StageB", "RRR", "rrr"), // True summary. + *conditions.TrueCondition("B2", "RRR", "rrr"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + scheme := runtime.NewScheme() + g.Expect(sourcev1.AddToScheme(scheme)) + + builder := fakeclient.NewClientBuilder().WithScheme(scheme) + kclient := builder.Build() + + obj := &sourcev1.GitRepository{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + Spec: sourcev1.GitRepositorySpec{ + Interval: metav1.Duration{Duration: 5 * time.Second}, + }, + Status: sourcev1.GitRepositoryStatus{ + Conditions: []metav1.Condition{ + *conditions.FalseCondition("StageA", "QQQ", "qqq"), + }, + }, + } + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + ctx := context.TODO() + g.Expect(kclient.Create(ctx, obj)).To(Succeed()) + patchHelper, err := patch.NewHelper(obj, kclient) + g.Expect(err).ToNot(HaveOccurred()) + + summaryHelper := NewHelper(record.NewFakeRecorder(32), patchHelper) + summaryOpts := []Option{ + WithConditions(tt.conditions...), + } + _, err = summaryHelper.SummarizeAndPatch(ctx, obj, summaryOpts...) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} diff --git a/internal/util/temp.go b/internal/util/temp.go new file mode 100644 index 00000000..054b1280 --- /dev/null +++ b/internal/util/temp.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "os" + "path/filepath" + "strings" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// TempDirForObj creates a new temporary directory in the directory dir +// in the format of 'Kind-Namespace-Name-*', and returns the +// pathname of the new directory. +func TempDirForObj(dir string, obj client.Object) (string, error) { + return os.MkdirTemp(dir, pattern(obj)) +} + +// TempPathForObj creates a temporary file path in the format of +// '/Kind-Namespace-Name-'. +// If the given dir is empty, os.TempDir is used as a default. +func TempPathForObj(dir, suffix string, obj client.Object) string { + if dir == "" { + dir = os.TempDir() + } + randBytes := make([]byte, 16) + rand.Read(randBytes) + return filepath.Join(dir, pattern(obj)+hex.EncodeToString(randBytes)+suffix) +} + +func pattern(obj client.Object) (p string) { + kind := strings.ToLower(obj.GetObjectKind().GroupVersionKind().Kind) + return fmt.Sprintf("%s-%s-%s-", kind, obj.GetNamespace(), obj.GetName()) +} diff --git a/internal/util/temp_test.go b/internal/util/temp_test.go new file mode 100644 index 00000000..2f98079c --- /dev/null +++ b/internal/util/temp_test.go @@ -0,0 +1,86 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "os" + "path/filepath" + "testing" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestTempDirForObj(t *testing.T) { + g := NewWithT(t) + + got, err := TempDirForObj("", mockObj()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got).To(BeADirectory()) + defer os.RemoveAll(got) + + got2, err := TempDirForObj(got, mockObj()) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(got2).To(BeADirectory()) + defer os.RemoveAll(got2) + g.Expect(got2).To(ContainSubstring(got)) +} + +func TestTempPathForObj(t *testing.T) { + tests := []struct { + name string + dir string + suffix string + want string + }{ + { + name: "default", + want: filepath.Join(os.TempDir(), "secret-default-foo-"), + }, + { + name: "with directory", + dir: "/foo", + want: "/foo/secret-default-foo-", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + got := TempPathForObj(tt.dir, tt.suffix, mockObj()) + g.Expect(got[:len(got)-32]).To(Equal(tt.want)) + }) + } +} + +func Test_pattern(t *testing.T) { + g := NewWithT(t) + g.Expect(pattern(mockObj())).To(Equal("secret-default-foo-")) +} + +func mockObj() client.Object { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + } +} diff --git a/main.go b/main.go index 67f00a92..19e6c35e 100644 --- a/main.go +++ b/main.go @@ -33,17 +33,16 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" - crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" "github.com/fluxcd/pkg/runtime/client" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" "github.com/fluxcd/pkg/runtime/leaderelection" "github.com/fluxcd/pkg/runtime/logger" - "github.com/fluxcd/pkg/runtime/metrics" "github.com/fluxcd/pkg/runtime/pprof" 
"github.com/fluxcd/pkg/runtime/probes" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/controllers" "github.com/fluxcd/source-controller/internal/helm" // +kubebuilder:scaffold:imports @@ -114,6 +113,7 @@ func main() { clientOptions.BindFlags(flag.CommandLine) logOptions.BindFlags(flag.CommandLine) leaderElectionOptions.BindFlags(flag.CommandLine) + flag.Parse() ctrl.SetLogger(logger.NewLogger(logOptions)) @@ -123,18 +123,6 @@ func main() { helm.MaxChartSize = helmChartLimit helm.MaxChartFileSize = helmChartFileLimit - var eventRecorder *events.Recorder - if eventsAddr != "" { - var err error - if eventRecorder, err = events.NewRecorder(eventsAddr, controllerName); err != nil { - setupLog.Error(err, "unable to create event recorder") - os.Exit(1) - } - } - - metricsRecorder := metrics.NewRecorder() - crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) - watchNamespace := "" if !watchAllNamespaces { watchNamespace = os.Getenv("RUNTIME_NAMESPACE") @@ -163,18 +151,25 @@ func main() { probes.SetupChecks(mgr, setupLog) pprof.SetupHandlers(mgr, setupLog) + var eventRecorder *events.Recorder + if eventRecorder, err = events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName); err != nil { + setupLog.Error(err, "unable to create event recorder") + os.Exit(1) + } + + metricsH := helper.MustMakeMetrics(mgr) + if storageAdvAddr == "" { storageAdvAddr = determineAdvStorageAddr(storageAddr, setupLog) } storage := mustInitStorage(storagePath, storageAdvAddr, setupLog) if err = (&controllers.GitRepositoryReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metricsH, + Storage: storage, + ControllerName: controllerName, 
}).SetupWithManagerAndOptions(mgr, controllers.GitRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, DependencyRequeueInterval: requeueDependency, @@ -183,13 +178,12 @@ func main() { os.Exit(1) } if err = (&controllers.HelmRepositoryReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - Getters: getters, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metricsH, + Storage: storage, + Getters: getters, + ControllerName: controllerName, }).SetupWithManagerAndOptions(mgr, controllers.HelmRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { @@ -197,13 +191,12 @@ func main() { os.Exit(1) } if err = (&controllers.HelmChartReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - Getters: getters, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + Client: mgr.GetClient(), + Storage: storage, + Getters: getters, + EventRecorder: eventRecorder, + Metrics: metricsH, + ControllerName: controllerName, }).SetupWithManagerAndOptions(mgr, controllers.HelmChartReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { @@ -211,12 +204,11 @@ func main() { os.Exit(1) } if err = (&controllers.BucketReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metricsH, + Storage: storage, + ControllerName: controllerName, }).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { diff --git a/pkg/gcp/gcp.go b/pkg/gcp/gcp.go index 
9127fcde..f98e498c 100644 --- a/pkg/gcp/gcp.go +++ b/pkg/gcp/gcp.go @@ -73,7 +73,8 @@ func ValidateSecret(secret map[string][]byte, name string) error { func (c *GCPClient) BucketExists(ctx context.Context, bucketName string) (bool, error) { _, err := c.Client.Bucket(bucketName).Attrs(ctx) if err == gcpstorage.ErrBucketNotExist { - return false, err + // Not returning error to be compatible with minio's API. + return false, nil } if err != nil { return false, err diff --git a/pkg/gcp/gcp_test.go b/pkg/gcp/gcp_test.go index 7f431a44..6c27accf 100644 --- a/pkg/gcp/gcp_test.go +++ b/pkg/gcp/gcp_test.go @@ -130,7 +130,7 @@ func TestBucketNotExists(t *testing.T) { Client: client, } exists, err := gcpClient.BucketExists(context.Background(), bucket) - assert.Error(t, err, gcpstorage.ErrBucketNotExist.Error()) + assert.NilError(t, err) assert.Assert(t, !exists) } diff --git a/pkg/git/options.go b/pkg/git/options.go index 64458f5e..9b186b39 100644 --- a/pkg/git/options.go +++ b/pkg/git/options.go @@ -129,3 +129,24 @@ func AuthOptionsFromSecret(URL string, secret *v1.Secret) (*AuthOptions, error) return opts, nil } + +// AuthOptionsWithoutSecret constructs a minimal AuthOptions object from the +// given URL and then validates the result. It returns the AuthOptions, or an +// error. 
+func AuthOptionsWithoutSecret(URL string) (*AuthOptions, error) { + u, err := url.Parse(URL) + if err != nil { + return nil, fmt.Errorf("failed to parse URL to determine auth strategy: %w", err) + } + + opts := &AuthOptions{ + Transport: TransportType(u.Scheme), + Host: u.Host, + } + + if err = opts.Validate(); err != nil { + return nil, err + } + + return opts, nil +} diff --git a/tests/fuzz/gitrepository_fuzzer.go b/tests/fuzz/gitrepository_fuzzer.go index 01c4cc94..a81ecdc4 100644 --- a/tests/fuzz/gitrepository_fuzzer.go +++ b/tests/fuzz/gitrepository_fuzzer.go @@ -60,7 +60,7 @@ import ( "github.com/fluxcd/pkg/gittestserver" "github.com/fluxcd/pkg/runtime/testenv" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/controllers" ) @@ -149,7 +149,6 @@ func ensureDependencies() error { startEnvServer(func(m manager.Manager) { utilruntime.Must((&controllers.GitRepositoryReconciler{ Client: m.GetClient(), - Scheme: scheme.Scheme, Storage: storage, }).SetupWithManager(m)) }) diff --git a/tests/fuzz/go.mod b/tests/fuzz/go.mod index 4a1e2499..d88f3ac5 100644 --- a/tests/fuzz/go.mod +++ b/tests/fuzz/go.mod @@ -1,3 +1,7 @@ module github.com/fluxcd/source-controller/tests/fuzz go 1.17 + +replace github.com/fluxcd/kustomize-controller/api => ../../api + +replace github.com/fluxcd/kustomize-controller => ../../