Merge pull request #1056 from fluxcd/gitrepository-v1

GA: Promote GitRepository API to `source.toolkit.fluxcd.io/v1`
Hidde Beydals 2023-03-29 11:38:40 +02:00 committed by GitHub
commit 51dea22347
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
66 changed files with 4115 additions and 1314 deletions


@ -117,7 +117,8 @@ manifests: controller-gen ## Generate manifests, e.g. CRD, RBAC, etc.
cd api; $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role paths="./..." output:crd:artifacts:config="../config/crd/bases"
api-docs: gen-crd-api-reference-docs ## Generate API reference documentation
$(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta2 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/source.md
$(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta2 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/v1beta2/source.md
$(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/v1/source.md
tidy: ## Run go mod tidy
cd api; rm -f go.sum; go mod tidy -compat=1.20


@ -1,6 +1,9 @@
domain: toolkit.fluxcd.io
repo: github.com/fluxcd/source-controller
resources:
- group: source
kind: GitRepository
version: v1
- group: source
kind: GitRepository
version: v1beta2


@ -4,8 +4,8 @@ go 1.18
require (
github.com/fluxcd/pkg/apis/acl v0.1.0
github.com/fluxcd/pkg/apis/meta v0.19.1
k8s.io/apimachinery v0.26.2
github.com/fluxcd/pkg/apis/meta v1.0.0
k8s.io/apimachinery v0.26.3
sigs.k8s.io/controller-runtime v0.14.5
)


@ -3,8 +3,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fluxcd/pkg/apis/acl v0.1.0 h1:EoAl377hDQYL3WqanWCdifauXqXbMyFuK82NnX6pH4Q=
github.com/fluxcd/pkg/apis/acl v0.1.0/go.mod h1:zfEZzz169Oap034EsDhmCAGgnWlcWmIObZjYMusoXS8=
github.com/fluxcd/pkg/apis/meta v0.19.1 h1:fCI5CnTXpAqr67UlaI9q0H+OztMKB5kDTr6xV6vlAo0=
github.com/fluxcd/pkg/apis/meta v0.19.1/go.mod h1:ZPPMYrPnWwPQYNEGM/Uc0N4SurUPS3xNI3IIpCQEfuM=
github.com/fluxcd/pkg/apis/meta v1.0.0 h1:i9IGHd/VNEZELX7mepkiYFbJxs2J5znaB4cN9z2nPm8=
github.com/fluxcd/pkg/apis/meta v1.0.0/go.mod h1:04ZdpZYm1x+aL93K4daNHW1UX6E8K7Gyf5za9OhrE+U=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@ -74,8 +74,8 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ=
k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ=
k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k=
k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=

api/v1/artifact_types.go Normal file

@ -0,0 +1,93 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"path"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Artifact represents the output of a Source reconciliation.
type Artifact struct {
// Path is the relative file path of the Artifact. It can be used to locate
// the file in the root of the Artifact storage on the local file system of
// the controller managing the Source.
// +required
Path string `json:"path"`
// URL is the HTTP address of the Artifact as exposed by the controller
// managing the Source. It can be used to retrieve the Artifact for
// consumption, e.g. by another controller applying the Artifact contents.
// +required
URL string `json:"url"`
// Revision is a human-readable identifier traceable in the origin source
// system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
// +required
Revision string `json:"revision"`
// Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
// +optional
// +kubebuilder:validation:Pattern="^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$"
Digest string `json:"digest,omitempty"`
// LastUpdateTime is the timestamp corresponding to the last update of the
// Artifact.
// +required
LastUpdateTime metav1.Time `json:"lastUpdateTime"`
// Size is the number of bytes in the file.
// +optional
Size *int64 `json:"size,omitempty"`
// Metadata holds upstream information such as OCI annotations.
// +optional
Metadata map[string]string `json:"metadata,omitempty"`
}
// HasRevision returns if the given revision matches the current Revision of
// the Artifact.
func (in *Artifact) HasRevision(revision string) bool {
if in == nil {
return false
}
return in.Revision == revision
}
// HasDigest returns if the given digest matches the current Digest of the
// Artifact.
func (in *Artifact) HasDigest(digest string) bool {
if in == nil {
return false
}
return in.Digest == digest
}
// ArtifactDir returns the artifact dir path in the form of
// '<kind>/<namespace>/<name>'.
func ArtifactDir(kind, namespace, name string) string {
kind = strings.ToLower(kind)
return path.Join(kind, namespace, name)
}
// ArtifactPath returns the artifact path in the form of
// '<kind>/<namespace>/<name>/<filename>'.
func ArtifactPath(kind, namespace, name, filename string) string {
return path.Join(ArtifactDir(kind, namespace, name), filename)
}

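To illustrate the helpers above, a minimal sketch of how a consumer might use them; the namespace, name and file name values are made up for illustration:

package main

import (
    "fmt"

    sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

func main() {
    // Storage layout helpers: '<kind>/<namespace>/<name>' and
    // '<kind>/<namespace>/<name>/<filename>'.
    dir := sourcev1.ArtifactDir("GitRepository", "flux-system", "podinfo")
    fmt.Println(dir) // gitrepository/flux-system/podinfo

    p := sourcev1.ArtifactPath("GitRepository", "flux-system", "podinfo", "latest.tar.gz")
    fmt.Println(p) // gitrepository/flux-system/podinfo/latest.tar.gz

    // HasRevision and HasDigest are nil-safe comparisons.
    var a *sourcev1.Artifact
    fmt.Println(a.HasRevision("main@sha1:abc")) // false
}
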
api/v1/condition_types.go Normal file

@ -0,0 +1,107 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
const SourceFinalizer = "finalizers.fluxcd.io"
const (
// ArtifactInStorageCondition indicates the availability of the Artifact in
// the storage.
// If True, the Artifact is stored successfully.
// This Condition is only present on the resource if the Artifact is
// successfully stored.
ArtifactInStorageCondition string = "ArtifactInStorage"
// ArtifactOutdatedCondition indicates the current Artifact of the Source
// is outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
ArtifactOutdatedCondition string = "ArtifactOutdated"
// SourceVerifiedCondition indicates the integrity verification of the
// Source.
// If True, the integrity check succeeded. If False, it failed.
// This Condition is only present on the resource if the integrity check
// is enabled.
SourceVerifiedCondition string = "SourceVerified"
// FetchFailedCondition indicates a transient or persistent fetch failure
// of an upstream Source.
// If True, observations on the upstream Source revision may be impossible,
// and the Artifact available for the Source may be outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
FetchFailedCondition string = "FetchFailed"
// BuildFailedCondition indicates a transient or persistent build failure
// of a Source's Artifact.
// If True, the Source can be in an ArtifactOutdatedCondition.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
BuildFailedCondition string = "BuildFailed"
// StorageOperationFailedCondition indicates a transient or persistent
// failure related to storage. If True, the reconciliation failed while
// performing some filesystem operation.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
StorageOperationFailedCondition string = "StorageOperationFailed"
)
// Reasons are provided as utility, and not part of the declarative API.
const (
// URLInvalidReason signals that a given Source has an invalid URL.
URLInvalidReason string = "URLInvalid"
// AuthenticationFailedReason signals that a Secret does not have the
// required fields, or the provided credentials do not match.
AuthenticationFailedReason string = "AuthenticationFailed"
// VerificationError signals that the Source's verification
// check failed.
VerificationError string = "VerificationError"
// DirCreationFailedReason signals a failure caused by a directory creation
// operation.
DirCreationFailedReason string = "DirectoryCreationFailed"
// StatOperationFailedReason signals a failure caused by a stat operation on
// a path.
StatOperationFailedReason string = "StatOperationFailed"
// ReadOperationFailedReason signals a failure caused by a read operation.
ReadOperationFailedReason string = "ReadOperationFailed"
// AcquireLockFailedReason signals a failure in acquiring lock.
AcquireLockFailedReason string = "AcquireLockFailed"
// InvalidPathReason signals a failure caused by an invalid path.
InvalidPathReason string = "InvalidPath"
// ArchiveOperationFailedReason signals a failure in archive operation.
ArchiveOperationFailedReason string = "ArchiveOperationFailed"
// SymlinkUpdateFailedReason signals a failure in updating a symlink.
SymlinkUpdateFailedReason string = "SymlinkUpdateFailed"
// ArtifactUpToDateReason signals that an existing Artifact is up-to-date
// with the Source.
ArtifactUpToDateReason string = "ArtifactUpToDate"
// CacheOperationFailedReason signals a failure in cache operation.
CacheOperationFailedReason string = "CacheOperationFailed"
)

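A minimal sketch of setting one of these condition types on a status slice, using the plain apimachinery condition helpers rather than the runtime helpers the controller itself relies on; the message text is illustrative:

package main

import (
    "fmt"

    apimeta "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

func main() {
    var conditions []metav1.Condition

    // Mark the Artifact as stored, using the condition type and reason
    // constants defined above.
    apimeta.SetStatusCondition(&conditions, metav1.Condition{
        Type:    sourcev1.ArtifactInStorageCondition,
        Status:  metav1.ConditionTrue,
        Reason:  sourcev1.ArtifactUpToDateReason,
        Message: "stored artifact for revision 'main'",
    })

    fmt.Println(apimeta.IsStatusConditionTrue(conditions, sourcev1.ArtifactInStorageCondition)) // true
}
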
api/v1/doc.go Normal file

@ -0,0 +1,20 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1 contains API Schema definitions for the source v1 API group
// +kubebuilder:object:generate=true
// +groupName=source.toolkit.fluxcd.io
package v1


@ -0,0 +1,279 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
)
const (
// GitRepositoryKind is the string representation of a GitRepository.
GitRepositoryKind = "GitRepository"
)
const (
// IncludeUnavailableCondition indicates one of the includes is not
// available. For example, because it does not exist, or does not have an
// Artifact.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
IncludeUnavailableCondition string = "IncludeUnavailable"
)
// GitRepositorySpec specifies the required configuration to produce an
// Artifact for a Git repository.
type GitRepositorySpec struct {
// URL specifies the Git repository URL, it can be an HTTP/S or SSH address.
// +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$"
// +required
URL string `json:"url"`
// SecretRef specifies the Secret containing authentication credentials for
// the GitRepository.
// For HTTPS repositories the Secret must contain 'username' and 'password'
// fields for basic auth or 'bearerToken' field for token auth.
// For SSH repositories the Secret must contain 'identity'
// and 'known_hosts' fields.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// Interval at which to check the GitRepository for updates.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// Timeout for Git operations like cloning, defaults to 60s.
// +kubebuilder:default="60s"
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Reference specifies the Git reference to resolve and monitor for
// changes, defaults to the 'master' branch.
// +optional
Reference *GitRepositoryRef `json:"ref,omitempty"`
// Verification specifies the configuration to verify the Git commit
// signature(s).
// +optional
Verification *GitRepositoryVerification `json:"verify,omitempty"`
// Ignore overrides the set of excluded patterns in the .sourceignore format
// (which is the same as .gitignore). If not provided, a default will be used,
// consult the documentation for your version to find out what those are.
// +optional
Ignore *string `json:"ignore,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// GitRepository.
// +optional
Suspend bool `json:"suspend,omitempty"`
// RecurseSubmodules enables the initialization of all submodules within
// the GitRepository as cloned from the URL, using their default settings.
// +optional
RecurseSubmodules bool `json:"recurseSubmodules,omitempty"`
// Include specifies a list of GitRepository resources which Artifacts
// should be included in the Artifact produced for this GitRepository.
// +optional
Include []GitRepositoryInclude `json:"include,omitempty"`
}
// GitRepositoryInclude specifies a local reference to a GitRepository which
// Artifact (sub-)contents must be included, and where they should be placed.
type GitRepositoryInclude struct {
// GitRepositoryRef specifies the GitRepository which Artifact contents
// must be included.
// +required
GitRepositoryRef meta.LocalObjectReference `json:"repository"`
// FromPath specifies the path to copy contents from, defaults to the root
// of the Artifact.
// +optional
FromPath string `json:"fromPath,omitempty"`
// ToPath specifies the path to copy contents to, defaults to the name of
// the GitRepositoryRef.
// +optional
ToPath string `json:"toPath,omitempty"`
}
// GetFromPath returns the specified FromPath.
func (in *GitRepositoryInclude) GetFromPath() string {
return in.FromPath
}
// GetToPath returns the specified ToPath, falling back to the name of the
// GitRepositoryRef.
func (in *GitRepositoryInclude) GetToPath() string {
if in.ToPath == "" {
return in.GitRepositoryRef.Name
}
return in.ToPath
}
// GitRepositoryRef specifies the Git reference to resolve and checkout.
type GitRepositoryRef struct {
// Branch to check out, defaults to 'master' if no other field is defined.
// +optional
Branch string `json:"branch,omitempty"`
// Tag to check out, takes precedence over Branch.
// +optional
Tag string `json:"tag,omitempty"`
// SemVer tag expression to check out, takes precedence over Tag.
// +optional
SemVer string `json:"semver,omitempty"`
// Name of the reference to check out; takes precedence over Branch, Tag and SemVer.
//
// It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description
// Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head"
// +optional
Name string `json:"name,omitempty"`
// Commit SHA to check out, takes precedence over all reference fields.
//
// This can be combined with Branch to shallow clone the branch, in which
// the commit is expected to exist.
// +optional
Commit string `json:"commit,omitempty"`
}
// GitRepositoryVerification specifies the Git commit signature verification
// strategy.
type GitRepositoryVerification struct {
// Mode specifies what Git object should be verified, currently ('head').
// +kubebuilder:validation:Enum=head
Mode string `json:"mode"`
// SecretRef specifies the Secret containing the public keys of trusted Git
// authors.
// +required
SecretRef meta.LocalObjectReference `json:"secretRef"`
}
// GitRepositoryStatus records the observed state of a Git repository.
type GitRepositoryStatus struct {
// ObservedGeneration is the last observed generation of the GitRepository
// object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the GitRepository.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// Artifact represents the last successful GitRepository reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
// IncludedArtifacts contains a list of the last successfully included
// Artifacts as instructed by GitRepositorySpec.Include.
// +optional
IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"`
// ObservedIgnore is the observed exclusion patterns used for constructing
// the source artifact.
// +optional
ObservedIgnore *string `json:"observedIgnore,omitempty"`
// ObservedRecurseSubmodules is the observed resource submodules
// configuration used to produce the current Artifact.
// +optional
ObservedRecurseSubmodules bool `json:"observedRecurseSubmodules,omitempty"`
// ObservedInclude is the observed list of GitRepository resources used to
// produce the current Artifact.
// +optional
ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// GitOperationSucceedReason signals that a Git operation (e.g. clone,
// checkout, etc.) succeeded.
GitOperationSucceedReason string = "GitOperationSucceeded"
// GitOperationFailedReason signals that a Git operation (e.g. clone,
// checkout, etc.) failed.
GitOperationFailedReason string = "GitOperationFailed"
)
// GetConditions returns the status conditions of the object.
func (in GitRepository) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *GitRepository) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the GitRepository must be
// reconciled again.
func (in GitRepository) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest Artifact from the GitRepository if present in
// the status sub-resource.
func (in *GitRepository) GetArtifact() *Artifact {
return in.Status.Artifact
}
// +genclient
// +genclient:Namespaced
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=gitrepo
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// GitRepository is the Schema for the gitrepositories API.
type GitRepository struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec GitRepositorySpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status GitRepositoryStatus `json:"status,omitempty"`
}
// GitRepositoryList contains a list of GitRepository objects.
// +kubebuilder:object:root=true
type GitRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []GitRepository `json:"items"`
}
func init() {
SchemeBuilder.Register(&GitRepository{}, &GitRepositoryList{})
}

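A minimal sketch of constructing a v1 GitRepository object in Go and reading its interval-driven requeue behaviour; the repository URL, namespace and name are illustrative:

package main

import (
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

func main() {
    repo := sourcev1.GitRepository{
        ObjectMeta: metav1.ObjectMeta{Name: "podinfo", Namespace: "flux-system"},
        Spec: sourcev1.GitRepositorySpec{
            URL:      "https://github.com/stefanprodan/podinfo",
            Interval: metav1.Duration{Duration: 5 * time.Minute},
            Reference: &sourcev1.GitRepositoryRef{
                Branch: "master",
            },
        },
    }

    // GetRequeueAfter is driven by .spec.interval.
    fmt.Println(repo.GetRequeueAfter()) // 5m0s

    // GetArtifact is nil until the controller has produced an Artifact.
    fmt.Println(repo.GetArtifact() == nil) // true
}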

@ -0,0 +1,33 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects.
GroupVersion = schema.GroupVersion{Group: "source.toolkit.fluxcd.io", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)

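A sketch of how an API consumer might register the new group version with a controller-runtime client and list GitRepository objects; it assumes a reachable cluster and a 'flux-system' namespace, neither of which is part of this change:

package main

import (
    "context"
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"

    sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

func main() {
    // Register the source.toolkit.fluxcd.io/v1 types with a scheme.
    scheme := runtime.NewScheme()
    if err := sourcev1.AddToScheme(scheme); err != nil {
        panic(err)
    }

    // Build a client against the current kubeconfig and list GitRepositories.
    c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
    if err != nil {
        panic(err)
    }

    var list sourcev1.GitRepositoryList
    if err := c.List(context.Background(), &list, client.InNamespace("flux-system")); err != nil {
        panic(err)
    }
    for _, item := range list.Items {
        fmt.Println(item.Name)
    }
}
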
api/v1/source.go Normal file

@ -0,0 +1,45 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
"k8s.io/apimachinery/pkg/runtime"
)
const (
// SourceIndexKey is the key used for indexing objects based on their
// referenced Source.
SourceIndexKey string = ".metadata.source"
)
// Source interface must be supported by all API types.
// Source is the interface that provides generic access to the Artifact and
// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
// API group.
//
// +k8s:deepcopy-gen=false
type Source interface {
runtime.Object
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
GetRequeueAfter() time.Duration
// GetArtifact returns the latest artifact from the source if present in
// the status sub-resource.
GetArtifact() *Artifact
}

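A minimal sketch of code written against the Source interface, which works for any kind in the group that implements it; the function name is illustrative:

package main

import (
    "fmt"

    sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

// describeSource works against any API type that implements the v1 Source
// interface, regardless of its concrete kind.
func describeSource(s sourcev1.Source) {
    if a := s.GetArtifact(); a != nil {
        fmt.Printf("artifact revision %s, requeue after %s\n", a.Revision, s.GetRequeueAfter())
        return
    }
    fmt.Printf("no artifact yet, requeue after %s\n", s.GetRequeueAfter())
}

func main() {
    describeSource(&sourcev1.GitRepository{})
}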

@ -0,0 +1,257 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
import (
"github.com/fluxcd/pkg/apis/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Artifact) DeepCopyInto(out *Artifact) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
if in.Size != nil {
in, out := &in.Size, &out.Size
*out = new(int64)
**out = **in
}
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact.
func (in *Artifact) DeepCopy() *Artifact {
if in == nil {
return nil
}
out := new(Artifact)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepository) DeepCopyInto(out *GitRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepository.
func (in *GitRepository) DeepCopy() *GitRepository {
if in == nil {
return nil
}
out := new(GitRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GitRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryInclude) DeepCopyInto(out *GitRepositoryInclude) {
*out = *in
out.GitRepositoryRef = in.GitRepositoryRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInclude.
func (in *GitRepositoryInclude) DeepCopy() *GitRepositoryInclude {
if in == nil {
return nil
}
out := new(GitRepositoryInclude)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryList) DeepCopyInto(out *GitRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]GitRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryList.
func (in *GitRepositoryList) DeepCopy() *GitRepositoryList {
if in == nil {
return nil
}
out := new(GitRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GitRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryRef) DeepCopyInto(out *GitRepositoryRef) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryRef.
func (in *GitRepositoryRef) DeepCopy() *GitRepositoryRef {
if in == nil {
return nil
}
out := new(GitRepositoryRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
if in.Reference != nil {
in, out := &in.Reference, &out.Reference
*out = new(GitRepositoryRef)
**out = **in
}
if in.Verification != nil {
in, out := &in.Verification, &out.Verification
*out = new(GitRepositoryVerification)
**out = **in
}
if in.Ignore != nil {
in, out := &in.Ignore, &out.Ignore
*out = new(string)
**out = **in
}
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = make([]GitRepositoryInclude, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec.
func (in *GitRepositorySpec) DeepCopy() *GitRepositorySpec {
if in == nil {
return nil
}
out := new(GitRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
if in.IncludedArtifacts != nil {
in, out := &in.IncludedArtifacts, &out.IncludedArtifacts
*out = make([]*Artifact, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
}
}
if in.ObservedIgnore != nil {
in, out := &in.ObservedIgnore, &out.ObservedIgnore
*out = new(string)
**out = **in
}
if in.ObservedInclude != nil {
in, out := &in.ObservedInclude, &out.ObservedInclude
*out = make([]GitRepositoryInclude, len(*in))
copy(*out, *in)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryStatus.
func (in *GitRepositoryStatus) DeepCopy() *GitRepositoryStatus {
if in == nil {
return nil
}
out := new(GitRepositoryStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryVerification) DeepCopyInto(out *GitRepositoryVerification) {
*out = *in
out.SecretRef = in.SecretRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryVerification.
func (in *GitRepositoryVerification) DeepCopy() *GitRepositoryVerification {
if in == nil {
return nil
}
out := new(GitRepositoryVerification)
in.DeepCopyInto(out)
return out
}


@ -269,6 +269,7 @@ func (in *GitRepository) GetInterval() metav1.Duration {
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=gitrepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta1 GitRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""


@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2022 The Flux authors
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@ -25,6 +25,9 @@ import (
)
// Artifact represents the output of a Source reconciliation.
//
// Deprecated: use Artifact from api/v1 instead. This type will be removed in
// a future release.
type Artifact struct {
// Path is the relative file path of the Artifact. It can be used to locate
// the file in the root of the Artifact storage on the local file system of


@ -23,6 +23,7 @@ import (
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
)
const (
@ -126,7 +127,7 @@ type BucketStatus struct {
// Artifact represents the last successful Bucket reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
Artifact *apiv1.Artifact `json:"artifact,omitempty"`
// ObservedIgnore is the observed exclusion patterns used for constructing
// the source artifact.
@ -162,7 +163,7 @@ func (in Bucket) GetRequeueAfter() time.Duration {
}
// GetArtifact returns the latest artifact from the source if present in the status sub-resource.
func (in *Bucket) GetArtifact() *Artifact {
func (in *Bucket) GetArtifact() *apiv1.Artifact {
return in.Status.Artifact
}


@ -23,6 +23,8 @@ import (
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
)
const (
@ -190,7 +192,7 @@ type GitRepositoryVerification struct {
// SecretRef specifies the Secret containing the public keys of trusted Git
// authors.
SecretRef meta.LocalObjectReference `json:"secretRef,omitempty"`
SecretRef meta.LocalObjectReference `json:"secretRef"`
}
// GitRepositoryStatus records the observed state of a Git repository.
@ -212,12 +214,12 @@ type GitRepositoryStatus struct {
// Artifact represents the last successful GitRepository reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
Artifact *apiv1.Artifact `json:"artifact,omitempty"`
// IncludedArtifacts contains a list of the last successfully included
// Artifacts as instructed by GitRepositorySpec.Include.
// +optional
IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"`
IncludedArtifacts []*apiv1.Artifact `json:"includedArtifacts,omitempty"`
// ContentConfigChecksum is a checksum of all the configurations related to
// the content of the source artifact:
@ -280,16 +282,16 @@ func (in GitRepository) GetRequeueAfter() time.Duration {
// GetArtifact returns the latest Artifact from the GitRepository if present in
// the status sub-resource.
func (in *GitRepository) GetArtifact() *Artifact {
func (in *GitRepository) GetArtifact() *apiv1.Artifact {
return in.Status.Artifact
}
// +genclient
// +genclient:Namespaced
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=gitrepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta2 GitRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""


@ -23,6 +23,7 @@ import (
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
)
// HelmChartKind is the string representation of a HelmChart.
@ -151,7 +152,7 @@ type HelmChartStatus struct {
// Artifact represents the output of the last successful reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
Artifact *apiv1.Artifact `json:"artifact,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
@ -184,7 +185,7 @@ func (in HelmChart) GetRequeueAfter() time.Duration {
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
func (in *HelmChart) GetArtifact() *Artifact {
func (in *HelmChart) GetArtifact() *apiv1.Artifact {
return in.Status.Artifact
}


@ -23,6 +23,7 @@ import (
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
)
const (
@ -124,7 +125,7 @@ type HelmRepositoryStatus struct {
// Artifact represents the last successful HelmRepository reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
Artifact *apiv1.Artifact `json:"artifact,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
@ -153,7 +154,7 @@ func (in HelmRepository) GetRequeueAfter() time.Duration {
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
func (in *HelmRepository) GetArtifact() *Artifact {
func (in *HelmRepository) GetArtifact() *apiv1.Artifact {
return in.Status.Artifact
}


@ -22,6 +22,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
)
const (
@ -201,7 +202,7 @@ type OCIRepositoryStatus struct {
// Artifact represents the output of the last successful OCI Repository sync.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
Artifact *apiv1.Artifact `json:"artifact,omitempty"`
// ContentConfigChecksum is a checksum of all the configurations related to
// the content of the source artifact:
@ -256,7 +257,7 @@ func (in OCIRepository) GetRequeueAfter() time.Duration {
// GetArtifact returns the latest Artifact from the OCIRepository if present in
// the status sub-resource.
func (in *OCIRepository) GetArtifact() *Artifact {
func (in *OCIRepository) GetArtifact() *apiv1.Artifact {
return in.Status.Artifact
}


@ -33,6 +33,9 @@ const (
// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
// API group.
//
// Deprecated: use the Source interface from api/v1 instead. This type will be
// removed in a future release.
//
// +k8s:deepcopy-gen=false
type Source interface {
runtime.Object


@ -2,7 +2,7 @@
// +build !ignore_autogenerated
/*
Copyright 2022 The Flux authors
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -24,6 +24,7 @@ package v1beta2
import (
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
@ -163,7 +164,7 @@ func (in *BucketStatus) DeepCopyInto(out *BucketStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
if in.ObservedIgnore != nil {
@ -337,16 +338,16 @@ func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
if in.IncludedArtifacts != nil {
in, out := &in.IncludedArtifacts, &out.IncludedArtifacts
*out = make([]*Artifact, len(*in))
*out = make([]*apiv1.Artifact, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(Artifact)
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
}
@ -493,7 +494,7 @@ func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
@ -611,7 +612,7 @@ func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
@ -794,7 +795,7 @@ func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
if in.ObservedIgnore != nil {


@ -376,10 +376,6 @@ spec:
artifact:
description: Artifact represents the last successful Bucket reconciliation.
properties:
checksum:
description: 'Checksum is the SHA256 checksum of the Artifact
file. Deprecated: use Artifact.Digest instead.'
type: string
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
@ -415,7 +411,9 @@ spec:
the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:


@ -17,6 +17,391 @@ spec:
singular: gitrepository
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
name: v1
schema:
openAPIV3Schema:
description: GitRepository is the Schema for the gitrepositories API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: GitRepositorySpec specifies the required configuration to
produce an Artifact for a Git repository.
properties:
ignore:
description: Ignore overrides the set of excluded patterns in the
.sourceignore format (which is the same as .gitignore). If not provided,
a default will be used, consult the documentation for your version
to find out what those are.
type: string
include:
description: Include specifies a list of GitRepository resources which
Artifacts should be included in the Artifact produced for this GitRepository.
items:
description: GitRepositoryInclude specifies a local reference to
a GitRepository which Artifact (sub-)contents must be included,
and where they should be placed.
properties:
fromPath:
description: FromPath specifies the path to copy contents from,
defaults to the root of the Artifact.
type: string
repository:
description: GitRepositoryRef specifies the GitRepository which
Artifact contents must be included.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
toPath:
description: ToPath specifies the path to copy contents to,
defaults to the name of the GitRepositoryRef.
type: string
required:
- repository
type: object
type: array
interval:
description: Interval at which to check the GitRepository for updates.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
recurseSubmodules:
description: RecurseSubmodules enables the initialization of all submodules
within the GitRepository as cloned from the URL, using their default
settings.
type: boolean
ref:
description: Reference specifies the Git reference to resolve and
monitor for changes, defaults to the 'master' branch.
properties:
branch:
description: Branch to check out, defaults to 'master' if no other
field is defined.
type: string
commit:
description: "Commit SHA to check out, takes precedence over all
reference fields. \n This can be combined with Branch to shallow
clone the branch, in which the commit is expected to exist."
type: string
name:
description: "Name of the reference to check out; takes precedence
over Branch, Tag and SemVer. \n It must be a valid Git reference:
https://git-scm.com/docs/git-check-ref-format#_description Examples:
\"refs/heads/main\", \"refs/tags/v0.1.0\", \"refs/pull/420/head\",
\"refs/merge-requests/1/head\""
type: string
semver:
description: SemVer tag expression to check out, takes precedence
over Tag.
type: string
tag:
description: Tag to check out, takes precedence over Branch.
type: string
type: object
secretRef:
description: SecretRef specifies the Secret containing authentication
credentials for the GitRepository. For HTTPS repositories the Secret
must contain 'username' and 'password' fields for basic auth or
'bearerToken' field for token auth. For SSH repositories the Secret
must contain 'identity' and 'known_hosts' fields.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
suspend:
description: Suspend tells the controller to suspend the reconciliation
of this GitRepository.
type: boolean
timeout:
default: 60s
description: Timeout for Git operations like cloning, defaults to
60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
url:
description: URL specifies the Git repository URL, it can be an HTTP/S
or SSH address.
pattern: ^(http|https|ssh)://.*$
type: string
verify:
description: Verification specifies the configuration to verify the
Git commit signature(s).
properties:
mode:
description: Mode specifies what Git object should be verified,
currently ('head').
enum:
- head
type: string
secretRef:
description: SecretRef specifies the Secret containing the public
keys of trusted Git authors.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
required:
- mode
- secretRef
type: object
required:
- interval
- url
type: object
status:
default:
observedGeneration: -1
description: GitRepositoryStatus records the observed state of a Git repository.
properties:
artifact:
description: Artifact represents the last successful GitRepository
reconciliation.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: LastUpdateTime is the timestamp corresponding to
the last update of the Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: Path is the relative file path of the Artifact. It
can be used to locate the file in the root of the Artifact storage
on the local file system of the controller managing the Source.
type: string
revision:
description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git
tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: URL is the HTTP address of the Artifact as exposed
by the controller managing the Source. It can be used to retrieve
the Artifact for consumption, e.g. by another controller applying
the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the GitRepository.
items:
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
\n type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are: \"Available\",
\"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
// +listType=map // +listMapKey=type Conditions []metav1.Condition
`json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers
of specific condition types may define expected values and
meanings for this field, and whether the values are considered
a guaranteed API. The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
includedArtifacts:
description: IncludedArtifacts contains a list of the last successfully
included Artifacts as instructed by GitRepositorySpec.Include.
items:
description: Artifact represents the output of a Source reconciliation.
properties:
digest:
description: Digest is the digest of the file in the form of
'<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: LastUpdateTime is the timestamp corresponding to
the last update of the Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI
annotations.
type: object
path:
description: Path is the relative file path of the Artifact.
It can be used to locate the file in the root of the Artifact
storage on the local file system of the controller managing
the Source.
type: string
revision:
description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git
tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: URL is the HTTP address of the Artifact as exposed
by the controller managing the Source. It can be used to retrieve
the Artifact for consumption, e.g. by another controller applying
the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
type: array
lastHandledReconcileAt:
description: LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value can
be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation of
the GitRepository object.
format: int64
type: integer
observedIgnore:
description: ObservedIgnore is the observed exclusion patterns used
for constructing the source artifact.
type: string
observedInclude:
description: ObservedInclude is the observed list of GitRepository
resources used to produce the current Artifact.
items:
description: GitRepositoryInclude specifies a local reference to
a GitRepository which Artifact (sub-)contents must be included,
and where they should be placed.
properties:
fromPath:
description: FromPath specifies the path to copy contents from,
defaults to the root of the Artifact.
type: string
repository:
description: GitRepositoryRef specifies the GitRepository which
Artifact contents must be included.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
toPath:
description: ToPath specifies the path to copy contents to,
defaults to the name of the GitRepositoryRef.
type: string
required:
- repository
type: object
type: array
observedRecurseSubmodules:
description: ObservedRecurseSubmodules is the observed resource submodules
configuration used to produce the current Artifact.
type: boolean
type: object
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
@ -30,6 +415,8 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: v1beta1 GitRepository is deprecated, upgrade to v1
name: v1beta1
schema:
openAPIV3Schema:
@ -360,6 +747,8 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
deprecated: true
deprecationWarning: v1beta2 GitRepository is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
@ -542,6 +931,7 @@ spec:
type: object
required:
- mode
- secretRef
type: object
required:
- interval
@ -556,10 +946,6 @@ spec:
description: Artifact represents the last successful GitRepository
reconciliation.
properties:
checksum:
description: 'Checksum is the SHA256 checksum of the Artifact
file. Deprecated: use Artifact.Digest instead.'
type: string
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
@ -595,7 +981,9 @@ spec:
the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
@ -683,10 +1071,6 @@ spec:
items:
description: Artifact represents the output of a Source reconciliation.
properties:
checksum:
description: 'Checksum is the SHA256 checksum of the Artifact
file. Deprecated: use Artifact.Digest instead.'
type: string
digest:
description: Digest is the digest of the file in the form of
'<algorithm>:<checksum>'.
@ -725,7 +1109,9 @@ spec:
the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
type: array
@ -785,6 +1171,6 @@ spec:
type: object
type: object
served: true
storage: true
storage: false
subresources:
status: {}


@ -451,10 +451,6 @@ spec:
description: Artifact represents the output of the last successful
reconciliation.
properties:
checksum:
description: 'Checksum is the SHA256 checksum of the Artifact
file. Deprecated: use Artifact.Digest instead.'
type: string
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
@ -490,7 +486,9 @@ spec:
the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:


@ -368,10 +368,6 @@ spec:
description: Artifact represents the last successful HelmRepository
reconciliation.
properties:
checksum:
description: 'Checksum is the SHA256 checksum of the Artifact
file. Deprecated: use Artifact.Digest instead.'
type: string
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
@ -407,7 +403,9 @@ spec:
the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:


@ -194,10 +194,6 @@ spec:
description: Artifact represents the output of the last successful
OCI Repository sync.
properties:
checksum:
description: 'Checksum is the SHA256 checksum of the Artifact
file. Deprecated: use Artifact.Digest instead.'
type: string
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
@ -233,7 +229,9 @@ spec:
the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:


@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1beta2
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: gitrepository-sample


@ -16,7 +16,7 @@ limitations under the License.
package controllers
import sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
import sourcev1 "github.com/fluxcd/source-controller/api/v1"
type artifactSet []*sourcev1.Artifact


@ -19,7 +19,7 @@ package controllers
import (
"fmt"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/types"
)
@ -51,9 +51,6 @@ func (m matchArtifact) Match(actual interface{}) (success bool, err error) {
if ok, err = Equal(m.expected.Revision).Match(actualArtifact.Revision); !ok {
return ok, err
}
if ok, err = Equal(m.expected.Checksum).Match(actualArtifact.Checksum); !ok {
return ok, err
}
if ok, err = Equal(m.expected.Size).Match(actualArtifact.Size); !ok {
return ok, err
}

View File

@ -49,7 +49,8 @@ import (
eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1"
"github.com/fluxcd/pkg/sourceignore"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
bucketv1 "github.com/fluxcd/source-controller/api/v1beta2"
intdigest "github.com/fluxcd/source-controller/internal/digest"
serror "github.com/fluxcd/source-controller/internal/error"
"github.com/fluxcd/source-controller/internal/index"
@ -155,7 +156,7 @@ type BucketProvider interface {
// bucketReconcileFunc is the function type for all the v1beta2.Bucket
// (sub)reconcile functions. The type implementations are grouped and
// executed serially to perform the complete reconcile of the object.
type bucketReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error)
type bucketReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error)
func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error {
return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{})
@ -166,7 +167,7 @@ func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts Buc
recoverPanic := true
return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.Bucket{}).
For(&bucketv1.Bucket{}).
WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})).
WithOptions(controller.Options{
MaxConcurrentReconciles: opts.MaxConcurrentReconciles,
@ -181,7 +182,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
log := ctrl.LoggerFrom(ctx)
// Fetch the Bucket
obj := &sourcev1.Bucket{}
obj := &bucketv1.Bucket{}
if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
@ -251,7 +252,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
// reconcile iterates through the bucketReconcileFunc tasks for the
// object. It returns early on the first call that returns
// reconcile.ResultRequeue, or produces an error.
func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) {
func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) {
oldObj := obj.DeepCopy()
rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress")
@ -322,27 +323,19 @@ func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatche
}
// notify emits notification related to the reconciliation.
func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.Bucket, index *index.Digester, res sreconcile.Result, resErr error) {
func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *bucketv1.Bucket, index *index.Digester, res sreconcile.Result, resErr error) {
// Notify successful reconciliation for new artifact and recovery from any
// failure.
if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
annotations := map[string]string{
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision,
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaChecksumKey): newObj.Status.Artifact.Checksum,
}
if newObj.Status.Artifact.Digest != "" {
annotations[sourcev1.GroupVersion.Group+"/"+eventv1.MetaDigestKey] = newObj.Status.Artifact.Digest
}
var oldChecksum string
if oldObj.GetArtifact() != nil {
oldChecksum = oldObj.GetArtifact().Checksum
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest,
}
message := fmt.Sprintf("stored artifact with %d fetched files from '%s' bucket", index.Len(), newObj.Spec.BucketName)
// Notify on new artifact and failure recovery.
if oldChecksum != newObj.GetArtifact().Checksum {
if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) {
r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
"NewArtifact", message)
ctrl.LoggerFrom(ctx).Info(message)
@ -368,7 +361,7 @@ func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.
// condition is added.
// The hostnames of any URLs in the Status of the object are updated, to ensure
// they match the Storage server hostname of the current runtime.
func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, _ *index.Digester, _ string) (sreconcile.Result, error) {
func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, _ *index.Digester, _ string) (sreconcile.Result, error) {
// Garbage collect previous advertised artifact(s) from storage
_ = r.garbageCollect(ctx, obj)
@ -409,7 +402,7 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.Seria
// When a SecretRef is defined, it attempts to fetch the Secret before calling
// the provider. If this fails, it records v1beta2.FetchFailedCondition=True on
// the object and returns early.
func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) {
func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) {
secret, err := r.getBucketSecret(ctx, obj)
if err != nil {
e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
@ -421,7 +414,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial
// Construct provider client
var provider BucketProvider
switch obj.Spec.Provider {
case sourcev1.GoogleBucketProvider:
case bucketv1.GoogleBucketProvider:
if err = gcp.ValidateSecret(secret); err != nil {
e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
@ -432,7 +425,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e
}
case sourcev1.AzureBucketProvider:
case bucketv1.AzureBucketProvider:
if err = azure.ValidateSecret(secret); err != nil {
e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
@ -458,7 +451,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial
// Fetch etag index
if err = fetchEtagIndex(ctx, provider, obj, index, dir); err != nil {
e := &serror.Event{Err: err, Reason: sourcev1.BucketOperationFailedReason}
e := &serror.Event{Err: err, Reason: bucketv1.BucketOperationFailedReason}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e
}
@ -466,7 +459,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial
// Check if index has changed compared to current Artifact revision.
var changed bool
if artifact := obj.Status.Artifact; artifact != nil && artifact.Revision != "" {
curRev := digest.Digest(sourcev1.TransformLegacyRevision(artifact.Revision))
curRev := digest.Digest(artifact.Revision)
changed = curRev.Validate() != nil || curRev != index.Digest(curRev.Algorithm())
}
@ -490,7 +483,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial
}()
if err = fetchIndexFiles(ctx, provider, obj, index, dir); err != nil {
e := &serror.Event{Err: err, Reason: sourcev1.BucketOperationFailedReason}
e := &serror.Event{Err: err, Reason: bucketv1.BucketOperationFailedReason}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e
}
@ -509,7 +502,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial
// early.
// On a successful archive, the Artifact in the Status of the object is set,
// and the symlink in the Storage is updated to its path.
func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) {
func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) {
// Calculate revision
revision := index.Digest(intdigest.Canonical)
@ -519,7 +512,7 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.Seri
// Set the ArtifactInStorageCondition if there's no drift.
defer func() {
if curArtifact := obj.GetArtifact(); curArtifact != nil && curArtifact.Revision != "" {
curRev := digest.Digest(sourcev1.TransformLegacyRevision(curArtifact.Revision))
curRev := digest.Digest(curArtifact.Revision)
if curRev.Validate() == nil && index.Digest(curRev.Algorithm()) == curRev {
conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
@ -530,7 +523,7 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.Seri
// The artifact is up-to-date
if curArtifact := obj.GetArtifact(); curArtifact != nil && curArtifact.Revision != "" {
curRev := digest.Digest(sourcev1.TransformLegacyRevision(curArtifact.Revision))
curRev := digest.Digest(curArtifact.Revision)
if curRev.Validate() == nil && index.Digest(curRev.Algorithm()) == curRev {
r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
return sreconcile.ResultSuccess, nil
@ -602,7 +595,7 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.Seri
// reconcileDelete handles the deletion of the object.
// It first garbage collects all Artifacts for the object from the Storage.
// If successful, the finalizer is removed from the object.
func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) {
func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *bucketv1.Bucket) (sreconcile.Result, error) {
// Garbage collect the resource's artifacts
if err := r.garbageCollect(ctx, obj); err != nil {
// Return the error so we retry the failed garbage collection
@ -621,7 +614,7 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bu
// It removes all but the current Artifact from the Storage, unless the
// deletion timestamp on the object is set, which will result in the
// removal of all Artifacts for the object.
func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error {
func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *bucketv1.Bucket) error {
if !obj.DeletionTimestamp.IsZero() {
if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
return &serror.Event{
@ -654,7 +647,7 @@ func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Buc
// getBucketSecret attempts to fetch the Secret reference if specified on the
// obj. It returns any client error.
func (r *BucketReconciler) getBucketSecret(ctx context.Context, obj *sourcev1.Bucket) (*corev1.Secret, error) {
func (r *BucketReconciler) getBucketSecret(ctx context.Context, obj *bucketv1.Bucket) (*corev1.Secret, error) {
if obj.Spec.SecretRef == nil {
return nil, nil
}
@ -699,7 +692,7 @@ func (r *BucketReconciler) annotatedEventLogf(ctx context.Context,
// bucket using the given provider, while filtering them using .sourceignore
// rules. After fetching an object, the etag value in the index is updated to
// the current value to ensure accuracy.
func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error {
func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *bucketv1.Bucket, index *index.Digester, tempDir string) error {
ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
defer cancel()
@ -753,7 +746,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.
// using the given provider, and stores them into tempDir. It downloads in
// parallel, but limited to the maxConcurrentBucketFetches.
// Given an index is provided, the bucket is assumed to exist.
func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error {
func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *bucketv1.Bucket, index *index.Digester, tempDir string) error {
ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
defer cancel()
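With the checksum field removed, the Bucket revision stored in Status.Artifact is itself a digest string, and the up-to-date checks in the hunks above parse it with go-digest before comparing it against the freshly computed etag index digest. A condensed, self-contained sketch of that comparison; the index digest here is derived from a fixed payload purely for illustration:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// indexDigest stands in for index.Digester.Digest(algo); it hashes a fixed
// payload so the example is self-contained.
func indexDigest(algo digest.Algorithm) digest.Digest {
	return algo.FromString("etag-index-contents")
}

func main() {
	// Revision recorded on Status.Artifact, now in "<algorithm>:<hex>" form.
	storedRevision := indexDigest(digest.SHA256).String()

	curRev := digest.Digest(storedRevision)
	changed := curRev.Validate() != nil || curRev != indexDigest(curRev.Algorithm())
	fmt.Println("changed:", changed) // false: the stored revision matches the current index digest
}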

View File

@ -42,7 +42,8 @@ import (
conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check"
"github.com/fluxcd/pkg/runtime/patch"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
bucketv1 "github.com/fluxcd/source-controller/api/v1beta2"
intdigest "github.com/fluxcd/source-controller/internal/digest"
"github.com/fluxcd/source-controller/internal/index"
gcsmock "github.com/fluxcd/source-controller/internal/mock/gcs"
@ -86,12 +87,12 @@ func TestBucketReconciler_Reconcile(t *testing.T) {
g.Expect(testEnv.Create(ctx, secret)).To(Succeed())
defer testEnv.Delete(ctx, secret)
origObj := &sourcev1.Bucket{
origObj := &bucketv1.Bucket{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "bucket-reconcile-",
Namespace: "default",
},
Spec: sourcev1.BucketSpec{
Spec: bucketv1.BucketSpec{
Provider: "generic",
BucketName: s3Server.BucketName,
Endpoint: u.Host,
@ -159,7 +160,7 @@ func TestBucketReconciler_Reconcile(t *testing.T) {
func TestBucketReconciler_reconcileStorage(t *testing.T) {
tests := []struct {
name string
beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error
beforeFunc func(obj *bucketv1.Bucket, storage *Storage) error
want sreconcile.Result
wantErr bool
assertArtifact *sourcev1.Artifact
@ -168,7 +169,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) {
}{
{
name: "garbage collects",
beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error {
beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error {
revisions := []string{"a", "b", "c", "d"}
for n := range revisions {
v := revisions[n]
@ -193,7 +194,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) {
assertArtifact: &sourcev1.Artifact{
Path: "/reconcile-storage/d.txt",
Revision: "d",
Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
URL: testStorage.Hostname + "/reconcile-storage/d.txt",
Size: int64p(int64(len("d"))),
},
@ -218,7 +219,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) {
},
{
name: "notices missing artifact in storage",
beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error {
beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error {
obj.Status.Artifact = &sourcev1.Artifact{
Path: fmt.Sprintf("/reconcile-storage/invalid.txt"),
Revision: "d",
@ -237,11 +238,11 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) {
},
{
name: "updates hostname on diff from current",
beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error {
beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error {
obj.Status.Artifact = &sourcev1.Artifact{
Path: fmt.Sprintf("/reconcile-storage/hostname.txt"),
Revision: "f",
Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
URL: "http://outdated.com/reconcile-storage/hostname.txt",
}
if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
@ -260,7 +261,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) {
assertArtifact: &sourcev1.Artifact{
Path: "/reconcile-storage/hostname.txt",
Revision: "f",
Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
URL: testStorage.Hostname + "/reconcile-storage/hostname.txt",
Size: int64p(int64(len("file"))),
},
@ -284,7 +285,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) {
patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"),
}
obj := &sourcev1.Bucket{
obj := &bucketv1.Bucket{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-",
Generation: 1,
@ -335,7 +336,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) {
bucketObjects []*s3mock.Object
middleware http.Handler
secret *corev1.Secret
beforeFunc func(obj *sourcev1.Bucket)
beforeFunc func(obj *bucketv1.Bucket)
want sreconcile.Result
wantErr bool
assertIndex *index.Digester
@ -369,7 +370,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) {
{
name: "Observes non-existing secretRef",
bucketName: "dummy",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Spec.SecretRef = &meta.LocalObjectReference{
Name: "dummy",
}
@ -392,7 +393,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) {
Name: "dummy",
},
},
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Spec.SecretRef = &meta.LocalObjectReference{
Name: "dummy",
}
@ -410,7 +411,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) {
{
name: "Observes non-existing bucket name",
bucketName: "dummy",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Spec.BucketName = "invalid"
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
@ -418,14 +419,14 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) {
wantErr: true,
assertIndex: index.NewDigester(),
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"),
*conditions.TrueCondition(sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "bucket 'invalid' not found"),
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"),
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"),
},
},
{
name: "Transient bucket name API failure",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Spec.Endpoint = "transient.example.com"
obj.Spec.BucketName = "unavailable"
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
@ -434,7 +435,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) {
wantErr: true,
assertIndex: index.NewDigester(),
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"),
*conditions.TrueCondition(sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"),
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"),
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"),
},
@ -474,7 +475,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) {
{
name: "spec.ignore overrides .sourceignore",
bucketName: "dummy",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
ignore := "!ignored/file.txt"
obj.Spec.Ignore = &ignore
},
@ -511,9 +512,9 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) {
{
name: "Up-to-date artifact",
bucketName: "dummy",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{
Revision: "b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479",
Revision: "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479",
}
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
@ -538,8 +539,8 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) {
{
name: "Removes FetchFailedCondition after reconciling source",
bucketName: "dummy",
beforeFunc: func(obj *sourcev1.Bucket) {
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file")
beforeFunc: func(obj *bucketv1.Bucket) {
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "failed to read test file")
},
bucketObjects: []*s3mock.Object{
{
@ -569,7 +570,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) {
LastModified: time.Now(),
},
},
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{
Path: "some-path",
Revision: "some-rev",
@ -602,15 +603,15 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) {
}
tmpDir := t.TempDir()
obj := &sourcev1.Bucket{
obj := &bucketv1.Bucket{
TypeMeta: metav1.TypeMeta{
Kind: sourcev1.BucketKind,
Kind: bucketv1.BucketKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-bucket",
Generation: 1,
},
Spec: sourcev1.BucketSpec{
Spec: bucketv1.BucketSpec{
Timeout: &metav1.Duration{Duration: timeout},
},
}
@ -663,7 +664,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
bucketName string
bucketObjects []*gcsmock.Object
secret *corev1.Secret
beforeFunc func(obj *sourcev1.Bucket)
beforeFunc func(obj *bucketv1.Bucket)
want sreconcile.Result
wantErr bool
assertIndex *index.Digester
@ -690,7 +691,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
"serviceaccount": []byte("testsa"),
},
},
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Spec.SecretRef = &meta.LocalObjectReference{
Name: "dummy",
}
@ -707,7 +708,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
{
name: "Observes non-existing secretRef",
bucketName: "dummy",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Spec.SecretRef = &meta.LocalObjectReference{
Name: "dummy",
}
@ -731,7 +732,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
Name: "dummy",
},
},
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Spec.SecretRef = &meta.LocalObjectReference{
Name: "dummy",
}
@ -750,7 +751,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
{
name: "Observes non-existing bucket name",
bucketName: "dummy",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Spec.BucketName = "invalid"
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
@ -759,14 +760,14 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
wantErr: true,
assertIndex: index.NewDigester(),
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"),
*conditions.TrueCondition(sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "bucket 'invalid' not found"),
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"),
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"),
},
},
{
name: "Transient bucket name API failure",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Spec.Endpoint = "transient.example.com"
obj.Spec.BucketName = "unavailable"
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
@ -776,7 +777,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
wantErr: true,
assertIndex: index.NewDigester(),
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"),
*conditions.TrueCondition(sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"),
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"),
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"),
},
@ -816,7 +817,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
{
name: "spec.ignore overrides .sourceignore",
bucketName: "dummy",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
ignore := "!ignored/file.txt"
obj.Spec.Ignore = &ignore
},
@ -853,9 +854,9 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
{
name: "Up-to-date artifact",
bucketName: "dummy",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{
Revision: "b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479",
Revision: "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479",
}
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
@ -880,8 +881,8 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
{
name: "Removes FetchFailedCondition after reconciling source",
bucketName: "dummy",
beforeFunc: func(obj *sourcev1.Bucket) {
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file")
beforeFunc: func(obj *bucketv1.Bucket) {
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "failed to read test file")
},
bucketObjects: []*gcsmock.Object{
{
@ -911,7 +912,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
Generation: 3,
},
},
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{
Path: "some-path",
Revision: "some-rev",
@ -946,18 +947,18 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
tmpDir := t.TempDir()
// Test bucket object.
obj := &sourcev1.Bucket{
obj := &bucketv1.Bucket{
TypeMeta: metav1.TypeMeta{
Kind: sourcev1.BucketKind,
Kind: bucketv1.BucketKind,
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-bucket",
Generation: 1,
},
Spec: sourcev1.BucketSpec{
Spec: bucketv1.BucketSpec{
BucketName: tt.bucketName,
Timeout: &metav1.Duration{Duration: timeout},
Provider: sourcev1.GoogleBucketProvider,
Provider: bucketv1.GoogleBucketProvider,
},
}
@ -1007,15 +1008,15 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
func TestBucketReconciler_reconcileArtifact(t *testing.T) {
tests := []struct {
name string
beforeFunc func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string)
afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string)
beforeFunc func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string)
afterFunc func(t *WithT, obj *bucketv1.Bucket, dir string)
want sreconcile.Result
wantErr bool
assertConditions []metav1.Condition
}{
{
name: "Archiving artifact to storage makes ArtifactInStorage=True",
beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) {
beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
@ -1029,7 +1030,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) {
},
{
name: "Up-to-date artifact should not persist and update status",
beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) {
beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) {
revision := index.Digest(intdigest.Canonical)
obj.Spec.Interval = metav1.Duration{Duration: interval}
// Incomplete artifact
@ -1037,7 +1038,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) {
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
},
afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) {
afterFunc: func(t *WithT, obj *bucketv1.Bucket, dir string) {
// Still incomplete
t.Expect(obj.Status.URL).To(BeEmpty())
},
@ -1050,7 +1051,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) {
},
{
name: "Removes ArtifactOutdatedCondition after creating a new artifact",
beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) {
beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "")
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
@ -1065,12 +1066,12 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) {
},
{
name: "Creates latest symlink to the created artifact",
beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) {
beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
},
afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) {
afterFunc: func(t *WithT, obj *bucketv1.Bucket, dir string) {
localPath := testStorage.LocalPath(*obj.GetArtifact())
symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz")
targetFile, err := os.Readlink(symlinkPath)
@ -1086,7 +1087,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) {
},
{
name: "Dir path deleted",
beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) {
beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) {
t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred())
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
@ -1101,7 +1102,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) {
},
{
name: "Dir path is not a directory",
beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) {
beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) {
// Remove the given directory and create a file for the same
// path.
t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred())
@ -1111,7 +1112,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) {
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
},
afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) {
afterFunc: func(t *WithT, obj *bucketv1.Bucket, dir string) {
t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred())
},
want: sreconcile.ResultEmpty,
@ -1137,16 +1138,16 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) {
tmpDir := t.TempDir()
obj := &sourcev1.Bucket{
obj := &bucketv1.Bucket{
TypeMeta: metav1.TypeMeta{
Kind: sourcev1.BucketKind,
Kind: bucketv1.BucketKind,
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-bucket-",
Generation: 1,
Namespace: "default",
},
Spec: sourcev1.BucketSpec{
Spec: bucketv1.BucketSpec{
Timeout: &metav1.Duration{Duration: timeout},
},
}
@ -1186,12 +1187,12 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) {
func TestBucketReconciler_statusConditions(t *testing.T) {
tests := []struct {
name string
beforeFunc func(obj *sourcev1.Bucket)
beforeFunc func(obj *bucketv1.Bucket)
assertConditions []metav1.Condition
}{
{
name: "positive conditions only",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
},
assertConditions: []metav1.Condition{
@ -1201,7 +1202,7 @@ func TestBucketReconciler_statusConditions(t *testing.T) {
},
{
name: "multiple failures",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory")
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error")
@ -1215,7 +1216,7 @@ func TestBucketReconciler_statusConditions(t *testing.T) {
},
{
name: "mixed positive and negative conditions",
beforeFunc: func(obj *sourcev1.Bucket) {
beforeFunc: func(obj *bucketv1.Bucket) {
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
},
@ -1231,9 +1232,9 @@ func TestBucketReconciler_statusConditions(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
obj := &sourcev1.Bucket{
obj := &bucketv1.Bucket{
TypeMeta: metav1.TypeMeta{
Kind: sourcev1.BucketKind,
Kind: bucketv1.BucketKind,
APIVersion: "source.toolkit.fluxcd.io/v1beta2",
},
ObjectMeta: metav1.ObjectMeta{
@ -1278,8 +1279,8 @@ func TestBucketReconciler_notify(t *testing.T) {
name string
res sreconcile.Result
resErr error
oldObjBeforeFunc func(obj *sourcev1.Bucket)
newObjBeforeFunc func(obj *sourcev1.Bucket)
oldObjBeforeFunc func(obj *bucketv1.Bucket)
newObjBeforeFunc func(obj *bucketv1.Bucket)
wantEvent string
}{
{
@ -1291,8 +1292,8 @@ func TestBucketReconciler_notify(t *testing.T) {
name: "new artifact",
res: sreconcile.ResultSuccess,
resErr: nil,
newObjBeforeFunc: func(obj *sourcev1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
newObjBeforeFunc: func(obj *bucketv1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
},
wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from",
},
@ -1300,13 +1301,13 @@ func TestBucketReconciler_notify(t *testing.T) {
name: "recovery from failure",
res: sreconcile.ResultSuccess,
resErr: nil,
oldObjBeforeFunc: func(obj *sourcev1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
oldObjBeforeFunc: func(obj *bucketv1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
},
newObjBeforeFunc: func(obj *sourcev1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
newObjBeforeFunc: func(obj *bucketv1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
wantEvent: "Normal Succeeded stored artifact with 2 fetched files from",
@ -1315,13 +1316,13 @@ func TestBucketReconciler_notify(t *testing.T) {
name: "recovery and new artifact",
res: sreconcile.ResultSuccess,
resErr: nil,
oldObjBeforeFunc: func(obj *sourcev1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
oldObjBeforeFunc: func(obj *bucketv1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
},
newObjBeforeFunc: func(obj *sourcev1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb"}
newObjBeforeFunc: func(obj *bucketv1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb"}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from",
@ -1330,12 +1331,12 @@ func TestBucketReconciler_notify(t *testing.T) {
name: "no updates",
res: sreconcile.ResultSuccess,
resErr: nil,
oldObjBeforeFunc: func(obj *sourcev1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
oldObjBeforeFunc: func(obj *bucketv1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
newObjBeforeFunc: func(obj *sourcev1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
newObjBeforeFunc: func(obj *bucketv1.Bucket) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
},
@ -1347,8 +1348,8 @@ func TestBucketReconciler_notify(t *testing.T) {
recorder := record.NewFakeRecorder(32)
oldObj := &sourcev1.Bucket{
Spec: sourcev1.BucketSpec{
oldObj := &bucketv1.Bucket{
Spec: bucketv1.BucketSpec{
BucketName: "test-bucket",
},
}
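The notify tests above now seed Artifact.Digest rather than Checksum, matching the controller change earlier in this diff that forwards only revision and digest metadata on events. A rough sketch of how those annotation keys are composed; the revision and digest values are placeholders:

package main

import (
	"fmt"

	eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1"

	sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

func main() {
	revision := "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479"       // placeholder revision
	artifactDigest := "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4" // placeholder digest

	// The event annotations carry the group-qualified revision and digest keys.
	annotations := map[string]string{
		fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): revision,
		fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey):   artifactDigest,
	}
	for k, v := range annotations {
		fmt.Println(k, "=", v)
	}
}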

View File

@ -53,7 +53,8 @@ import (
rreconcile "github.com/fluxcd/pkg/runtime/reconcile"
"github.com/fluxcd/pkg/sourceignore"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
serror "github.com/fluxcd/source-controller/internal/error"
"github.com/fluxcd/source-controller/internal/features"
sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
@ -325,15 +326,7 @@ func (r *GitRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *so
if r.shouldNotify(oldObj, newObj, res, resErr) {
annotations := map[string]string{
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision,
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaChecksumKey): newObj.Status.Artifact.Checksum,
}
if newObj.Status.Artifact.Digest != "" {
annotations[sourcev1.GroupVersion.Group+"/"+eventv1.MetaDigestKey] = newObj.Status.Artifact.Digest
}
var oldChecksum string
if oldObj.GetArtifact() != nil {
oldChecksum = oldObj.GetArtifact().Checksum
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest,
}
// A partial commit due to no-op clone doesn't contain the commit
@ -346,7 +339,7 @@ func (r *GitRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *so
}
// Notify on new artifact and failure recovery.
if oldChecksum != newObj.GetArtifact().Checksum {
if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) {
r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
"NewArtifact", message)
ctrl.LoggerFrom(ctx).Info(message)
@ -389,8 +382,8 @@ func (r *GitRepositoryReconciler) shouldNotify(oldObj, newObj *sourcev1.GitRepos
// it is removed from the object.
// If the object does not have an Artifact in its Status, a Reconciling
// condition is added.
// The hostname of any URL in the Status of the object are updated, to ensure
// they match the Storage server hostname of current runtime.
// The hostname of the Artifact in the Status of the object is updated, to
// ensure it matches the Storage server hostname of the current runtime.
func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher,
obj *sourcev1.GitRepository, _ *git.Commit, _ *artifactSet, _ string) (sreconcile.Result, error) {
// Garbage collect previous advertised artifact(s) from storage
@ -400,7 +393,6 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patc
var artifactMissing bool
if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) {
obj.Status.Artifact = nil
obj.Status.URL = ""
artifactMissing = true
// Remove the condition as the artifact doesn't exist.
conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
@ -423,7 +415,6 @@ func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patc
// Always update URLs to ensure hostname is up-to-date
// TODO(hidde): we may want to send out an event only if we notice the URL has changed
r.Storage.SetArtifactURL(obj.GetArtifact())
obj.Status.URL = r.Storage.SetHostname(obj.Status.URL)
return sreconcile.ResultSuccess, nil
}
@ -616,8 +607,7 @@ func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
// Source ignore patterns are loaded, and the given directory is archived while
// taking these patterns into account.
// On a successful archive, the Artifact, Includes, observed ignore, recurse
// submodules and observed include in the Status of the object are set, and the
// symlink in the Storage is updated to its path.
// submodules and observed include in the Status of the object are set.
func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher,
obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
@ -704,20 +694,23 @@ func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat
// Record the observations on the object.
obj.Status.Artifact = artifact.DeepCopy()
obj.Status.IncludedArtifacts = *includes
obj.Status.ContentConfigChecksum = "" // To be removed in the next API version.
obj.Status.ObservedIgnore = obj.Spec.Ignore
obj.Status.ObservedRecurseSubmodules = obj.Spec.RecurseSubmodules
obj.Status.ObservedInclude = obj.Spec.Include
// Update symlink on a "best effort" basis
url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
if err != nil {
r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason,
"failed to update status URL symlink: %s", err)
}
if url != "" {
obj.Status.URL = url
// Remove the deprecated symlink.
// TODO(hidde): remove 2 minor versions from introduction of v1.
symArtifact := artifact.DeepCopy()
symArtifact.Path = filepath.Join(filepath.Dir(symArtifact.Path), "latest.tar.gz")
if fi, err := os.Lstat(r.Storage.LocalPath(artifact)); err == nil {
if fi.Mode()&os.ModeSymlink != 0 {
if err := os.Remove(r.Storage.LocalPath(*symArtifact)); err != nil {
r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason,
"failed to remove (deprecated) symlink: %s", err)
}
}
}
conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)
return sreconcile.ResultSuccess, nil
}
@ -1019,7 +1012,7 @@ func gitContentConfigChanged(obj *sourcev1.GitRepository, includes *artifactSet)
observedInclArtifact := obj.Status.IncludedArtifacts[index]
currentIncl := artifacts[index]
// Check if the include are the same in spec and status.
// Check if include is the same in spec and status.
if !gitRepositoryIncludeEqual(incl, observedIncl) {
return true
}
@ -1028,7 +1021,7 @@ func gitContentConfigChanged(obj *sourcev1.GitRepository, includes *artifactSet)
if !observedInclArtifact.HasRevision(currentIncl.Revision) {
return true
}
if observedInclArtifact.Checksum != currentIncl.Checksum {
if !observedInclArtifact.HasDigest(currentIncl.Digest) {
return true
}
}
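With Status.URL and the advertised latest symlink dropped from GitRepository artifact handling, the reconcileArtifact hunk above only removes a previously created symlink on a best-effort basis. A self-contained sketch of that cleanup idea; the artifact path is illustrative:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// removeDeprecatedSymlink deletes a "latest.tar.gz" entry next to the artifact
// file, but only if it exists and actually is a symlink.
func removeDeprecatedSymlink(artifactPath string) error {
	symlinkPath := filepath.Join(filepath.Dir(artifactPath), "latest.tar.gz")
	fi, err := os.Lstat(symlinkPath)
	if err != nil {
		return nil // nothing to clean up
	}
	if fi.Mode()&os.ModeSymlink == 0 {
		return nil // not a symlink, leave it alone
	}
	return os.Remove(symlinkPath)
}

func main() {
	if err := removeDeprecatedSymlink("/data/gitrepository/default/repo/abc.tar.gz"); err != nil {
		fmt.Println("cleanup failed:", err)
	}
}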

View File

@ -62,7 +62,7 @@ import (
"github.com/fluxcd/pkg/runtime/controller"
"github.com/fluxcd/pkg/runtime/testenv"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
)
var (

View File

@ -56,7 +56,7 @@ import (
"github.com/fluxcd/pkg/testserver"
"github.com/fluxcd/pkg/git"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
serror "github.com/fluxcd/source-controller/internal/error"
"github.com/fluxcd/source-controller/internal/features"
sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
@ -732,27 +732,6 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T)
wantRevision: "staging@sha1:<commit>",
wantReconciling: false,
},
{
name: "Optimized clone (legacy revision format)",
reference: &sourcev1.GitRepositoryRef{
Branch: "staging",
},
beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) {
// Add existing artifact on the object and storage.
obj.Status = sourcev1.GitRepositoryStatus{
Artifact: &sourcev1.Artifact{
Revision: "staging/" + latestRev,
Path: randStringRunes(10),
},
}
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo")
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "foo")
},
want: sreconcile.ResultEmpty,
wantErr: true,
wantRevision: "staging@sha1:<commit>",
wantReconciling: false,
},
{
name: "Optimized clone different ignore",
reference: &sourcev1.GitRepositoryRef{
@ -775,28 +754,6 @@ func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T)
wantRevision: "staging@sha1:<commit>",
wantReconciling: false,
},
{
name: "Optimized clone different ignore (legacy revision format)",
reference: &sourcev1.GitRepositoryRef{
Branch: "staging",
},
beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) {
// Set new ignore value.
obj.Spec.Ignore = pointer.StringPtr("foo")
// Add existing artifact on the object and storage.
obj.Status = sourcev1.GitRepositoryStatus{
Artifact: &sourcev1.Artifact{
Revision: "staging/" + latestRev,
Path: randStringRunes(10),
},
}
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo")
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "foo")
},
want: sreconcile.ResultSuccess,
wantRevision: "staging@sha1:<commit>",
wantReconciling: false,
},
}
server, err := gittestserver.NewTempGitServer()
@ -907,7 +864,6 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) {
},
afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
t.Expect(obj.GetArtifact()).ToNot(BeNil())
t.Expect(obj.Status.URL).ToNot(BeEmpty())
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
@ -926,9 +882,8 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) {
},
afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
t.Expect(obj.GetArtifact()).ToNot(BeNil())
t.Expect(obj.GetArtifact().Checksum).To(Equal("60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172"))
t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172"))
t.Expect(obj.Status.IncludedArtifacts).ToNot(BeEmpty())
t.Expect(obj.Status.URL).ToNot(BeEmpty())
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
@ -938,46 +893,21 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) {
{
name: "Up-to-date artifact should not update status",
dir: "testdata/git/repository",
includes: artifactSet{&sourcev1.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Checksum: "some-checksum"}},
includes: artifactSet{&sourcev1.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}},
beforeFunc: func(obj *sourcev1.GitRepository) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
obj.Spec.Include = []sourcev1.GitRepositoryInclude{
{GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}},
}
obj.Status.Artifact = &sourcev1.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"}
obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Checksum: "some-checksum"}}
obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Digest: "some-checksum"}}
obj.Status.ObservedInclude = obj.Spec.Include
},
afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
t.Expect(obj.Status.URL).To(BeEmpty())
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"),
},
},
{
name: "Up-to-date artifact with legacy revision format should not update status",
dir: "testdata/git/repository",
includes: artifactSet{&sourcev1.Artifact{Revision: "main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Checksum: "some-checksum"}},
beforeFunc: func(obj *sourcev1.GitRepository) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
obj.Spec.Include = []sourcev1.GitRepositoryInclude{
{GitRepositoryRef: meta.LocalObjectReference{Name: "foo"}},
}
obj.Status.Artifact = &sourcev1.Artifact{Revision: "main/b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"}
obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main/b9b3feadba509cb9b22e968a5d27e96c2bc2ff91", Checksum: "some-checksum"}}
obj.Status.ObservedInclude = obj.Spec.Include
},
afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
t.Expect(obj.Status.URL).To(BeEmpty())
t.Expect(obj.Status.Artifact.Revision).To(Equal("main/b9b3feadba509cb9b22e968a5d27e96c2bc2ff91"))
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"),
},
},
{
name: "Spec ignore overwrite is taken into account",
dir: "testdata/git/repository",
@ -987,7 +917,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) {
},
afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
t.Expect(obj.GetArtifact()).ToNot(BeNil())
t.Expect(obj.GetArtifact().Checksum).To(Equal("11f7f007dce5619bd79e6c57688261058d09f5271e802463ac39f2b9ead7cabd"))
t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:11f7f007dce5619bd79e6c57688261058d09f5271e802463ac39f2b9ead7cabd"))
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
@ -1002,7 +932,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) {
},
afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
t.Expect(obj.GetArtifact()).ToNot(BeNil())
t.Expect(obj.GetArtifact().Checksum).To(Equal("29186e024dde5a414cfc990829c6b2e85f6b3bd2d950f50ca9f418f5d2261d79"))
t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:29186e024dde5a414cfc990829c6b2e85f6b3bd2d950f50ca9f418f5d2261d79"))
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
@ -1018,28 +948,7 @@ func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) {
},
afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
t.Expect(obj.GetArtifact()).ToNot(BeNil())
t.Expect(obj.GetArtifact().Checksum).To(Equal("60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172"))
t.Expect(obj.Status.URL).ToNot(BeEmpty())
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main@sha1:b9b3feadba509cb9b22e968a5d27e96c2bc2ff91'"),
},
},
{
name: "Creates latest symlink to the created artifact",
dir: "testdata/git/repository",
beforeFunc: func(obj *sourcev1.GitRepository) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
},
afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
t.Expect(obj.GetArtifact()).ToNot(BeNil())
localPath := testStorage.LocalPath(*obj.GetArtifact())
symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz")
targetFile, err := os.Readlink(symlinkPath)
t.Expect(err).NotTo(HaveOccurred())
t.Expect(localPath).To(Equal(targetFile))
t.Expect(obj.GetArtifact().Digest).To(Equal("sha256:60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172"))
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
@ -1333,7 +1242,7 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) {
assertArtifact: &sourcev1.Artifact{
Path: "/reconcile-storage/d.txt",
Revision: "d",
Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
URL: testStorage.Hostname + "/reconcile-storage/d.txt",
Size: int64p(int64(len("d"))),
},
@ -1381,7 +1290,7 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) {
obj.Status.Artifact = &sourcev1.Artifact{
Path: "/reconcile-storage/hostname.txt",
Revision: "f",
Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
URL: "http://outdated.com/reconcile-storage/hostname.txt",
}
if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
@ -1400,7 +1309,7 @@ func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) {
assertArtifact: &sourcev1.Artifact{
Path: "/reconcile-storage/hostname.txt",
Revision: "f",
Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
URL: testStorage.Hostname + "/reconcile-storage/hostname.txt",
Size: int64p(int64(len("file"))),
},
@ -1740,10 +1649,9 @@ func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) {
Finalizers: []string{sourcev1.SourceFinalizer},
},
Spec: sourcev1.GitRepositorySpec{
URL: server.HTTPAddress() + repoPath,
GitImplementation: sourcev1.GoGitImplementation,
Interval: metav1.Duration{Duration: interval},
Timeout: &metav1.Duration{Duration: timeout},
URL: server.HTTPAddress() + repoPath,
Interval: metav1.Duration{Duration: interval},
Timeout: &metav1.Duration{Duration: timeout},
},
}
@ -2052,7 +1960,7 @@ func TestGitRepositoryReconciler_notify(t *testing.T) {
res: sreconcile.ResultSuccess,
resErr: nil,
newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
},
commit: concreteCommit,
wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'",
@ -2062,12 +1970,12 @@ func TestGitRepositoryReconciler_notify(t *testing.T) {
res: sreconcile.ResultSuccess,
resErr: nil,
oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
},
newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
commit: concreteCommit,
@ -2078,12 +1986,12 @@ func TestGitRepositoryReconciler_notify(t *testing.T) {
res: sreconcile.ResultSuccess,
resErr: nil,
oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
},
newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb"}
obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb"}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
commit: concreteCommit,
@ -2094,11 +2002,11 @@ func TestGitRepositoryReconciler_notify(t *testing.T) {
res: sreconcile.ResultSuccess,
resErr: nil,
oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
},
@ -2107,12 +2015,12 @@ func TestGitRepositoryReconciler_notify(t *testing.T) {
res: sreconcile.ResultEmpty,
resErr: noopErr,
oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
},
newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
commit: partialCommit, // no-op will always result in partial commit.
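The fixtures above swap the removed Checksum field for Digest, so event emission is now keyed on digest equality. A minimal, self-contained sketch of that decision, using local stand-in types rather than the actual source-controller API:

package main

import "fmt"

// artifact is a local stand-in for the status artifact fields used in these tests.
type artifact struct {
	Revision string
	Digest   string
}

// hasDigest mirrors a HasDigest-style check: a nil artifact never matches.
func (a *artifact) hasDigest(d string) bool {
	return a != nil && a.Digest == d
}

func main() {
	oldArt := &artifact{Revision: "xxx", Digest: "yyy"}
	newArt := &artifact{Revision: "aaa", Digest: "bbb"}
	// An event is emitted only when the digest changed (new artifact or recovery).
	fmt.Println("emit event:", !oldArt.hasDigest(newArt.Digest)) // emit event: true
}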
@ -2484,11 +2392,11 @@ func TestGitContentConfigChanged(t *testing.T) {
ToPath: "baz",
},
},
IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Checksum: "bbb"}},
IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Digest: "bbb"}},
},
},
artifacts: []*sourcev1.Artifact{
{Revision: "aaa", Checksum: "bbb"},
{Revision: "aaa", Digest: "bbb"},
},
want: false,
},
@ -2512,16 +2420,16 @@ func TestGitContentConfigChanged(t *testing.T) {
ToPath: "baz",
},
},
IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Checksum: "bbb"}},
IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Digest: "bbb"}},
},
},
artifacts: []*sourcev1.Artifact{
{Revision: "ccc", Checksum: "bbb"},
{Revision: "ccc", Digest: "bbb"},
},
want: true,
},
{
name: "observed include but different artifact checksum",
name: "observed include but different artifact digest",
obj: sourcev1.GitRepository{
Spec: sourcev1.GitRepositorySpec{
Include: []sourcev1.GitRepositoryInclude{
@ -2540,11 +2448,11 @@ func TestGitContentConfigChanged(t *testing.T) {
ToPath: "baz",
},
},
IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Checksum: "bbb"}},
IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Digest: "bbb"}},
},
},
artifacts: []*sourcev1.Artifact{
{Revision: "aaa", Checksum: "ddd"},
{Revision: "aaa", Digest: "ddd"},
},
want: true,
},
@ -2568,11 +2476,11 @@ func TestGitContentConfigChanged(t *testing.T) {
ToPath: "baz",
},
},
IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Checksum: "bbb"}},
IncludedArtifacts: []*sourcev1.Artifact{{Revision: "aaa", Digest: "bbb"}},
},
},
artifacts: []*sourcev1.Artifact{
{Revision: "aaa", Checksum: "bbb"},
{Revision: "aaa", Digest: "bbb"},
},
want: true,
},
@ -2595,14 +2503,14 @@ func TestGitContentConfigChanged(t *testing.T) {
},
Status: sourcev1.GitRepositoryStatus{
IncludedArtifacts: []*sourcev1.Artifact{
{Revision: "aaa", Checksum: "bbb"},
{Revision: "ccc", Checksum: "ccc"},
{Revision: "aaa", Digest: "bbb"},
{Revision: "ccc", Digest: "ccc"},
},
},
},
artifacts: []*sourcev1.Artifact{
{Revision: "aaa", Checksum: "bbb"},
{Revision: "ccc", Checksum: "ddd"},
{Revision: "aaa", Digest: "bbb"},
{Revision: "ccc", Digest: "ddd"},
},
want: true,
},
@ -2637,13 +2545,13 @@ func TestGitContentConfigChanged(t *testing.T) {
},
},
IncludedArtifacts: []*sourcev1.Artifact{
{Revision: "aaa", Checksum: "bbb"},
{Revision: "ccc", Checksum: "ccc"},
{Revision: "aaa", Digest: "bbb"},
{Revision: "ccc", Digest: "ccc"},
},
},
},
artifacts: []*sourcev1.Artifact{
{Revision: "aaa", Checksum: "bbb"},
{Revision: "aaa", Digest: "bbb"},
},
want: true,
},
@ -2678,13 +2586,13 @@ func TestGitContentConfigChanged(t *testing.T) {
},
},
IncludedArtifacts: []*sourcev1.Artifact{
{Revision: "aaa", Checksum: "bbb"},
{Revision: "aaa", Digest: "bbb"},
},
},
},
artifacts: []*sourcev1.Artifact{
{Revision: "aaa", Checksum: "bbb"},
{Revision: "ccc", Checksum: "ccc"},
{Revision: "aaa", Digest: "bbb"},
{Revision: "ccc", Digest: "ccc"},
},
want: true,
},
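These table cases check drift between the observed IncludedArtifacts and the artifacts currently produced by the includes, matched by both Revision and Digest. A rough sketch of that comparison with local stand-in types (not the real API):

package main

import "fmt"

type artifact struct{ Revision, Digest string }

// includesChanged reports drift when the lists differ in length, order, revision or digest.
func includesChanged(observed, current []*artifact) bool {
	if len(observed) != len(current) {
		return true
	}
	for i := range observed {
		if observed[i].Revision != current[i].Revision || observed[i].Digest != current[i].Digest {
			return true
		}
	}
	return false
}

func main() {
	observed := []*artifact{{Revision: "aaa", Digest: "bbb"}}
	current := []*artifact{{Revision: "aaa", Digest: "ddd"}}
	fmt.Println(includesChanged(observed, current)) // true: same revision, different digest
}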

View File

@ -63,7 +63,8 @@ import (
rreconcile "github.com/fluxcd/pkg/runtime/reconcile"
"github.com/fluxcd/pkg/untar"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
helmv1 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/internal/cache"
serror "github.com/fluxcd/source-controller/internal/error"
"github.com/fluxcd/source-controller/internal/helm/chart"
@ -153,27 +154,27 @@ type HelmChartReconcilerOptions struct {
// helmChartReconcileFunc is the function type for all the v1beta2.HelmChart
// (sub)reconcile functions. The type implementations are grouped and
// executed serially to perform the complete reconcile of the object.
type helmChartReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error)
type helmChartReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmChart, build *chart.Build) (sreconcile.Result, error)
func (r *HelmChartReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmChartReconcilerOptions) error {
r.patchOptions = getPatchOptions(helmChartReadyCondition.Owned, r.ControllerName)
if err := mgr.GetCache().IndexField(context.TODO(), &sourcev1.HelmRepository{}, sourcev1.HelmRepositoryURLIndexKey,
if err := mgr.GetCache().IndexField(context.TODO(), &helmv1.HelmRepository{}, helmv1.HelmRepositoryURLIndexKey,
r.indexHelmRepositoryByURL); err != nil {
return fmt.Errorf("failed setting index fields: %w", err)
}
if err := mgr.GetCache().IndexField(context.TODO(), &sourcev1.HelmChart{}, sourcev1.SourceIndexKey,
if err := mgr.GetCache().IndexField(context.TODO(), &helmv1.HelmChart{}, sourcev1.SourceIndexKey,
r.indexHelmChartBySource); err != nil {
return fmt.Errorf("failed setting index fields: %w", err)
}
recoverPanic := true
return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.HelmChart{}, builder.WithPredicates(
For(&helmv1.HelmChart{}, builder.WithPredicates(
predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
)).
Watches(
&source.Kind{Type: &sourcev1.HelmRepository{}},
&source.Kind{Type: &helmv1.HelmRepository{}},
handler.EnqueueRequestsFromMapFunc(r.requestsForHelmRepositoryChange),
builder.WithPredicates(SourceRevisionChangePredicate{}),
).
@ -183,7 +184,7 @@ func (r *HelmChartReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts
builder.WithPredicates(SourceRevisionChangePredicate{}),
).
Watches(
&source.Kind{Type: &sourcev1.Bucket{}},
&source.Kind{Type: &helmv1.Bucket{}},
handler.EnqueueRequestsFromMapFunc(r.requestsForBucketChange),
builder.WithPredicates(SourceRevisionChangePredicate{}),
).
@ -200,7 +201,7 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
log := ctrl.LoggerFrom(ctx)
// Fetch the HelmChart
obj := &sourcev1.HelmChart{}
obj := &helmv1.HelmChart{}
if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
@ -272,7 +273,7 @@ func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
// reconcile iterates through the helmChartReconcileFunc tasks for the
// object. It returns early on the first call that returns
// reconcile.ResultRequeue, or produces an error.
func (r *HelmChartReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, reconcilers []helmChartReconcileFunc) (sreconcile.Result, error) {
func (r *HelmChartReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmChart, reconcilers []helmChartReconcileFunc) (sreconcile.Result, error) {
oldObj := obj.DeepCopy()
rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress")
@ -325,25 +326,17 @@ func (r *HelmChartReconciler) reconcile(ctx context.Context, sp *patch.SerialPat
}
// notify emits notification related to the reconciliation.
func (r *HelmChartReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.HelmChart, build *chart.Build, res sreconcile.Result, resErr error) {
func (r *HelmChartReconciler) notify(ctx context.Context, oldObj, newObj *helmv1.HelmChart, build *chart.Build, res sreconcile.Result, resErr error) {
// Notify successful reconciliation for new artifact and recovery from any
// failure.
if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
annotations := map[string]string{
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision,
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaChecksumKey): newObj.Status.Artifact.Checksum,
}
if newObj.Status.Artifact.Digest != "" {
annotations[sourcev1.GroupVersion.Group+"/"+eventv1.MetaDigestKey] = newObj.Status.Artifact.Digest
}
var oldChecksum string
if oldObj.GetArtifact() != nil {
oldChecksum = oldObj.GetArtifact().Checksum
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest,
}
// Notify on new artifact and failure recovery.
if oldChecksum != newObj.GetArtifact().Checksum {
if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) {
r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
reasonForBuild(build), build.Summary())
ctrl.LoggerFrom(ctx).Info(build.Summary())
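The hunk above drops the legacy checksum annotation and always populates the digest annotation from Artifact.Digest. A hedged sketch of the resulting event metadata; the literal key strings are an assumption for illustration (the controller derives them from the API group and the events API constants):

package main

import "fmt"

// buildEventAnnotations sketches the metadata attached to the NewArtifact event after
// this change: revision plus digest, with no separate checksum entry.
func buildEventAnnotations(revision, digest string) map[string]string {
	return map[string]string{
		"source.toolkit.fluxcd.io/revision": revision, // assumed "<group>/<key>" layout
		"source.toolkit.fluxcd.io/digest":   digest,
	}
}

func main() {
	fmt.Println(buildEventAnnotations("1.2.3", "sha256:deadbeef"))
}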
@ -369,7 +362,7 @@ func (r *HelmChartReconciler) notify(ctx context.Context, oldObj, newObj *source
// condition is added.
// The hostname of any URL in the Status of the object is updated, to ensure
// it matches the Storage server hostname of the current runtime.
func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, build *chart.Build) (sreconcile.Result, error) {
func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmChart, build *chart.Build) (sreconcile.Result, error) {
// Garbage collect previous advertised artifact(s) from storage
_ = r.garbageCollect(ctx, obj)
@ -405,7 +398,7 @@ func (r *HelmChartReconciler) reconcileStorage(ctx context.Context, sp *patch.Se
return sreconcile.ResultSuccess, nil
}
func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmChart, build *chart.Build) (_ sreconcile.Result, retErr error) {
func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmChart, build *chart.Build) (_ sreconcile.Result, retErr error) {
// Remove any failed verification condition.
// The reason is that a failing verification should be recalculated.
if conditions.IsFalse(obj, sourcev1.SourceVerifiedCondition) {
@ -435,7 +428,7 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.Ser
// Assert source has an artifact
if s.GetArtifact() == nil || !r.Storage.ArtifactExist(*s.GetArtifact()) {
// Set the condition to indicate that the source has no artifact for all types except OCI HelmRepository
if helmRepo, ok := s.(*sourcev1.HelmRepository); !ok || helmRepo.Spec.Type != sourcev1.HelmRepositoryTypeOCI {
if helmRepo, ok := s.(*helmv1.HelmRepository); !ok || helmRepo.Spec.Type != helmv1.HelmRepositoryTypeOCI {
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "NoSourceArtifact",
"no artifact available for %s source '%s'", obj.Spec.SourceRef.Kind, obj.Spec.SourceRef.Name)
r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "NoSourceArtifact",
@ -482,9 +475,9 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.Ser
// Perform the build for the chart source type
switch typedSource := s.(type) {
case *sourcev1.HelmRepository:
case *helmv1.HelmRepository:
return r.buildFromHelmRepository(ctx, obj, typedSource, build)
case *sourcev1.GitRepository, *sourcev1.Bucket:
case *sourcev1.GitRepository, *helmv1.Bucket:
return r.buildFromTarballArtifact(ctx, obj, *typedSource.GetArtifact(), build)
default:
// Ending up here should generally not be possible
@ -498,8 +491,8 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.Ser
// objects.
// In case of a failure it records v1beta2.FetchFailedCondition on the chart
// object, and returns early.
func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj *sourcev1.HelmChart,
repo *sourcev1.HelmRepository, b *chart.Build) (sreconcile.Result, error) {
func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj *helmv1.HelmChart,
repo *helmv1.HelmRepository, b *chart.Build) (sreconcile.Result, error) {
var (
tlsConfig *tls.Config
authenticator authn.Authenticator
@ -555,7 +548,7 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj *
// Requeue as content of secret might change
return sreconcile.ResultEmpty, e
}
} else if repo.Spec.Provider != sourcev1.GenericOCIProvider && repo.Spec.Type == sourcev1.HelmRepositoryTypeOCI {
} else if repo.Spec.Provider != helmv1.GenericOCIProvider && repo.Spec.Type == helmv1.HelmRepositoryTypeOCI {
auth, authErr := oidcAuth(ctxTimeout, repo.Spec.URL, repo.Spec.Provider)
if authErr != nil && !errors.Is(authErr, oci.ErrUnconfiguredProvider) {
e := &serror.Event{
@ -583,7 +576,7 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj *
// Initialize the chart repository
var chartRepo repository.Downloader
switch repo.Spec.Type {
case sourcev1.HelmRepositoryTypeOCI:
case helmv1.HelmRepositoryTypeOCI:
if !helmreg.IsOCI(normalizedURL) {
err := fmt.Errorf("invalid OCI registry URL: %s", normalizedURL)
return chartRepoConfigErrorReturn(err, obj)
@ -725,7 +718,7 @@ func (r *HelmChartReconciler) buildFromHelmRepository(ctx context.Context, obj *
// v1beta2.Artifact.
// In case of a failure it records v1beta2.FetchFailedCondition on the chart
// object, and returns early.
func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj *sourcev1.HelmChart, source sourcev1.Artifact, b *chart.Build) (sreconcile.Result, error) {
func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj *helmv1.HelmChart, source sourcev1.Artifact, b *chart.Build) (sreconcile.Result, error) {
// Create temporary working directory
tmpDir, err := util.TempDirForObj("", obj)
if err != nil {
@ -795,17 +788,17 @@ func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj
}
// Configure revision metadata for chart build if we should react to revision changes
if obj.Spec.ReconcileStrategy == sourcev1.ReconcileStrategyRevision {
if obj.Spec.ReconcileStrategy == helmv1.ReconcileStrategyRevision {
rev := source.Revision
if obj.Spec.SourceRef.Kind == sourcev1.GitRepositoryKind {
rev = git.ExtractHashFromRevision(rev).String()
}
if obj.Spec.SourceRef.Kind == sourcev1.BucketKind {
if dig := digest.Digest(sourcev1.TransformLegacyRevision(rev)); dig.Validate() == nil {
rev = dig.Hex()
if obj.Spec.SourceRef.Kind == helmv1.BucketKind {
if dig := digest.Digest(rev); dig.Validate() == nil {
rev = dig.Encoded()
}
}
if kind := obj.Spec.SourceRef.Kind; kind == sourcev1.GitRepositoryKind || kind == sourcev1.BucketKind {
if kind := obj.Spec.SourceRef.Kind; kind == sourcev1.GitRepositoryKind || kind == helmv1.BucketKind {
// The SemVer from the metadata is at times used in e.g. the label metadata for a resource
// in a chart, which has a limited length of 63 characters.
// To not fill most of this space with a full length SHA hex (40 characters for SHA-1, and
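For Bucket sources the revision is now expected to be a canonical digest string and is shortened with Encoded() instead of the removed TransformLegacyRevision helper. A small sketch of that shortening using the go-digest library (the sample value is borrowed from the test fixtures further down):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// shortRevision keeps only the encoded (hex) part of a canonical digest revision and
// leaves anything that does not validate as a digest untouched.
func shortRevision(rev string) string {
	if d := digest.Digest(rev); d.Validate() == nil {
		return d.Encoded()
	}
	return rev
}

func main() {
	fmt.Println(shortRevision("sha256:80bb3dd67c63095d985850459834ea727603727a370079de90d221191d375a86"))
	fmt.Println(shortRevision("main@abc123")) // not a canonical digest: returned as-is
}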
@ -852,7 +845,7 @@ func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj
// early.
// On a successful archive, the Artifact in the Status of the object is set,
// and the symlink in the Storage is updated to its path.
func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, _ *patch.SerialPatcher, obj *sourcev1.HelmChart, b *chart.Build) (sreconcile.Result, error) {
func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, _ *patch.SerialPatcher, obj *helmv1.HelmChart, b *chart.Build) (sreconcile.Result, error) {
// Without a complete chart build, there is little to reconcile
if !b.Complete() {
return sreconcile.ResultRequeue, nil
@ -927,15 +920,15 @@ func (r *HelmChartReconciler) reconcileArtifact(ctx context.Context, _ *patch.Se
// getSource returns the v1beta1.Source for the given object, or an error describing why the source could not be
// returned.
func (r *HelmChartReconciler) getSource(ctx context.Context, obj *sourcev1.HelmChart) (sourcev1.Source, error) {
func (r *HelmChartReconciler) getSource(ctx context.Context, obj *helmv1.HelmChart) (sourcev1.Source, error) {
namespacedName := types.NamespacedName{
Namespace: obj.GetNamespace(),
Name: obj.Spec.SourceRef.Name,
}
var s sourcev1.Source
switch obj.Spec.SourceRef.Kind {
case sourcev1.HelmRepositoryKind:
var repo sourcev1.HelmRepository
case helmv1.HelmRepositoryKind:
var repo helmv1.HelmRepository
if err := r.Client.Get(ctx, namespacedName, &repo); err != nil {
return nil, err
}
@ -946,15 +939,15 @@ func (r *HelmChartReconciler) getSource(ctx context.Context, obj *sourcev1.HelmC
return nil, err
}
s = &repo
case sourcev1.BucketKind:
var bucket sourcev1.Bucket
case helmv1.BucketKind:
var bucket helmv1.Bucket
if err := r.Client.Get(ctx, namespacedName, &bucket); err != nil {
return nil, err
}
s = &bucket
default:
return nil, fmt.Errorf("unsupported source kind '%s', must be one of: %v", obj.Spec.SourceRef.Kind, []string{
sourcev1.HelmRepositoryKind, sourcev1.GitRepositoryKind, sourcev1.BucketKind})
helmv1.HelmRepositoryKind, sourcev1.GitRepositoryKind, helmv1.BucketKind})
}
return s, nil
}
@ -962,7 +955,7 @@ func (r *HelmChartReconciler) getSource(ctx context.Context, obj *sourcev1.HelmC
// reconcileDelete handles the deletion of the object.
// It first garbage collects all Artifacts for the object from the Storage.
// If successful, the finalizer is removed from the object.
func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmChart) (sreconcile.Result, error) {
func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, obj *helmv1.HelmChart) (sreconcile.Result, error) {
// Garbage collect the resource's artifacts
if err := r.garbageCollect(ctx, obj); err != nil {
// Return the error so we retry the failed garbage collection
@ -981,7 +974,7 @@ func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, obj *sourcev1
// It removes all but the current Artifact from the Storage, unless the
// deletion timestamp on the object is set, in which case all Artifacts
// for the object are removed.
func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmChart) error {
func (r *HelmChartReconciler) garbageCollect(ctx context.Context, obj *helmv1.HelmChart) error {
if !obj.DeletionTimestamp.IsZero() {
if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
return &serror.Event{
@ -1034,8 +1027,8 @@ func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Cont
if apierrs.ReasonForError(err) != metav1.StatusReasonUnknown {
return nil, err
}
obj = &sourcev1.HelmRepository{
Spec: sourcev1.HelmRepositorySpec{
obj = &helmv1.HelmRepository{
Spec: helmv1.HelmRepositorySpec{
URL: url,
Timeout: &metav1.Duration{Duration: 60 * time.Second},
},
@ -1070,7 +1063,7 @@ func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Cont
return nil, fmt.Errorf("failed to create login options for HelmRepository '%s': %w", obj.Name, err)
}
} else if obj.Spec.Provider != sourcev1.GenericOCIProvider && obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI {
} else if obj.Spec.Provider != helmv1.GenericOCIProvider && obj.Spec.Type == helmv1.HelmRepositoryTypeOCI {
auth, authErr := oidcAuth(ctxTimeout, obj.Spec.URL, obj.Spec.Provider)
if authErr != nil && !errors.Is(authErr, oci.ErrUnconfiguredProvider) {
return nil, fmt.Errorf("failed to get credential from %s: %w", obj.Spec.Provider, authErr)
@ -1155,13 +1148,13 @@ func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Cont
}
}
func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, url string, namespace string) (*sourcev1.HelmRepository, error) {
func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, url string, namespace string) (*helmv1.HelmRepository, error) {
listOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingFields{sourcev1.HelmRepositoryURLIndexKey: url},
client.MatchingFields{helmv1.HelmRepositoryURLIndexKey: url},
client.Limit(1),
}
var list sourcev1.HelmRepositoryList
var list helmv1.HelmRepositoryList
err := r.Client.List(ctx, &list, listOpts...)
if err != nil {
return nil, fmt.Errorf("unable to retrieve HelmRepositoryList: %w", err)
@ -1186,7 +1179,7 @@ func (r *HelmChartReconciler) clientOptionsFromSecret(secret *corev1.Secret, nor
return opts, tlsConfig, nil
}
func (r *HelmChartReconciler) getHelmRepositorySecret(ctx context.Context, repository *sourcev1.HelmRepository) (*corev1.Secret, error) {
func (r *HelmChartReconciler) getHelmRepositorySecret(ctx context.Context, repository *helmv1.HelmRepository) (*corev1.Secret, error) {
if repository.Spec.SecretRef == nil {
return nil, nil
}
@ -1203,7 +1196,7 @@ func (r *HelmChartReconciler) getHelmRepositorySecret(ctx context.Context, repos
}
func (r *HelmChartReconciler) indexHelmRepositoryByURL(o client.Object) []string {
repo, ok := o.(*sourcev1.HelmRepository)
repo, ok := o.(*helmv1.HelmRepository)
if !ok {
panic(fmt.Sprintf("Expected a HelmRepository, got %T", o))
}
@ -1215,7 +1208,7 @@ func (r *HelmChartReconciler) indexHelmRepositoryByURL(o client.Object) []string
}
func (r *HelmChartReconciler) indexHelmChartBySource(o client.Object) []string {
hc, ok := o.(*sourcev1.HelmChart)
hc, ok := o.(*helmv1.HelmChart)
if !ok {
panic(fmt.Sprintf("Expected a HelmChart, got %T", o))
}
@ -1223,7 +1216,7 @@ func (r *HelmChartReconciler) indexHelmChartBySource(o client.Object) []string {
}
func (r *HelmChartReconciler) requestsForHelmRepositoryChange(o client.Object) []reconcile.Request {
repo, ok := o.(*sourcev1.HelmRepository)
repo, ok := o.(*helmv1.HelmRepository)
if !ok {
panic(fmt.Sprintf("Expected a HelmRepository, got %T", o))
}
@ -1233,9 +1226,9 @@ func (r *HelmChartReconciler) requestsForHelmRepositoryChange(o client.Object) [
}
ctx := context.Background()
var list sourcev1.HelmChartList
var list helmv1.HelmChartList
if err := r.List(ctx, &list, client.MatchingFields{
sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.HelmRepositoryKind, repo.Name),
sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", helmv1.HelmRepositoryKind, repo.Name),
}); err != nil {
return nil
}
@ -1260,7 +1253,7 @@ func (r *HelmChartReconciler) requestsForGitRepositoryChange(o client.Object) []
return nil
}
var list sourcev1.HelmChartList
var list helmv1.HelmChartList
if err := r.List(context.TODO(), &list, client.MatchingFields{
sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.GitRepositoryKind, repo.Name),
}); err != nil {
@ -1277,7 +1270,7 @@ func (r *HelmChartReconciler) requestsForGitRepositoryChange(o client.Object) []
}
func (r *HelmChartReconciler) requestsForBucketChange(o client.Object) []reconcile.Request {
bucket, ok := o.(*sourcev1.Bucket)
bucket, ok := o.(*helmv1.Bucket)
if !ok {
panic(fmt.Sprintf("Expected a Bucket, got %T", o))
}
@ -1287,9 +1280,9 @@ func (r *HelmChartReconciler) requestsForBucketChange(o client.Object) []reconci
return nil
}
var list sourcev1.HelmChartList
var list helmv1.HelmChartList
if err := r.List(context.TODO(), &list, client.MatchingFields{
sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.BucketKind, bucket.Name),
sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", helmv1.BucketKind, bucket.Name),
}); err != nil {
return nil
}
@ -1320,7 +1313,7 @@ func (r *HelmChartReconciler) eventLogf(ctx context.Context, obj runtime.Object,
}
// observeChartBuild records the observation on the given build and error on the object.
func observeChartBuild(ctx context.Context, sp *patch.SerialPatcher, pOpts []patch.Option, obj *sourcev1.HelmChart, build *chart.Build, err error) {
func observeChartBuild(ctx context.Context, sp *patch.SerialPatcher, pOpts []patch.Option, obj *helmv1.HelmChart, build *chart.Build, err error) {
if build.HasMetadata() {
if build.Name != obj.Status.ObservedChartName || !obj.GetArtifact().HasRevision(build.Version) {
if obj.GetArtifact() != nil {
@ -1373,12 +1366,12 @@ func reasonForBuild(build *chart.Build) string {
return ""
}
if build.Packaged {
return sourcev1.ChartPackageSucceededReason
return helmv1.ChartPackageSucceededReason
}
return sourcev1.ChartPullSucceededReason
return helmv1.ChartPullSucceededReason
}
func chartRepoConfigErrorReturn(err error, obj *sourcev1.HelmChart) (sreconcile.Result, error) {
func chartRepoConfigErrorReturn(err error, obj *helmv1.HelmChart) (sreconcile.Result, error) {
switch err.(type) {
case *url.Error:
e := &serror.Stalling{
@ -1398,7 +1391,7 @@ func chartRepoConfigErrorReturn(err error, obj *sourcev1.HelmChart) (sreconcile.
}
// makeVerifiers returns a list of verifiers for the given chart.
func (r *HelmChartReconciler) makeVerifiers(ctx context.Context, obj *sourcev1.HelmChart, auth authn.Authenticator, keychain authn.Keychain) ([]soci.Verifier, error) {
func (r *HelmChartReconciler) makeVerifiers(ctx context.Context, obj *helmv1.HelmChart, auth authn.Authenticator, keychain authn.Keychain) ([]soci.Verifier, error) {
var verifiers []soci.Verifier
verifyOpts := []remote.Option{}
if auth != nil {

File diff suppressed because it is too large

View File

@ -46,7 +46,8 @@ import (
"github.com/fluxcd/pkg/runtime/predicates"
rreconcile "github.com/fluxcd/pkg/runtime/reconcile"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
helmv1 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/internal/cache"
intdigest "github.com/fluxcd/source-controller/internal/digest"
serror "github.com/fluxcd/source-controller/internal/error"
@ -125,7 +126,7 @@ type HelmRepositoryReconcilerOptions struct {
// v1beta2.HelmRepository (sub)reconcile functions. The type implementations
// are grouped and executed serially to perform the complete reconcile of the
// object.
type helmRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error)
type helmRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error)
func (r *HelmRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error {
return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{})
@ -136,11 +137,11 @@ func (r *HelmRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager,
recoverPanic := true
return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.HelmRepository{}).
For(&helmv1.HelmRepository{}).
WithEventFilter(
predicate.And(
predicate.Or(
intpredicates.HelmRepositoryTypePredicate{RepositoryType: sourcev1.HelmRepositoryTypeDefault},
intpredicates.HelmRepositoryTypePredicate{RepositoryType: helmv1.HelmRepositoryTypeDefault},
intpredicates.HelmRepositoryTypePredicate{RepositoryType: ""},
),
predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
@ -159,7 +160,7 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque
log := ctrl.LoggerFrom(ctx)
// Fetch the HelmRepository
obj := &sourcev1.HelmRepository{}
obj := &helmv1.HelmRepository{}
if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
@ -206,7 +207,7 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque
// Examine if the object is under deletion
// or if a type change has happened
if !obj.ObjectMeta.DeletionTimestamp.IsZero() || (obj.Spec.Type != "" && obj.Spec.Type != sourcev1.HelmRepositoryTypeDefault) {
if !obj.ObjectMeta.DeletionTimestamp.IsZero() || (obj.Spec.Type != "" && obj.Spec.Type != helmv1.HelmRepositoryTypeDefault) {
recResult, retErr = r.reconcileDelete(ctx, obj)
return
}
@ -232,7 +233,7 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque
// object. It returns early on the first call that returns
// reconcile.ResultRequeue, or produces an error.
func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher,
obj *sourcev1.HelmRepository, reconcilers []helmRepositoryReconcileFunc) (sreconcile.Result, error) {
obj *helmv1.HelmRepository, reconcilers []helmRepositoryReconcileFunc) (sreconcile.Result, error) {
oldObj := obj.DeepCopy()
rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress")
@ -285,16 +286,13 @@ func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seri
}
// notify emits notification related to the reconciliation.
func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.HelmRepository, chartRepo *repository.ChartRepository, res sreconcile.Result, resErr error) {
func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *helmv1.HelmRepository, chartRepo *repository.ChartRepository, res sreconcile.Result, resErr error) {
// Notify successful reconciliation for new artifact and recovery from any
// failure.
if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
annotations := map[string]string{
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision,
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaChecksumKey): newObj.Status.Artifact.Checksum,
}
if newObj.Status.Artifact.Digest != "" {
annotations[sourcev1.GroupVersion.Group+"/"+eventv1.MetaDigestKey] = newObj.Status.Artifact.Digest
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest,
}
humanReadableSize := "unknown size"
@ -302,15 +300,10 @@ func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *s
humanReadableSize = fmt.Sprintf("size %s", units.HumanSize(float64(*size)))
}
var oldChecksum string
if oldObj.GetArtifact() != nil {
oldChecksum = oldObj.GetArtifact().Checksum
}
message := fmt.Sprintf("stored fetched index of %s from '%s'", humanReadableSize, chartRepo.URL)
// Notify on new artifact and failure recovery.
if oldChecksum != newObj.GetArtifact().Checksum {
if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) {
r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
"NewArtifact", message)
ctrl.LoggerFrom(ctx).Info(message)
@ -337,7 +330,7 @@ func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *s
// The hostname of any URL in the Status of the object is updated, to ensure
// it matches the Storage server hostname of the current runtime.
func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher,
obj *sourcev1.HelmRepository, _ *sourcev1.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) {
obj *helmv1.HelmRepository, _ *sourcev1.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) {
// Garbage collect previous advertised artifact(s) from storage
_ = r.garbageCollect(ctx, obj)
@ -382,7 +375,7 @@ func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *pat
// v1beta2.FetchFailedCondition is removed, and the repository.ChartRepository
// pointer is set to the newly fetched index.
func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher,
obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) {
obj *helmv1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) {
var tlsConfig *tls.Config
// Configure Helm client to access repository
@ -470,9 +463,6 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc
// Early comparison to current Artifact.
if curArtifact := obj.GetArtifact(); curArtifact != nil {
curDig := digest.Digest(curArtifact.Digest)
if curDig == "" {
curDig = digest.Digest(sourcev1.TransformLegacyRevision(curArtifact.Checksum))
}
if curDig.Validate() == nil {
// Short-circuit based on the fetched index being an exact match to the
// stored Artifact.
@ -488,7 +478,7 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc
if err := chartRepo.LoadFromPath(); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to load Helm repository from index YAML: %w", err),
Reason: sourcev1.IndexationFailedReason,
Reason: helmv1.IndexationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
@ -499,7 +489,7 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc
// Check if index has changed compared to current Artifact revision.
var changed bool
if artifact := obj.Status.Artifact; artifact != nil {
curRev := digest.Digest(sourcev1.TransformLegacyRevision(artifact.Revision))
curRev := digest.Digest(artifact.Revision)
changed = curRev.Validate() != nil || curRev != chartRepo.Digest(curRev.Algorithm())
}
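With the TransformLegacyRevision fallback removed, the stored revision is treated as a canonical digest and the index is re-hashed with the same algorithm to detect changes. A sketch of that check, with a hypothetical indexDigest function standing in for ChartRepository.Digest:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// indexChanged reports a change when the stored revision is not a valid digest
// (e.g. a legacy bare checksum) or when re-hashing the index with the same
// algorithm yields a different digest.
func indexChanged(storedRevision string, indexDigest func(digest.Algorithm) digest.Digest) bool {
	cur := digest.Digest(storedRevision)
	return cur.Validate() != nil || cur != indexDigest(cur.Algorithm())
}

func main() {
	fake := func(algo digest.Algorithm) digest.Digest { return algo.FromString("fake index body") }
	fmt.Println(indexChanged(fake(digest.SHA256).String(), fake)) // false: unchanged
	fmt.Println(indexChanged("not-a-digest", fake))               // true: legacy or garbage revision
}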
@ -508,7 +498,7 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc
if revision.Validate() != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to calculate revision: %w", err),
Reason: sourcev1.IndexationFailedReason,
Reason: helmv1.IndexationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
@ -531,7 +521,7 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc
*artifact = r.Storage.NewArtifactFor(obj.Kind,
obj.ObjectMeta.GetObjectMeta(),
revision.String(),
fmt.Sprintf("index-%s.yaml", revision.Hex()),
fmt.Sprintf("index-%s.yaml", revision.Encoded()),
)
return sreconcile.ResultSuccess, nil
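The artifact file name now uses only the encoded part of the revision digest, while the full "<algorithm>:<hex>" form stays in Revision. A quick illustration with a hypothetical index body:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	rev := digest.SHA256.FromString("apiVersion: v1\nentries: {}\n") // hypothetical index content
	fmt.Println(rev.String())                    // full revision: "sha256:<hex>"
	fmt.Printf("index-%s.yaml\n", rev.Encoded()) // file name keeps only "<hex>"
}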
@ -546,7 +536,7 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc
// early.
// On a successful archive, the Artifact in the Status of the object is set,
// and the symlink in the Storage is updated to its path.
func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) {
func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) {
// Set the ArtifactInStorageCondition if there's no drift.
defer func() {
if obj.GetArtifact().HasRevision(artifact.Revision) {
@ -559,7 +549,7 @@ func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pa
}
}()
if obj.GetArtifact().HasRevision(artifact.Revision) && obj.GetArtifact().HasChecksum(artifact.Checksum) {
if obj.GetArtifact().HasRevision(artifact.Revision) && obj.GetArtifact().HasDigest(artifact.Digest) {
// Extend TTL of the Index in the cache (if present).
if r.Cache != nil {
r.Cache.SetExpiration(artifact.Path, r.TTL)
@ -629,7 +619,7 @@ func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pa
// reconcileDelete handles the deletion of the object.
// It first garbage collects all Artifacts for the object from the Storage.
// If successful, the finalizer is removed from the object.
func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmRepository) (sreconcile.Result, error) {
func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *helmv1.HelmRepository) (sreconcile.Result, error) {
// Garbage collect the resource's artifacts
if err := r.garbageCollect(ctx, obj); err != nil {
// Return the error so we retry the failed garbage collection
@ -651,8 +641,8 @@ func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sou
// - the deletion timestamp on the object is set
// - the obj.Spec.Type has changed and artifacts are not supported by the new type
// Either case results in the removal of all Artifacts for the object.
func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmRepository) error {
if !obj.DeletionTimestamp.IsZero() || (obj.Spec.Type != "" && obj.Spec.Type != sourcev1.HelmRepositoryTypeDefault) {
func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *helmv1.HelmRepository) error {
if !obj.DeletionTimestamp.IsZero() || (obj.Spec.Type != "" && obj.Spec.Type != helmv1.HelmRepositoryTypeDefault) {
if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
return &serror.Event{
Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err),

View File

@ -49,8 +49,8 @@ import (
"github.com/fluxcd/pkg/runtime/predicates"
rreconcile "github.com/fluxcd/pkg/runtime/reconcile"
"github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
helmv1 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/internal/helm/registry"
"github.com/fluxcd/source-controller/internal/helm/repository"
"github.com/fluxcd/source-controller/internal/object"
@ -106,10 +106,10 @@ func (r *HelmRepositoryOCIReconciler) SetupWithManagerAndOptions(mgr ctrl.Manage
recoverPanic := true
return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.HelmRepository{}).
For(&helmv1.HelmRepository{}).
WithEventFilter(
predicate.And(
intpredicates.HelmRepositoryTypePredicate{RepositoryType: sourcev1.HelmRepositoryTypeOCI},
intpredicates.HelmRepositoryTypePredicate{RepositoryType: helmv1.HelmRepositoryTypeOCI},
predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
),
).
@ -126,7 +126,7 @@ func (r *HelmRepositoryOCIReconciler) Reconcile(ctx context.Context, req ctrl.Re
log := ctrl.LoggerFrom(ctx)
// Fetch the HelmRepository
obj := &sourcev1.HelmRepository{}
obj := &helmv1.HelmRepository{}
if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
@ -196,7 +196,7 @@ func (r *HelmRepositoryOCIReconciler) Reconcile(ctx context.Context, req ctrl.Re
}
// Examine if a type change has happened and act accordingly
if obj.Spec.Type != sourcev1.HelmRepositoryTypeOCI {
if obj.Spec.Type != helmv1.HelmRepositoryTypeOCI {
// Remove any stale condition and ignore the object if the type has
// changed.
obj.Status.Conditions = nil
@ -213,7 +213,7 @@ func (r *HelmRepositoryOCIReconciler) Reconcile(ctx context.Context, req ctrl.Re
// status conditions and the returned results are evaluated in the deferred
// block at the very end to summarize the conditions to be in a consistent
// state.
func (r *HelmRepositoryOCIReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *v1beta2.HelmRepository) (result ctrl.Result, retErr error) {
func (r *HelmRepositoryOCIReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmRepository) (result ctrl.Result, retErr error) {
ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
defer cancel()
@ -320,7 +320,7 @@ func (r *HelmRepositoryOCIReconciler) reconcile(ctx context.Context, sp *patch.S
result, retErr = ctrl.Result{}, err
return
}
} else if obj.Spec.Provider != sourcev1.GenericOCIProvider && obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI {
} else if obj.Spec.Provider != helmv1.GenericOCIProvider && obj.Spec.Type == helmv1.HelmRepositoryTypeOCI {
auth, authErr := oidcAuth(ctxTimeout, obj.Spec.URL, obj.Spec.Provider)
if authErr != nil && !errors.Is(authErr, oci.ErrUnconfiguredProvider) {
e := fmt.Errorf("failed to get credential from %s: %w", obj.Spec.Provider, authErr)
@ -387,7 +387,7 @@ func (r *HelmRepositoryOCIReconciler) reconcile(ctx context.Context, sp *patch.S
return
}
func (r *HelmRepositoryOCIReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmRepository) (ctrl.Result, error) {
func (r *HelmRepositoryOCIReconciler) reconcileDelete(ctx context.Context, obj *helmv1.HelmRepository) (ctrl.Result, error) {
// Remove our finalizer from the list
controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)
@ -413,7 +413,7 @@ func (r *HelmRepositoryOCIReconciler) eventLogf(ctx context.Context, obj runtime
// authFromSecret returns an authn.Keychain for the given HelmRepository.
// If the HelmRepository does not specify a secretRef, an anonymous keychain is returned.
func authFromSecret(ctx context.Context, client client.Client, obj *sourcev1.HelmRepository) (authn.Keychain, error) {
func authFromSecret(ctx context.Context, client client.Client, obj *helmv1.HelmRepository) (authn.Keychain, error) {
// Attempt to retrieve secret.
name := types.NamespacedName{
Namespace: obj.GetNamespace(),

View File

@ -36,7 +36,8 @@ import (
conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check"
"github.com/fluxcd/pkg/runtime/patch"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
helmv1 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/internal/helm/registry"
)
@ -89,19 +90,19 @@ func TestHelmRepositoryOCIReconciler_Reconcile(t *testing.T) {
g.Expect(testEnv.CreateAndWait(ctx, secret)).To(Succeed())
origObj := &sourcev1.HelmRepository{
origObj := &helmv1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "helmrepository-oci-reconcile-",
Namespace: ns.Name,
},
Spec: sourcev1.HelmRepositorySpec{
Spec: helmv1.HelmRepositorySpec{
Interval: metav1.Duration{Duration: interval},
URL: fmt.Sprintf("oci://%s", testRegistryServer.registryHost),
SecretRef: &meta.LocalObjectReference{
Name: secret.Name,
},
Provider: sourcev1.GenericOCIProvider,
Type: sourcev1.HelmRepositoryTypeOCI,
Provider: helmv1.GenericOCIProvider,
Type: helmv1.HelmRepositoryTypeOCI,
},
}
obj := origObj.DeepCopy()
@ -249,16 +250,16 @@ func TestHelmRepositoryOCIReconciler_authStrategy(t *testing.T) {
server, err := setupRegistryServer(ctx, workspaceDir, tt.registryOpts)
g.Expect(err).NotTo(HaveOccurred())
obj := &sourcev1.HelmRepository{
obj := &helmv1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "auth-strategy-",
Generation: 1,
},
Spec: sourcev1.HelmRepositorySpec{
Spec: helmv1.HelmRepositorySpec{
Interval: metav1.Duration{Duration: interval},
Timeout: &metav1.Duration{Duration: timeout},
Type: sourcev1.HelmRepositoryTypeOCI,
Provider: sourcev1.GenericOCIProvider,
Type: helmv1.HelmRepositoryTypeOCI,
Provider: helmv1.GenericOCIProvider,
URL: fmt.Sprintf("oci://%s", server.registryHost),
},
}

View File

@ -47,7 +47,8 @@ import (
conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check"
"github.com/fluxcd/pkg/runtime/patch"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
helmv1 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/internal/cache"
intdigest "github.com/fluxcd/source-controller/internal/digest"
"github.com/fluxcd/source-controller/internal/helm/getter"
@ -69,12 +70,12 @@ func TestHelmRepositoryReconciler_Reconcile(t *testing.T) {
testServer.Start()
defer testServer.Stop()
origObj := &sourcev1.HelmRepository{
origObj := &helmv1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "helmrepository-reconcile-",
Namespace: "default",
},
Spec: sourcev1.HelmRepositorySpec{
Spec: helmv1.HelmRepositorySpec{
Interval: metav1.Duration{Duration: interval},
URL: testServer.URL(),
},
@ -135,7 +136,7 @@ func TestHelmRepositoryReconciler_Reconcile(t *testing.T) {
func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) {
tests := []struct {
name string
beforeFunc func(obj *sourcev1.HelmRepository, storage *Storage) error
beforeFunc func(obj *helmv1.HelmRepository, storage *Storage) error
want sreconcile.Result
wantErr bool
assertArtifact *sourcev1.Artifact
@ -144,7 +145,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) {
}{
{
name: "garbage collects",
beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error {
beforeFunc: func(obj *helmv1.HelmRepository, storage *Storage) error {
revisions := []string{"a", "b", "c", "d"}
for n := range revisions {
v := revisions[n]
@ -169,7 +170,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) {
assertArtifact: &sourcev1.Artifact{
Path: "/reconcile-storage/d.txt",
Revision: "d",
Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
Digest: "sha256:18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
URL: testStorage.Hostname + "/reconcile-storage/d.txt",
Size: int64p(int64(len("d"))),
},
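The fixture now stores a prefixed digest instead of a bare checksum. Assuming the stored file content is the revision string "d", as the test setup above suggests, the value can be reproduced like this:

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	sum := sha256.Sum256([]byte("d"))
	// If the stored content is "d", this matches the Digest in the fixture above.
	fmt.Printf("sha256:%x\n", sum)
}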
@ -194,7 +195,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) {
},
{
name: "notices missing artifact in storage",
beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error {
beforeFunc: func(obj *helmv1.HelmRepository, storage *Storage) error {
obj.Status.Artifact = &sourcev1.Artifact{
Path: "/reconcile-storage/invalid.txt",
Revision: "d",
@ -213,11 +214,11 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) {
},
{
name: "updates hostname on diff from current",
beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error {
beforeFunc: func(obj *helmv1.HelmRepository, storage *Storage) error {
obj.Status.Artifact = &sourcev1.Artifact{
Path: "/reconcile-storage/hostname.txt",
Revision: "f",
Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
URL: "http://outdated.com/reconcile-storage/hostname.txt",
}
if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
@ -236,7 +237,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) {
assertArtifact: &sourcev1.Artifact{
Path: "/reconcile-storage/hostname.txt",
Revision: "f",
Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
Digest: "sha256:3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
URL: testStorage.Hostname + "/reconcile-storage/hostname.txt",
Size: int64p(int64(len("file"))),
},
@ -256,7 +257,7 @@ func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) {
patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"),
}
obj := &sourcev1.HelmRepository{
obj := &helmv1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-",
Generation: 1,
@ -316,8 +317,8 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
server options
url string
secret *corev1.Secret
beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, revision, digest digest.Digest)
afterFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository)
beforeFunc func(t *WithT, obj *helmv1.HelmRepository, rev, dig digest.Digest)
afterFunc func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository)
want sreconcile.Result
wantErr bool
assertConditions []metav1.Condition
@ -348,10 +349,9 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"),
*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"),
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
t.Expect(chartRepo.Path).ToNot(BeEmpty())
t.Expect(chartRepo.Index).ToNot(BeNil())
t.Expect(artifact.Checksum).To(BeEmpty())
t.Expect(artifact.Revision).ToNot(BeEmpty())
},
},
@ -371,7 +371,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
"password": []byte("1234"),
},
},
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, revision, checksum digest.Digest) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev, dig digest.Digest) {
obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"}
},
want: sreconcile.ResultSuccess,
@ -379,10 +379,9 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"),
*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"),
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
t.Expect(chartRepo.Path).ToNot(BeEmpty())
t.Expect(chartRepo.Index).ToNot(BeNil())
t.Expect(artifact.Checksum).To(BeEmpty())
t.Expect(artifact.Revision).ToNot(BeEmpty())
},
},
@ -402,7 +401,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
"caFile": tlsCA,
},
},
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, revision, checksum digest.Digest) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev, dig digest.Digest) {
obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"}
},
want: sreconcile.ResultSuccess,
@ -410,10 +409,9 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"),
*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"),
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
t.Expect(chartRepo.Path).ToNot(BeEmpty())
t.Expect(chartRepo.Index).ToNot(BeNil())
t.Expect(artifact.Checksum).To(BeEmpty())
t.Expect(artifact.Revision).ToNot(BeEmpty())
},
},
@ -433,7 +431,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
"caFile": []byte("invalid"),
},
},
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, revision, checksum digest.Digest) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev, dig digest.Digest) {
obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"}
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
@ -444,18 +442,17 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"),
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"),
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
// No repo index due to fetch fail.
t.Expect(chartRepo.Path).To(BeEmpty())
t.Expect(chartRepo.Index).To(BeNil())
t.Expect(artifact.Checksum).To(BeEmpty())
t.Expect(artifact.Revision).To(BeEmpty())
},
},
{
name: "Invalid URL makes FetchFailed=True and returns stalling error",
protocol: "http",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, revision, checksum digest.Digest) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev, dig digest.Digest) {
obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "")
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
@ -467,18 +464,17 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"),
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"),
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
// No repo index due to fetch fail.
t.Expect(chartRepo.Path).To(BeEmpty())
t.Expect(chartRepo.Index).To(BeNil())
t.Expect(artifact.Checksum).To(BeEmpty())
t.Expect(artifact.Revision).To(BeEmpty())
},
},
{
name: "Unsupported scheme makes FetchFailed=True and returns stalling error",
protocol: "http",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, revision, checksum digest.Digest) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev, dig digest.Digest) {
obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "ftp://")
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
@ -490,18 +486,17 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"),
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"),
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
// No repo index due to fetch fail.
t.Expect(chartRepo.Path).To(BeEmpty())
t.Expect(chartRepo.Index).To(BeNil())
t.Expect(artifact.Checksum).To(BeEmpty())
t.Expect(artifact.Revision).To(BeEmpty())
},
},
{
name: "Missing secret returns FetchFailed=True and returns error",
protocol: "http",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, revision, checksum digest.Digest) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev, dig digest.Digest) {
obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"}
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
@ -512,11 +507,10 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"),
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"),
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
// No repo index due to fetch fail.
t.Expect(chartRepo.Path).To(BeEmpty())
t.Expect(chartRepo.Index).To(BeNil())
t.Expect(artifact.Checksum).To(BeEmpty())
t.Expect(artifact.Revision).To(BeEmpty())
},
},
@ -531,7 +525,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
"username": []byte("git"),
},
},
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, revision, checksum digest.Digest) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev, dig digest.Digest) {
obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "malformed-basic-auth"}
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
@ -542,22 +536,20 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"),
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"),
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
// No repo index due to fetch fail.
t.Expect(chartRepo.Path).To(BeEmpty())
t.Expect(chartRepo.Index).To(BeNil())
t.Expect(artifact.Checksum).To(BeEmpty())
t.Expect(artifact.Revision).To(BeEmpty())
},
},
{
name: "Stored index with same digest and revision",
protocol: "http",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, revision, digest digest.Digest) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev, dig digest.Digest) {
obj.Status.Artifact = &sourcev1.Artifact{
Revision: revision.String(),
Digest: digest.String(),
Checksum: digest.Hex(),
Revision: rev.String(),
Digest: dig.String(),
}
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
@ -568,32 +560,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"),
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"),
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
t.Expect(chartRepo.Path).ToNot(BeEmpty())
t.Expect(chartRepo.Index).To(BeNil())
t.Expect(&artifact).To(BeEquivalentTo(obj.Status.Artifact))
},
want: sreconcile.ResultSuccess,
},
{
name: "Stored index with same checksum and (legacy) revision",
protocol: "http",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, revision, digest digest.Digest) {
obj.Status.Artifact = &sourcev1.Artifact{
Revision: revision.Hex(),
Checksum: digest.Hex(),
}
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar")
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "foo", "bar")
},
assertConditions: []metav1.Condition{
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"),
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"),
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
t.Expect(chartRepo.Path).ToNot(BeEmpty())
t.Expect(chartRepo.Index).To(BeNil())
@ -604,11 +571,10 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
{
name: "Stored index with different digest and same revision",
protocol: "http",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, revision, digest digest.Digest) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev, dig digest.Digest) {
obj.Status.Artifact = &sourcev1.Artifact{
Revision: revision.String(),
Revision: rev.String(),
Digest: "sha256:80bb3dd67c63095d985850459834ea727603727a370079de90d221191d375a86",
Checksum: "80bb3dd67c63095d985850459834ea727603727a370079de90d221191d375a86",
}
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
@ -619,23 +585,21 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"),
*conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"),
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
t.Expect(chartRepo.Path).ToNot(BeEmpty())
t.Expect(chartRepo.Index).ToNot(BeNil())
t.Expect(artifact.Revision).To(Equal(obj.Status.Artifact.Revision))
t.Expect(artifact.Digest).ToNot(Equal(obj.Status.Artifact.Digest))
t.Expect(artifact.Checksum).ToNot(Equal(obj.Status.Artifact.Checksum))
},
want: sreconcile.ResultSuccess,
},
{
name: "Stored index with different revision and digest",
protocol: "http",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, revision, checksum digest.Digest) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev, dig digest.Digest) {
obj.Status.Artifact = &sourcev1.Artifact{
Revision: "80bb3dd67c63095d985850459834ea727603727a370079de90d221191d375a86",
Checksum: "80bb3dd67c63095d985850459834ea727603727a370079de90d221191d375a86",
Digest: "sha256:80bb3dd67c63095d985850459834ea727603727a370079de90d221191d375a86",
}
conditions.MarkReconciling(obj, meta.ProgressingReason, "foo")
@ -646,21 +610,20 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
*conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "building artifact: new index revision"),
*conditions.UnknownCondition(meta.ReadyCondition, meta.ProgressingReason, "building artifact: new index revision"),
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, chartRepo *repository.ChartRepository) {
t.Expect(chartRepo.Path).ToNot(BeEmpty())
t.Expect(chartRepo.Index).ToNot(BeNil())
t.Expect(artifact.Path).To(Not(BeEmpty()))
t.Expect(artifact.Revision).ToNot(Equal(obj.Status.Artifact.Revision))
t.Expect(artifact.Digest).ToNot(Equal(obj.Status.Artifact.Digest))
t.Expect(artifact.Checksum).ToNot(Equal(obj.Status.Artifact.Checksum))
},
want: sreconcile.ResultSuccess,
},
{
name: "Existing artifact makes ArtifactOutdated=True",
protocol: "http",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, revision, checksum digest.Digest) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, rev, dig digest.Digest) {
obj.Status.Artifact = &sourcev1.Artifact{
Path: "some-path",
Revision: "some-rev",
@ -676,12 +639,12 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
}
for _, tt := range tests {
obj := &sourcev1.HelmRepository{
obj := &helmv1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "auth-strategy-",
Generation: 1,
},
Spec: sourcev1.HelmRepositorySpec{
Spec: helmv1.HelmRepositorySpec{
Interval: metav1.Duration{Duration: interval},
Timeout: &metav1.Duration{Duration: timeout},
},
@ -735,7 +698,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
builder.WithObjects(secret.DeepCopy())
}
// Calculate the artifact checksum for valid repos configurations.
// Calculate the artifact digest for valid repos configurations.
clientOpts := []helmgetter.Option{
helmgetter.WithURL(server.URL()),
}
@ -744,7 +707,7 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
validSecret := true
if secret != nil {
// Extract the client options from secret, ignoring any invalid
// value. validSecret is used to determine if the indexChecksum
// value. validSecret is used to determine if the index digest
// should be calculated below.
var cOpts []helmgetter.Option
var serr error
@ -767,18 +730,18 @@ func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
}
g.Expect(err).ToNot(HaveOccurred())
// NOTE: checksum will be empty in beforeFunc for invalid repo
// NOTE: digest will be empty in beforeFunc for invalid repo
// configurations as the client can't get the repo.
var revision, checksum digest.Digest
var rev, dig digest.Digest
if validSecret {
g.Expect(newChartRepo.CacheIndex()).To(Succeed())
checksum = newChartRepo.Digest(intdigest.Canonical)
dig = newChartRepo.Digest(intdigest.Canonical)
g.Expect(newChartRepo.LoadFromPath()).To(Succeed())
revision = newChartRepo.Digest(intdigest.Canonical)
rev = newChartRepo.Digest(intdigest.Canonical)
}
if tt.beforeFunc != nil {
tt.beforeFunc(g, obj, revision, checksum)
tt.beforeFunc(g, obj, rev, dig)
}
r := &HelmRepositoryReconciler{
@ -820,15 +783,15 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) {
tests := []struct {
name string
cache *cache.Cache
beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository)
afterFunc func(t *WithT, obj *sourcev1.HelmRepository, cache *cache.Cache)
beforeFunc func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository)
afterFunc func(t *WithT, obj *helmv1.HelmRepository, cache *cache.Cache)
want sreconcile.Result
wantErr bool
assertConditions []metav1.Condition
}{
{
name: "Archiving artifact to storage makes ArtifactInStorage=True",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
},
want: sreconcile.ResultSuccess,
@ -839,7 +802,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) {
{
name: "Archiving (loaded) artifact to storage adds to cache",
cache: cache.New(10, time.Minute),
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
index.Index = &repo.IndexFile{
APIVersion: "v1",
Generated: time.Now(),
@ -847,7 +810,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
},
want: sreconcile.ResultSuccess,
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, cache *cache.Cache) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, cache *cache.Cache) {
i, ok := cache.Get(obj.GetArtifact().Path)
t.Expect(ok).To(BeTrue())
t.Expect(i).To(BeAssignableToTypeOf(&repo.IndexFile{}))
@ -858,11 +821,11 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) {
},
{
name: "Up-to-date artifact should not update status",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
obj.Status.Artifact = artifact.DeepCopy()
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, _ *cache.Cache) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, _ *cache.Cache) {
t.Expect(obj.Status.URL).To(BeEmpty())
},
want: sreconcile.ResultSuccess,
@ -872,7 +835,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) {
},
{
name: "Removes ArtifactOutdatedCondition after creating a new artifact",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "")
},
@ -883,10 +846,10 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) {
},
{
name: "Creates latest symlink to the created artifact",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
beforeFunc: func(t *WithT, obj *helmv1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, _ *cache.Cache) {
afterFunc: func(t *WithT, obj *helmv1.HelmRepository, _ *cache.Cache) {
localPath := testStorage.LocalPath(*obj.GetArtifact())
symlinkPath := filepath.Join(filepath.Dir(localPath), "index.yaml")
targetFile, err := os.Readlink(symlinkPath)
@ -913,16 +876,16 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) {
patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"),
}
obj := &sourcev1.HelmRepository{
obj := &helmv1.HelmRepository{
TypeMeta: metav1.TypeMeta{
Kind: sourcev1.HelmRepositoryKind,
Kind: helmv1.HelmRepositoryKind,
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-bucket-",
Generation: 1,
Namespace: "default",
},
Spec: sourcev1.HelmRepositorySpec{
Spec: helmv1.HelmRepositorySpec{
Timeout: &metav1.Duration{Duration: timeout},
URL: "https://example.com/index.yaml",
},
@ -941,8 +904,8 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) {
chartRepo.Path = cachePath
artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz")
// Checksum of the index file calculated by the ChartRepository.
artifact.Checksum = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
// Digest of the index file calculated by the ChartRepository.
artifact.Digest = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
if tt.beforeFunc != nil {
tt.beforeFunc(g, obj, artifact, chartRepo)
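An aside on the fixed digest above: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" is the SHA-256 of empty input, which suggests the cached index file is still empty at that point in the test setup. A quick, illustrative way to confirm the constant (not part of the test suite):

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// SHA-256 over zero bytes of input.
	fmt.Printf("sha256:%x\n", sha256.Sum256(nil))
	// Output: sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}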
@ -970,7 +933,7 @@ func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) {
func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) {
// Helper to build simple helmRepositoryReconcileFunc with result and error.
buildReconcileFuncs := func(r sreconcile.Result, e error) helmRepositoryReconcileFunc {
return func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) {
return func(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) {
return r, e
}
}
@ -1025,11 +988,11 @@ func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) {
{
name: "multiple object status conditions mutations",
reconcileFuncs: []helmRepositoryReconcileFunc{
func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) {
func(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) {
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision")
return sreconcile.ResultSuccess, nil
},
func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) {
func(ctx context.Context, sp *patch.SerialPatcher, obj *helmv1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) {
conditions.MarkTrue(obj, meta.ReconcilingCondition, meta.ProgressingReason, "creating artifact")
return sreconcile.ResultSuccess, nil
},
@ -1080,12 +1043,12 @@ func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) {
Client: fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()).Build(),
patchOptions: getPatchOptions(helmRepositoryReadyCondition.Owned, "sc"),
}
obj := &sourcev1.HelmRepository{
obj := &helmv1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-",
Generation: tt.generation,
},
Status: sourcev1.HelmRepositoryStatus{
Status: helmv1.HelmRepositoryStatus{
ObservedGeneration: tt.observedGeneration,
},
}
@ -1110,12 +1073,12 @@ func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) {
func TestHelmRepositoryReconciler_statusConditions(t *testing.T) {
tests := []struct {
name string
beforeFunc func(obj *sourcev1.HelmRepository)
beforeFunc func(obj *helmv1.HelmRepository)
assertConditions []metav1.Condition
}{
{
name: "positive conditions only",
beforeFunc: func(obj *sourcev1.HelmRepository) {
beforeFunc: func(obj *helmv1.HelmRepository) {
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
},
assertConditions: []metav1.Condition{
@ -1125,7 +1088,7 @@ func TestHelmRepositoryReconciler_statusConditions(t *testing.T) {
},
{
name: "multiple failures",
beforeFunc: func(obj *sourcev1.HelmRepository) {
beforeFunc: func(obj *helmv1.HelmRepository) {
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory")
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error")
@ -1139,7 +1102,7 @@ func TestHelmRepositoryReconciler_statusConditions(t *testing.T) {
},
{
name: "mixed positive and negative conditions",
beforeFunc: func(obj *sourcev1.HelmRepository) {
beforeFunc: func(obj *helmv1.HelmRepository) {
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
},
@ -1155,9 +1118,9 @@ func TestHelmRepositoryReconciler_statusConditions(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
obj := &sourcev1.HelmRepository{
obj := &helmv1.HelmRepository{
TypeMeta: metav1.TypeMeta{
Kind: sourcev1.HelmRepositoryKind,
Kind: helmv1.HelmRepositoryKind,
APIVersion: "source.toolkit.fluxcd.io/v1beta2",
},
ObjectMeta: metav1.ObjectMeta{
@ -1203,8 +1166,8 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) {
name string
res sreconcile.Result
resErr error
oldObjBeforeFunc func(obj *sourcev1.HelmRepository)
newObjBeforeFunc func(obj *sourcev1.HelmRepository)
oldObjBeforeFunc func(obj *helmv1.HelmRepository)
newObjBeforeFunc func(obj *helmv1.HelmRepository)
wantEvent string
}{
{
@ -1216,8 +1179,8 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) {
name: "new artifact with nil size",
res: sreconcile.ResultSuccess,
resErr: nil,
newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: nil}
newObjBeforeFunc: func(obj *helmv1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: nil}
},
wantEvent: "Normal NewArtifact stored fetched index of unknown size",
},
@ -1225,8 +1188,8 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) {
name: "new artifact",
res: sreconcile.ResultSuccess,
resErr: nil,
newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
newObjBeforeFunc: func(obj *helmv1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize}
},
wantEvent: "Normal NewArtifact stored fetched index of size",
},
@ -1234,13 +1197,13 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) {
name: "recovery from failure",
res: sreconcile.ResultSuccess,
resErr: nil,
oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
oldObjBeforeFunc: func(obj *helmv1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
},
newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
newObjBeforeFunc: func(obj *helmv1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
wantEvent: "Normal Succeeded stored fetched index of size",
@ -1249,13 +1212,13 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) {
name: "recovery and new artifact",
res: sreconcile.ResultSuccess,
resErr: nil,
oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
oldObjBeforeFunc: func(obj *helmv1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
},
newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb", Size: &aSize}
newObjBeforeFunc: func(obj *helmv1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb", Size: &aSize}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
wantEvent: "Normal NewArtifact stored fetched index of size",
@ -1264,12 +1227,12 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) {
name: "no updates",
res: sreconcile.ResultSuccess,
resErr: nil,
oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
oldObjBeforeFunc: func(obj *helmv1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
newObjBeforeFunc: func(obj *helmv1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy", Size: &aSize}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
},
@ -1280,7 +1243,7 @@ func TestHelmRepositoryReconciler_notify(t *testing.T) {
g := NewWithT(t)
recorder := record.NewFakeRecorder(32)
oldObj := &sourcev1.HelmRepository{}
oldObj := &helmv1.HelmRepository{}
newObj := oldObj.DeepCopy()
if tt.oldObjBeforeFunc != nil {
@ -1327,12 +1290,12 @@ func TestHelmRepositoryReconciler_ReconcileTypeUpdatePredicateFilter(t *testing.
testServer.Start()
defer testServer.Stop()
obj := &sourcev1.HelmRepository{
obj := &helmv1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "helmrepository-reconcile-",
Namespace: "default",
},
Spec: sourcev1.HelmRepositorySpec{
Spec: helmv1.HelmRepositorySpec{
Interval: metav1.Duration{Duration: interval},
URL: testServer.URL(),
},
@ -1388,7 +1351,7 @@ func TestHelmRepositoryReconciler_ReconcileTypeUpdatePredicateFilter(t *testing.
}
g.Expect(testEnv.CreateAndWait(ctx, secret)).To(Succeed())
obj.Spec.Type = sourcev1.HelmRepositoryTypeOCI
obj.Spec.Type = helmv1.HelmRepositoryTypeOCI
obj.Spec.URL = fmt.Sprintf("oci://%s", testRegistryServer.registryHost)
obj.Spec.SecretRef = &meta.LocalObjectReference{
Name: secret.Name,
@ -1444,12 +1407,12 @@ func TestHelmRepositoryReconciler_ReconcileSpecUpdatePredicateFilter(t *testing.
testServer.Start()
defer testServer.Stop()
obj := &sourcev1.HelmRepository{
obj := &helmv1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "helmrepository-reconcile-",
Namespace: "default",
},
Spec: sourcev1.HelmRepositorySpec{
Spec: helmv1.HelmRepositorySpec{
Interval: metav1.Duration{Duration: interval},
URL: testServer.URL(),
},
@ -1546,12 +1509,12 @@ func TestHelmRepositoryReconciler_InMemoryCaching(t *testing.T) {
g.Expect(err).ToNot(HaveOccurred())
defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }()
helmRepo := &sourcev1.HelmRepository{
helmRepo := &helmv1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "helmrepository-",
Namespace: ns.Name,
},
Spec: sourcev1.HelmRepositorySpec{
Spec: helmv1.HelmRepositorySpec{
URL: testServer.URL(),
},
}
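A note on the renames that run through the test hunks above: after this change the test file keeps two aliases for the source API. The HelmRepository kind, still a beta API, moves to the helmv1 alias, while shared types such as Artifact keep the sourcev1 alias, which now points at api/v1. The import block itself is not part of these hunks; it presumably mirrors the aliasing visible in the OCIRepository controller hunk further below, roughly:

package controllers

import (
	// Assumed aliasing for this test file: the GA group version carries the
	// shared types (Artifact, conditions), while the HelmRepository kind
	// itself still lives in the beta group version.
	sourcev1 "github.com/fluxcd/source-controller/api/v1"
	helmv1 "github.com/fluxcd/source-controller/api/v1beta2"
)

// Example of how the two aliases mix in the tests above: the object is a
// beta kind, its status artifact is the v1 type.
func newTestHelmRepository(url string) *helmv1.HelmRepository {
	obj := &helmv1.HelmRepository{}
	obj.Spec.URL = url
	obj.Status.Artifact = &sourcev1.Artifact{Revision: "example"}
	return obj
}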

View File

@ -66,7 +66,8 @@ import (
"github.com/fluxcd/pkg/untar"
"github.com/fluxcd/pkg/version"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
ociv1 "github.com/fluxcd/source-controller/api/v1beta2"
serror "github.com/fluxcd/source-controller/internal/error"
sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
"github.com/fluxcd/source-controller/internal/reconcile/summarize"
@ -122,7 +123,7 @@ func (e invalidOCIURLError) Error() string {
// ociRepositoryReconcileFunc is the function type for all the v1beta2.OCIRepository
// (sub)reconcile functions. The type implementations are grouped and
// executed serially to perform the complete reconcile of the object.
type ociRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error)
type ociRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *ociv1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error)
// OCIRepositoryReconciler reconciles a v1beta2.OCIRepository object
type OCIRepositoryReconciler struct {
@ -155,7 +156,7 @@ func (r *OCIRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, o
recoverPanic := true
return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.OCIRepository{}, builder.WithPredicates(
For(&ociv1.OCIRepository{}, builder.WithPredicates(
predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
)).
WithOptions(controller.Options{
@ -176,7 +177,7 @@ func (r *OCIRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques
log := ctrl.LoggerFrom(ctx)
// Fetch the OCIRepository
obj := &sourcev1.OCIRepository{}
obj := &ociv1.OCIRepository{}
if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
@ -247,7 +248,7 @@ func (r *OCIRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques
// reconcile iterates through the ociRepositoryReconcileFunc tasks for the
// object. It returns early on the first call that returns
// reconcile.ResultRequeue, or produces an error.
func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.OCIRepository, reconcilers []ociRepositoryReconcileFunc) (sreconcile.Result, error) {
func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *ociv1.OCIRepository, reconcilers []ociRepositoryReconcileFunc) (sreconcile.Result, error) {
oldObj := obj.DeepCopy()
rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress")
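To make the contract in the doc comment above concrete, here is a minimal, self-contained sketch of running sub-reconcilers serially with an early return on requeue or error. It deliberately uses stand-in types instead of the controller's sreconcile and ociv1 packages, so it illustrates the pattern rather than the controller's actual reconcile implementation (which also aggregates results and patches status).

package main

import (
	"context"
	"errors"
	"fmt"
)

// result stands in for sreconcile.Result.
type result int

const (
	resultEmpty result = iota
	resultRequeue
	resultSuccess
)

// task stands in for an ociRepositoryReconcileFunc.
type task func(ctx context.Context) (result, error)

// runSerially executes the tasks in order, returning early on the first
// error or explicit requeue, as described in the doc comment above.
func runSerially(ctx context.Context, tasks []task) (result, error) {
	res := resultEmpty
	for _, t := range tasks {
		r, err := t(ctx)
		if err != nil {
			return r, err
		}
		if r == resultRequeue {
			return r, nil
		}
		res = r
	}
	return res, nil
}

func main() {
	_, err := runSerially(context.Background(), []task{
		func(context.Context) (result, error) { return resultSuccess, nil },
		func(context.Context) (result, error) { return resultEmpty, errors.New("fetch failed") },
		func(context.Context) (result, error) { return resultSuccess, nil }, // never reached
	})
	fmt.Println(err) // fetch failed
}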
@ -321,7 +322,7 @@ func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seria
// reconcileSource fetches the upstream OCI artifact metadata and content.
// If this fails, it records v1beta2.FetchFailedCondition=True on the object and returns early.
func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher,
obj *sourcev1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) {
obj *ociv1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) {
var auth authn.Authenticator
ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
@ -346,7 +347,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
return sreconcile.ResultEmpty, e
}
if _, ok := keychain.(soci.Anonymous); obj.Spec.Provider != sourcev1.GenericOCIProvider && ok {
if _, ok := keychain.(soci.Anonymous); obj.Spec.Provider != ociv1.GenericOCIProvider && ok {
var authErr error
auth, authErr = oidcAuth(ctxTimeout, obj.Spec.URL, obj.Spec.Provider)
if authErr != nil && !errors.Is(authErr, oci.ErrUnconfiguredProvider) {
@ -395,7 +396,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
if err != nil {
e := serror.NewGeneric(
fmt.Errorf("failed to determine artifact digest: %w", err),
sourcev1.OCIPullFailedReason,
ociv1.OCIPullFailedReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
@ -468,7 +469,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
if err != nil {
e := serror.NewGeneric(
fmt.Errorf("failed to pull artifact from '%s': %w", obj.Spec.URL, err),
sourcev1.OCIPullFailedReason,
ociv1.OCIPullFailedReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
@ -479,7 +480,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
if err != nil {
e := serror.NewGeneric(
fmt.Errorf("failed to parse artifact manifest: %w", err),
sourcev1.OCILayerOperationFailedReason,
ociv1.OCILayerOperationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
@ -489,29 +490,29 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
// Extract the compressed content from the selected layer
blob, err := r.selectLayer(obj, img)
if err != nil {
e := serror.NewGeneric(err, sourcev1.OCILayerOperationFailedReason)
e := serror.NewGeneric(err, ociv1.OCILayerOperationFailedReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
// Persist layer content to storage using the specified operation
switch obj.GetLayerOperation() {
case sourcev1.OCILayerExtract:
case ociv1.OCILayerExtract:
if _, err = untar.Untar(blob, dir); err != nil {
e := serror.NewGeneric(
fmt.Errorf("failed to extract layer contents from artifact: %w", err),
sourcev1.OCILayerOperationFailedReason,
ociv1.OCILayerOperationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
case sourcev1.OCILayerCopy:
case ociv1.OCILayerCopy:
metadata.Path = fmt.Sprintf("%s.tgz", r.digestFromRevision(metadata.Revision))
file, err := os.Create(filepath.Join(dir, metadata.Path))
if err != nil {
e := serror.NewGeneric(
fmt.Errorf("failed to create file to copy layer to: %w", err),
sourcev1.OCILayerOperationFailedReason,
ociv1.OCILayerOperationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
@ -522,7 +523,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
if err != nil {
e := serror.NewGeneric(
fmt.Errorf("failed to copy layer from artifact: %w", err),
sourcev1.OCILayerOperationFailedReason,
ociv1.OCILayerOperationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
@ -530,7 +531,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
default:
e := serror.NewGeneric(
fmt.Errorf("unsupported layer operation: %s", obj.GetLayerOperation()),
sourcev1.OCILayerOperationFailedReason,
ociv1.OCILayerOperationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
@ -542,7 +543,7 @@ func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch
// selectLayer finds the matching layer and returns its compressed contents.
// If no layer selector was provided, we pick the first layer from the OCI artifact.
func (r *OCIRepositoryReconciler) selectLayer(obj *sourcev1.OCIRepository, image gcrv1.Image) (io.ReadCloser, error) {
func (r *OCIRepositoryReconciler) selectLayer(obj *ociv1.OCIRepository, image gcrv1.Image) (io.ReadCloser, error) {
layers, err := image.Layers()
if err != nil {
return nil, fmt.Errorf("failed to parse artifact layers: %w", err)
@ -626,7 +627,7 @@ func (r *OCIRepositoryReconciler) digestFromRevision(revision string) string {
// verifySignature verifies the authenticity of the given image reference URL.
// First, it tries to use a key if a Secret with a valid public key is provided.
// If not, it falls back to a keyless approach for verification.
func (r *OCIRepositoryReconciler) verifySignature(ctx context.Context, obj *sourcev1.OCIRepository, url string, opt ...remote.Option) error {
func (r *OCIRepositoryReconciler) verifySignature(ctx context.Context, obj *ociv1.OCIRepository, url string, opt ...remote.Option) error {
ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
defer cancel()
@ -705,12 +706,12 @@ func (r *OCIRepositoryReconciler) verifySignature(ctx context.Context, obj *sour
}
// parseRepositoryURL validates and extracts the repository URL.
func (r *OCIRepositoryReconciler) parseRepositoryURL(obj *sourcev1.OCIRepository) (string, error) {
if !strings.HasPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) {
func (r *OCIRepositoryReconciler) parseRepositoryURL(obj *ociv1.OCIRepository) (string, error) {
if !strings.HasPrefix(obj.Spec.URL, ociv1.OCIRepositoryPrefix) {
return "", fmt.Errorf("URL must be in format 'oci://<domain>/<org>/<repo>'")
}
url := strings.TrimPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix)
url := strings.TrimPrefix(obj.Spec.URL, ociv1.OCIRepositoryPrefix)
ref, err := name.ParseReference(url)
if err != nil {
return "", err
@ -725,7 +726,7 @@ func (r *OCIRepositoryReconciler) parseRepositoryURL(obj *sourcev1.OCIRepository
}
// getArtifactURL determines which tag or revision should be used and returns the OCI artifact FQN.
func (r *OCIRepositoryReconciler) getArtifactURL(obj *sourcev1.OCIRepository, options []crane.Option) (string, error) {
func (r *OCIRepositoryReconciler) getArtifactURL(obj *ociv1.OCIRepository, options []crane.Option) (string, error) {
url, err := r.parseRepositoryURL(obj)
if err != nil {
return "", invalidOCIURLError{err}
@ -788,7 +789,7 @@ func (r *OCIRepositoryReconciler) getTagBySemver(url, exp string, options []cran
// keychain generates the credential keychain based on the resource
// configuration. If no auth is specified, a default keychain with
// anonymous access is returned.
func (r *OCIRepositoryReconciler) keychain(ctx context.Context, obj *sourcev1.OCIRepository) (authn.Keychain, error) {
func (r *OCIRepositoryReconciler) keychain(ctx context.Context, obj *ociv1.OCIRepository) (authn.Keychain, error) {
pullSecretNames := sets.NewString()
// lookup auth secret
@ -832,7 +833,7 @@ func (r *OCIRepositoryReconciler) keychain(ctx context.Context, obj *sourcev1.OC
// transport clones the default transport from remote and when a certSecretRef is specified,
// the returned transport will include the TLS client and/or CA certificates.
func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *sourcev1.OCIRepository) (http.RoundTripper, error) {
func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *ociv1.OCIRepository) (http.RoundTripper, error) {
if obj.Spec.CertSecretRef == nil || obj.Spec.CertSecretRef.Name == "" {
return nil, nil
}
@ -875,7 +876,7 @@ func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *sourcev1.O
// oidcAuth generates the OIDC credential authenticator based on the specified cloud provider.
func oidcAuth(ctx context.Context, url, provider string) (authn.Authenticator, error) {
u := strings.TrimPrefix(url, sourcev1.OCIRepositoryPrefix)
u := strings.TrimPrefix(url, ociv1.OCIRepositoryPrefix)
ref, err := name.ParseReference(u)
if err != nil {
return nil, fmt.Errorf("failed to parse URL '%s': %w", u, err)
@ -883,11 +884,11 @@ func oidcAuth(ctx context.Context, url, provider string) (authn.Authenticator, e
opts := login.ProviderOptions{}
switch provider {
case sourcev1.AmazonOCIProvider:
case ociv1.AmazonOCIProvider:
opts.AwsAutoLogin = true
case sourcev1.AzureOCIProvider:
case ociv1.AzureOCIProvider:
opts.AzureAutoLogin = true
case sourcev1.GoogleOCIProvider:
case ociv1.GoogleOCIProvider:
opts.GcpAutoLogin = true
}
@ -907,7 +908,7 @@ func oidcAuth(ctx context.Context, url, provider string) (authn.Authenticator, e
// The hostname of any URL in the Status of the object is updated, to ensure
// it matches the Storage server hostname of the current runtime.
func (r *OCIRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher,
obj *sourcev1.OCIRepository, _ *sourcev1.Artifact, _ string) (sreconcile.Result, error) {
obj *ociv1.OCIRepository, _ *sourcev1.Artifact, _ string) (sreconcile.Result, error) {
// Garbage collect previous advertised artifact(s) from storage
_ = r.garbageCollect(ctx, obj)
@ -952,7 +953,7 @@ func (r *OCIRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patc
// On a successful archive, the Artifact in the Status of the object is set,
// and the symlink in the Storage is updated to its path.
func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher,
obj *sourcev1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) {
obj *ociv1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) {
// Create artifact
artifact := r.Storage.NewArtifactFor(obj.Kind, obj, metadata.Revision,
fmt.Sprintf("%s.tar.gz", r.digestFromRevision(metadata.Revision)))
@ -1009,7 +1010,7 @@ func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat
defer unlock()
switch obj.GetLayerOperation() {
case sourcev1.OCILayerCopy:
case ociv1.OCILayerCopy:
if err = r.Storage.CopyFromPath(&artifact, filepath.Join(dir, metadata.Path)); err != nil {
e := serror.NewGeneric(
fmt.Errorf("unable to copy artifact to storage: %w", err),
@ -1065,7 +1066,7 @@ func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pat
// reconcileDelete handles the deletion of the object.
// It first garbage collects all Artifacts for the object from the Storage.
// If successful, it removes the finalizer from the object.
func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.OCIRepository) (sreconcile.Result, error) {
func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *ociv1.OCIRepository) (sreconcile.Result, error) {
// Garbage collect the resource's artifacts
if err := r.garbageCollect(ctx, obj); err != nil {
// Return the error so we retry the failed garbage collection
@ -1084,7 +1085,7 @@ func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sour
// It removes all but the current Artifact from the Storage, unless the
// deletion timestamp on the object is set, which will result in the
// removal of all Artifacts for the object.
func (r *OCIRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.OCIRepository) error {
func (r *OCIRepositoryReconciler) garbageCollect(ctx context.Context, obj *ociv1.OCIRepository) error {
if !obj.DeletionTimestamp.IsZero() {
if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
return serror.NewGeneric(
@ -1132,21 +1133,13 @@ func (r *OCIRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Obj
}
// notify emits notification related to the reconciliation.
func (r *OCIRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.OCIRepository, res sreconcile.Result, resErr error) {
func (r *OCIRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *ociv1.OCIRepository, res sreconcile.Result, resErr error) {
// Notify successful reconciliation for new artifact and recovery from any
// failure.
if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
annotations := map[string]string{
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision,
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaChecksumKey): newObj.Status.Artifact.Checksum,
}
if newObj.Status.Artifact.Digest != "" {
annotations[sourcev1.GroupVersion.Group+"/"+eventv1.MetaDigestKey] = newObj.Status.Artifact.Digest
}
var oldChecksum string
if oldObj.GetArtifact() != nil {
oldChecksum = oldObj.GetArtifact().Checksum
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest,
}
message := fmt.Sprintf("stored artifact with revision '%s' from '%s'", newObj.Status.Artifact.Revision, newObj.Spec.URL)
@ -1166,7 +1159,7 @@ func (r *OCIRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *so
}
// Notify on new artifact and failure recovery.
if oldChecksum != newObj.GetArtifact().Checksum {
if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) {
r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
"NewArtifact", message)
ctrl.LoggerFrom(ctx).Info(message)
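The last hunk above replaces the stored-checksum comparison with Artifact.HasDigest. The practical effect, sketched below under the assumption that HasDigest is a nil-safe string comparison on the Digest field, is that a NewArtifact event fires both when the digest changes and when the old object had no artifact at all, which is what the removed oldChecksum bookkeeping emulated.

package main

import "fmt"

// Illustrative stand-in type; the real Artifact lives in api/v1.
type artifact struct {
	Digest string
}

// hasDigest mirrors the assumed nil-safe behaviour of Artifact.HasDigest.
func (a *artifact) hasDigest(d string) bool {
	if a == nil {
		return false
	}
	return a.Digest == d
}

// shouldEmitNewArtifact reports whether the "NewArtifact" event should be
// emitted for the freshly stored digest.
func shouldEmitNewArtifact(oldArtifact *artifact, newDigest string) bool {
	return !oldArtifact.hasDigest(newDigest)
}

func main() {
	fmt.Println(shouldEmitNewArtifact(nil, "sha256:abc"))                             // true: no prior artifact
	fmt.Println(shouldEmitNewArtifact(&artifact{Digest: "sha256:abc"}, "sha256:abc")) // false: unchanged
}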
@ -1197,7 +1190,7 @@ func craneOptions(ctx context.Context, insecure bool) []crane.Option {
// makeRemoteOptions returns a remoteOptions struct with the authentication and transport options set.
// The returned struct can be used to interact with a remote registry using go-containerregistry based libraries.
func makeRemoteOptions(ctxTimeout context.Context, obj *sourcev1.OCIRepository, transport http.RoundTripper,
func makeRemoteOptions(ctxTimeout context.Context, obj *ociv1.OCIRepository, transport http.RoundTripper,
keychain authn.Keychain, auth authn.Authenticator) remoteOptions {
o := remoteOptions{
craneOpts: craneOptions(ctxTimeout, obj.Spec.Insecure),
@ -1233,7 +1226,7 @@ type remoteOptions struct {
// ociContentConfigChanged evaluates the current spec with the observations
// of the artifact in the status to determine if artifact content configuration
// has changed and requires rebuilding the artifact.
func ociContentConfigChanged(obj *sourcev1.OCIRepository) bool {
func ociContentConfigChanged(obj *ociv1.OCIRepository) bool {
if !pointer.StringEqual(obj.Spec.Ignore, obj.Status.ObservedIgnore) {
return true
}
@ -1248,7 +1241,7 @@ func ociContentConfigChanged(obj *sourcev1.OCIRepository) bool {
// Returns true if both arguments are nil or both arguments
// dereference to the same value.
// Based on k8s.io/utils/pointer/pointer.go pointer value equality.
func layerSelectorEqual(a, b *sourcev1.OCILayerSelector) bool {
func layerSelectorEqual(a, b *ociv1.OCILayerSelector) bool {
if (a == nil) != (b == nil) {
return false
}

File diff suppressed because it is too large

View File

@ -20,7 +20,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
)
type SourceRevisionChangePredicate struct {

View File

@ -20,9 +20,7 @@ import (
"archive/tar"
"compress/gzip"
"context"
"crypto/sha256"
"fmt"
"hash"
"io"
"io/fs"
"net/url"
@ -34,7 +32,6 @@ import (
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/fluxcd/go-git/v5/plumbing/format/gitignore"
"github.com/opencontainers/go-digest"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kerrors "k8s.io/apimachinery/pkg/util/errors"
@ -42,7 +39,7 @@ import (
"github.com/fluxcd/pkg/sourceignore"
"github.com/fluxcd/pkg/untar"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/api/v1"
intdigest "github.com/fluxcd/source-controller/internal/digest"
sourcefs "github.com/fluxcd/source-controller/internal/fs"
)
@ -50,9 +47,9 @@ import (
const GarbageCountLimit = 1000
const (
// defaultFileMode is the permission mode applied to all files inside of an artifact archive.
// defaultFileMode is the permission mode applied to all files inside an artifact archive.
defaultFileMode int64 = 0o644
// defaultDirMode is the permission mode applied to all directories inside of an artifact archive.
// defaultDirMode is the permission mode applied to all directories inside an artifact archive.
defaultDirMode int64 = 0o755
)
@ -86,10 +83,10 @@ func NewStorage(basePath string, hostname string, artifactRetentionTTL time.Dura
}, nil
}
// NewArtifactFor returns a new v1beta1.Artifact.
func (s *Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) sourcev1.Artifact {
path := sourcev1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName)
artifact := sourcev1.Artifact{
// NewArtifactFor returns a new v1.Artifact.
func (s *Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) v1.Artifact {
path := v1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName)
artifact := v1.Artifact{
Path: path,
Revision: revision,
}
@ -97,8 +94,8 @@ func (s *Storage) NewArtifactFor(kind string, metadata metav1.Object, revision,
return artifact
}
// SetArtifactURL sets the URL on the given v1beta1.Artifact.
func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) {
// SetArtifactURL sets the URL on the given v1.Artifact.
func (s Storage) SetArtifactURL(artifact *v1.Artifact) {
if artifact.Path == "" {
return
}
@ -119,14 +116,14 @@ func (s Storage) SetHostname(URL string) string {
return u.String()
}
// MkdirAll calls os.MkdirAll for the given v1beta1.Artifact base dir.
func (s *Storage) MkdirAll(artifact sourcev1.Artifact) error {
// MkdirAll calls os.MkdirAll for the given v1.Artifact base dir.
func (s *Storage) MkdirAll(artifact v1.Artifact) error {
dir := filepath.Dir(s.LocalPath(artifact))
return os.MkdirAll(dir, 0o700)
}
// RemoveAll calls os.RemoveAll for the given v1beta1.Artifact base dir.
func (s *Storage) RemoveAll(artifact sourcev1.Artifact) (string, error) {
// RemoveAll calls os.RemoveAll for the given v1.Artifact base dir.
func (s *Storage) RemoveAll(artifact v1.Artifact) (string, error) {
var deletedDir string
dir := filepath.Dir(s.LocalPath(artifact))
// Check if the dir exists.
@ -137,8 +134,8 @@ func (s *Storage) RemoveAll(artifact sourcev1.Artifact) (string, error) {
return deletedDir, os.RemoveAll(dir)
}
// RemoveAllButCurrent removes all files for the given v1beta1.Artifact base dir, excluding the current one.
func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) ([]string, error) {
// RemoveAllButCurrent removes all files for the given v1.Artifact base dir, excluding the current one.
func (s *Storage) RemoveAllButCurrent(artifact v1.Artifact) ([]string, error) {
deletedFiles := []string{}
localPath := s.LocalPath(artifact)
dir := filepath.Dir(localPath)
@ -171,7 +168,7 @@ func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) ([]string, err
// 1. collect all artifact files with an expired ttl
// 2. if we satisfy maxItemsToBeRetained, then return
// 3. else, collect all artifact files till the latest n files remain, where n=maxItemsToBeRetained
func (s *Storage) getGarbageFiles(artifact sourcev1.Artifact, totalCountLimit, maxItemsToBeRetained int, ttl time.Duration) (garbageFiles []string, _ error) {
func (s *Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItemsToBeRetained int, ttl time.Duration) (garbageFiles []string, _ error) {
localPath := s.LocalPath(artifact)
dir := filepath.Dir(localPath)
artifactFilesWithCreatedTs := make(map[time.Time]string)
@ -222,7 +219,7 @@ func (s *Storage) getGarbageFiles(artifact sourcev1.Artifact, totalCountLimit, m
return garbageFiles, nil
}
// sort all timestamps in an ascending order.
// sort all timestamps in ascending order.
sort.Slice(creationTimestamps, func(i, j int) bool { return creationTimestamps[i].Before(creationTimestamps[j]) })
for _, ts := range creationTimestamps {
path, ok := artifactFilesWithCreatedTs[ts]
@ -236,7 +233,7 @@ func (s *Storage) getGarbageFiles(artifact sourcev1.Artifact, totalCountLimit, m
noOfGarbageFiles := len(garbageFiles)
for _, path := range sortedPaths {
if path != localPath && filepath.Ext(path) != ".lock" && !stringInSlice(path, garbageFiles) {
// If we previously collected a few garbage files with an expired ttl, then take that into account
// If we previously collected some garbage files with an expired ttl, then take that into account
// when checking whether we need to remove more files to satisfy the max no. of items allowed
// in the filesystem, along with the no. of files already removed in this loop.
if noOfGarbageFiles > 0 {
@ -256,9 +253,9 @@ func (s *Storage) getGarbageFiles(artifact sourcev1.Artifact, totalCountLimit, m
return garbageFiles, nil
}
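The three numbered steps in the getGarbageFiles comment amount to a two-stage retention policy: delete everything older than the TTL, then, if more than maxItemsToBeRetained files survive, delete the oldest survivors as well. A simplified, self-contained sketch of that policy follows; it ignores the total-count limit, lock files, and the "current" artifact that the real function is careful to keep.

package main

import (
	"fmt"
	"sort"
	"time"
)

type file struct {
	path    string
	created time.Time
}

func garbageFiles(files []file, ttl time.Duration, maxRetained int, now time.Time) []string {
	// Oldest first, mirroring the ascending sort in getGarbageFiles.
	sort.Slice(files, func(i, j int) bool { return files[i].created.Before(files[j].created) })

	var garbage []string
	var kept []file
	for _, f := range files {
		if now.Sub(f.created) > ttl {
			garbage = append(garbage, f.path) // stage 1: expired TTL
			continue
		}
		kept = append(kept, f)
	}
	// Stage 2: trim the oldest survivors until at most maxRetained remain.
	for len(kept) > maxRetained {
		garbage = append(garbage, kept[0].path)
		kept = kept[1:]
	}
	return garbage
}

func main() {
	now := time.Now()
	files := []file{
		{"a.tar.gz", now.Add(-48 * time.Hour)},
		{"b.tar.gz", now.Add(-2 * time.Hour)},
		{"c.tar.gz", now.Add(-1 * time.Hour)},
		{"d.tar.gz", now.Add(-30 * time.Minute)},
	}
	fmt.Println(garbageFiles(files, 24*time.Hour, 2, now)) // [a.tar.gz b.tar.gz]
}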
// GarbageCollect removes all garabge files in the artifact dir according to the provided
// GarbageCollect removes all garbage files in the artifact dir according to the provided
// retention options.
func (s *Storage) GarbageCollect(ctx context.Context, artifact sourcev1.Artifact, timeout time.Duration) ([]string, error) {
func (s *Storage) GarbageCollect(ctx context.Context, artifact v1.Artifact, timeout time.Duration) ([]string, error) {
delFilesChan := make(chan []string)
errChan := make(chan error)
// Abort if it takes more than the provided timeout duration.
@ -319,8 +316,8 @@ func stringInSlice(a string, list []string) bool {
return false
}
// ArtifactExist returns a boolean indicating whether the v1beta1.Artifact exists in storage and is a regular file.
func (s *Storage) ArtifactExist(artifact sourcev1.Artifact) bool {
// ArtifactExist returns a boolean indicating whether the v1.Artifact exists in storage and is a regular file.
func (s *Storage) ArtifactExist(artifact v1.Artifact) bool {
fi, err := os.Lstat(s.LocalPath(artifact))
if err != nil {
return false
@ -346,11 +343,11 @@ func SourceIgnoreFilter(ps []gitignore.Pattern, domain []string) ArchiveFileFilt
}
}
// Archive atomically archives the given directory as a tarball to the given v1beta1.Artifact path, excluding
// Archive atomically archives the given directory as a tarball to the given v1.Artifact path, excluding
// directories and any ArchiveFileFilter matches. While archiving, any environment specific data (for example,
// the user and group name) is stripped from file headers.
// If successful, it sets the checksum and last update time on the artifact.
func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, filter ArchiveFileFilter) (err error) {
// If successful, it sets the digest and last update time on the artifact.
func (s *Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFilter) (err error) {
if f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() {
return fmt.Errorf("invalid dir path: %s", dir)
}
@ -367,12 +364,9 @@ func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, filter Archiv
}
}()
md, err := intdigest.NewMultiDigester(intdigest.Canonical, digest.SHA256)
if err != nil {
return fmt.Errorf("failed to create digester: %w", err)
}
d := intdigest.Canonical.Digester()
sz := &writeCounter{}
mw := io.MultiWriter(md, tf, sz)
mw := io.MultiWriter(d.Hash(), tf, sz)
gw := gzip.NewWriter(mw)
tw := tar.NewWriter(gw)
@ -466,17 +460,16 @@ func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, filter Archiv
return err
}
artifact.Digest = md.Digest(intdigest.Canonical).String()
artifact.Checksum = md.Digest(digest.SHA256).Encoded()
artifact.Digest = d.Digest().String()
artifact.LastUpdateTime = metav1.Now()
artifact.Size = &sz.written
return nil
}
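The storage changes in this file all follow the same shape: the MultiDigester, which existed to fill both the new Digest field and the legacy Checksum field, is replaced by a single canonical digester, and the content is streamed once through an io.MultiWriter feeding the digester, the temp file, and a byte counter. A self-contained sketch of that write path, using github.com/opencontainers/go-digest directly rather than the controller's internal/digest wrapper:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

// writeCounter only records the number of bytes written, like the one below.
type writeCounter struct{ written int64 }

func (w *writeCounter) Write(p []byte) (int, error) {
	w.written += int64(len(p))
	return len(p), nil
}

func main() {
	f, err := os.CreateTemp("", "artifact-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// Canonical digester (SHA-256), size counter, and destination file all
	// receive the same single pass over the content.
	d := digest.Canonical.Digester()
	sz := &writeCounter{}
	mw := io.MultiWriter(d.Hash(), f, sz)

	if _, err := io.Copy(mw, strings.NewReader("index contents")); err != nil {
		panic(err)
	}
	_ = f.Close()

	fmt.Println(d.Digest().String(), sz.written) // sha256:<hex> 14
}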
// AtomicWriteFile atomically writes the io.Reader contents to the v1beta1.Artifact path.
// If successful, it sets the checksum and last update time on the artifact.
func (s *Storage) AtomicWriteFile(artifact *sourcev1.Artifact, reader io.Reader, mode os.FileMode) (err error) {
// AtomicWriteFile atomically writes the io.Reader contents to the v1.Artifact path.
// If successful, it sets the digest and last update time on the artifact.
func (s *Storage) AtomicWriteFile(artifact *v1.Artifact, reader io.Reader, mode os.FileMode) (err error) {
localPath := s.LocalPath(*artifact)
tf, err := os.CreateTemp(filepath.Split(localPath))
if err != nil {
@ -489,12 +482,9 @@ func (s *Storage) AtomicWriteFile(artifact *sourcev1.Artifact, reader io.Reader,
}
}()
md, err := intdigest.NewMultiDigester(intdigest.Canonical, digest.SHA256)
if err != nil {
return fmt.Errorf("failed to create digester: %w", err)
}
d := intdigest.Canonical.Digester()
sz := &writeCounter{}
mw := io.MultiWriter(md, tf, sz)
mw := io.MultiWriter(tf, d.Hash(), sz)
if _, err := io.Copy(mw, reader); err != nil {
tf.Close()
@ -512,17 +502,16 @@ func (s *Storage) AtomicWriteFile(artifact *sourcev1.Artifact, reader io.Reader,
return err
}
artifact.Digest = md.Digest(intdigest.Canonical).String()
artifact.Checksum = md.Digest(digest.SHA256).Encoded()
artifact.Digest = d.Digest().String()
artifact.LastUpdateTime = metav1.Now()
artifact.Size = &sz.written
return nil
}
// Copy atomically copies the io.Reader contents to the v1beta1.Artifact path.
// If successful, it sets the checksum and last update time on the artifact.
func (s *Storage) Copy(artifact *sourcev1.Artifact, reader io.Reader) (err error) {
// Copy atomically copies the io.Reader contents to the v1.Artifact path.
// If successful, it sets the digest and last update time on the artifact.
func (s *Storage) Copy(artifact *v1.Artifact, reader io.Reader) (err error) {
localPath := s.LocalPath(*artifact)
tf, err := os.CreateTemp(filepath.Split(localPath))
if err != nil {
@ -535,12 +524,9 @@ func (s *Storage) Copy(artifact *sourcev1.Artifact, reader io.Reader) (err error
}
}()
md, err := intdigest.NewMultiDigester(intdigest.Canonical, digest.SHA256)
if err != nil {
return fmt.Errorf("failed to create digester: %w", err)
}
d := intdigest.Canonical.Digester()
sz := &writeCounter{}
mw := io.MultiWriter(md, tf, sz)
mw := io.MultiWriter(tf, d.Hash(), sz)
if _, err := io.Copy(mw, reader); err != nil {
tf.Close()
@ -554,17 +540,16 @@ func (s *Storage) Copy(artifact *sourcev1.Artifact, reader io.Reader) (err error
return err
}
artifact.Digest = md.Digest(intdigest.Canonical).String()
artifact.Checksum = md.Digest(digest.SHA256).Encoded()
artifact.Digest = d.Digest().String()
artifact.LastUpdateTime = metav1.Now()
artifact.Size = &sz.written
return nil
}
// CopyFromPath atomically copies the contents of the given path to the path of the v1beta1.Artifact.
// If successful, the checksum and last update time on the artifact is set.
func (s *Storage) CopyFromPath(artifact *sourcev1.Artifact, path string) (err error) {
// CopyFromPath atomically copies the contents of the given path to the path of the v1.Artifact.
// If successful, the digest and last update time on the artifact are set.
func (s *Storage) CopyFromPath(artifact *v1.Artifact, path string) (err error) {
f, err := os.Open(path)
if err != nil {
return err
@ -579,7 +564,7 @@ func (s *Storage) CopyFromPath(artifact *sourcev1.Artifact, path string) (err er
}
// CopyToPath copies the contents in the (sub)path of the given artifact to the given path.
func (s *Storage) CopyToPath(artifact *sourcev1.Artifact, subPath, toPath string) error {
func (s *Storage) CopyToPath(artifact *v1.Artifact, subPath, toPath string) error {
// create a tmp directory to store artifact
tmp, err := os.MkdirTemp("", "flux-include-")
if err != nil {
@ -617,8 +602,8 @@ func (s *Storage) CopyToPath(artifact *sourcev1.Artifact, subPath, toPath string
return nil
}
// Symlink creates or updates a symbolic link for the given v1beta1.Artifact and returns the URL for the symlink.
func (s *Storage) Symlink(artifact sourcev1.Artifact, linkName string) (string, error) {
// Symlink creates or updates a symbolic link for the given v1.Artifact and returns the URL for the symlink.
func (s *Storage) Symlink(artifact v1.Artifact, linkName string) (string, error) {
localPath := s.LocalPath(artifact)
dir := filepath.Dir(localPath)
link := filepath.Join(dir, linkName)
@ -636,26 +621,18 @@ func (s *Storage) Symlink(artifact sourcev1.Artifact, linkName string) (string,
return "", err
}
url := fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName))
return url, nil
return fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName)), nil
}
// Checksum returns the SHA256 checksum for the data of the given io.Reader as a string.
func (s *Storage) Checksum(reader io.Reader) string {
h := newHash()
_, _ = io.Copy(h, reader)
return fmt.Sprintf("%x", h.Sum(nil))
}
// Lock creates a file lock for the given v1beta1.Artifact.
func (s *Storage) Lock(artifact sourcev1.Artifact) (unlock func(), err error) {
// Lock creates a file lock for the given v1.Artifact.
func (s *Storage) Lock(artifact v1.Artifact) (unlock func(), err error) {
lockFile := s.LocalPath(artifact) + ".lock"
mutex := lockedfile.MutexAt(lockFile)
return mutex.Lock()
}
// LocalPath returns the secure local path of the given artifact (that is: relative to the Storage.BasePath).
func (s *Storage) LocalPath(artifact sourcev1.Artifact) string {
func (s *Storage) LocalPath(artifact v1.Artifact) string {
if artifact.Path == "" {
return ""
}
@ -666,12 +643,7 @@ func (s *Storage) LocalPath(artifact sourcev1.Artifact) string {
return path
}
// newHash returns a new SHA256 hash.
func newHash() hash.Hash {
return sha256.New()
}
// writecounter is an implementation of io.Writer that only records the number
// writeCounter is an implementation of io.Writer that only records the number
// of bytes written.
type writeCounter struct {
written int64

View File

@ -31,7 +31,7 @@ import (
"github.com/fluxcd/go-git/v5/plumbing/format/gitignore"
. "github.com/onsi/gomega"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
)
func TestStorageConstructor(t *testing.T) {

View File

@ -48,7 +48,8 @@ import (
"github.com/fluxcd/pkg/runtime/testenv"
"github.com/fluxcd/pkg/testserver"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/internal/cache"
"github.com/fluxcd/source-controller/internal/features"
"github.com/fluxcd/source-controller/internal/helm/registry"
@ -204,6 +205,7 @@ func TestMain(m *testing.M) {
initTestTLS()
utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme))
utilruntime.Must(sourcev1beta2.AddToScheme(scheme.Scheme))
testEnv = testenv.New(testenv.WithCRDPath(filepath.Join("..", "config", "crd", "bases")))

841
docs/api/v1/source.md Normal file
View File

@ -0,0 +1,841 @@
<h1>Source API reference v1</h1>
<p>Packages:</p>
<ul class="simple">
<li>
<a href="#source.toolkit.fluxcd.io%2fv1">source.toolkit.fluxcd.io/v1</a>
</li>
</ul>
<h2 id="source.toolkit.fluxcd.io/v1">source.toolkit.fluxcd.io/v1</h2>
<p>Package v1 contains API Schema definitions for the source v1 API group</p>
Resource Types:
<ul class="simple"><li>
<a href="#source.toolkit.fluxcd.io/v1.GitRepository">GitRepository</a>
</li></ul>
<h3 id="source.toolkit.fluxcd.io/v1.GitRepository">GitRepository
</h3>
<p>GitRepository is the Schema for the gitrepositories API.</p>
<div class="md-typeset__scrollwrap">
<div class="md-typeset__table">
<table>
<thead>
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<code>apiVersion</code><br>
string</td>
<td>
<code>source.toolkit.fluxcd.io/v1</code>
</td>
</tr>
<tr>
<td>
<code>kind</code><br>
string
</td>
<td>
<code>GitRepository</code>
</td>
</tr>
<tr>
<td>
<code>metadata</code><br>
<em>
<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#objectmeta-v1-meta">
Kubernetes meta/v1.ObjectMeta
</a>
</em>
</td>
<td>
Refer to the Kubernetes API documentation for the fields of the
<code>metadata</code> field.
</td>
</tr>
<tr>
<td>
<code>spec</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositorySpec">
GitRepositorySpec
</a>
</em>
</td>
<td>
<br/>
<br/>
<table>
<tr>
<td>
<code>url</code><br>
<em>
string
</em>
</td>
<td>
<p>URL specifies the Git repository URL, it can be an HTTP/S or SSH address.</p>
</td>
</tr>
<tr>
<td>
<code>secretRef</code><br>
<em>
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>SecretRef specifies the Secret containing authentication credentials for
the GitRepository.
For HTTPS repositories the Secret must contain &lsquo;username&rsquo; and &lsquo;password&rsquo;
fields for basic auth or &lsquo;bearerToken&rsquo; field for token auth.
For SSH repositories the Secret must contain &lsquo;identity&rsquo;
and &lsquo;known_hosts&rsquo; fields.</p>
</td>
</tr>
<tr>
<td>
<code>interval</code><br>
<em>
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
</td>
<td>
<p>Interval at which to check the GitRepository for updates.</p>
</td>
</tr>
<tr>
<td>
<code>timeout</code><br>
<em>
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>Timeout for Git operations like cloning, defaults to 60s.</p>
</td>
</tr>
<tr>
<td>
<code>ref</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositoryRef">
GitRepositoryRef
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>Reference specifies the Git reference to resolve and monitor for
changes, defaults to the &lsquo;master&rsquo; branch.</p>
</td>
</tr>
<tr>
<td>
<code>verify</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositoryVerification">
GitRepositoryVerification
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>Verification specifies the configuration to verify the Git commit
signature(s).</p>
</td>
</tr>
<tr>
<td>
<code>ignore</code><br>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>Ignore overrides the set of excluded patterns in the .sourceignore format
(which is the same as .gitignore). If not provided, a default will be used,
consult the documentation for your version to find out what those are.</p>
</td>
</tr>
<tr>
<td>
<code>suspend</code><br>
<em>
bool
</em>
</td>
<td>
<em>(Optional)</em>
<p>Suspend tells the controller to suspend the reconciliation of this
GitRepository.</p>
</td>
</tr>
<tr>
<td>
<code>recurseSubmodules</code><br>
<em>
bool
</em>
</td>
<td>
<em>(Optional)</em>
<p>RecurseSubmodules enables the initialization of all submodules within
the GitRepository as cloned from the URL, using their default settings.</p>
</td>
</tr>
<tr>
<td>
<code>include</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositoryInclude">
[]GitRepositoryInclude
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>Include specifies a list of GitRepository resources which Artifacts
should be included in the Artifact produced for this GitRepository.</p>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td>
<code>status</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositoryStatus">
GitRepositoryStatus
</a>
</em>
</td>
<td>
</td>
</tr>
</tbody>
</table>
</div>
</div>
<h3 id="source.toolkit.fluxcd.io/v1.Artifact">Artifact
</h3>
<p>
(<em>Appears on:</em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositoryStatus">GitRepositoryStatus</a>)
</p>
<p>Artifact represents the output of a Source reconciliation.</p>
<div class="md-typeset__scrollwrap">
<div class="md-typeset__table">
<table>
<thead>
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<code>path</code><br>
<em>
string
</em>
</td>
<td>
<p>Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.</p>
</td>
</tr>
<tr>
<td>
<code>url</code><br>
<em>
string
</em>
</td>
<td>
<p>URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.</p>
</td>
</tr>
<tr>
<td>
<code>revision</code><br>
<em>
string
</em>
</td>
<td>
<p>Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.</p>
</td>
</tr>
<tr>
<td>
<code>digest</code><br>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>Digest is the digest of the file in the form of &lsquo;&lt;algorithm&gt;:&lt;checksum&gt;&rsquo;.</p>
</td>
</tr>
<tr>
<td>
<code>lastUpdateTime</code><br>
<em>
<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#time-v1-meta">
Kubernetes meta/v1.Time
</a>
</em>
</td>
<td>
<p>LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.</p>
</td>
</tr>
<tr>
<td>
<code>size</code><br>
<em>
int64
</em>
</td>
<td>
<em>(Optional)</em>
<p>Size is the number of bytes in the file.</p>
</td>
</tr>
<tr>
<td>
<code>metadata</code><br>
<em>
map[string]string
</em>
</td>
<td>
<em>(Optional)</em>
<p>Metadata holds upstream information such as OCI annotations.</p>
</td>
</tr>
</tbody>
</table>
</div>
</div>
<h3 id="source.toolkit.fluxcd.io/v1.GitRepositoryInclude">GitRepositoryInclude
</h3>
<p>
(<em>Appears on:</em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositorySpec">GitRepositorySpec</a>,
<a href="#source.toolkit.fluxcd.io/v1.GitRepositoryStatus">GitRepositoryStatus</a>)
</p>
<p>GitRepositoryInclude specifies a local reference to a GitRepository which
Artifact (sub-)contents must be included, and where they should be placed.</p>
<div class="md-typeset__scrollwrap">
<div class="md-typeset__table">
<table>
<thead>
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<code>repository</code><br>
<em>
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
</td>
<td>
<p>GitRepositoryRef specifies the GitRepository which Artifact contents
must be included.</p>
</td>
</tr>
<tr>
<td>
<code>fromPath</code><br>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>FromPath specifies the path to copy contents from, defaults to the root
of the Artifact.</p>
</td>
</tr>
<tr>
<td>
<code>toPath</code><br>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>ToPath specifies the path to copy contents to, defaults to the name of
the GitRepositoryRef.</p>
</td>
</tr>
</tbody>
</table>
</div>
</div>
<h3 id="source.toolkit.fluxcd.io/v1.GitRepositoryRef">GitRepositoryRef
</h3>
<p>
(<em>Appears on:</em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositorySpec">GitRepositorySpec</a>)
</p>
<p>GitRepositoryRef specifies the Git reference to resolve and checkout.</p>
<div class="md-typeset__scrollwrap">
<div class="md-typeset__table">
<table>
<thead>
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<code>branch</code><br>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>Branch to check out, defaults to &lsquo;master&rsquo; if no other field is defined.</p>
</td>
</tr>
<tr>
<td>
<code>tag</code><br>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>Tag to check out, takes precedence over Branch.</p>
</td>
</tr>
<tr>
<td>
<code>semver</code><br>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>SemVer tag expression to check out, takes precedence over Tag.</p>
</td>
</tr>
<tr>
<td>
<code>name</code><br>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>Name of the reference to check out; takes precedence over Branch, Tag and SemVer.</p>
<p>It must be a valid Git reference: <a href="https://git-scm.com/docs/git-check-ref-format#_description">https://git-scm.com/docs/git-check-ref-format#_description</a>
Examples: &ldquo;refs/heads/main&rdquo;, &ldquo;refs/tags/v0.1.0&rdquo;, &ldquo;refs/pull/420/head&rdquo;, &ldquo;refs/merge-requests/1/head&rdquo;</p>
</td>
</tr>
<tr>
<td>
<code>commit</code><br>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>Commit SHA to check out, takes precedence over all reference fields.</p>
<p>This can be combined with Branch to shallow clone the branch, in which
the commit is expected to exist.</p>
</td>
</tr>
</tbody>
</table>
</div>
</div>
<h3 id="source.toolkit.fluxcd.io/v1.GitRepositorySpec">GitRepositorySpec
</h3>
<p>
(<em>Appears on:</em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepository">GitRepository</a>)
</p>
<p>GitRepositorySpec specifies the required configuration to produce an
Artifact for a Git repository.</p>
<div class="md-typeset__scrollwrap">
<div class="md-typeset__table">
<table>
<thead>
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<code>url</code><br>
<em>
string
</em>
</td>
<td>
<p>URL specifies the Git repository URL, it can be an HTTP/S or SSH address.</p>
</td>
</tr>
<tr>
<td>
<code>secretRef</code><br>
<em>
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>SecretRef specifies the Secret containing authentication credentials for
the GitRepository.
For HTTPS repositories the Secret must contain &lsquo;username&rsquo; and &lsquo;password&rsquo;
fields for basic auth or &lsquo;bearerToken&rsquo; field for token auth.
For SSH repositories the Secret must contain &lsquo;identity&rsquo;
and &lsquo;known_hosts&rsquo; fields.</p>
</td>
</tr>
<tr>
<td>
<code>interval</code><br>
<em>
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
</td>
<td>
<p>Interval at which to check the GitRepository for updates.</p>
</td>
</tr>
<tr>
<td>
<code>timeout</code><br>
<em>
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>Timeout for Git operations like cloning, defaults to 60s.</p>
</td>
</tr>
<tr>
<td>
<code>ref</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositoryRef">
GitRepositoryRef
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>Reference specifies the Git reference to resolve and monitor for
changes, defaults to the &lsquo;master&rsquo; branch.</p>
</td>
</tr>
<tr>
<td>
<code>verify</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositoryVerification">
GitRepositoryVerification
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>Verification specifies the configuration to verify the Git commit
signature(s).</p>
</td>
</tr>
<tr>
<td>
<code>ignore</code><br>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>Ignore overrides the set of excluded patterns in the .sourceignore format
(which is the same as .gitignore). If not provided, a default will be used,
consult the documentation for your version to find out what those are.</p>
</td>
</tr>
<tr>
<td>
<code>suspend</code><br>
<em>
bool
</em>
</td>
<td>
<em>(Optional)</em>
<p>Suspend tells the controller to suspend the reconciliation of this
GitRepository.</p>
</td>
</tr>
<tr>
<td>
<code>recurseSubmodules</code><br>
<em>
bool
</em>
</td>
<td>
<em>(Optional)</em>
<p>RecurseSubmodules enables the initialization of all submodules within
the GitRepository as cloned from the URL, using their default settings.</p>
</td>
</tr>
<tr>
<td>
<code>include</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositoryInclude">
[]GitRepositoryInclude
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>Include specifies a list of GitRepository resources which Artifacts
should be included in the Artifact produced for this GitRepository.</p>
</td>
</tr>
</tbody>
</table>
</div>
</div>
<h3 id="source.toolkit.fluxcd.io/v1.GitRepositoryStatus">GitRepositoryStatus
</h3>
<p>
(<em>Appears on:</em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepository">GitRepository</a>)
</p>
<p>GitRepositoryStatus records the observed state of a Git repository.</p>
<div class="md-typeset__scrollwrap">
<div class="md-typeset__table">
<table>
<thead>
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<code>observedGeneration</code><br>
<em>
int64
</em>
</td>
<td>
<em>(Optional)</em>
<p>ObservedGeneration is the last observed generation of the GitRepository
object.</p>
</td>
</tr>
<tr>
<td>
<code>conditions</code><br>
<em>
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Condition">
[]Kubernetes meta/v1.Condition
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>Conditions holds the conditions for the GitRepository.</p>
</td>
</tr>
<tr>
<td>
<code>artifact</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1.Artifact">
Artifact
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>Artifact represents the last successful GitRepository reconciliation.</p>
</td>
</tr>
<tr>
<td>
<code>includedArtifacts</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1.Artifact">
[]Artifact
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>IncludedArtifacts contains a list of the last successfully included
Artifacts as instructed by GitRepositorySpec.Include.</p>
</td>
</tr>
<tr>
<td>
<code>observedIgnore</code><br>
<em>
string
</em>
</td>
<td>
<em>(Optional)</em>
<p>ObservedIgnore is the observed exclusion patterns used for constructing
the source artifact.</p>
</td>
</tr>
<tr>
<td>
<code>observedRecurseSubmodules</code><br>
<em>
bool
</em>
</td>
<td>
<em>(Optional)</em>
<p>ObservedRecurseSubmodules is the observed resource submodules
configuration used to produce the current Artifact.</p>
</td>
</tr>
<tr>
<td>
<code>observedInclude</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositoryInclude">
[]GitRepositoryInclude
</a>
</em>
</td>
<td>
<em>(Optional)</em>
<p>ObservedInclude is the observed list of GitRepository resources used to
produce the current Artifact.</p>
</td>
</tr>
<tr>
<td>
<code>ReconcileRequestStatus</code><br>
<em>
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">
github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
</a>
</em>
</td>
<td>
<p>
(Members of <code>ReconcileRequestStatus</code> are embedded into this type.)
</p>
</td>
</tr>
</tbody>
</table>
</div>
</div>
<h3 id="source.toolkit.fluxcd.io/v1.GitRepositoryVerification">GitRepositoryVerification
</h3>
<p>
(<em>Appears on:</em>
<a href="#source.toolkit.fluxcd.io/v1.GitRepositorySpec">GitRepositorySpec</a>)
</p>
<p>GitRepositoryVerification specifies the Git commit signature verification
strategy.</p>
<div class="md-typeset__scrollwrap">
<div class="md-typeset__table">
<table>
<thead>
<tr>
<th>Field</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>
<code>mode</code><br>
<em>
string
</em>
</td>
<td>
<p>Mode specifies what Git object should be verified, currently (&lsquo;head&rsquo;).</p>
</td>
</tr>
<tr>
<td>
<code>secretRef</code><br>
<em>
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
</td>
<td>
<p>SecretRef specifies the Secret containing the public keys of trusted Git
authors.</p>
</td>
</tr>
</tbody>
</table>
</div>
</div>
<h3 id="source.toolkit.fluxcd.io/v1.Source">Source
</h3>
<p>Source interface must be supported by all API types.
Source is the interface that provides generic access to the Artifact and
interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
API group.</p>
<div class="admonition note">
<p class="last">This page was automatically generated with <code>gen-crd-api-reference-docs</code></p>
</div>

View File

@ -1,4 +1,4 @@
<h1>Source API reference</h1>
<h1>Source API reference v1beta2</h1>
<p>Packages:</p>
<ul class="simple">
<li>
@ -140,7 +140,7 @@ string
<td>
<code>secretRef</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -155,7 +155,7 @@ for the Bucket.</p>
<td>
<code>interval</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -168,7 +168,7 @@ Kubernetes meta/v1.Duration
<td>
<code>timeout</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -209,7 +209,7 @@ Bucket.</p>
<td>
<code>accessFrom</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/acl#AccessFrom">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/acl#AccessFrom">
github.com/fluxcd/pkg/apis/acl.AccessFrom
</a>
</em>
@ -312,7 +312,7 @@ string
<td>
<code>secretRef</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -331,7 +331,7 @@ and &lsquo;known_hosts&rsquo; fields.</p>
<td>
<code>interval</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -344,7 +344,7 @@ Kubernetes meta/v1.Duration
<td>
<code>timeout</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -457,7 +457,7 @@ should be included in the Artifact produced for this GitRepository.</p>
<td>
<code>accessFrom</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/acl#AccessFrom">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/acl#AccessFrom">
github.com/fluxcd/pkg/apis/acl.AccessFrom
</a>
</em>
@ -587,7 +587,7 @@ LocalHelmChartSourceReference
<td>
<code>interval</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -659,7 +659,7 @@ source.</p>
<td>
<code>accessFrom</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/acl#AccessFrom">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/acl#AccessFrom">
github.com/fluxcd/pkg/apis/acl.AccessFrom
</a>
</em>
@ -781,7 +781,7 @@ host.</p>
<td>
<code>secretRef</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -817,7 +817,7 @@ in credentials getting stolen in a MITM-attack.</p>
<td>
<code>interval</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -830,7 +830,7 @@ Kubernetes meta/v1.Duration
<td>
<code>timeout</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -859,7 +859,7 @@ HelmRepository.</p>
<td>
<code>accessFrom</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/acl#AccessFrom">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/acl#AccessFrom">
github.com/fluxcd/pkg/apis/acl.AccessFrom
</a>
</em>
@ -1033,7 +1033,7 @@ When not specified, defaults to &lsquo;generic&rsquo;.</p>
<td>
<code>secretRef</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -1079,7 +1079,7 @@ the image pull if the service account has attached pull secrets. For more inform
<td>
<code>certSecretRef</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -1103,7 +1103,7 @@ you are using a self-signed server certificate.</p>
<td>
<code>interval</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -1116,7 +1116,7 @@ Kubernetes meta/v1.Duration
<td>
<code>timeout</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -1185,15 +1185,9 @@ OCIRepositoryStatus
</div>
<h3 id="source.toolkit.fluxcd.io/v1beta2.Artifact">Artifact
</h3>
<p>
(<em>Appears on:</em>
<a href="#source.toolkit.fluxcd.io/v1beta2.BucketStatus">BucketStatus</a>,
<a href="#source.toolkit.fluxcd.io/v1beta2.GitRepositoryStatus">GitRepositoryStatus</a>,
<a href="#source.toolkit.fluxcd.io/v1beta2.HelmChartStatus">HelmChartStatus</a>,
<a href="#source.toolkit.fluxcd.io/v1beta2.HelmRepositoryStatus">HelmRepositoryStatus</a>,
<a href="#source.toolkit.fluxcd.io/v1beta2.OCIRepositoryStatus">OCIRepositoryStatus</a>)
</p>
<p>Artifact represents the output of a Source reconciliation.</p>
<p>Deprecated: use Artifact from api/v1 instead. This type will be removed in
a future release.</p>
<div class="md-typeset__scrollwrap">
<div class="md-typeset__table">
<table>
@ -1392,7 +1386,7 @@ string
<td>
<code>secretRef</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -1407,7 +1401,7 @@ for the Bucket.</p>
<td>
<code>interval</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -1420,7 +1414,7 @@ Kubernetes meta/v1.Duration
<td>
<code>timeout</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -1461,7 +1455,7 @@ Bucket.</p>
<td>
<code>accessFrom</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/acl#AccessFrom">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/acl#AccessFrom">
github.com/fluxcd/pkg/apis/acl.AccessFrom
</a>
</em>
@ -1510,7 +1504,7 @@ int64
<td>
<code>conditions</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Condition">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Condition">
[]Kubernetes meta/v1.Condition
</a>
</em>
@ -1538,8 +1532,8 @@ BucketStatus.Artifact data is recommended.</p>
<td>
<code>artifact</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1beta2.Artifact">
Artifact
<a href="https://pkg.go.dev/github.com/fluxcd/source-controller/api/v1#Artifact">
github.com/fluxcd/source-controller/api/v1.Artifact
</a>
</em>
</td>
@ -1565,7 +1559,7 @@ the source artifact.</p>
<td>
<code>ReconcileRequestStatus</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">
github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
</a>
</em>
@ -1603,7 +1597,7 @@ Artifact (sub-)contents must be included, and where they should be placed.</p>
<td>
<code>repository</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -1761,7 +1755,7 @@ string
<td>
<code>secretRef</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -1780,7 +1774,7 @@ and &lsquo;known_hosts&rsquo; fields.</p>
<td>
<code>interval</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -1793,7 +1787,7 @@ Kubernetes meta/v1.Duration
<td>
<code>timeout</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -1906,7 +1900,7 @@ should be included in the Artifact produced for this GitRepository.</p>
<td>
<code>accessFrom</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/acl#AccessFrom">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/acl#AccessFrom">
github.com/fluxcd/pkg/apis/acl.AccessFrom
</a>
</em>
@ -1956,7 +1950,7 @@ object.</p>
<td>
<code>conditions</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Condition">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Condition">
[]Kubernetes meta/v1.Condition
</a>
</em>
@ -1984,8 +1978,8 @@ GitRepositoryStatus.Artifact data is recommended.</p>
<td>
<code>artifact</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1beta2.Artifact">
Artifact
<a href="https://pkg.go.dev/github.com/fluxcd/source-controller/api/v1#Artifact">
github.com/fluxcd/source-controller/api/v1.Artifact
</a>
</em>
</td>
@ -1998,8 +1992,8 @@ Artifact
<td>
<code>includedArtifacts</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1beta2.Artifact">
[]Artifact
<a href="https://pkg.go.dev/github.com/fluxcd/source-controller/api/v1#Artifact">
[]github.com/fluxcd/source-controller/api/v1.Artifact
</a>
</em>
</td>
@ -2076,7 +2070,7 @@ to produce the current Artifact.</p>
<td>
<code>ReconcileRequestStatus</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">
github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
</a>
</em>
@ -2124,7 +2118,7 @@ string
<td>
<code>secretRef</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -2197,7 +2191,7 @@ LocalHelmChartSourceReference
<td>
<code>interval</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -2269,7 +2263,7 @@ source.</p>
<td>
<code>accessFrom</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/acl#AccessFrom">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/acl#AccessFrom">
github.com/fluxcd/pkg/apis/acl.AccessFrom
</a>
</em>
@ -2363,7 +2357,7 @@ resolved chart reference.</p>
<td>
<code>conditions</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Condition">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Condition">
[]Kubernetes meta/v1.Condition
</a>
</em>
@ -2391,8 +2385,8 @@ BucketStatus.Artifact data is recommended.</p>
<td>
<code>artifact</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1beta2.Artifact">
Artifact
<a href="https://pkg.go.dev/github.com/fluxcd/source-controller/api/v1#Artifact">
github.com/fluxcd/source-controller/api/v1.Artifact
</a>
</em>
</td>
@ -2405,7 +2399,7 @@ Artifact
<td>
<code>ReconcileRequestStatus</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">
github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
</a>
</em>
@ -2454,7 +2448,7 @@ host.</p>
<td>
<code>secretRef</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -2490,7 +2484,7 @@ in credentials getting stolen in a MITM-attack.</p>
<td>
<code>interval</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -2503,7 +2497,7 @@ Kubernetes meta/v1.Duration
<td>
<code>timeout</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -2532,7 +2526,7 @@ HelmRepository.</p>
<td>
<code>accessFrom</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/acl#AccessFrom">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/acl#AccessFrom">
github.com/fluxcd/pkg/apis/acl.AccessFrom
</a>
</em>
@ -2609,7 +2603,7 @@ object.</p>
<td>
<code>conditions</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Condition">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Condition">
[]Kubernetes meta/v1.Condition
</a>
</em>
@ -2637,8 +2631,8 @@ HelmRepositoryStatus.Artifact data is recommended.</p>
<td>
<code>artifact</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1beta2.Artifact">
Artifact
<a href="https://pkg.go.dev/github.com/fluxcd/source-controller/api/v1#Artifact">
github.com/fluxcd/source-controller/api/v1.Artifact
</a>
</em>
</td>
@ -2651,7 +2645,7 @@ Artifact
<td>
<code>ReconcileRequestStatus</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">
github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
</a>
</em>
@ -2909,7 +2903,7 @@ When not specified, defaults to &lsquo;generic&rsquo;.</p>
<td>
<code>secretRef</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -2955,7 +2949,7 @@ the image pull if the service account has attached pull secrets. For more inform
<td>
<code>certSecretRef</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -2979,7 +2973,7 @@ you are using a self-signed server certificate.</p>
<td>
<code>interval</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -2992,7 +2986,7 @@ Kubernetes meta/v1.Duration
<td>
<code>timeout</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration">
Kubernetes meta/v1.Duration
</a>
</em>
@ -3077,7 +3071,7 @@ int64
<td>
<code>conditions</code><br>
<em>
<a href="https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Condition">
<a href="https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Condition">
[]Kubernetes meta/v1.Condition
</a>
</em>
@ -3103,8 +3097,8 @@ string
<td>
<code>artifact</code><br>
<em>
<a href="#source.toolkit.fluxcd.io/v1beta2.Artifact">
Artifact
<a href="https://pkg.go.dev/github.com/fluxcd/source-controller/api/v1#Artifact">
github.com/fluxcd/source-controller/api/v1.Artifact
</a>
</em>
</td>
@ -3166,7 +3160,7 @@ the source artifact.</p>
<td>
<code>ReconcileRequestStatus</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#ReconcileRequestStatus">
github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
</a>
</em>
@ -3214,7 +3208,7 @@ string
<td>
<code>secretRef</code><br>
<em>
<a href="https://godoc.org/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
<a href="https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#LocalObjectReference">
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
</a>
</em>
@ -3235,6 +3229,8 @@ trusted public keys.</p>
Source is the interface that provides generic access to the Artifact and
interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
API group.</p>
<p>Deprecated: use the Source interface from api/v1 instead. This type will be
removed in a future release.</p>
<div class="admonition note">
<p class="last">This page was automatically generated with <code>gen-crd-api-reference-docs</code></p>
</div>

View File

@ -1,66 +1,7 @@
# Source Controller
The main goal is to define a set of Kubernetes objects that cluster
admins and various automated operators can interact with to offload the
registration, authentication, verification and fetching of sources (e.g. Git
and Helm repositories) to a dedicated controller.
## Motivation
Each Flux and each Helm operator mirrors the Git repositories they are
using, in the same way, using the same code. But other components
might benefit from access to the source mirrors, and Flux and the Helm
operator could work more in sympathy with Kubernetes by factoring it out.
If "sources" (usually git repos, but also Helm charts and potentially
other things) existed in their own right as Kubernetes resources,
components like Flux and Helm operator could use standard Kubernetes
mechanisms to build on them; and, they could be managed independently
of the components using them.
## API Specification
* [v1](v1/README.md)
* [v1beta2](v1beta2/README.md)
* [v1beta1](v1beta1/README.md)
## Implementation
The controller implementation will watch for source objects in a cluster and act on them.
The actions performed by the source controller could be:
* validate source definitions
* authenticate to sources and validate authenticity
* detect source changes based on update policies (semver)
* fetch resources on-demand and on-a-schedule
* package the fetched resources into a well known format (tar.gz, yaml)
* store the artifacts locally
* make the artifacts addressable by their source identifier (sha, version, ts)
* make the artifacts available in-cluster to interested 3rd parties
* notify interested 3rd parties of source changes and availability (status conditions, events, hooks)
## Impact to Flux
Having a dedicated controller that manages Git repositories defined with Kubernetes custom resources would:
* simplify Flux configuration as fluxd could subscribe to Git sources in-cluster and pull the artifacts
automatically without manual intervention from users to reconfigure and redeploy Flux
* improve the installation experience as users will not have to patch fluxd's deployment to inject
the HTTPS basic auth credentials, change the source URL or other Git and PGP related settings
* enable fluxd to compose the desired state of a cluster from multiple sources by applying all artifacts present in flux namespace
* enable fluxd to apply manifests coming from other sources than Git, e.g. S3 buckets
* allow fluxd to run under a non-root user as it wouldn't need to shell out to ssh-keygen, git or pgp
* enable fluxd to apply manifests coming from the most recent semver tag of a Git repository
* allow user to pin the cluster desired state to a specific Git commit or Git tag
## Impact to Helm Operator
Having a dedicated controller that manages Helm repositories and charts defined with Kubernetes custom
resources would:
* simplify the Helm Operator configuration as repository and chart definitions can be re-used across
`HelmRelease` resources (see [fluxcd/helm-operator#142](https://github.com/fluxcd/helm-operator/issues/142))
* improve the user experience as repositories requiring authentication will no longer require a
`repositories.yaml` import / file mount
* simplify the architecture of the Helm Operator as it allows the operator to work with a single
source type (`HelmChart`) and way of preparing and executing installations and/or upgrades
* allow the Helm Operator to run under a non-root user as it wouldn't need to shell out to git

17
docs/spec/v1/README.md Normal file
View File

@ -0,0 +1,17 @@
# source.toolkit.fluxcd.io/v1
This is the v1 API specification for defining the desired state sources of Kubernetes clusters.
## Specification
* Source kinds:
+ [GitRepository](gitrepositories.md)
## Implementation
* [source-controller](https://github.com/fluxcd/source-controller/)
## Consumers
* [kustomize-controller](https://github.com/fluxcd/kustomize-controller/)
* [helm-controller](https://github.com/fluxcd/helm-controller/)

View File

@ -0,0 +1,953 @@
# Git Repositories
<!-- menuweight:10 -->
The `GitRepository` API defines a Source to produce an Artifact for a Git
repository revision.
## Example
The following is an example of a GitRepository. It creates a tarball
(`.tar.gz`) Artifact with the fetched data from a Git repository for the
resolved reference.
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: podinfo
namespace: default
spec:
interval: 5m0s
url: https://github.com/stefanprodan/podinfo
ref:
branch: master
```
In the above example:
- A GitRepository named `podinfo` is created, indicated by the
`.metadata.name` field.
- The source-controller checks the Git repository every five minutes, indicated
by the `.spec.interval` field.
- It clones the `master` branch of the `https://github.com/stefanprodan/podinfo`
repository, indicated by the `.spec.ref.branch` and `.spec.url` fields.
- The specified branch and resolved HEAD revision are used as the Artifact
revision, reported in-cluster in the `.status.artifact.revision` field.
- When the current GitRepository revision differs from the latest fetched
revision, a new Artifact is archived.
- The new Artifact is reported in the `.status.artifact` field.
You can run this example by saving the manifest into `gitrepository.yaml`.
1. Apply the resource on the cluster:
```sh
kubectl apply -f gitrepository.yaml
```
2. Run `kubectl get gitrepository` to see the GitRepository:
```console
NAME URL AGE READY STATUS
podinfo https://github.com/stefanprodan/podinfo 5s True stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc'
```
3. Run `kubectl describe gitrepository podinfo` to see the [Artifact](#artifact)
and [Conditions](#conditions) in the GitRepository's Status:
```console
...
Status:
Artifact:
Digest: sha256:95e386f421272710c4cedbbd8607dbbaa019d500e7a5a0b6720bc7bebefc7bf2
Last Update Time: 2022-02-14T11:23:36Z
Path: gitrepository/default/podinfo/132f4e719209eb10b9485302f8593fc0e680f4fc.tar.gz
Revision: master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc
Size: 91318
URL: http://source-controller.source-system.svc.cluster.local./gitrepository/default/podinfo/132f4e719209eb10b9485302f8593fc0e680f4fc.tar.gz
Conditions:
Last Transition Time: 2022-02-14T11:23:36Z
Message: stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc'
Observed Generation: 1
Reason: Succeeded
Status: True
Type: Ready
Last Transition Time: 2022-02-14T11:23:36Z
Message: stored artifact for revision 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc'
Observed Generation: 1
Reason: Succeeded
Status: True
Type: ArtifactInStorage
Observed Generation: 1
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal NewArtifact 62s source-controller stored artifact for commit 'Merge pull request #160 from stefanprodan/release-6.0.3'
```
## Writing a GitRepository spec
As with all other Kubernetes config, a GitRepository needs `apiVersion`,
`kind`, and `metadata` fields. The name of a GitRepository object must be a
valid [DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names).
A GitRepository also needs a
[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status).
### URL
`.spec.url` is a required field that specifies the HTTP/S or SSH address of the
Git repository.
**Note:** Unlike using `git`, the
[shorter scp-like syntax](https://git-scm.com/book/en/v2/Git-on-the-Server-The-Protocols#_the_ssh_protocol)
is not supported for SSH addresses (e.g. `user@example.com:repository.git`).
Instead, the valid URL format is `ssh://user@example.com:22/repository.git`.
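For illustration, a GitRepository using this SSH URL form could look as follows. The
host, port and repository path are placeholders, and the referenced `ssh-credentials`
Secret is the one described under [SSH authentication](#ssh-authentication) below:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: <repository-name>
spec:
  interval: 5m0s
  # Note the explicit ssh:// scheme and port; the scp-like shorthand is not supported.
  url: ssh://git@example.com:22/<org>/<repository>.git
  secretRef:
    name: ssh-credentials
```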
### Secret reference
`.spec.secretRef.name` is an optional field to specify a name reference to a
Secret in the same namespace as the GitRepository, containing authentication
credentials for the Git repository.
The required fields in the Secret depend on the specified protocol in the
[URL](#url).
#### Basic access authentication
To authenticate towards a Git repository over HTTPS using basic access
authentication (in other words: using a username and password), the referenced
Secret is expected to contain `.data.username` and `.data.password` values.
```yaml
---
apiVersion: v1
kind: Secret
metadata:
name: basic-access-auth
type: Opaque
data:
username: <BASE64>
password: <BASE64>
```
#### Bearer token authentication
To authenticate towards a Git repository over HTTPS using bearer token
authentication (in other words: using an `Authorization: Bearer` header), the referenced
Secret is expected to contain the token in `.data.bearerToken`.
```yaml
---
apiVersion: v1
kind: Secret
metadata:
name: bearer-token-auth
type: Opaque
data:
bearerToken: <BASE64>
```
#### HTTPS Certificate Authority
To provide a Certificate Authority to trust while connecting with a Git
repository over HTTPS, the referenced Secret can contain a `.data.caFile`
value.
```yaml
---
apiVersion: v1
kind: Secret
metadata:
name: https-ca-credentials
namespace: default
type: Opaque
data:
caFile: <BASE64>
```
#### SSH authentication
To authenticate towards a Git repository over SSH, the referenced Secret is
expected to contain `identity` and `known_hosts` fields, holding respectively the
private key of the SSH key pair and the host keys of the Git repository.
```yaml
---
apiVersion: v1
kind: Secret
metadata:
name: ssh-credentials
type: Opaque
stringData:
identity: |
-----BEGIN OPENSSH PRIVATE KEY-----
...
-----END OPENSSH PRIVATE KEY-----
known_hosts: |
github.com ecdsa-sha2-nistp256 AAAA...
```
Alternatively, the Flux CLI can be used to automatically create the
secret, and also populate the `known_hosts` field:
```sh
flux create secret git podinfo-auth \
--url=ssh://git@github.com/stefanprodan/podinfo \
--private-key-file=./identity
```
For password-protected SSH private keys, the password must be provided
via an additional `password` field in the secret. Flux CLI also supports
this via the `--password` flag.
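As a sketch, a Secret for a password-protected private key combines the fields shown
above with the additional `password` field (all values are placeholders):
```yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: ssh-credentials
type: Opaque
stringData:
  identity: |
    -----BEGIN OPENSSH PRIVATE KEY-----
    ...
    -----END OPENSSH PRIVATE KEY-----
  known_hosts: |
    github.com ecdsa-sha2-nistp256 AAAA...
  # Passphrase protecting the private key in the identity field.
  password: <passphrase>
```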
### Interval
`.spec.interval` is a required field that specifies the interval at which the
Git repository must be fetched.
After successfully reconciling the object, the source-controller requeues it
for inspection after the specified interval. The value must be in a
[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration),
e.g. `10m0s` to reconcile the object every 10 minutes.
If the `.metadata.generation` of a resource changes (due to e.g. a change to
the spec), this is handled instantly outside the interval window.
### Timeout
`.spec.timeout` is an optional field to specify a timeout for Git operations
like cloning. The value must be in a
[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration),
e.g. `1m30s` for a timeout of one minute and thirty seconds. The default value
is `60s`.
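For example, a GitRepository that allows Git operations to take up to one minute and
thirty seconds:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: <repository-name>
spec:
  interval: 5m0s
  url: https://github.com/stefanprodan/podinfo
  timeout: 1m30s
```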
### Reference
`.spec.ref` is an optional field to specify the Git reference to resolve and
watch for changes. References are specified in one or more subfields
(`.branch`, `.tag`, `.semver`, `.name`, `.commit`), with later listed fields taking
precedence over earlier ones. If not specified, it defaults to a `master`
branch reference.
#### Branch example
To Git checkout a specified branch, use `.spec.ref.branch`:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: <repository-name>
spec:
ref:
branch: <branch-name>
```
This will perform a shallow clone to only fetch the specified branch.
#### Tag example
To Git checkout a specified tag, use `.spec.ref.tag`:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: <repository-name>
spec:
ref:
tag: <tag-name>
```
This field takes precedence over [`.branch`](#branch-example).
#### SemVer example
To Git checkout a tag based on a
[SemVer range](https://github.com/Masterminds/semver#checking-version-constraints),
use `.spec.ref.semver`:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: <repository-name>
spec:
ref:
# SemVer range reference: https://github.com/Masterminds/semver#checking-version-constraints
semver: "<semver-range>"
```
This field takes precedence over [`.branch`](#branch-example) and
[`.tag`](#tag-example).
#### Name example
To Git checkout a specified [reference](https://git-scm.com/book/en/v2/Git-Internals-Git-References),
use `.spec.ref.name`:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: <repository-name>
spec:
ref:
# Ref name format reference: https://git-scm.com/docs/git-check-ref-format#_description
name: <reference-name>
```
Valid examples are: `refs/heads/main`, `refs/tags/v0.1.0`, `refs/pull/420/head`,
`refs/merge-requests/1/head`.
This field takes precedence over [`.branch`](#branch-example),
[`.tag`](#tag-example), and [`.semver`](#semver-example).
#### Commit example
To Git checkout a specified commit, use `.spec.ref.commit`:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: <repository-name>
spec:
ref:
commit: "<commit SHA>"
```
This field takes precedence over all other fields. It can be combined with
`.spec.ref.branch` to perform a shallow clone of the branch, in which the
commit must exist:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: <repository-name>
spec:
ref:
branch: <branch>
commit: "<commit SHA within branch>"
```
### Verification
`.spec.verify` is an optional field to enable the verification of Git commit
signatures. The field offers two subfields:
- `.mode`, to specify what Git commit object should be verified. Only supports
`head` at present.
- `.secretRef.name`, to specify a reference to a Secret in the same namespace as
the GitRepository. Containing the (PGP) public keys of trusted Git authors.
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: podinfo
namespace: default
spec:
interval: 1m
url: https://github.com/stefanprodan/podinfo
ref:
branch: master
verify:
mode: head
secretRef:
name: pgp-public-keys
```
When the verification succeeds, the controller adds a Condition with the
following attributes to the GitRepository's `.status.conditions`:
- `type: SourceVerifiedCondition`
- `status: "True"`
- `reason: Succeeded`
#### Verification Secret example
```yaml
---
apiVersion: v1
kind: Secret
metadata:
name: pgp-public-keys
namespace: default
type: Opaque
data:
author1.asc: <BASE64>
author2.asc: <BASE64>
```
Exporting armored public keys (`.asc` files) using `gpg`, and generating a
Secret:
```sh
# Export armored public keys
gpg --export --armor 3CB12BA185C47B67 > author1.asc
gpg --export --armor 6A7436E8790F8689 > author2.asc
# Generate secret
kubectl create secret generic pgp-public-keys \
--from-file=author1.asc \
--from-file=author2.asc \
-o yaml
```
### Ignore
`.spec.ignore` is an optional field to specify rules in [the `.gitignore`
pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Paths
matching the defined rules are excluded while archiving.
When specified, `.spec.ignore` overrides the [default exclusion
list](#default-exclusions), and may overrule the [`.sourceignore` file
exclusions](#sourceignore-file). See [excluding files](#excluding-files)
for more information.
### Suspend
`.spec.suspend` is an optional field to suspend the reconciliation of a
GitRepository. When set to `true`, the controller will stop reconciling the
GitRepository, and changes to the resource or in the Git repository will not
result in a new Artifact. When the field is set to `false` or removed, it will
resume.
#### Optimized Git clones
Optimized Git clones decrease resource utilization for GitRepository
reconciliations.
When enabled, it avoids full Git clone operations by first checking whether
the revision of the last stored artifact is still the head of the remote
repository and none of the other factors that contribute to a change in the
artifact, like ignore rules and included repositories, have changed. If that is
so, the reconciliation is skipped. Otherwise, a full reconciliation is performed
as usual.
This feature is enabled by default. It can be disabled by starting the
controller with the argument `--feature-gates=OptimizedGitClones=false`.
**Note:** GitRepository objects configured for SemVer or Commit clones are
not affected by this functionality.
#### Proxy support
When a proxy is configured in the source-controller Pod through the appropriate
environment variables, for example `HTTPS_PROXY` and `NO_PROXY`, the controller
uses it for its outgoing Git operations over HTTP/S.
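As an illustrative sketch (not a complete manifest), the proxy can be provided by
patching the environment of the source-controller Deployment. The Deployment name,
namespace, container name and proxy address below are assumptions that may differ in
your installation:
```yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: source-controller
  namespace: flux-system
spec:
  template:
    spec:
      containers:
        - name: manager
          env:
            # Proxy used for outgoing HTTP/S Git operations (placeholder address).
            - name: HTTPS_PROXY
              value: http://proxy.example.com:3128
            # Hosts and domains that should bypass the proxy.
            - name: NO_PROXY
              value: .cluster.local.,.cluster.local,.svc
```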
### Recurse submodules
`.spec.recurseSubmodules` is an optional field to enable the initialization of
all submodules within the cloned Git repository, using their default settings.
This option defaults to `false`.
Note that for most Git providers (e.g. GitHub and GitLab), deploy keys cannot be
used, as reusing a key across multiple repositories is not allowed. You have
to use either [HTTPS token-based authentication](#basic-access-authentication),
or an SSH key belonging to a (bot) user who has access to the main repository
and all submodules.
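For example, a sketch of a GitRepository with submodule initialization enabled:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: <repository-name>
spec:
  interval: 5m0s
  url: https://github.com/<org>/<repository-with-submodules>
  recurseSubmodules: true
```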
### Include
`.spec.include` is an optional field to map the contents of GitRepository
Artifacts into another. This may look identical to Git submodules but has
multiple benefits over regular submodules:
- Including a `GitRepository` allows you to use different authentication
methods for different repositories.
- A change in the included repository will trigger an update of the including
repository.
- Multiple `GitRepository` objects could include the same repository, which
decreases the amount of cloning done compared to using submodules.
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: include-example
spec:
include:
- repository:
name: other-repository
fromPath: deploy/kubernetes
toPath: base/app
```
The `.fromPath` and `.toPath` fields allow you to limit the files included, and
where they will be copied to. If you do not specify a value for `.fromPath`,
all files from the referenced GitRepository Artifact will be included. The
`.toPath` defaults to the `.repository.name` (e.g. `./other-repository/*`).
## Working with GitRepositories
### Excluding files
By default, files which match the [default exclusion rules](#default-exclusions)
are excluded while archiving the Git repository contents as an Artifact. It is
possible to overwrite and/or overrule the default exclusions using a file in
the Git repository and/or an in-spec set of rules.
#### `.sourceignore` file
Excluding files is possible by adding a `.sourceignore` file in the Git
repository. The `.sourceignore` file follows [the `.gitignore` pattern
format](https://git-scm.com/docs/gitignore#_pattern_format), and
pattern entries may overrule [default exclusions](#default-exclusions).
The controller recursively loads ignore files so a `.sourceignore` can be
placed in the repository root or in subdirectories.
#### Ignore spec
Another option is to define the exclusions within the GitRepository spec, using
the [`.spec.ignore` field](#ignore). Specified rules override the [default
exclusion list](#default-exclusions), and may overrule `.sourceignore` file
exclusions.
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: <repository-name>
spec:
ignore: |
# exclude all
/*
# include deploy dir
!/deploy
# exclude file extensions from deploy dir
/deploy/**/*.md
/deploy/**/*.txt
```
### Triggering a reconcile
To manually tell the source-controller to reconcile a GitRepository outside the
[specified interval window](#interval), a GitRepository can be annotated with
`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
queues the GitRepository for reconciliation if the `<arbitrary-value>` differs
from the last value the controller acted on, as reported in
[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).
Using `kubectl`:
```sh
kubectl annotate --field-manager=flux-client-side-apply --overwrite gitrepository/<repository-name> reconcile.fluxcd.io/requestedAt="$(date +%s)"
```
Using `flux`:
```sh
flux reconcile source git <repository-name>
```
### Waiting for `Ready`
When a change is applied, it is possible to wait for the GitRepository to reach
a [ready state](#ready-gitrepository) using `kubectl`:
```sh
kubectl wait gitrepository/<repository-name> --for=condition=ready --timeout=1m
```
### Suspending and resuming
When you find yourself in a situation where you temporarily want to pause the
reconciliation of a GitRepository, you can suspend it using the
[`.spec.suspend` field](#suspend).
#### Suspend a GitRepository
In your YAML declaration:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: <repository-name>
spec:
suspend: true
```
Using `kubectl`:
```sh
kubectl patch gitrepository <repository-name> --field-manager=flux-client-side-apply -p '{"spec": {"suspend": true}}'
```
Using `flux`:
```sh
flux suspend source git <repository-name>
```
**Note:** When a GitRepository has an Artifact and is suspended, and this
Artifact later disappears from the storage due to e.g. the source-controller
Pod being evicted from a Node, this will not be reflected in the
GitRepository's Status until it is resumed.
#### Resume a GitRepository
In your YAML declaration, comment out (or remove) the field:
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: <repository-name>
spec:
# suspend: true
```
**Note:** Setting the field value to `false` has the same effect as removing
it, but does not allow for "hot patching" using e.g. `kubectl` while practicing
GitOps, as the manually applied patch would be overwritten by the declared
state in Git.
Using `kubectl`:
```sh
kubectl patch gitrepository <repository-name> --field-manager=flux-client-side-apply -p '{"spec": {"suspend": false}}'
```
Using `flux`:
```sh
flux resume source git <repository-name>
```
### Debugging a GitRepository
There are several ways to gather information about a GitRepository for
debugging purposes.
#### Describe the GitRepository
Describing a GitRepository using
`kubectl describe gitrepository <repository-name>`
displays the latest recorded information for the resource in the `Status` and
`Events` sections:
```console
...
Status:
...
Conditions:
Last Transition Time: 2022-02-14T09:40:27Z
Message: processing object: new generation 1 -> 2
Observed Generation: 2
Reason: ProgressingWithRetry
Status: True
Type: Reconciling
Last Transition Time: 2022-02-14T09:40:27Z
Message: failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid"
Observed Generation: 2
Reason: GitOperationFailed
Status: False
Type: Ready
Last Transition Time: 2022-02-14T09:40:27Z
Message: failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid"
Observed Generation: 2
Reason: GitOperationFailed
Status: True
Type: FetchFailed
Observed Generation: 1
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning GitOperationFailed 2s (x9 over 4s) source-controller failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid"
```
#### Trace emitted Events
To view events for specific GitRepository(s), `kubectl events` can be used in
combination with `--for` to list the Events for specific objects. For example,
running
```sh
kubectl events --for GitRepository/<repository-name>
```
lists
```console
LAST SEEN TYPE REASON OBJECT MESSAGE
2m14s Normal NewArtifact gitrepository/<repository-name> stored artifact for commit 'Merge pull request #160 from stefanprodan/release-6.0.3'
36s Normal ArtifactUpToDate gitrepository/<repository-name> artifact up-to-date with remote revision: 'master@sha1:132f4e719209eb10b9485302f8593fc0e680f4fc'
94s Warning GitOperationFailed gitrepository/<repository-name> failed to checkout and determine revision: unable to clone 'https://github.com/stefanprodan/podinfo': couldn't find remote ref "refs/heads/invalid"
```
Besides being reported in Events, the reconciliation errors are also logged by
the controller. The Flux CLI offers commands for filtering the logs for a
specific GitRepository, e.g.
`flux logs --level=error --kind=GitRepository --name=<repository-name>`.
## GitRepository Status
### Artifact
The GitRepository reports the latest synchronized state from the Git repository
as an Artifact object in the `.status.artifact` of the resource.
The Artifact file is a gzip compressed TAR archive (`<commit sha>.tar.gz`), and
can be retrieved in-cluster from the `.status.artifact.url` HTTP address.
#### Artifact example
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: <repository-name>
status:
artifact:
digest: sha256:e750c7a46724acaef8f8aa926259af30bbd9face2ae065ae8896ba5ee5ab832b
lastUpdateTime: "2022-01-29T06:59:23Z"
path: gitrepository/<namespace>/<repository-name>/c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz
revision: master@sha1:363a6a8fe6a7f13e05d34c163b0ef02a777da20a
size: 91318
url: http://source-controller.<namespace>.svc.cluster.local./gitrepository/<namespace>/<repository-name>/363a6a8fe6a7f13e05d34c163b0ef02a777da20a.tar.gz
```
#### Default exclusions
The following files and extensions are excluded from the Artifact by
default:
- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`)
- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`)
- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`)
- CLI configs (`.goreleaser.yml, .sops.yaml`)
- Flux v1 config (`.flux.yaml`)
To define your own exclusion rules, see [excluding files](#excluding-files).
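As a minimal sketch (the patterns below are illustrative, not a recommendation),
custom rules are declared through the [`ignore` field in spec](#ignore):
```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: <repository-name>
spec:
  # Illustrative patterns: exclude everything except the ./deploy directory.
  ignore: |
    /*
    !/deploy
```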
### Conditions
A GitRepository enters various states during its lifecycle, reflected as
[Kubernetes Conditions][typical-status-properties].
It can be [reconciling](#reconciling-gitrepository) while fetching the Git
state, it can be [ready](#ready-gitrepository), or it can [fail during
reconciliation](#failed-gitrepository).
The GitRepository API is compatible with the [kstatus specification][kstatus-spec],
and reports `Reconciling` and `Stalled` conditions where applicable to
provide better (timeout) support to solutions polling the GitRepository to
become `Ready`.
#### Reconciling GitRepository
The source-controller marks a GitRepository as _reconciling_ when one of the
following is true:
- There is no current Artifact for the GitRepository, or the reported Artifact
is determined to have disappeared from the storage.
- The generation of the GitRepository is newer than the [Observed
Generation](#observed-generation).
- The newly resolved Artifact revision differs from the current Artifact.
When the GitRepository is "reconciling", the `Ready` Condition status becomes
`Unknown` when the controller detects drift, and the controller adds a Condition
with the following attributes to the GitRepository's
`.status.conditions`:
- `type: Reconciling`
- `status: "True"`
- `reason: Progressing` | `reason: ProgressingWithRetry`
If the reconciling state is due to a new revision, an additional Condition is
added with the following attributes:
- `type: ArtifactOutdated`
- `status: "True"`
- `reason: NewRevision`
Both Conditions have a ["negative polarity"][typical-status-properties],
and are only present on the GitRepository while their status value is `"True"`.
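As an illustration only (the messages and timestamps below are hypothetical, and
the revision is borrowed from the Artifact example above), the
`.status.conditions` of a reconciling GitRepository could look like:
```yaml
status:
  conditions:
  - lastTransitionTime: "2022-02-14T09:40:27Z"
    message: "building artifact for revision 'master@sha1:363a6a8fe6a7f13e05d34c163b0ef02a777da20a'"
    observedGeneration: 2
    reason: Progressing
    status: "True"
    type: Reconciling
  - lastTransitionTime: "2022-02-14T09:40:27Z"
    message: "new upstream revision 'master@sha1:363a6a8fe6a7f13e05d34c163b0ef02a777da20a'"
    observedGeneration: 2
    reason: NewRevision
    status: "True"
    type: ArtifactOutdated
```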
#### Ready GitRepository
The source-controller marks a GitRepository as _ready_ when it has the
following characteristics:
- The GitRepository reports an [Artifact](#artifact).
- The reported Artifact exists in the controller's Artifact storage.
- The controller was able to communicate with the remote Git repository using
the current spec.
- The revision of the reported Artifact is up-to-date with the latest
resolved revision of the remote Git repository.
When the GitRepository is "ready", the controller sets a Condition with the
following attributes in the GitRepository's `.status.conditions`:
- `type: Ready`
- `status: "True"`
- `reason: Succeeded`
This `Ready` Condition will retain a status value of `"True"` until the
GitRepository is marked as [reconciling](#reconciling-gitrepository), or e.g. a
[transient error](#failed-gitrepository) occurs due to a temporary network issue.
When the GitRepository Artifact is archived in the controller's Artifact
storage, the controller sets a Condition with the following attributes in the
GitRepository's `.status.conditions`:
- `type: ArtifactInStorage`
- `status: "True"`
- `reason: Succeeded`
This `ArtifactInStorage` Condition will retain a status value of `"True"` until
the Artifact in the storage no longer exists.
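For illustration (the values are hypothetical), a ready GitRepository could
report conditions along these lines in `.status.conditions`:
```yaml
status:
  conditions:
  - lastTransitionTime: "2022-02-14T09:40:27Z"
    message: "stored artifact for revision 'master@sha1:363a6a8fe6a7f13e05d34c163b0ef02a777da20a'"
    observedGeneration: 2
    reason: Succeeded
    status: "True"
    type: Ready
  - lastTransitionTime: "2022-02-14T09:40:27Z"
    message: "stored artifact for revision 'master@sha1:363a6a8fe6a7f13e05d34c163b0ef02a777da20a'"
    observedGeneration: 2
    reason: Succeeded
    status: "True"
    type: ArtifactInStorage
```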
#### Failed GitRepository
The source-controller may get stuck trying to produce an Artifact for a
GitRepository without completing. This can occur due to some of the following
factors:
- The remote Git repository [URL](#url) is temporarily unavailable.
- The Git repository does not exist.
- The [Secret reference](#secret-reference) contains a reference to a
non-existing Secret.
- A specified Include is unavailable.
- The verification of the Git commit signature failed.
- The credentials in the referenced Secret are invalid.
- The GitRepository spec contains a generic misconfiguration.
- A storage-related failure occurs when storing the artifact.
When this happens, the controller sets the `Ready` Condition status to `False`,
and adds a Condition with the following attributes to the GitRepository's
`.status.conditions`:
- `type: FetchFailed` | `type: IncludeUnavailable` | `type: StorageOperationFailed`
- `status: "True"`
- `reason: AuthenticationFailed` | `reason: GitOperationFailed`
This condition has a ["negative polarity"][typical-status-properties],
and is only present on the GitRepository while the status value is `"True"`.
The `reason` field may contain other values that provide a more accurate reason
for the Condition.
In addition to the above Condition types, when the
[verification of a Git commit signature](#verification) fails, a Condition with
the following attributes is added to the GitRepository's `.status.conditions`:
- `type: SourceVerifiedCondition`
- `status: "False"`
- `reason: Failed`
While the GitRepository has one or more of these Conditions, the controller
will continue to attempt to produce an Artifact for the resource with an
exponential backoff, until it succeeds and the GitRepository is marked as
[ready](#ready-gitrepository).
Note that a GitRepository can be [reconciling](#reconciling-gitrepository)
while failing at the same time, for example due to a newly introduced
configuration issue in the GitRepository spec. When a reconciliation fails, the
`Reconciling` Condition reason would be `ProgressingWithRetry`. When the
reconciliation is performed again after the failure, the reason is updated to
`Progressing`.
### Observed Ignore
The source-controller reports an observed ignore in the GitRepository's
`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value
which resulted in a [ready state](#ready-gitrepository), or stalled due to an
error it cannot recover from without human intervention.
The value is the same as the [ignore in spec](#ignore).
It indicates the ignore rules used in building the current artifact in storage.
It is also used by the controller to determine if an artifact needs to be
rebuilt.
Example:
```yaml
status:
...
observedIgnore: |
cue
pkg
...
```
### Observed Recurse Submodules
The source-controller reports an observed recurse submodules value in the
GitRepository's `.status.observedRecurseSubmodules`. The observed recurse
submodules value is the latest `.spec.recurseSubmodules` value which resulted in
a [ready state](#ready-gitrepository), or stalled due to an error it cannot
recover from without human intervention. The value is the same as the
[recurse submodules in spec](#recurse-submodules). It indicates the recurse
submodules configuration used in building the current artifact in storage. It is
also used by the controller to determine if an artifact needs to be rebuilt.
Example:
```yaml
status:
...
observedRecurseSubmodules: true
...
```
### Observed Include
The source-controller reports the observed include in the GitRepository's
`.status.observedInclude`. The observed include is the latest
`.spec.include` value which resulted in a
[ready state](#ready-gitrepository), or stalled due to an error it cannot
recover from without human intervention. The value is the same as the
[include in spec](#include). It indicates the include configuration used in
building the current artifact in storage. It is also used by the controller to
determine if an artifact needs to be rebuilt.
Example:
```yaml
status:
...
observedInclude:
- fromPath: deploy/webapp
repository:
name: repo1
toPath: foo
- fromPath: deploy/secure
repository:
name: repo2
toPath: bar
...
```
### Observed Generation
The source-controller reports an [observed generation][typical-status-properties]
in the GitRepository's `.status.observedGeneration`. The observed generation is
the latest `.metadata.generation` which resulted in either a [ready state](#ready-gitrepository),
or a stalled state due to an error it cannot recover from without human
intervention.
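For consistency with the observed fields above, an illustrative example:
```yaml
status:
  ...
  observedGeneration: 2
  ...
```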
### Last Handled Reconcile At
The source-controller reports the last `reconcile.fluxcd.io/requestedAt`
annotation value it acted on in the `.status.lastHandledReconcileAt` field.
For practical information about this field, see [triggering a
reconcile](#triggering-a-reconcile).
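An illustrative example, assuming the annotation was set to a Unix timestamp as
in the `kubectl annotate` command shown earlier:
```yaml
status:
  ...
  lastHandledReconcileAt: "1644831515"
  ...
```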
[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties
[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus

View File

@ -1,5 +1,7 @@
# Buckets
<!-- menuweight:30 -->
The `Bucket` API defines a Source to produce an Artifact for objects from storage
solutions like Amazon S3, Google Cloud Storage buckets, or any other solution
with a S3 compatible API such as Minio, Alibaba Cloud OSS and others.
@ -81,7 +83,6 @@ control over.
...
Status:
Artifact:
Checksum: 72aa638abb455ca5f9ef4825b949fd2de4d4be0a74895bf7ed2338622cd12686
Digest: sha256:72aa638abb455ca5f9ef4825b949fd2de4d4be0a74895bf7ed2338622cd12686
Last Update Time: 2022-02-01T23:43:38Z
Path: bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz
@ -895,12 +896,12 @@ Events:
#### Trace emitted Events
To view events for specific Bucket(s), `kubectl get events` can be used in
combination with `--field-sector` to list the Events for specific objects.
For example, running
To view events for specific Bucket(s), `kubectl events` can be used in
combination with `--for` to list the Events for specific objects. For example,
running
```sh
kubectl get events --field-selector involvedObject.kind=Bucket,involvedObject.name=<bucket-name>
kubectl events --for Bucket/<bucket-name>
```
lists
@ -937,7 +938,6 @@ metadata:
name: <bucket-name>
status:
artifact:
checksum: cbec34947cc2f36dee8adcdd12ee62ca6a8a36699fc6e56f6220385ad5bd421a
digest: sha256:cbec34947cc2f36dee8adcdd12ee62ca6a8a36699fc6e56f6220385ad5bd421a
lastUpdateTime: "2022-01-28T10:30:30Z"
path: bucket/<namespace>/<bucket-name>/c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz

View File

@ -1,5 +1,7 @@
# Git Repositories
<!-- menuweight:10 -->
The `GitRepository` API defines a Source to produce an Artifact for a Git
repository revision.
@ -59,7 +61,6 @@ You can run this example by saving the manifest into `gitrepository.yaml`.
...
Status:
Artifact:
Checksum: 95e386f421272710c4cedbbd8607dbbaa019d500e7a5a0b6720bc7bebefc7bf2
Digest: sha256:95e386f421272710c4cedbbd8607dbbaa019d500e7a5a0b6720bc7bebefc7bf2
Last Update Time: 2022-02-14T11:23:36Z
Path: gitrepository/default/podinfo/132f4e719209eb10b9485302f8593fc0e680f4fc.tar.gz
@ -725,7 +726,6 @@ metadata:
name: <repository-name>
status:
artifact:
checksum: e750c7a46724acaef8f8aa926259af30bbd9face2ae065ae8896ba5ee5ab832b
digest: sha256:e750c7a46724acaef8f8aa926259af30bbd9face2ae065ae8896ba5ee5ab832b
lastUpdateTime: "2022-01-29T06:59:23Z"
path: gitrepository/<namespace>/<repository-name>/c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz

View File

@ -1,5 +1,7 @@
# Helm Charts
<!-- menuweight:50 -->
The `HelmChart` API defines a Source to produce an Artifact for a Helm chart
archive with a set of specific configurations.
@ -67,7 +69,6 @@ helm-controller.
Status:
Observed Source Artifact Revision: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111
Artifact:
Checksum: 6c3cc3b955bce1686036ae6822ee2ca0ef6ecb994e3f2d19eaf3ec03dcba84b3
Digest: sha256:6c3cc3b955bce1686036ae6822ee2ca0ef6ecb994e3f2d19eaf3ec03dcba84b3
Last Update Time: 2022-02-13T11:24:10Z
Path: helmchart/default/podinfo/podinfo-5.2.1.tgz
@ -468,12 +469,12 @@ sion matching '9.*' found
#### Trace emitted Events
To view events for specific HelmChart(s), `kubectl get events` can be used in
combination with `--field-selector` to list the Events for specific objects.
For example, running
To view events for specific HelmChart(s), `kubectl events` can be used in
combination with `--for` to list the Events for specific objects. For example,
running
```sh
kubectl get events --field-selector involvedObject.kind=HelmChart,involvedObject.name=<chart-name>
kubectl events --for HelmChart/<chart-name>
```
lists
@ -556,7 +557,6 @@ metadata:
name: <chart-name>
status:
artifact:
checksum: e30b95a08787de69ffdad3c232d65cfb131b5b50c6fd44295f48a078fceaa44e
digest: sha256:e30b95a08787de69ffdad3c232d65cfb131b5b50c6fd44295f48a078fceaa44e
lastUpdateTime: "2022-02-10T18:53:47Z"
path: helmchart/<source-namespace>/<chart-name>/<chart-name>-<chart-version>.tgz
@ -579,7 +579,6 @@ metadata:
name: <chart-name>
status:
artifact:
checksum: ee68224ded207ebb18a8e9730cf3313fa6bc1f31e6d8d3943ab541113559bb52
digest: sha256:ee68224ded207ebb18a8e9730cf3313fa6bc1f31e6d8d3943ab541113559bb52
lastUpdateTime: "2022-02-28T08:07:12Z"
path: helmchart/<source-namespace>/<chart-name>/<chart-name>-6.0.3+1.tgz
@ -605,7 +604,6 @@ metadata:
name: <chart-name>
status:
artifact:
checksum: 8d1f0ac3f4b0e8759a32180086f17ac87ca04e5d46c356e67f97e97616ef4718
digest: sha256:8d1f0ac3f4b0e8759a32180086f17ac87ca04e5d46c356e67f97e97616ef4718
lastUpdateTime: "2022-02-28T08:07:12Z"
path: helmchart/<source-namespace>/<chart-name>/<chart-name>-6.0.3+4e5cbb7b97d0.tgz

View File

@ -1,5 +1,7 @@
# Helm Repositories
<!-- menuweight:40 -->
There are 2 [Helm repository types](#type) defined by the `HelmRepository` API:
- Helm HTTP/S repository, which defines a Source to produce an Artifact for a Helm
repository index YAML (`index.yaml`).
@ -63,7 +65,6 @@ You can run this example by saving the manifest into `helmrepository.yaml`.
...
Status:
Artifact:
Checksum: 83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111
Digest: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111
Last Update Time: 2022-02-04T09:55:58Z
Path: helmrepository/default/podinfo/index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml
@ -597,12 +598,12 @@ Events:
#### Trace emitted Events
To view events for specific HelmRepository(s), `kubectl get events` can be used in
combination with `--field-sector` to list the Events for specific objects.
For example, running
To view events for specific HelmRepository(s), `kubectl events` can be used in
combination with `--for` to list the Events for specific objects. For example,
running
```sh
kubectl get events --field-selector involvedObject.kind=HelmRepository,involvedObject.name=<repository-name>
kubectl events --for HelmRepository/<repository-name>
```
lists
@ -641,7 +642,6 @@ metadata:
name: <repository-name>
status:
artifact:
checksum: 83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111
digest: sha256:83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111
lastUpdateTime: "2022-02-04T09:55:58Z"
path: helmrepository/<namespace>/<repository-name>/index-83a3c595163a6ff0333e0154c790383b5be441b9db632cb36da11db1c4ece111.yaml

View File

@ -1,5 +1,7 @@
# OCI Repositories
<!-- menuweight:20 -->
The `OCIRepository` API defines a Source to produce an Artifact for an OCI
repository.
@ -59,7 +61,6 @@ You can run this example by saving the manifest into `ocirepository.yaml`.
...
Status:
Artifact:
Checksum: d7e924b4882e55b97627355c7b3d2e711e9b54303afa2f50c25377f4df66a83b
Digest: sha256:d7e924b4882e55b97627355c7b3d2e711e9b54303afa2f50c25377f4df66a83b
Last Update Time: 2022-06-14T11:23:36Z
Path: ocirepository/default/podinfo/3b6cdcc7adcc9a84d3214ee1c029543789d90b5ae69debe9efa3f66e982875de.tar.gz
@ -681,12 +682,12 @@ Events:
#### Trace emitted Events
To view events for specific OCIRepository(s), `kubectl get events` can be used
in combination with `--field-sector` to list the Events for specific objects.
For example, running
To view events for specific OCIRepository(s), `kubectl events` can be used
in combination with `--for` to list the Events for specific objects. For
example, running
```sh
kubectl get events --field-selector involvedObject.kind=OCIRepository,involvedObject.name=<repository-name>
kubectl events --for OCIRepository/<repository-name>
```
lists
@ -732,7 +733,6 @@ metadata:
name: <repository-name>
status:
artifact:
checksum: 9f3bc0f341d4ecf2bab460cc59320a2a9ea292f01d7b96e32740a9abfd341088
digest: sha256:9f3bc0f341d4ecf2bab460cc59320a2a9ea292f01d7b96e32740a9abfd341088
lastUpdateTime: "2022-08-08T09:35:45Z"
metadata:

18
go.mod
View File

@ -21,15 +21,15 @@ require (
github.com/docker/go-units v0.5.0
github.com/fluxcd/go-git/v5 v5.0.0-20221219190809-2e5c9d01cfc4
github.com/fluxcd/pkg/apis/event v0.4.1
github.com/fluxcd/pkg/apis/meta v0.19.1
github.com/fluxcd/pkg/apis/meta v1.0.0
github.com/fluxcd/pkg/git v0.11.0
github.com/fluxcd/pkg/git/gogit v0.8.1
github.com/fluxcd/pkg/gittestserver v0.8.2
github.com/fluxcd/pkg/helmtestserver v0.11.1
github.com/fluxcd/pkg/helmtestserver v0.12.0
github.com/fluxcd/pkg/lockedfile v0.1.0
github.com/fluxcd/pkg/masktoken v0.2.0
github.com/fluxcd/pkg/oci v0.21.1
github.com/fluxcd/pkg/runtime v0.31.0
github.com/fluxcd/pkg/runtime v0.33.0
github.com/fluxcd/pkg/sourceignore v0.3.3
github.com/fluxcd/pkg/ssh v0.7.3
github.com/fluxcd/pkg/testserver v0.4.0
@ -57,9 +57,9 @@ require (
golang.org/x/sync v0.1.0
google.golang.org/api v0.111.0
gotest.tools v2.2.0+incompatible
helm.sh/helm/v3 v3.11.1
k8s.io/api v0.26.2
k8s.io/apimachinery v0.26.2
helm.sh/helm/v3 v3.11.2
k8s.io/api v0.26.3
k8s.io/apimachinery v0.26.3
k8s.io/client-go v0.26.2
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5
sigs.k8s.io/cli-utils v0.34.0
@ -230,7 +230,7 @@ require (
github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/huandu/xstrings v1.3.3 // indirect
github.com/huandu/xstrings v1.4.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/in-toto/in-toto-golang v0.3.4-0.20220709202702-fa494aaa0add // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
@ -255,7 +255,7 @@ require (
github.com/magiconair/properties v1.8.6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
@ -292,7 +292,7 @@ require (
github.com/prometheus/procfs v0.8.0 // indirect
github.com/rivo/uniseg v0.4.2 // indirect
github.com/rs/xid v1.4.0 // indirect
github.com/rubenv/sql-migrate v1.2.0 // indirect
github.com/rubenv/sql-migrate v1.3.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/sassoftware/relic v0.0.0-20210427151427-dfb082b79b74 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect

69
go.sum
View File

@ -159,7 +159,7 @@ github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7Y
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Masterminds/sprig/v3 v3.2.0/go.mod h1:tWhwTbUTndesPNeF0C900vKoq283u6zp4APT9vaF3SI=
github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
github.com/Masterminds/squirrel v1.5.3 h1:YPpoceAcxuzIljlr5iWpNKaql7hLeG1KLSrhvdHpkZc=
@ -183,6 +183,7 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMx
github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E=
github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/a8m/expect v1.0.0/go.mod h1:4IwSCMumY49ScypDnjNbYEjgVeqy1/U2cEs3Lat96eA=
github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
@ -248,6 +249,7 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
@ -532,24 +534,24 @@ github.com/fluxcd/pkg/apis/acl v0.1.0 h1:EoAl377hDQYL3WqanWCdifauXqXbMyFuK82NnX6
github.com/fluxcd/pkg/apis/acl v0.1.0/go.mod h1:zfEZzz169Oap034EsDhmCAGgnWlcWmIObZjYMusoXS8=
github.com/fluxcd/pkg/apis/event v0.4.1 h1:63wP8NM/uA4680F4Ft8q8/0rJivX90i7FmMkRvUI8Is=
github.com/fluxcd/pkg/apis/event v0.4.1/go.mod h1:LHT1ZsbMrcHwCHQCaFtQviQBZwhMOAbTUPK6+KgBkFo=
github.com/fluxcd/pkg/apis/meta v0.19.1 h1:fCI5CnTXpAqr67UlaI9q0H+OztMKB5kDTr6xV6vlAo0=
github.com/fluxcd/pkg/apis/meta v0.19.1/go.mod h1:ZPPMYrPnWwPQYNEGM/Uc0N4SurUPS3xNI3IIpCQEfuM=
github.com/fluxcd/pkg/apis/meta v1.0.0 h1:i9IGHd/VNEZELX7mepkiYFbJxs2J5znaB4cN9z2nPm8=
github.com/fluxcd/pkg/apis/meta v1.0.0/go.mod h1:04ZdpZYm1x+aL93K4daNHW1UX6E8K7Gyf5za9OhrE+U=
github.com/fluxcd/pkg/git v0.11.0 h1:GvB+3QOB8xbF5WNjVrkskseOnsZBuqSOzW3VxfsHuX4=
github.com/fluxcd/pkg/git v0.11.0/go.mod h1:VHRVlrZMHNoWBlaSAWxlGH6Vwlb9VRazUhPUykviHwY=
github.com/fluxcd/pkg/git/gogit v0.8.1 h1:Q3EV2WBX6HiXSmsHyrwFzwl82gO4ZtFwb675iQPWwVc=
github.com/fluxcd/pkg/git/gogit v0.8.1/go.mod h1:5M27gCl0gyo6l+ht9HwZSzimPY3LahKVIJ7/1vCCctg=
github.com/fluxcd/pkg/gittestserver v0.8.2 h1:LzrhnNouKYgZAI2JuuwPcl5ve/TRPo/d7APKIX0LDiI=
github.com/fluxcd/pkg/gittestserver v0.8.2/go.mod h1:YhSpqz46mAebmHfP+6QREcNEnmwPLSuklyjsI4h+AR4=
github.com/fluxcd/pkg/helmtestserver v0.11.1 h1:seotZ19JtzPfuzru5zHCEX/0Ff96PVPI41OLaHh4rC0=
github.com/fluxcd/pkg/helmtestserver v0.11.1/go.mod h1:pQ+UhqATeoJL0e812gXgUrEORhhE91epxgBFe0aIRvQ=
github.com/fluxcd/pkg/helmtestserver v0.12.0 h1:Hv3Q8S4ft/xMjbxTUsUL3FwlrGNJbXbm9SEzrDyAitg=
github.com/fluxcd/pkg/helmtestserver v0.12.0/go.mod h1:P6mAUF2wGO1f+r3+aHpeADF98NhZzHYfByvUASqyUPU=
github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo=
github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8=
github.com/fluxcd/pkg/masktoken v0.2.0 h1:HoSPTk4l1fz5Fevs2vVRvZGru33blfMwWSZKsHdfG/0=
github.com/fluxcd/pkg/masktoken v0.2.0/go.mod h1:EA7GleAHL33kN6kTW06m5R3/Q26IyuGO7Ef/0CtpDI0=
github.com/fluxcd/pkg/oci v0.21.1 h1:9kn19wkabE2xB77NRlOtMJlSYhZmUjdloZCzlHdAS6s=
github.com/fluxcd/pkg/oci v0.21.1/go.mod h1:9E2DBlQII7YmeWt2ieTh38wwkiBqx3yg5NEJ51uefaA=
github.com/fluxcd/pkg/runtime v0.31.0 h1:addyXaANHl/A68bEjCbiR4HzcFKgfXv1eaG7B7ZHxOo=
github.com/fluxcd/pkg/runtime v0.31.0/go.mod h1:toGOOubMo4ZC1aWhB8C3drdTglr1/A1dETeNwjiIv0g=
github.com/fluxcd/pkg/runtime v0.33.0 h1:y6mFOj22mU/BXAxSTucTlT7vrWUjd0+iccK0pRN5CF0=
github.com/fluxcd/pkg/runtime v0.33.0/go.mod h1:oDTerqMMtOQVNZeidwAPG7g/ai2xuidUduJzQh1IBVI=
github.com/fluxcd/pkg/sourceignore v0.3.3 h1:Ue29JAuPECEYdvIqdpXpQaDxpeySn7amarLArp7XoIs=
github.com/fluxcd/pkg/sourceignore v0.3.3/go.mod h1:yuJzKggph0Bdbk9LgXjJQhvJZSTJV/1vS7mJuB7mPa0=
github.com/fluxcd/pkg/ssh v0.7.3 h1:Dhs+nXdp806lBriUJtPyRi0SVIVWbJafJGD/qQ71GiY=
@ -570,6 +572,7 @@ github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVB
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
@ -600,7 +603,7 @@ github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY=
github.com/go-gorp/gorp/v3 v3.0.5/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs=
github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
@ -673,7 +676,6 @@ github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w
github.com/go-rod/rod v0.112.6 h1:zMirUmhsBeshMWyf285BD0UGtGq54HfThLDGSjcP3lU=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@ -924,6 +926,7 @@ github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
@ -933,6 +936,7 @@ github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjh
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
@ -966,8 +970,9 @@ github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbc
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
@ -1064,6 +1069,7 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
@ -1083,7 +1089,6 @@ github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJ
github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
@ -1127,8 +1132,9 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@ -1137,11 +1143,10 @@ github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI=
github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
@ -1160,7 +1165,7 @@ github.com/minio/minio-go/v7 v7.0.49/go.mod h1:UI34MvQEiob3Cf/gGExGMmzugkM/tNgbF
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.4/go.mod h1:vTLESy5mRhKOs9KDp0/RATawxP1UqBmdrpVRMnpcvKQ=
github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
@ -1221,6 +1226,8 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nelsam/hel/v2 v2.3.2/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w=
github.com/nelsam/hel/v2 v2.3.3/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nightlyone/lockfile v1.0.0/go.mod h1:rywoIealpdNse2r832aiD9jRk8ErCatROs6LzC841CI=
github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso=
@ -1318,8 +1325,10 @@ github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/poy/onpar v0.0.0-20200406201722-06f95a1c68e8/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU=
github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=
github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
@ -1385,14 +1394,15 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rubenv/sql-migrate v1.2.0 h1:fOXMPLMd41sK7Tg75SXDec15k3zg5WNV6SjuDRiNfcU=
github.com/rubenv/sql-migrate v1.2.0/go.mod h1:Z5uVnq7vrIrPmHbVFfR4YLHRZquxeHpckCnRq0P/K9Y=
github.com/rubenv/sql-migrate v1.3.1 h1:Vx+n4Du8X8VTYuXbhNxdEUoh6wiJERA0GlWocR5FrbA=
github.com/rubenv/sql-migrate v1.3.1/go.mod h1:YzG/Vh82CwyhTFXy+Mf5ahAiiEOpAlHurg+23VEzcsk=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@ -1466,6 +1476,7 @@ github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
@ -1611,7 +1622,6 @@ github.com/zeebo/errs v1.2.2 h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g=
github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zeebo/pcg v1.0.0 h1:dt+dx+HvX8g7Un32rY9XWoYnd0NmKmrIzpHF7qiTDj0=
github.com/zeebo/pcg v1.0.0/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
@ -1777,6 +1787,7 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -1897,6 +1908,7 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -2061,9 +2073,11 @@ golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20221013171732-95e765b1cc43/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@ -2074,6 +2088,7 @@ golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -2087,6 +2102,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -2147,6 +2163,7 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@ -2462,8 +2479,8 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.2.0 h1:I0DwBVMGAx26dttAj1BtJLAkVGncrkkUXfJLC4Flt/I=
helm.sh/helm/v3 v3.11.1 h1:cmL9fFohOoNQf+wnp2Wa0OhNFH0KFnSzEkVxi3fcc3I=
helm.sh/helm/v3 v3.11.1/go.mod h1:z/Bu/BylToGno/6dtNGuSmjRqxKq5gaH+FU0BPO+AQ8=
helm.sh/helm/v3 v3.11.2 h1:P3cLaFxfoxaGLGJVnoPrhf1j86LC5EDINSpYSpMUkkA=
helm.sh/helm/v3 v3.11.2/go.mod h1:Hw+09mfpDiRRKAgAIZlFkPSeOkvv7Acl5McBvQyNPVw=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@ -2472,12 +2489,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ=
k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU=
k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU=
k8s.io/api v0.26.3/go.mod h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE=
k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI=
k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM=
k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ=
k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k=
k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
k8s.io/apiserver v0.26.1 h1:6vmnAqCDO194SVCPU3MU8NcDgSqsUA62tBUSWrFXhsc=
k8s.io/apiserver v0.26.1/go.mod h1:wr75z634Cv+sifswE9HlAo5FQ7UoUauIICRlOE+5dCg=
k8s.io/cli-runtime v0.26.0 h1:aQHa1SyUhpqxAw1fY21x2z2OS5RLtMJOCj7tN4oq8mw=

View File

@ -9,11 +9,11 @@
"externalPackages": [
{
"typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$",
"docsURLTemplate": "https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration"
"docsURLTemplate": "https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Duration"
},
{
"typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Condition$",
"docsURLTemplate": "https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Condition"
"docsURLTemplate": "https://pkg.go.dev/k8s.io/apimachinery/pkg/apis/meta/v1#Condition"
},
{
"typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/",
@ -21,11 +21,15 @@
},
{
"typeMatchPrefix": "^github.com/fluxcd/pkg/apis/meta",
"docsURLTemplate": "https://godoc.org/github.com/fluxcd/pkg/apis/meta#{{ .TypeIdentifier }}"
"docsURLTemplate": "https://pkg.go.dev/github.com/fluxcd/pkg/apis/meta#{{ .TypeIdentifier }}"
},
{
"typeMatchPrefix": "^github.com/fluxcd/pkg/apis/acl",
"docsURLTemplate": "https://godoc.org/github.com/fluxcd/pkg/apis/acl#{{ .TypeIdentifier }}"
"docsURLTemplate": "https://pkg.go.dev/github.com/fluxcd/pkg/apis/acl#{{ .TypeIdentifier }}"
},
{
"typeMatchPrefix": "^github.com/fluxcd/source-controller/api/v1",
"docsURLTemplate": "https://pkg.go.dev/github.com/fluxcd/source-controller/api/v1#{{ .TypeIdentifier }}"
}
],
"typeDisplayNamePrefixOverrides": {

View File

@ -1,5 +1,10 @@
{{ define "packages" }}
<h1>Source API reference</h1>
<h1>Source API reference
{{- with (index .packages 0) -}}
{{ with (index .GoPackages 0 ) -}}
{{ printf " %s" .Name -}}
{{ end -}}
{{ end }}</h1>
{{ with .packages}}
<p>Packages:</p>

View File

@ -1,5 +1,5 @@
/*
Copyright 2022 The Flux authors
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -282,7 +282,6 @@ func (r *ChartRepository) DownloadChart(chart *repo.ChartVersion) (*bytes.Buffer
// CacheIndex attempts to write the index from the remote into a new temporary file
// using DownloadIndex, and sets Path and cached.
// It returns the SHA256 checksum of the downloaded index bytes, or an error.
// The caller is expected to handle the garbage collection of Path, and to
// load the Index separately using LoadFromPath if required.
func (r *ChartRepository) CacheIndex() error {

View File

@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
)
var (

View File

@ -24,7 +24,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
)
func TestGetStatusLastHandledReconcileAt(t *testing.T) {

View File

@ -36,7 +36,7 @@ import (
conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check"
"github.com/fluxcd/pkg/runtime/patch"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
serror "github.com/fluxcd/source-controller/internal/error"
"github.com/fluxcd/source-controller/internal/reconcile"
)

14
main.go
View File

@ -49,7 +49,8 @@ import (
"github.com/fluxcd/source-controller/internal/features"
"github.com/fluxcd/source-controller/internal/helm/registry"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
v1 "github.com/fluxcd/source-controller/api/v1"
v1beta2 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/controllers"
"github.com/fluxcd/source-controller/internal/cache"
"github.com/fluxcd/source-controller/internal/helm"
@ -76,7 +77,8 @@ var (
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(sourcev1.AddToScheme(scheme))
utilruntime.Must(v1beta2.AddToScheme(scheme))
utilruntime.Must(v1.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
@ -240,7 +242,7 @@ func main() {
DependencyRequeueInterval: requeueDependency,
RateLimiter: helper.GetRateLimiter(rateLimiterOptions),
}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", sourcev1.GitRepositoryKind)
setupLog.Error(err, "unable to create controller", "controller", v1beta2.GitRepositoryKind)
os.Exit(1)
}
@ -255,7 +257,7 @@ func main() {
MaxConcurrentReconciles: concurrent,
RateLimiter: helper.GetRateLimiter(rateLimiterOptions),
}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmRepositoryKind, "type", "OCI")
setupLog.Error(err, "unable to create controller", "controller", v1beta2.HelmRepositoryKind, "type", "OCI")
os.Exit(1)
}
@ -293,7 +295,7 @@ func main() {
MaxConcurrentReconciles: concurrent,
RateLimiter: helper.GetRateLimiter(rateLimiterOptions),
}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmRepositoryKind)
setupLog.Error(err, "unable to create controller", "controller", v1beta2.HelmRepositoryKind)
os.Exit(1)
}
@ -312,7 +314,7 @@ func main() {
MaxConcurrentReconciles: concurrent,
RateLimiter: helper.GetRateLimiter(rateLimiterOptions),
}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmChartKind)
setupLog.Error(err, "unable to create controller", "controller", v1beta2.HelmChartKind)
os.Exit(1)
}
if err = (&controllers.BucketReconciler{