diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index 70be4e51..4aab0cde 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -29,6 +29,8 @@ jobs: uses: fluxcd/pkg/actions/kustomize@master - name: Setup Kubebuilder uses: fluxcd/pkg/actions/kubebuilder@master + - name: Setup Helm + uses: fluxcd/pkg/actions/helm@master - name: Run tests run: make test env: @@ -64,6 +66,27 @@ jobs: kubectl -n source-system wait helmchart/mariadb --for=condition=ready --timeout=5m kubectl -n source-system wait helmchart/mariadb-git --for=condition=ready --timeout=5m kubectl -n source-system delete -f ./config/testdata/helmchart-valuesfile + - name: Setup Minio + run: | + kubectl create ns minio + helm repo add minio https://helm.min.io/ + helm upgrade --wait -i minio minio/minio \ --namespace minio \ --set accessKey=myaccesskey \ --set secretKey=mysecretkey \ --set resources.requests.memory=128Mi \ --set persistence.enabled=false + kubectl -n minio port-forward svc/minio 9000:9000 &>/dev/null & + sleep 2 + wget -q https://dl.min.io/client/mc/release/linux-amd64/mc + chmod +x mc + ./mc alias set minio http://localhost:9000 myaccesskey mysecretkey --api S3v4 + ./mc mb minio/podinfo + ./mc cp --recursive ./config/testdata/minio/manifests minio/podinfo + - name: Run S3 tests + run: | + kubectl -n source-system apply -f ./config/testdata/minio/source.yaml + kubectl -n source-system wait bucket/podinfo --for=condition=ready --timeout=1m - name: Debug failure if: failure() run: | @@ -72,3 +95,4 @@ jobs: kubectl -n source-system get helmcharts -oyaml kubectl -n source-system get all kubectl -n source-system logs deploy/source-controller + kubectl -n minio get all diff --git a/PROJECT index 0b8f5bd1..e1408f7f 100644 --- a/PROJECT +++ b/PROJECT @@ -10,4 +10,7 @@ resources: - group: source kind: HelmChart version: v1alpha1 +- group: source + kind: Bucket + version: v1alpha1 version: "2" diff --git a/api/v1alpha1/bucket_types.go 
b/api/v1alpha1/bucket_types.go new file mode 100644 index 00000000..87ed4822 --- /dev/null +++ b/api/v1alpha1/bucket_types.go @@ -0,0 +1,205 @@ +/* +Copyright 2020 The Flux CD contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + BucketKind = "Bucket" + BucketTimeout = time.Second * 20 +) + +// BucketSpec defines the desired state of an S3 compatible bucket +type BucketSpec struct { + // The S3 compatible storage provider name, default ('generic'). + // +kubebuilder:validation:Enum=generic;aws + // +optional + Provider string `json:"provider,omitempty"` + + // The bucket name. + // +required + BucketName string `json:"bucketName"` + + // The bucket endpoint address. + // +required + Endpoint string `json:"endpoint"` + + // Insecure allows connecting to a non-TLS S3 HTTP endpoint. + // +optional + Insecure bool `json:"insecure,omitempty"` + + // The bucket region. + // +optional + Region string `json:"region,omitempty"` + + // The secret name containing the bucket accesskey and secretkey. + // +optional + SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` + + // The interval at which to check for bucket updates. + // +required + Interval metav1.Duration `json:"interval"` + + // The timeout for download operations, default ('20s'). 
+ // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore + // format (which is the same as .gitignore). + // +optional + Ignore *string `json:"ignore,omitempty"` +} + +const ( + GenericBucketProvider string = "generic" + AmazonBucketProvider string = "aws" +) + +// BucketStatus defines the observed state of a bucket +type BucketStatus struct { + // ObservedGeneration is the last observed generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the Bucket. + // +optional + Conditions []SourceCondition `json:"conditions,omitempty"` + + // URL is the download link for the artifact output of the last Bucket sync. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the output of the last successful Bucket sync. + // +optional + Artifact *Artifact `json:"artifact,omitempty"` +} + +const ( + // BucketOperationSucceedReason represents the fact that the bucket listing + // and download operations succeeded. + BucketOperationSucceedReason string = "BucketOperationSucceed" + + // BucketOperationFailedReason represents the fact that the bucket listing + // or download operations failed. + BucketOperationFailedReason string = "BucketOperationFailed" +) + +// BucketProgressing resets the conditions of the Bucket +// to SourceCondition of type Ready with status unknown and +// progressing reason and message. It returns the modified Bucket. +func BucketProgressing(bucket Bucket) Bucket { + bucket.Status.ObservedGeneration = bucket.Generation + bucket.Status.URL = "" + bucket.Status.Conditions = []SourceCondition{} + SetBucketCondition(&bucket, ReadyCondition, corev1.ConditionUnknown, ProgressingReason, "reconciliation in progress") + return bucket +} + +// SetBucketCondition sets the given condition with the given status, reason and message on the Bucket. 
+func SetBucketCondition(bucket *Bucket, condition string, status corev1.ConditionStatus, reason, message string) { + bucket.Status.Conditions = filterOutSourceCondition(bucket.Status.Conditions, condition) + bucket.Status.Conditions = append(bucket.Status.Conditions, SourceCondition{ + Type: condition, + Status: status, + LastTransitionTime: metav1.Now(), + Reason: reason, + Message: message, + }) +} + +// BucketReady sets the given artifact and url on the Bucket +// and sets the ReadyCondition to True, with the given reason and +// message. It returns the modified Bucket. +func BucketReady(repository Bucket, artifact Artifact, url, reason, message string) Bucket { + repository.Status.Artifact = &artifact + repository.Status.URL = url + SetBucketCondition(&repository, ReadyCondition, corev1.ConditionTrue, reason, message) + return repository +} + +// BucketNotReady sets the ReadyCondition on the given Bucket +// to False, with the given reason and message. It returns the modified Bucket. +func BucketNotReady(repository Bucket, reason, message string) Bucket { + SetBucketCondition(&repository, ReadyCondition, corev1.ConditionFalse, reason, message) + return repository +} + +// BucketReadyMessage returns the message of the SourceCondition +// of type Ready with status true if present, or an empty string. +func BucketReadyMessage(repository Bucket) string { + for _, condition := range repository.Status.Conditions { + if condition.Type == ReadyCondition && condition.Status == corev1.ConditionTrue { + return condition.Message + } + } + return "" +} + +// GetTimeout returns the configured timeout or the default. +func (in *Bucket) GetTimeout() time.Duration { + if in.Spec.Timeout != nil { + return in.Spec.Timeout.Duration + } + return BucketTimeout +} + +// GetArtifact returns the latest artifact from the source +// if present in the status sub-resource. 
+func (in *Bucket) GetArtifact() *Artifact { + return in.Status.Artifact +} + +// GetInterval returns the interval at which the source is updated. +func (in *Bucket) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// +genclient +// +genclient:Namespaced +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.endpoint` +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" + +// Bucket is the Schema for the buckets API +type Bucket struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BucketSpec `json:"spec,omitempty"` + Status BucketStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BucketList contains a list of Bucket +type BucketList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Bucket `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Bucket{}, &BucketList{}) +} diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 48003a1f..249db1a0 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -42,6 +42,123 @@ func (in *Artifact) DeepCopy() *Artifact { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Bucket) DeepCopyInto(out *Bucket) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket. +func (in *Bucket) DeepCopy() *Bucket { + if in == nil { + return nil + } + out := new(Bucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Bucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketList) DeepCopyInto(out *BucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Bucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList. +func (in *BucketList) DeepCopy() *BucketList { + if in == nil { + return nil + } + out := new(BucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec. +func (in *BucketSpec) DeepCopy() *BucketSpec { + if in == nil { + return nil + } + out := new(BucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]SourceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus. +func (in *BucketStatus) DeepCopy() *BucketStatus { + if in == nil { + return nil + } + out := new(BucketStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GitRepository) DeepCopyInto(out *GitRepository) { *out = *in diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml new file mode 100644 index 00000000..667e7127 --- /dev/null +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -0,0 +1,174 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + name: buckets.source.toolkit.fluxcd.io +spec: + group: source.toolkit.fluxcd.io + names: + kind: Bucket + listKind: BucketList + plural: buckets + singular: bucket + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.url + name: URL + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Bucket is the Schema for the buckets API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BucketSpec defines the desired state of an S3 compatible + bucket + properties: + bucketName: + description: The bucket name. + type: string + endpoint: + description: The bucket endpoint address. + type: string + ignore: + description: Ignore overrides the set of excluded patterns in the + .sourceignore format (which is the same as .gitignore). + type: string + insecure: + description: Insecure allows connecting to a non-TLS S3 HTTP endpoint. + type: boolean + interval: + description: The interval at which to check for bucket updates. + type: string + provider: + description: The S3 compatible storage provider name, default ('generic'). + enum: + - generic + - aws + type: string + region: + description: The bucket region. + type: string + secretRef: + description: The secret name containing the bucket accesskey and secretkey. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + timeout: + description: The timeout for download operations, default ('20s'). + type: string + required: + - bucketName + - endpoint + - interval + type: object + status: + description: BucketStatus defines the observed state of a bucket + properties: + artifact: + description: Artifact represents the output of the last successful + Bucket sync. + properties: + checksum: + description: Checksum is the SHA1 checksum of the artifact. + type: string + lastUpdateTime: + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. + format: date-time + type: string + path: + description: Path is the relative file path of this artifact. 
+ type: string + revision: + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit sha, Git + tag, a Helm index timestamp, a Helm chart version, etc. + type: string + url: + description: URL is the HTTP address of this artifact. + type: string + required: + - path + - url + type: object + conditions: + description: Conditions holds the conditions for the Bucket. + items: + description: SourceCondition contains condition information for + a source. + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding + to the last status change of this condition. + format: date-time + type: string + message: + description: Message is a human readable description of the + details of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation + for the condition's last transition. + type: string + status: + description: Status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + type: + description: Type of the condition, currently ('Ready'). + type: string + required: + - status + - type + type: object + type: array + observedGeneration: + description: ObservedGeneration is the last observed generation. + format: int64 + type: integer + url: + description: URL is the download link for the artifact output of the + last Bucket sync. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index b108deaf..a666a925 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -4,4 +4,5 @@ resources: - bases/source.toolkit.fluxcd.io_gitrepositories.yaml - bases/source.toolkit.fluxcd.io_helmrepositories.yaml - bases/source.toolkit.fluxcd.io_helmcharts.yaml +- bases/source.toolkit.fluxcd.io_buckets.yaml # +kubebuilder:scaffold:crdkustomizeresource diff --git a/config/rbac/bucket_editor_role.yaml b/config/rbac/bucket_editor_role.yaml new file mode 100644 index 00000000..63d2229a --- /dev/null +++ b/config/rbac/bucket_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit buckets. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: bucket-editor-role +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - buckets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - buckets/status + verbs: + - get diff --git a/config/rbac/bucket_viewer_role.yaml b/config/rbac/bucket_viewer_role.yaml new file mode 100644 index 00000000..f8033a28 --- /dev/null +++ b/config/rbac/bucket_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view buckets. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: bucket-viewer-role +rules: +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - buckets + verbs: + - get + - list + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - buckets/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 95e3d4db..c1cf16ab 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -13,6 +13,34 @@ rules: verbs: - create - patch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - buckets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - source.toolkit.fluxcd.io + resources: + - buckets/status + verbs: + - get + - patch + - update - apiGroups: - source.toolkit.fluxcd.io resources: diff --git a/config/samples/source_v1alpha1_bucket.yaml b/config/samples/source_v1alpha1_bucket.yaml new file mode 100644 index 00000000..6fa1b140 --- /dev/null +++ b/config/samples/source_v1alpha1_bucket.yaml @@ -0,0 +1,11 @@ +apiVersion: source.toolkit.fluxcd.io/v1alpha1 +kind: Bucket +metadata: + name: bucket-sample +spec: + interval: 1m + provider: generic + bucketName: podinfo + endpoint: minio.minio.svc.cluster.local:9000 + region: us-east-1 + insecure: true diff --git a/config/testdata/minio/manifests/namespace/namespace.yaml b/config/testdata/minio/manifests/namespace/namespace.yaml new file mode 100644 index 00000000..92d01686 --- /dev/null +++ b/config/testdata/minio/manifests/namespace/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: minio-test diff --git a/config/testdata/minio/manifests/namespace/role.yaml b/config/testdata/minio/manifests/namespace/role.yaml new file mode 100644 index 00000000..9c46c023 --- /dev/null +++ b/config/testdata/minio/manifests/namespace/role.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
RoleBinding +metadata: + name: gotk-reconciler + namespace: minio-test +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - apiGroup: rbac.authorization.k8s.io + kind: User + name: gotk:minio-test:reconciler diff --git a/config/testdata/minio/manifests/podinfo/deployment.yaml b/config/testdata/minio/manifests/podinfo/deployment.yaml new file mode 100644 index 00000000..de45798c --- /dev/null +++ b/config/testdata/minio/manifests/podinfo/deployment.yaml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: podinfo + namespace: minio-test +spec: + minReadySeconds: 3 + revisionHistoryLimit: 5 + progressDeadlineSeconds: 60 + strategy: + rollingUpdate: + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app: podinfo + template: + metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9797" + labels: + app: podinfo + spec: + containers: + - name: podinfod + image: stefanprodan/podinfo:4.0.6 + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 9898 + protocol: TCP + - name: http-metrics + containerPort: 9797 + protocol: TCP + - name: grpc + containerPort: 9999 + protocol: TCP + command: + - ./podinfo + - --port=9898 + - --port-metrics=9797 + - --grpc-port=9999 + - --grpc-service-name=podinfo + - --level=info + - --random-delay=false + - --random-error=false + env: + - name: PODINFO_UI_COLOR + value: "#34577c" + livenessProbe: + exec: + command: + - podcli + - check + - http + - localhost:9898/healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + exec: + command: + - podcli + - check + - http + - localhost:9898/readyz + initialDelaySeconds: 5 + timeoutSeconds: 5 + resources: + limits: + cpu: 2000m + memory: 512Mi + requests: + cpu: 100m + memory: 64Mi diff --git a/config/testdata/minio/manifests/podinfo/service.yaml b/config/testdata/minio/manifests/podinfo/service.yaml new file mode 100644 index 00000000..4f7baeb1 --- 
/dev/null +++ b/config/testdata/minio/manifests/podinfo/service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: podinfo + namespace: minio-test +spec: + type: ClusterIP + selector: + app: podinfo + ports: + - name: http + port: 9898 + protocol: TCP + targetPort: http + - port: 9999 + targetPort: grpc + protocol: TCP + name: grpc diff --git a/config/testdata/minio/source.yaml b/config/testdata/minio/source.yaml new file mode 100644 index 00000000..ccd1ed8b --- /dev/null +++ b/config/testdata/minio/source.yaml @@ -0,0 +1,21 @@ +apiVersion: source.toolkit.fluxcd.io/v1alpha1 +kind: Bucket +metadata: + name: podinfo +spec: + interval: 1m + provider: generic + bucketName: podinfo + endpoint: minio.minio.svc.cluster.local:9000 + region: us-east-1 + insecure: true + secretRef: + name: minio-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-credentials +data: + accesskey: bXlhY2Nlc3NrZXk= + secretkey: bXlzZWNyZXRrZXk= diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go new file mode 100644 index 00000000..15798a58 --- /dev/null +++ b/controllers/bucket_controller.go @@ -0,0 +1,365 @@ +/* +Copyright 2020 The Flux CD contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "crypto/sha1" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + "github.com/go-logr/logr" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + kuberecorder "k8s.io/client-go/tools/record" + "k8s.io/client-go/tools/reference" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + + "github.com/fluxcd/pkg/recorder" + "github.com/fluxcd/pkg/runtime/predicates" + + sourcev1 "github.com/fluxcd/source-controller/api/v1alpha1" +) + +// BucketReconciler reconciles a Bucket object +type BucketReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme + Storage *Storage + EventRecorder kuberecorder.EventRecorder + ExternalEventRecorder *recorder.EventRecorder +} + +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch + +func (r *BucketReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + start := time.Now() + + var bucket sourcev1.Bucket + if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + log := r.Log.WithValues("controller", strings.ToLower(sourcev1.BucketKind), "request", req.NamespacedName) + + // Examine if the object is under deletion + if bucket.ObjectMeta.DeletionTimestamp.IsZero() { + // The object is not being deleted, so if it does not have our finalizer, + // then lets add the finalizer and update the object. This is equivalent + // registering our finalizer. 
+ if !containsString(bucket.ObjectMeta.Finalizers, sourcev1.SourceFinalizer) { + bucket.ObjectMeta.Finalizers = append(bucket.ObjectMeta.Finalizers, sourcev1.SourceFinalizer) + if err := r.Update(ctx, &bucket); err != nil { + log.Error(err, "unable to register finalizer") + return ctrl.Result{}, err + } + } + } else { + // The object is being deleted + if containsString(bucket.ObjectMeta.Finalizers, sourcev1.SourceFinalizer) { + // Our finalizer is still present, so lets handle garbage collection + if err := r.gc(bucket, true); err != nil { + r.event(bucket, recorder.EventSeverityError, fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) + // Return the error so we retry the failed garbage collection + return ctrl.Result{}, err + } + // Remove our finalizer from the list and update it + bucket.ObjectMeta.Finalizers = removeString(bucket.ObjectMeta.Finalizers, sourcev1.SourceFinalizer) + if err := r.Update(ctx, &bucket); err != nil { + return ctrl.Result{}, err + } + // Stop reconciliation as the object is being deleted + return ctrl.Result{}, nil + } + } + + // set initial status + if resetBucket, ok := r.resetStatus(bucket); ok { + bucket = resetBucket + if err := r.Status().Update(ctx, &bucket); err != nil { + log.Error(err, "unable to update status") + return ctrl.Result{Requeue: true}, err + } + } + + // purge old artifacts from storage + if err := r.gc(bucket, false); err != nil { + log.Error(err, "unable to purge old artifacts") + } + + // reconcile bucket by downloading its content + reconciledBucket, reconcileErr := r.reconcile(ctx, *bucket.DeepCopy()) + + // update status with the reconciliation result + if err := r.Status().Update(ctx, &reconciledBucket); err != nil { + log.Error(err, "unable to update status") + return ctrl.Result{Requeue: true}, err + } + + // if reconciliation failed, record the failure and requeue immediately + if reconcileErr != nil { + r.event(reconciledBucket, recorder.EventSeverityError, 
reconcileErr.Error()) + return ctrl.Result{Requeue: true}, reconcileErr + } + + // emit revision change event + if bucket.Status.Artifact == nil || reconciledBucket.Status.Artifact.Revision != bucket.Status.Artifact.Revision { + r.event(reconciledBucket, recorder.EventSeverityInfo, sourcev1.BucketReadyMessage(reconciledBucket)) + } + + log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", + time.Now().Sub(start).String(), + bucket.GetInterval().Duration.String(), + )) + + return ctrl.Result{RequeueAfter: bucket.GetInterval().Duration}, nil +} + +type BucketReconcilerOptions struct { + MaxConcurrentReconciles int +} + +func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { + return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{}) +} + +func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts BucketReconcilerOptions) error { + return ctrl.NewControllerManagedBy(mgr). + For(&sourcev1.Bucket{}). + WithEventFilter(predicates.ChangePredicate{}). + WithOptions(controller.Options{MaxConcurrentReconciles: opts.MaxConcurrentReconciles}). 
+ Complete(r) +} + +func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) { + s3Client, err := r.auth(ctx, bucket) + if err != nil { + err = fmt.Errorf("auth error: %w", err) + return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + } + + // create tmp dir + tempDir, err := ioutil.TempDir("", bucket.Name) + if err != nil { + err = fmt.Errorf("tmp dir error: %w", err) + return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + } + defer os.RemoveAll(tempDir) + + ctxTimeout, cancel := context.WithTimeout(ctx, bucket.GetTimeout()) + defer cancel() + + exists, err := s3Client.BucketExists(ctxTimeout, bucket.Spec.BucketName) + if err != nil { + return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + } + if !exists { + err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) + return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + } + + // download bucket content + for object := range s3Client.ListObjects(ctxTimeout, bucket.Spec.BucketName, minio.ListObjectsOptions{Recursive: true}) { + if object.Err != nil { + err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, object.Err) + return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + } + + if strings.HasSuffix(object.Key, "/") { + continue + } + + localPath := filepath.Join(tempDir, object.Key) + err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Key, localPath, minio.GetObjectOptions{}) + if err != nil { + err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err) + return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + } + } + + revision, err := r.checksum(tempDir) + if err != nil { + return sourcev1.BucketNotReady(bucket, 
sourcev1.StorageOperationFailedReason, err.Error()), err + } + + // return early on unchanged revision + artifact := r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), revision, fmt.Sprintf("%s.tar.gz", revision)) + if bucket.GetArtifact() != nil && bucket.GetArtifact().Revision == revision { + if artifact.URL != bucket.GetArtifact().URL { + r.Storage.SetArtifactURL(bucket.GetArtifact()) + bucket.Status.URL = r.Storage.SetHostname(bucket.Status.URL) + } + return bucket, nil + } + + // create artifact dir + err = r.Storage.MkdirAll(artifact) + if err != nil { + err = fmt.Errorf("mkdir dir error: %w", err) + return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + } + + // acquire lock + unlock, err := r.Storage.Lock(artifact) + if err != nil { + err = fmt.Errorf("unable to acquire lock: %w", err) + return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + } + defer unlock() + + // archive artifact and check integrity + if err := r.Storage.Archive(&artifact, tempDir, bucket.Spec.Ignore); err != nil { + err = fmt.Errorf("storage archive error: %w", err) + return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + } + + // update latest symlink + url, err := r.Storage.Symlink(artifact, "latest.tar.gz") + if err != nil { + err = fmt.Errorf("storage symlink error: %w", err) + return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + } + + message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) + return sourcev1.BucketReady(bucket, artifact, url, sourcev1.BucketOperationSucceedReason, message), nil +} + +func (r *BucketReconciler) auth(ctx context.Context, bucket sourcev1.Bucket) (*minio.Client, error) { + opt := minio.Options{ + Region: bucket.Spec.Region, + Secure: !bucket.Spec.Insecure, + } + + if bucket.Spec.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: 
bucket.GetNamespace(), + Name: bucket.Spec.SecretRef.Name, + } + + var secret corev1.Secret + if err := r.Get(ctx, secretName, &secret); err != nil { + return nil, fmt.Errorf("credentials secret error: %w", err) + } + + accesskey := "" + secretkey := "" + if k, ok := secret.Data["accesskey"]; ok { + accesskey = string(k) + } + if k, ok := secret.Data["secretkey"]; ok { + secretkey = string(k) + } + if accesskey == "" || secretkey == "" { + return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) + } + opt.Creds = credentials.NewStaticV4(accesskey, secretkey, "") + } else if bucket.Spec.Provider == sourcev1.AmazonBucketProvider { + opt.Creds = credentials.NewIAM("") + } + + if opt.Creds == nil { + return nil, fmt.Errorf("no bucket credentials found") + } + + return minio.New(bucket.Spec.Endpoint, &opt) +} + +func (r *BucketReconciler) checksum(root string) (string, error) { + checksum := "" + err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.Mode().IsRegular() { + return nil + } + data, err := ioutil.ReadFile(path) + if err != nil { + return err + } + checksum += fmt.Sprintf("%x", sha1.Sum(data)) + return nil + }) + if err != nil { + return "", err + } + + return fmt.Sprintf("%x", sha1.Sum([]byte(checksum))), nil +} + +// resetStatus returns a modified v1alpha1.Bucket and a boolean indicating +// if the status field has been reset. +func (r *BucketReconciler) resetStatus(bucket sourcev1.Bucket) (sourcev1.Bucket, bool) { + if bucket.GetArtifact() != nil && !r.Storage.ArtifactExist(*bucket.GetArtifact()) { + bucket = sourcev1.BucketProgressing(bucket) + bucket.Status.Artifact = nil + return bucket, true + } + if bucket.Generation != bucket.Status.ObservedGeneration { + return sourcev1.BucketProgressing(bucket), true + } + return bucket, false +} + +// gc performs a garbage collection on all but current artifacts of the given bucket. 
+func (r *BucketReconciler) gc(bucket sourcev1.Bucket, all bool) error { + if all { + return r.Storage.RemoveAll(r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), "", "")) + } + if bucket.GetArtifact() != nil { + return r.Storage.RemoveAllButCurrent(*bucket.GetArtifact()) + } + return nil +} + +// event emits a Kubernetes event and forwards the event to notification controller if configured +func (r *BucketReconciler) event(bucket sourcev1.Bucket, severity, msg string) { + if r.EventRecorder != nil { + r.EventRecorder.Eventf(&bucket, "Normal", severity, msg) + } + if r.ExternalEventRecorder != nil { + objRef, err := reference.GetReference(r.Scheme, &bucket) + if err != nil { + r.Log.WithValues( + "request", + fmt.Sprintf("%s/%s", bucket.GetNamespace(), bucket.GetName()), + ).Error(err, "unable to send event") + return + } + + if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { + r.Log.WithValues( + "request", + fmt.Sprintf("%s/%s", bucket.GetNamespace(), bucket.GetName()), + ).Error(err, "unable to send event") + return + } + } +} diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index a6aaced6..f6a3f8df 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -233,7 +233,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sour defer unlock() // archive artifact and check integrity - if err := r.Storage.Archive(&artifact, tmpGit, repository.Spec); err != nil { + if err := r.Storage.Archive(&artifact, tmpGit, repository.Spec.Ignore); err != nil { err = fmt.Errorf("storage archive error: %w", err) return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err } @@ -241,7 +241,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sour // update latest symlink url, err := r.Storage.Symlink(artifact, "latest.tar.gz") if err != nil 
{ - err = fmt.Errorf("storage lock error: %w", err) + err = fmt.Errorf("storage symlink error: %w", err) return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err } diff --git a/controllers/storage.go b/controllers/storage.go index 41883cc9..f42a14d4 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -152,12 +152,12 @@ func (s *Storage) ArtifactExist(artifact sourcev1.Artifact) bool { // Archive atomically archives the given directory as a tarball to the given v1alpha1.Artifact // path, excluding any VCS specific files and directories, or any of the excludes defined in // the excludeFiles. If successful, it sets the checksum and last update time on the artifact. -func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, spec sourcev1.GitRepositorySpec) (err error) { +func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, ignore *string) (err error) { if f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() { return fmt.Errorf("invalid dir path: %s", dir) } - ps, err := loadExcludePatterns(dir, spec) + ps, err := loadExcludePatterns(dir, ignore) if err != nil { return err } @@ -404,7 +404,7 @@ func getPatterns(reader io.Reader, path []string) []gitignore.Pattern { // loadExcludePatterns loads the excluded patterns from sourceignore or other // sources. 
-func loadExcludePatterns(dir string, spec sourcev1.GitRepositorySpec) ([]gitignore.Pattern, error) { +func loadExcludePatterns(dir string, ignore *string) ([]gitignore.Pattern, error) { path := strings.Split(dir, "/") var ps []gitignore.Pattern @@ -412,7 +412,7 @@ func loadExcludePatterns(dir string, spec sourcev1.GitRepositorySpec) ([]gitigno ps = append(ps, gitignore.ParsePattern(p, path)) } - if spec.Ignore == nil { + if ignore == nil { for _, p := range strings.Split(excludeExt, ",") { ps = append(ps, gitignore.ParsePattern(p, path)) } @@ -424,7 +424,7 @@ func loadExcludePatterns(dir string, spec sourcev1.GitRepositorySpec) ([]gitigno return nil, err } } else { - ps = append(ps, getPatterns(bytes.NewBufferString(*spec.Ignore), path)...) + ps = append(ps, getPatterns(bytes.NewBufferString(*ignore), path)...) } return ps, nil diff --git a/controllers/storage_test.go b/controllers/storage_test.go index a90a7229..8ec9d42c 100644 --- a/controllers/storage_test.go +++ b/controllers/storage_test.go @@ -159,7 +159,7 @@ func createArchive(t *testing.T, storage *Storage, filenames []string, sourceIgn t.Fatalf("artifact directory creation failed: %v", err) } - if err := storage.Archive(&artifact, gitDir, spec); err != nil { + if err := storage.Archive(&artifact, gitDir, spec.Ignore); err != nil { t.Fatalf("archiving failed: %v", err) } diff --git a/docs/api/source.md b/docs/api/source.md index 46508777..a264f4bb 100644 --- a/docs/api/source.md +++ b/docs/api/source.md @@ -9,12 +9,202 @@

Package v1alpha1 contains API Schema definitions for the source v1alpha1 API group

Resource Types: +

Bucket +

+

Bucket is the Schema for the buckets API

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1alpha1 +
+kind
+string +
+Bucket +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +BucketSpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+provider
+ +string + +
+(Optional) +

The S3 compatible storage provider name, default (‘generic’).

+
+bucketName
+ +string + +
+

The bucket name.

+
+endpoint
+ +string + +
+

The bucket endpoint address.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS S3 HTTP endpoint.

+
+region
+ +string + +
+(Optional) +

The bucket region.

+
+secretRef
+ + +Kubernetes core/v1.LocalObjectReference + + +
+(Optional) +

The secret name containing the bucket accesskey and secretkey.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

The interval at which to check for bucket updates.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

The timeout for download operations, default (‘20s’).

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore +format (which is the same as .gitignore).

+
+
+status
+ + +BucketStatus + + +
+
+
+

GitRepository

GitRepository is the Schema for the gitrepositories API

@@ -467,6 +657,7 @@ HelmRepositoryStatus

(Appears on: +BucketStatus, GitRepositoryStatus, HelmChartStatus, HelmRepositoryStatus) @@ -548,6 +739,212 @@ update of this artifact.

+

BucketSpec +

+

+(Appears on: +Bucket) +

+

BucketSpec defines the desired state of an S3 compatible bucket

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+(Optional) +

The S3 compatible storage provider name, default (‘generic’).

+
+bucketName
+ +string + +
+

The bucket name.

+
+endpoint
+ +string + +
+

The bucket endpoint address.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS S3 HTTP endpoint.

+
+region
+ +string + +
+(Optional) +

The bucket region.

+
+secretRef
+ + +Kubernetes core/v1.LocalObjectReference + + +
+(Optional) +

The secret name containing the bucket accesskey and secretkey.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

The interval at which to check for bucket updates.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

The timeout for download operations, default (‘20s’).

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore +format (which is the same as .gitignore).

+
+
+
+

BucketStatus +

+

+(Appears on: +Bucket) +

+

BucketStatus defines the observed state of a bucket

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation.

+
+conditions
+ + +[]SourceCondition + + +
+(Optional) +

Conditions holds the conditions for the Bucket.

+
+url
+ +string + +
+(Optional) +

URL is the download link for the artifact output of the last Bucket sync.

+
+artifact
+ + +Artifact + + +
+(Optional) +

Artifact represents the output of the last successful Bucket sync.

+
+
+

GitRepositoryRef

@@ -1228,6 +1625,7 @@ string

(Appears on: +BucketStatus, GitRepositoryStatus, HelmChartStatus, HelmRepositoryStatus) diff --git a/go.mod b/go.mod index 766f5b68..eeb6fdc9 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,7 @@ require ( github.com/go-git/go-billy/v5 v5.0.0 github.com/go-git/go-git/v5 v5.1.0 github.com/go-logr/logr v0.1.0 + github.com/minio/minio-go/v7 v7.0.5 github.com/onsi/ginkgo v1.12.1 github.com/onsi/gomega v1.10.1 helm.sh/helm/v3 v3.3.1 diff --git a/go.sum b/go.sum index 9ef50f4a..f3be6257 100644 --- a/go.sum +++ b/go.sum @@ -455,6 +455,9 @@ github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= +github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= @@ -506,6 +509,14 @@ github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= +github.com/minio/md5-simd 
v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= +github.com/minio/minio-go v1.0.0 h1:ooSujki+Z1PRGZsYffJw5jnF5eMBvzMVV86TLAlM0UM= +github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o= +github.com/minio/minio-go/v7 v7.0.5 h1:I2NIJ2ojwJqD/YByemC1M59e1b4FW9kS7NlOar7HPV4= +github.com/minio/minio-go/v7 v7.0.5/go.mod h1:TA0CQCjJZHM5SJj9IjqR0NmpmQJ6bCbXifAJ3mUU6Hw= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= +github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -648,6 +659,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.4.0 h1:LUa41nrWTQNGhzdsZ5lTnkwbNjj6rXTdazA1cSdjkOY= github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 h1:HXr/qUllAWv9riaI4zh2eXWKmCSDqVS/XH1MRHLKRwk= github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= @@ -781,6 +794,7 @@ golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de h1:ikNHVSjEfnvz6sxdSPCaPt572qowuyMDMJLLm3Db3ig= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -819,6 +833,8 @@ golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -863,6 +879,8 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -954,6 +972,8 @@ gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw= gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= diff --git a/main.go b/main.go index d32afe83..137b6c35 100644 --- a/main.go +++ b/main.go @@ -35,6 +35,7 @@ import ( "github.com/fluxcd/pkg/recorder" "github.com/fluxcd/pkg/runtime/logger" + sourcev1 "github.com/fluxcd/source-controller/api/v1alpha1" "github.com/fluxcd/source-controller/controllers" // +kubebuilder:scaffold:imports @@ -161,6 +162,19 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmChartKind) os.Exit(1) } + if err = (&controllers.BucketReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("Bucket"), + Scheme: mgr.GetScheme(), + Storage: storage, + EventRecorder: mgr.GetEventRecorderFor("source-controller"), + ExternalEventRecorder: eventRecorder, + 
}).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{ + MaxConcurrentReconciles: concurrent, + }); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Bucket") + os.Exit(1) + } // +kubebuilder:scaffold:builder setupLog.Info("starting manager")