diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go
new file mode 100644
index 00000000..63b2b96f
--- /dev/null
+++ b/controllers/bucket_controller.go
@@ -0,0 +1,351 @@
+/*
+Copyright 2020 The Flux CD contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"crypto/sha1"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	kuberecorder "k8s.io/client-go/tools/record"
+	"k8s.io/client-go/tools/reference"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+
+	"github.com/fluxcd/pkg/recorder"
+	"github.com/fluxcd/pkg/runtime/predicates"
+
+	sourcev1 "github.com/fluxcd/source-controller/api/v1alpha1"
+)
+
+// BucketReconciler reconciles a Bucket object
+type BucketReconciler struct {
+	client.Client
+	Log                   logr.Logger
+	Scheme                *runtime.Scheme
+	Storage               *Storage
+	EventRecorder         kuberecorder.EventRecorder
+	ExternalEventRecorder *recorder.EventRecorder
+}
+
+// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status,verbs=get;update;patch
+
+func (r *BucketReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
+	ctx := context.Background()
+	start := time.Now()
+
+	var bucket sourcev1.Bucket
+	if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil {
+		return ctrl.Result{}, client.IgnoreNotFound(err)
+	}
+
+	log := r.Log.WithValues("controller", strings.ToLower(sourcev1.BucketKind), "request", req.NamespacedName)
+
+	// Examine if the object is under deletion
+	if bucket.ObjectMeta.DeletionTimestamp.IsZero() {
+		// The object is not being deleted, so if it does not have our finalizer,
+		// then let's add the finalizer and update the object. This is equivalent
+		// to registering our finalizer.
+		if !containsString(bucket.ObjectMeta.Finalizers, sourcev1.SourceFinalizer) {
+			bucket.ObjectMeta.Finalizers = append(bucket.ObjectMeta.Finalizers, sourcev1.SourceFinalizer)
+			if err := r.Update(ctx, &bucket); err != nil {
+				log.Error(err, "unable to register finalizer")
+				return ctrl.Result{}, err
+			}
+		}
+	} else {
+		// The object is being deleted
+		if containsString(bucket.ObjectMeta.Finalizers, sourcev1.SourceFinalizer) {
+			// Our finalizer is still present, so let's handle garbage collection
+			if err := r.gc(bucket, true); err != nil {
+				r.event(bucket, recorder.EventSeverityError, fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error()))
+				// Return the error so we retry the failed garbage collection
+				return ctrl.Result{}, err
+			}
+			// Remove our finalizer from the list and update it
+			bucket.ObjectMeta.Finalizers = removeString(bucket.ObjectMeta.Finalizers, sourcev1.SourceFinalizer)
+			if err := r.Update(ctx, &bucket); err != nil {
+				return ctrl.Result{}, err
+			}
+			// Stop reconciliation as the object is being deleted
+			return ctrl.Result{}, nil
+		}
+	}
+
+	// set initial status
+	if resetBucket, ok := r.resetStatus(bucket); ok {
+		bucket = resetBucket
+		if err := r.Status().Update(ctx, &bucket); err != nil {
+			log.Error(err, "unable to update status")
+			return ctrl.Result{Requeue: true}, err
+		}
+	}
+
+	// purge old artifacts from storage
+	if err := r.gc(bucket, false); err != nil {
+		log.Error(err, "unable to purge old artifacts")
+	}
+
+	// reconcile bucket by downloading its content
+	reconciledBucket, reconcileErr := r.reconcile(ctx, *bucket.DeepCopy())
+
+	// update status with the reconciliation result
+	if err := r.Status().Update(ctx, &reconciledBucket); err != nil {
+		log.Error(err, "unable to update status")
+		return ctrl.Result{Requeue: true}, err
+	}
+
+	// if reconciliation failed, record the failure and requeue immediately
+	if reconcileErr != nil {
+		r.event(reconciledBucket, recorder.EventSeverityError, reconcileErr.Error())
+		return ctrl.Result{Requeue: true}, reconcileErr
+	}
+
+	// emit revision change event
+	if bucket.Status.Artifact == nil || reconciledBucket.Status.Artifact.Revision != bucket.Status.Artifact.Revision {
+		r.event(reconciledBucket, recorder.EventSeverityInfo, sourcev1.BucketReadyMessage(reconciledBucket))
+	}
+
+	log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s",
+		time.Now().Sub(start).String(),
+		bucket.GetInterval().Duration.String(),
+	))
+
+	return ctrl.Result{RequeueAfter: bucket.GetInterval().Duration}, nil
+}
+
+type BucketReconcilerOptions struct {
+	MaxConcurrentReconciles int
+}
+
+func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error {
+	return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{})
+}
+
+func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts BucketReconcilerOptions) error {
+	return ctrl.NewControllerManagedBy(mgr).
+		For(&sourcev1.Bucket{}).
+		WithEventFilter(predicates.ChangePredicate{}).
+		WithOptions(controller.Options{MaxConcurrentReconciles: opts.MaxConcurrentReconciles}).
+		Complete(r)
+}
+
+func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) {
+	s3Client, err := r.auth(ctx, bucket)
+	if err != nil {
+		err = fmt.Errorf("auth error: %w", err)
+		return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err
+	}
+
+	// create tmp dir
+	tempDir, err := ioutil.TempDir("", bucket.Name)
+	if err != nil {
+		err = fmt.Errorf("tmp dir error: %w", err)
+		return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
+	}
+	defer os.RemoveAll(tempDir)
+
+	ctxTimeout, cancel := context.WithTimeout(ctx, bucket.GetTimeout())
+	defer cancel()
+
+	// download bucket content
+	for object := range s3Client.ListObjects(ctxTimeout, bucket.Spec.BucketName, minio.ListObjectsOptions{Recursive: true}) {
+		if object.Err != nil {
+			err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, object.Err)
+			return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
+		}
+
+		if strings.HasSuffix(object.Key, "/") {
+			continue
+		}
+
+		localPath := filepath.Join(tempDir, object.Key)
+		err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Key, localPath, minio.GetObjectOptions{})
+		if err != nil {
+			err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err)
+			return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
+		}
+	}
+
+	revision, err := r.checksum(tempDir)
+	if err != nil {
+		return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
+	}
+
+	// return early on unchanged revision
+	artifact := r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), revision, fmt.Sprintf("%s.tar.gz", revision))
+	if bucket.GetArtifact() != nil && bucket.GetArtifact().Revision == revision {
+		if artifact.URL != bucket.GetArtifact().URL {
+			r.Storage.SetArtifactURL(bucket.GetArtifact())
+			bucket.Status.URL = r.Storage.SetHostname(bucket.Status.URL)
+		}
+		return bucket, nil
+	}
+
+	// create artifact dir
+	err = r.Storage.MkdirAll(artifact)
+	if err != nil {
+		err = fmt.Errorf("mkdir dir error: %w", err)
+		return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
+	}
+
+	// acquire lock
+	unlock, err := r.Storage.Lock(artifact)
+	if err != nil {
+		err = fmt.Errorf("unable to acquire lock: %w", err)
+		return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
+	}
+	defer unlock()
+
+	// archive artifact and check integrity
+	if err := r.Storage.Archive(&artifact, tempDir, bucket.Spec.Ignore); err != nil {
+		err = fmt.Errorf("storage archive error: %w", err)
+		return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
+	}
+
+	// update latest symlink
+	url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
+	if err != nil {
+		err = fmt.Errorf("storage symlink error: %w", err)
+		return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
+	}
+
+	message := fmt.Sprintf("Fetched revision: %s", artifact.Revision)
+	return sourcev1.BucketReady(bucket, artifact, url, sourcev1.BucketOperationSucceedReason, message), nil
+}
+
+func (r *BucketReconciler) auth(ctx context.Context, bucket sourcev1.Bucket) (*minio.Client, error) {
+	opt := minio.Options{
+		Region: bucket.Spec.Region,
+		Secure: !bucket.Spec.Insecure,
+	}
+
+	if bucket.Spec.SecretRef != nil {
+		secretName := types.NamespacedName{
+			Namespace: bucket.GetNamespace(),
+			Name:      bucket.Spec.SecretRef.Name,
+		}
+
+		var secret corev1.Secret
+		if err := r.Get(ctx, secretName, &secret); err != nil {
+			return nil, fmt.Errorf("credentials secret error: %w", err)
+		}
+
+		accesskey := ""
+		secretkey := ""
+		if k, ok := secret.Data["accesskey"]; ok {
+			accesskey = string(k)
+		}
+		if k, ok := secret.Data["secretkey"]; ok {
+			secretkey = string(k)
+		}
+		if accesskey == "" || secretkey == "" {
+			return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name)
+		}
+		opt.Creds = credentials.NewStaticV4(accesskey, secretkey, "")
+	} else if bucket.Spec.Provider == "aws" {
+		opt.Creds = credentials.NewIAM("")
+	}
+
+	return minio.New(bucket.Spec.Endpoint, &opt)
+}
+
+func (r *BucketReconciler) checksum(root string) (string, error) {
+	checksum := ""
+	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.Mode().IsRegular() {
+			return nil
+		}
+		data, err := ioutil.ReadFile(path)
+		if err != nil {
+			return err
+		}
+		checksum += fmt.Sprintf("%x", sha1.Sum(data))
+		return nil
+	})
+	if err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("%x", sha1.Sum([]byte(checksum))), nil
+}
+
+// resetStatus returns a modified v1alpha1.Bucket and a boolean indicating
+// if the status field has been reset.
+func (r *BucketReconciler) resetStatus(bucket sourcev1.Bucket) (sourcev1.Bucket, bool) {
+	if bucket.GetArtifact() != nil && !r.Storage.ArtifactExist(*bucket.GetArtifact()) {
+		bucket = sourcev1.BucketProgressing(bucket)
+		bucket.Status.Artifact = nil
+		return bucket, true
+	}
+	if bucket.Generation != bucket.Status.ObservedGeneration {
+		return sourcev1.BucketProgressing(bucket), true
+	}
+	return bucket, false
+}
+
+// gc performs a garbage collection on all but current artifacts of the given bucket.
+func (r *BucketReconciler) gc(bucket sourcev1.Bucket, all bool) error {
+	if all {
+		return r.Storage.RemoveAll(r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), "", ""))
+	}
+	if bucket.GetArtifact() != nil {
+		return r.Storage.RemoveAllButCurrent(*bucket.GetArtifact())
+	}
+	return nil
+}
+
+// event emits a Kubernetes event and forwards the event to the notification controller if configured
+func (r *BucketReconciler) event(bucket sourcev1.Bucket, severity, msg string) {
+	if r.EventRecorder != nil {
+		r.EventRecorder.Eventf(&bucket, "Normal", severity, msg)
+	}
+	if r.ExternalEventRecorder != nil {
+		objRef, err := reference.GetReference(r.Scheme, &bucket)
+		if err != nil {
+			r.Log.WithValues(
+				"request",
+				fmt.Sprintf("%s/%s", bucket.GetNamespace(), bucket.GetName()),
+			).Error(err, "unable to send event")
+			return
+		}
+
+		if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil {
+			r.Log.WithValues(
+				"request",
+				fmt.Sprintf("%s/%s", bucket.GetNamespace(), bucket.GetName()),
+			).Error(err, "unable to send event")
+			return
+		}
+	}
+}
diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go
index a6aaced6..f6a3f8df 100644
--- a/controllers/gitrepository_controller.go
+++ b/controllers/gitrepository_controller.go
@@ -233,7 +233,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sour
 	defer unlock()
 
 	// archive artifact and check integrity
-	if err := r.Storage.Archive(&artifact, tmpGit, repository.Spec); err != nil {
+	if err := r.Storage.Archive(&artifact, tmpGit, repository.Spec.Ignore); err != nil {
 		err = fmt.Errorf("storage archive error: %w", err)
 		return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err
 	}
@@ -241,7 +241,7 @@
 	// update latest symlink
 	url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
 	if err != nil {
-		err = fmt.Errorf("storage lock error: %w", err)
+		err = fmt.Errorf("storage symlink error: %w", err)
 		return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err
 	}
 
diff --git a/controllers/storage.go b/controllers/storage.go
index 41883cc9..f42a14d4 100644
--- a/controllers/storage.go
+++ b/controllers/storage.go
@@ -152,12 +152,12 @@ func (s *Storage) ArtifactExist(artifact sourcev1.Artifact) bool {
 // Archive atomically archives the given directory as a tarball to the given v1alpha1.Artifact
 // path, excluding any VCS specific files and directories, or any of the excludes defined in
 // the excludeFiles. If successful, it sets the checksum and last update time on the artifact.
-func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, spec sourcev1.GitRepositorySpec) (err error) {
+func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, ignore *string) (err error) {
 	if f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() {
 		return fmt.Errorf("invalid dir path: %s", dir)
 	}
 
-	ps, err := loadExcludePatterns(dir, spec)
+	ps, err := loadExcludePatterns(dir, ignore)
 	if err != nil {
 		return err
 	}
@@ -404,7 +404,7 @@ func getPatterns(reader io.Reader, path []string) []gitignore.Pattern {
 
 // loadExcludePatterns loads the excluded patterns from sourceignore or other
 // sources.
-func loadExcludePatterns(dir string, spec sourcev1.GitRepositorySpec) ([]gitignore.Pattern, error) {
+func loadExcludePatterns(dir string, ignore *string) ([]gitignore.Pattern, error) {
 	path := strings.Split(dir, "/")
 
 	var ps []gitignore.Pattern
@@ -412,7 +412,7 @@ func loadExcludePatterns(dir string, spec sourcev1.GitRepositorySpec) ([]gitigno
 		ps = append(ps, gitignore.ParsePattern(p, path))
 	}
 
-	if spec.Ignore == nil {
+	if ignore == nil {
 		for _, p := range strings.Split(excludeExt, ",") {
 			ps = append(ps, gitignore.ParsePattern(p, path))
 		}
@@ -424,7 +424,7 @@ func loadExcludePatterns(dir string, spec sourcev1.GitRepositorySpec) ([]gitigno
 			return nil, err
 		}
 	} else {
-		ps = append(ps, getPatterns(bytes.NewBufferString(*spec.Ignore), path)...)
+		ps = append(ps, getPatterns(bytes.NewBufferString(*ignore), path)...)
 	}
 
 	return ps, nil
diff --git a/controllers/storage_test.go b/controllers/storage_test.go
index a90a7229..8ec9d42c 100644
--- a/controllers/storage_test.go
+++ b/controllers/storage_test.go
@@ -159,7 +159,7 @@ func createArchive(t *testing.T, storage *Storage, filenames []string, sourceIgn
 		t.Fatalf("artifact directory creation failed: %v", err)
 	}
 
-	if err := storage.Archive(&artifact, gitDir, spec); err != nil {
+	if err := storage.Archive(&artifact, gitDir, spec.Ignore); err != nil {
 		t.Fatalf("archiving failed: %v", err)
 	}
 
diff --git a/go.mod b/go.mod
index 766f5b68..eeb6fdc9 100644
--- a/go.mod
+++ b/go.mod
@@ -17,6 +17,7 @@ require (
 	github.com/go-git/go-billy/v5 v5.0.0
 	github.com/go-git/go-git/v5 v5.1.0
 	github.com/go-logr/logr v0.1.0
+	github.com/minio/minio-go/v7 v7.0.5
 	github.com/onsi/ginkgo v1.12.1
 	github.com/onsi/gomega v1.10.1
 	helm.sh/helm/v3 v3.3.1
diff --git a/go.sum b/go.sum
index 9ef50f4a..f3be6257 100644
--- a/go.sum
+++ b/go.sum
@@ -455,6 +455,9 @@ github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
+github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
@@ -506,6 +509,14 @@ github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4=
+github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
+github.com/minio/minio-go v1.0.0 h1:ooSujki+Z1PRGZsYffJw5jnF5eMBvzMVV86TLAlM0UM=
+github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o=
+github.com/minio/minio-go/v7 v7.0.5 h1:I2NIJ2ojwJqD/YByemC1M59e1b4FW9kS7NlOar7HPV4=
+github.com/minio/minio-go/v7 v7.0.5/go.mod h1:TA0CQCjJZHM5SJj9IjqR0NmpmQJ6bCbXifAJ3mUU6Hw=
+github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
+github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
@@ -648,6 +659,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
 github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
 github.com/rogpeppe/go-internal v1.4.0 h1:LUa41nrWTQNGhzdsZ5lTnkwbNjj6rXTdazA1cSdjkOY=
 github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
 github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 h1:HXr/qUllAWv9riaI4zh2eXWKmCSDqVS/XH1MRHLKRwk=
 github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg=
 github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
@@ -781,6 +794,7 @@ golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de h1:ikNHVSjEfnvz6sxdSPCaPt572qowuyMDMJLLm3Db3ig=
 golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -819,6 +833,8 @@ golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
@@ -863,6 +879,8 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -954,6 +972,8 @@ gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw=
 gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww=
+gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
 gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
 gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
diff --git a/main.go b/main.go
index d32afe83..137b6c35 100644
--- a/main.go
+++ b/main.go
@@ -35,6 +35,7 @@ import (
 
 	"github.com/fluxcd/pkg/recorder"
 	"github.com/fluxcd/pkg/runtime/logger"
 
+	sourcev1 "github.com/fluxcd/source-controller/api/v1alpha1"
 	"github.com/fluxcd/source-controller/controllers"
 	// +kubebuilder:scaffold:imports
@@ -161,6 +162,19 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", sourcev1.HelmChartKind)
 		os.Exit(1)
 	}
+	if err = (&controllers.BucketReconciler{
+		Client:                mgr.GetClient(),
+		Log:                   ctrl.Log.WithName("controllers").WithName("Bucket"),
+		Scheme:                mgr.GetScheme(),
+		Storage:               storage,
+		EventRecorder:         mgr.GetEventRecorderFor("source-controller"),
+		ExternalEventRecorder: eventRecorder,
+	}).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{
+		MaxConcurrentReconciles: concurrent,
+	}); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "Bucket")
+		os.Exit(1)
+	}
 	// +kubebuilder:scaffold:builder
 
 	setupLog.Info("starting manager")
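
Note on helpers: Reconcile relies on containsString and removeString for finalizer bookkeeping, but neither function is defined in this diff; they are assumed to already exist in the controllers package, as the other source kinds use the same pattern. A minimal sketch of what such helpers typically look like, inferred from their call sites and not part of this change:

// Assumed helpers, shown for reference only; the repository is expected to
// already provide equivalents of these in the controllers package.

// containsString reports whether s is present in slice.
func containsString(slice []string, s string) bool {
	for _, item := range slice {
		if item == s {
			return true
		}
	}
	return false
}

// removeString returns a copy of slice with every occurrence of s removed.
func removeString(slice []string, s string) []string {
	result := make([]string, 0, len(slice))
	for _, item := range slice {
		if item != s {
			result = append(result, item)
		}
	}
	return result
}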
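
Note on credentials: auth only uses a referenced Secret when both the accesskey and secretkey data fields are present, and otherwise falls back to IAM credentials when spec.provider is "aws". A self-contained sketch that renders a Secret matching what auth expects; the object name, namespace and values are placeholders, only the two data keys come from the code above:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	// Placeholder name, namespace and values; the "accesskey" and "secretkey"
	// keys are the ones BucketReconciler.auth reads from Secret.Data.
	secret := corev1.Secret{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Secret"},
		ObjectMeta: metav1.ObjectMeta{Name: "minio-credentials", Namespace: "default"},
		StringData: map[string]string{
			"accesskey": "EXAMPLE_ACCESS_KEY",
			"secretkey": "EXAMPLE_SECRET_KEY",
		},
	}
	out, err := yaml.Marshal(secret)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}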