Add reconciler sharding capability based on label
With this enhancement, the controller can be configured with `--watch-label-selector`, after which only objects with this label will be reconciled by the controller. This allows for horizontal scaling of the helm-controller, where each controller can be deployed multiple times with a unique label selector which is used as the sharding key.

Note that if you want to ensure a `HelmChart` gets created for a specific source-controller instance, you have to provide the labels for this controller in `.spec.chart.metadata.labels` of the `HelmRelease`.

Signed-off-by: Hidde Beydals <hidde@hhh.computer>
This commit is contained in:
parent
5a1c5138de
commit
08925bc282
|
@ -7,7 +7,7 @@ require (
|
|||
github.com/fluxcd/pkg/apis/meta v1.0.0
|
||||
k8s.io/apiextensions-apiserver v0.26.3
|
||||
k8s.io/apimachinery v0.26.3
|
||||
sigs.k8s.io/controller-runtime v0.14.5
|
||||
sigs.k8s.io/controller-runtime v0.14.6
|
||||
)
|
||||
|
||||
require (
|
||||
|
|
|
@ -86,8 +86,8 @@ k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
|
|||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
|
||||
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/controller-runtime v0.14.5 h1:6xaWFqzT5KuAQ9ufgUaj1G/+C4Y1GRkhrxl+BJ9i+5s=
|
||||
sigs.k8s.io/controller-runtime v0.14.5/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
|
||||
sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA=
|
||||
sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
|
||||
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
|
||||
|
|
4
go.mod
4
go.mod
|
@ -10,7 +10,7 @@ require (
|
|||
github.com/fluxcd/pkg/apis/event v0.4.1
|
||||
github.com/fluxcd/pkg/apis/kustomize v1.0.0
|
||||
github.com/fluxcd/pkg/apis/meta v1.0.0
|
||||
github.com/fluxcd/pkg/runtime v0.33.0
|
||||
github.com/fluxcd/pkg/runtime v0.35.0
|
||||
github.com/fluxcd/pkg/ssa v0.26.0
|
||||
github.com/fluxcd/source-controller/api v0.36.1
|
||||
github.com/go-logr/logr v1.2.3
|
||||
|
@ -27,7 +27,7 @@ require (
|
|||
k8s.io/cli-runtime v0.26.3
|
||||
k8s.io/client-go v0.26.3
|
||||
sigs.k8s.io/cli-utils v0.34.0
|
||||
sigs.k8s.io/controller-runtime v0.14.5
|
||||
sigs.k8s.io/controller-runtime v0.14.6
|
||||
sigs.k8s.io/kustomize/api v0.12.1
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
)
|
||||
|
|
8
go.sum
8
go.sum
|
@ -177,8 +177,8 @@ github.com/fluxcd/pkg/apis/kustomize v1.0.0 h1:5T2b/mRZiGWtP7fvSU8gZOApIc06H6SdL
|
|||
github.com/fluxcd/pkg/apis/kustomize v1.0.0/go.mod h1:XaDYlKxrf9D2zZWcZ0BnSIqGtcm8mdNtJGzZWYjCnQo=
|
||||
github.com/fluxcd/pkg/apis/meta v1.0.0 h1:i9IGHd/VNEZELX7mepkiYFbJxs2J5znaB4cN9z2nPm8=
|
||||
github.com/fluxcd/pkg/apis/meta v1.0.0/go.mod h1:04ZdpZYm1x+aL93K4daNHW1UX6E8K7Gyf5za9OhrE+U=
|
||||
github.com/fluxcd/pkg/runtime v0.33.0 h1:y6mFOj22mU/BXAxSTucTlT7vrWUjd0+iccK0pRN5CF0=
|
||||
github.com/fluxcd/pkg/runtime v0.33.0/go.mod h1:oDTerqMMtOQVNZeidwAPG7g/ai2xuidUduJzQh1IBVI=
|
||||
github.com/fluxcd/pkg/runtime v0.35.0 h1:9PYLcul8qdfLYQArcYpHe/QuMqyhAGGFN9F7uY/QVX4=
|
||||
github.com/fluxcd/pkg/runtime v0.35.0/go.mod h1:sAaSTH8RHj3Y99xj0AtAndDTe5cv0DP4enyLV62EO78=
|
||||
github.com/fluxcd/pkg/ssa v0.26.0 h1:xqAPF9wA4a3HVeUL1bMsrk1pJDjo5IEqL3+Vjts6vTM=
|
||||
github.com/fluxcd/pkg/ssa v0.26.0/go.mod h1:GEzdW/IkhD/EGvhRerwipp5IrLVkWjhVFsB4Y7MnnMI=
|
||||
github.com/fluxcd/source-controller/api v0.36.1 h1:/ul69kJNEwrFG1Cwk2P/GwgraIxOETCL+tP+zMtxTu8=
|
||||
|
@ -1159,8 +1159,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
|||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/cli-utils v0.34.0 h1:zCUitt54f0/MYj/ajVFnG6XSXMhpZ72O/3RewIchW8w=
|
||||
sigs.k8s.io/cli-utils v0.34.0/go.mod h1:EXyMwPMu9OL+LRnj0JEMsGG/fRvbgFadcVlSnE8RhFs=
|
||||
sigs.k8s.io/controller-runtime v0.14.5 h1:6xaWFqzT5KuAQ9ufgUaj1G/+C4Y1GRkhrxl+BJ9i+5s=
|
||||
sigs.k8s.io/controller-runtime v0.14.5/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
|
||||
sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA=
|
||||
sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/kustomize/api v0.12.1 h1:7YM7gW3kYBwtKvoY216ZzY+8hM+lV53LUayghNRJ0vM=
|
||||
|
|
33
main.go
33
main.go
|
@ -23,12 +23,14 @@ import (
|
|||
|
||||
flag "github.com/spf13/pflag"
|
||||
"helm.sh/helm/v3/pkg/kube"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||
"sigs.k8s.io/cli-utils/pkg/kstatus/polling"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
|
||||
|
||||
|
@ -36,21 +38,21 @@ import (
|
|||
"github.com/fluxcd/pkg/runtime/client"
|
||||
helper "github.com/fluxcd/pkg/runtime/controller"
|
||||
"github.com/fluxcd/pkg/runtime/events"
|
||||
feathelper "github.com/fluxcd/pkg/runtime/features"
|
||||
"github.com/fluxcd/pkg/runtime/leaderelection"
|
||||
"github.com/fluxcd/pkg/runtime/logger"
|
||||
"github.com/fluxcd/pkg/runtime/metrics"
|
||||
"github.com/fluxcd/pkg/runtime/pprof"
|
||||
"github.com/fluxcd/pkg/runtime/probes"
|
||||
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
v2 "github.com/fluxcd/helm-controller/api/v2beta1"
|
||||
// +kubebuilder:scaffold:imports
|
||||
|
||||
"github.com/fluxcd/helm-controller/internal/controllers"
|
||||
"github.com/fluxcd/helm-controller/internal/features"
|
||||
intkube "github.com/fluxcd/helm-controller/internal/kube"
|
||||
"github.com/fluxcd/helm-controller/internal/oomwatch"
|
||||
feathelper "github.com/fluxcd/pkg/runtime/features"
|
||||
// +kubebuilder:scaffold:imports
|
||||
)
|
||||
|
||||
const controllerName = "helm-controller"
|
||||
|
@ -76,7 +78,6 @@ func main() {
|
|||
concurrent int
|
||||
requeueDependency time.Duration
|
||||
gracefulShutdownTimeout time.Duration
|
||||
watchAllNamespaces bool
|
||||
httpRetry int
|
||||
clientOptions client.Options
|
||||
kubeConfigOpts client.KubeConfigOptions
|
||||
|
@ -85,6 +86,7 @@ func main() {
|
|||
aclOptions acl.Options
|
||||
leaderElectionOptions leaderelection.Options
|
||||
rateLimiterOptions helper.RateLimiterOptions
|
||||
watchOptions helper.WatchOptions
|
||||
oomWatchInterval time.Duration
|
||||
oomWatchMemoryThreshold uint8
|
||||
oomWatchMaxMemoryPath string
|
||||
|
@ -103,8 +105,6 @@ func main() {
|
|||
"The interval at which failing dependencies are reevaluated.")
|
||||
flag.DurationVar(&gracefulShutdownTimeout, "graceful-shutdown-timeout", 600*time.Second,
|
||||
"The duration given to the reconciler to finish before forcibly stopping.")
|
||||
flag.BoolVar(&watchAllNamespaces, "watch-all-namespaces", true,
|
||||
"Watch for custom resources in all namespaces, if set to false it will only watch the runtime namespace.")
|
||||
flag.IntVar(&httpRetry, "http-retry", 9,
|
||||
"The maximum number of retries when failing to fetch artifacts over HTTP.")
|
||||
flag.StringVar(&intkube.DefaultServiceAccountName, "default-service-account", "",
|
||||
|
@ -125,6 +125,7 @@ func main() {
|
|||
rateLimiterOptions.BindFlags(flag.CommandLine)
|
||||
kubeConfigOpts.BindFlags(flag.CommandLine)
|
||||
featureGates.BindFlags(flag.CommandLine)
|
||||
watchOptions.BindFlags(flag.CommandLine)
|
||||
|
||||
flag.Parse()
|
||||
|
||||
|
@ -141,10 +142,16 @@ func main() {
|
|||
crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...)
|
||||
|
||||
watchNamespace := ""
|
||||
if !watchAllNamespaces {
|
||||
if !watchOptions.AllNamespaces {
|
||||
watchNamespace = os.Getenv("RUNTIME_NAMESPACE")
|
||||
}
|
||||
|
||||
watchSelector, err := helper.GetWatchSelector(watchOptions)
|
||||
if err != nil {
|
||||
setupLog.Error(err, "unable to configure watch label selector for manager")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
var disableCacheFor []ctrlclient.Object
|
||||
shouldCache, err := features.Enabled(features.CacheSecretsAndConfigMaps)
|
||||
if err != nil {
|
||||
|
@ -155,6 +162,11 @@ func main() {
|
|||
disableCacheFor = append(disableCacheFor, &corev1.Secret{}, &corev1.ConfigMap{})
|
||||
}
|
||||
|
||||
leaderElectionId := fmt.Sprintf("%s-%s", controllerName, "leader-election")
|
||||
if watchOptions.LabelSelector != "" {
|
||||
leaderElectionId = leaderelection.GenerateID(leaderElectionId, watchOptions.LabelSelector)
|
||||
}
|
||||
|
||||
// set the managedFields owner for resources reconciled from Helm charts
|
||||
kube.ManagedFieldsManager = controllerName
|
||||
|
||||
|
@ -170,10 +182,15 @@ func main() {
|
|||
RenewDeadline: &leaderElectionOptions.RenewDeadline,
|
||||
RetryPeriod: &leaderElectionOptions.RetryPeriod,
|
||||
GracefulShutdownTimeout: &gracefulShutdownTimeout,
|
||||
LeaderElectionID: fmt.Sprintf("%s-leader-election", controllerName),
|
||||
LeaderElectionID: leaderElectionId,
|
||||
Namespace: watchNamespace,
|
||||
Logger: ctrl.Log,
|
||||
ClientDisableCacheFor: disableCacheFor,
|
||||
NewCache: ctrlcache.BuilderWithOptions(ctrlcache.Options{
|
||||
SelectorsByObject: ctrlcache.SelectorsByObject{
|
||||
&v2.HelmRelease{}: {Label: watchSelector},
|
||||
},
|
||||
}),
|
||||
})
|
||||
if err != nil {
|
||||
setupLog.Error(err, "unable to start manager")
|
||||
|
|
Loading…
Reference in New Issue