cleanup after adopt controller-runtime (#56)
parent 75d912aa5c
commit 3a1e271b80
--- a/cmd/controller-manager/app/controllermanager.go
+++ b/cmd/controller-manager/app/controllermanager.go
@@ -1,13 +1,9 @@
 package app
 
 import (
-	"context"
-	"errors"
 	"flag"
 	"fmt"
-	"io/ioutil"
 	"os"
-	"strings"
 
 	"github.com/spf13/cobra"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -19,7 +15,6 @@ import (
 	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime"
 
-	"github.com/huawei-cloudnative/karmada/cmd/controller-manager/app/leaderelection"
 	"github.com/huawei-cloudnative/karmada/cmd/controller-manager/app/options"
 	memberclusterv1alpha1 "github.com/huawei-cloudnative/karmada/pkg/apis/membercluster/v1alpha1"
 	propagationv1alpha1 "github.com/huawei-cloudnative/karmada/pkg/apis/propagationstrategy/v1alpha1"
@@ -85,56 +80,20 @@ func Run(opts *options.Options, stopChan <-chan struct{}) error {
 		LeaderElectionID: "41db11fa.karmada.io",
 	})
 	if err != nil {
-		klog.Fatalf("failed to build controller manager: %v", err)
+		klog.Errorf("failed to build controller manager: %v", err)
+		return err
 	}
 
-	// TODO: To be removed.
-	startControllers(opts, stopChan)
-
 	setupControllers(controllerManager)
 
 	// blocks until the stop channel is closed.
 	if err := controllerManager.Start(stopChan); err != nil {
-		klog.Fatalf("controller manager exits unexpectedly: %v", err)
+		klog.Errorf("controller manager exits unexpectedly: %v", err)
+		return err
 	}
 
-	if len(opts.HostNamespace) == 0 {
-		// For in-cluster deployment set the namespace associated with the service account token
-		data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
-		if err != nil {
-			klog.Fatalf("An error occurred while attempting to discover the namespace from the service account: %v", err)
-		}
-		opts.HostNamespace = strings.TrimSpace(string(data))
-	}
-
-	// Validate if the namespace is configured
-	if len(opts.HostNamespace) == 0 {
-		klog.Fatalf("The namespace must be specified")
-	}
-
-	elector, err := leaderelection.NewLeaderElector(opts, startControllers)
-	if err != nil {
-		panic(err)
-	}
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	go func() {
-		select {
-		case <-stopChan:
-			cancel()
-		case <-ctx.Done():
-		}
-	}()
-
-	elector.Run(ctx)
-
-	klog.Errorf("lost lease")
-	return errors.New("lost lease")
-}
-
-func startControllers(opts *options.Options, stopChan <-chan struct{}) {
+	// never reach here
+	return nil
 }
 
 func setupControllers(mgr controllerruntime.Manager) {
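With controller-runtime adopted, leader election is configured on the manager itself (note the LeaderElectionID context line above) instead of the hand-rolled elector deleted below. A minimal sketch of that wiring, assuming controller-runtime v0.6-era APIs where Start takes a stop channel as in this diff; the standalone main and the GetConfigOrDie/SetupSignalHandler calls are illustrative, not the project's actual entry point:

	package main

	import (
		"os"

		"k8s.io/klog/v2"
		"sigs.k8s.io/controller-runtime"
	)

	func main() {
		// The manager runs leader election internally when enabled,
		// replacing the custom elector this commit removes.
		mgr, err := controllerruntime.NewManager(controllerruntime.GetConfigOrDie(), controllerruntime.Options{
			LeaderElection:   true,
			LeaderElectionID: "41db11fa.karmada.io",
		})
		if err != nil {
			klog.Errorf("failed to build controller manager: %v", err)
			os.Exit(1)
		}

		// SetupSignalHandler returns a channel that is closed on
		// SIGINT/SIGTERM; Start blocks until that channel closes.
		if err := mgr.Start(controllerruntime.SetupSignalHandler()); err != nil {
			klog.Errorf("controller manager exits unexpectedly: %v", err)
			os.Exit(1)
		}
	}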
--- a/cmd/controller-manager/app/leaderelection/leaderelection.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package leaderelection
-
-import (
-	"context"
-	"os"
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/uuid"
-	kubeclient "k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/kubernetes/scheme"
-	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
-	"k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/leaderelection"
-	"k8s.io/client-go/tools/leaderelection/resourcelock"
-	"k8s.io/client-go/tools/record"
-	"k8s.io/klog/v2"
-
-	"github.com/huawei-cloudnative/karmada/cmd/controller-manager/app/options"
-)
-
-// NewLeaderElector builds a leader elector.
-func NewLeaderElector(opts *options.Options, fnStartControllers func(*options.Options, <-chan struct{})) (*leaderelection.LeaderElector, error) {
-	const component = "controller-manager"
-	rest.AddUserAgent(opts.KubeConfig, "leader-election")
-	leaderElectionClient := kubeclient.NewForConfigOrDie(opts.KubeConfig)
-
-	// Prepare event clients.
-	broadcaster := record.NewBroadcaster()
-	broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: leaderElectionClient.CoreV1().Events(opts.HostNamespace)})
-	eventRecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: component})
-
-	// add a uniquifier so that two processes on the same host don't accidentally both become active
-	hostname, err := os.Hostname()
-	if err != nil {
-		klog.Infof("unable to get hostname: %v", err)
-		return nil, err
-	}
-	id := hostname + "_" + string(uuid.NewUUID())
-
-	rl, err := resourcelock.New(opts.LeaderElection.ResourceLock,
-		opts.HostNamespace,
-		component,
-		leaderElectionClient.CoreV1(),
-		leaderElectionClient.CoordinationV1(),
-		resourcelock.ResourceLockConfig{
-			Identity:      id,
-			EventRecorder: eventRecorder,
-		})
-	if err != nil {
-		klog.Infof("couldn't create resource lock: %v", err)
-		return nil, err
-	}
-
-	return leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
-		Lock:          rl,
-		LeaseDuration: opts.LeaderElection.LeaseDuration.Duration,
-		RenewDeadline: opts.LeaderElection.RenewDeadline.Duration,
-		RetryPeriod:   opts.LeaderElection.RetryPeriod.Duration,
-		Callbacks: leaderelection.LeaderCallbacks{
-			OnStartedLeading: func(ctx context.Context) {
-				klog.Info("promoted as leader")
-				stopChan := ctx.Done()
-				fnStartControllers(opts, stopChan)
-				<-stopChan
-			},
-			OnStoppedLeading: func() {
-				klog.Info("leader election lost")
-			},
-		},
-	})
-}
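The deleted helper was a thin wrapper over client-go's leader-election machinery, which the manager now drives internally. For comparison, a condensed sketch of that underlying pattern, assuming a Leases-based lock and client-go's customary 15s/10s/2s timings; runWithLeaderElection is a hypothetical name, not an API of either project:

	package leaderelection

	import (
		"context"
		"time"

		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/rest"
		"k8s.io/client-go/tools/leaderelection"
		"k8s.io/client-go/tools/leaderelection/resourcelock"
		"k8s.io/klog/v2"
	)

	// runWithLeaderElection blocks, calling start only once this process
	// holds the lock, and returns when the lease is lost or ctx is cancelled.
	func runWithLeaderElection(ctx context.Context, cfg *rest.Config, namespace, identity string, start func(context.Context)) {
		client := kubernetes.NewForConfigOrDie(cfg)
		rl, err := resourcelock.New(resourcelock.LeasesResourceLock,
			namespace, "controller-manager",
			client.CoreV1(), client.CoordinationV1(),
			resourcelock.ResourceLockConfig{Identity: identity})
		if err != nil {
			klog.Fatalf("couldn't create resource lock: %v", err)
		}
		leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
			Lock:          rl,
			LeaseDuration: 15 * time.Second,
			RenewDeadline: 10 * time.Second,
			RetryPeriod:   2 * time.Second,
			Callbacks: leaderelection.LeaderCallbacks{
				OnStartedLeading: start,
				OnStoppedLeading: func() { klog.Info("leader election lost") },
			},
		})
	}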