Add --cluster-status-update-frequency flag for karmada-agent (#272)

Signed-off-by: RainbowMango <renhongcai@huawei.com>
Hongcai Ren 2021-04-17 18:38:24 +08:00 committed by GitHub
parent 9584b44f28
commit 71f4ddeb81
5 changed files with 44 additions and 23 deletions

View File

@@ -27,6 +27,8 @@ spec:
- /bin/karmada-agent
- --karmada-kubeconfig=/etc/kubeconfig/karmada-kubeconfig
- --cluster-name={{member_cluster_name}}
- --cluster-status-update-frequency=10s
- --v=4
volumeMounts:
- name: kubeconfig
mountPath: /etc/kubeconfig

View File

@@ -74,7 +74,7 @@ func run(opts *options.Options, stopChan <-chan struct{}) error {
return err
}
setupControllers(controllerManager, controlPlaneRestConfig, opts.ClusterName, stopChan)
setupControllers(controllerManager, opts, stopChan)
// blocks until the stop channel is closed.
if err := controllerManager.Start(stopChan); err != nil {
@@ -85,16 +85,16 @@ func run(opts *options.Options, stopChan <-chan struct{}) error {
return nil
}
func setupControllers(mgr controllerruntime.Manager, controlPlaneRestConfig *restclient.Config, clusterName string, stopChan <-chan struct{}) {
func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) {
predicateFun := predicate.Funcs{
CreateFunc: func(createEvent event.CreateEvent) bool {
return createEvent.Meta.GetName() == clusterName
return createEvent.Meta.GetName() == opts.ClusterName
},
UpdateFunc: func(updateEvent event.UpdateEvent) bool {
return updateEvent.MetaOld.GetName() == clusterName
return updateEvent.MetaOld.GetName() == opts.ClusterName
},
DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
return deleteEvent.Meta.GetName() == clusterName
return deleteEvent.Meta.GetName() == opts.ClusterName
},
GenericFunc: func(genericEvent event.GenericEvent) bool {
return false
@@ -102,10 +102,11 @@ func setupControllers(mgr controllerruntime.Manager, controlPlaneRestConfig *res
}
clusterStatusController := &status.ClusterStatusController{
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor(status.ControllerName),
PredicateFunc: predicateFun,
ClusterClientSetFunc: util.NewClusterClientSetForAgent,
Client: mgr.GetClient(),
EventRecorder: mgr.GetEventRecorderFor(status.ControllerName),
PredicateFunc: predicateFun,
ClusterClientSetFunc: util.NewClusterClientSetForAgent,
ClusterStatusUpdateFrequency: opts.ClusterStatusUpdateFrequency,
}
if err := clusterStatusController.SetupWithManager(mgr); err != nil {
klog.Fatalf("Failed to setup cluster status controller: %v", err)
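
Since setupControllers now takes the whole Options struct, the event filter reads opts.ClusterName rather than a loose clusterName parameter. The same filter could be packaged as a small helper; the sketch below is illustrative only (the helper name and package are not part of this commit) and assumes the controller-runtime version used here, where events expose Meta and MetaOld:

package agent

import (
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// newClusterNamePredicate builds a predicate that only passes events whose
// object name matches the agent's own cluster, mirroring the inline
// predicate.Funcs in setupControllers.
func newClusterNamePredicate(clusterName string) predicate.Funcs {
	return predicate.Funcs{
		CreateFunc: func(e event.CreateEvent) bool {
			return e.Meta.GetName() == clusterName
		},
		UpdateFunc: func(e event.UpdateEvent) bool {
			return e.MetaOld.GetName() == clusterName
		},
		DeleteFunc: func(e event.DeleteEvent) bool {
			return e.Meta.GetName() == clusterName
		},
		// The agent never acts on generic events.
		GenericFunc: func(e event.GenericEvent) bool {
			return false
		},
	}
}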

View File

@@ -1,13 +1,23 @@
package options
import (
"time"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Options contains everything necessary to create and run controller-manager.
type Options struct {
KarmadaKubeConfig string
ClusterName string
// ClusterStatusUpdateFrequency is the frequency at which karmada-agent computes cluster status.
// If the cluster lease feature is not enabled, it is also the frequency at which karmada-agent posts
// cluster status to karmada-apiserver. In that case, be cautious when changing it; the value must work
// with ClusterMonitorGracePeriod (--cluster-monitor-grace-period) in karmada-controller-manager.
ClusterStatusUpdateFrequency metav1.Duration
}
// NewOptions builds a default set of agent options.
@@ -23,4 +33,5 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.KarmadaKubeConfig, "karmada-kubeconfig", o.KarmadaKubeConfig, "Path to karmada kubeconfig.")
fs.StringVar(&o.ClusterName, "cluster-name", o.ClusterName, "Name of the member cluster that the agent serves.")
fs.DurationVar(&o.ClusterStatusUpdateFrequency.Duration, "cluster-status-update-frequency", 10*time.Second, "Specifies how often karmada-agent posts cluster status to karmada-apiserver. Note: be cautious when changing it; the value must work with ClusterMonitorGracePeriod in karmada-controller-manager.")
}
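
The new option is registered with pflag's DurationVar, so on the command line it accepts any Go duration string such as 10s, 30s or 1m. A minimal, self-contained sketch of that parsing behaviour (program layout and variable names are illustrative, not code from this commit):

package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("karmada-agent", pflag.ExitOnError)

	// Mirrors AddFlags: default 10s, overridable as e.g. --cluster-status-update-frequency=30s.
	var frequency time.Duration
	fs.DurationVar(&frequency, "cluster-status-update-frequency", 10*time.Second,
		"Specifies how often karmada-agent posts cluster status to karmada-apiserver.")

	// Parse a sample command line instead of os.Args, just for the example.
	_ = fs.Parse([]string{"--cluster-status-update-frequency=30s"})

	fmt.Println(frequency) // 30s
}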

View File

@@ -7,6 +7,7 @@ import (
"time"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/component-base/logs"
@@ -146,6 +147,8 @@ func setupControllers(mgr controllerruntime.Manager, stopChan <-chan struct{}) {
EventRecorder: mgr.GetEventRecorderFor(status.ControllerName),
PredicateFunc: clusterPredicateFunc,
ClusterClientSetFunc: util.NewClusterClientSet,
// TODO(RainbowMango): Temporarily hard code the duration until we add flags for this.
ClusterStatusUpdateFrequency: metav1.Duration{Duration: 10 * time.Second},
}
if err := clusterStatusController.SetupWithManager(mgr); err != nil {
klog.Fatalf("Failed to setup cluster status controller: %v", err)
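
On the karmada-controller-manager side the interval stays hard-coded for now (see the TODO), wrapped in metav1.Duration, a thin wrapper around time.Duration that serializes as a duration string. A small illustrative sketch of that wrapper (not code from this commit):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	d := metav1.Duration{Duration: 10 * time.Second}

	// metav1.Duration marshals to its string form, which is why it is
	// convenient for API types and serialized config.
	out, _ := json.Marshal(d)
	fmt.Println(string(out)) // "10s"

	// The embedded time.Duration is used directly where plain durations are needed.
	fmt.Println(d.Duration) // 10s
}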

View File

@@ -3,13 +3,12 @@ package status
import (
"context"
"net/http"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
@@ -39,6 +38,11 @@ type ClusterStatusController struct {
EventRecorder record.EventRecorder
PredicateFunc predicate.Predicate
ClusterClientSetFunc func(c *v1alpha1.Cluster, client client.Client) (*util.ClusterClient, error)
// ClusterStatusUpdateFrequency is the frequency at which the controller computes cluster status.
// If the cluster lease feature is not enabled, it is also the frequency at which the controller posts
// cluster status to karmada-apiserver.
ClusterStatusUpdateFrequency metav1.Duration
}
// Reconcile syncs status of the given member cluster.
@@ -137,7 +141,7 @@ func (c *ClusterStatusController) updateStatusIfNeeded(cluster *v1alpha1.Cluster
}
}
return controllerruntime.Result{RequeueAfter: 10 * time.Second}, nil
return controllerruntime.Result{RequeueAfter: c.ClusterStatusUpdateFrequency.Duration}, nil
}
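
This is where the flag takes effect: instead of always requeueing after 10 seconds, the controller asks controller-runtime to reconcile the cluster again after the configured ClusterStatusUpdateFrequency, which is what turns the option into a periodic status sync. A minimal sketch of the pattern (the reconciler type is illustrative, not the controller's real definition, and it assumes the pre-context-aware Reconcile signature used in this codebase):

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	controllerruntime "sigs.k8s.io/controller-runtime"
)

// periodicReconciler stands in for ClusterStatusController to show how a
// configured metav1.Duration becomes the requeue interval.
type periodicReconciler struct {
	UpdateFrequency metav1.Duration
}

func (r *periodicReconciler) Reconcile(req controllerruntime.Request) (controllerruntime.Result, error) {
	// ... collect and update cluster status here ...

	// Returning RequeueAfter makes controller-runtime reconcile this object
	// again after the interval, even if no watch event arrives in between.
	return controllerruntime.Result{RequeueAfter: r.UpdateFrequency.Duration}, nil
}

// Defaulting to 10s mirrors the flag's default in the agent options.
func defaultFrequency() metav1.Duration {
	return metav1.Duration{Duration: 10 * time.Second}
}
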
func getClusterHealthStatus(clusterClient *util.ClusterClient) (online, healthy bool) {
@@ -166,29 +170,29 @@ func healthEndpointCheck(client *kubernetes.Clientset, path string) (int, error)
return healthStatus, resp.Error()
}
func generateReadyCondition(online, healthy bool) []v1.Condition {
var conditions []v1.Condition
currentTime := v1.Now()
func generateReadyCondition(online, healthy bool) []metav1.Condition {
var conditions []metav1.Condition
currentTime := metav1.Now()
newClusterOfflineCondition := v1.Condition{
newClusterOfflineCondition := metav1.Condition{
Type: v1alpha1.ClusterConditionReady,
Status: v1.ConditionFalse,
Status: metav1.ConditionFalse,
Reason: clusterNotReachableReason,
Message: clusterNotReachableMsg,
LastTransitionTime: currentTime,
}
newClusterReadyCondition := v1.Condition{
newClusterReadyCondition := metav1.Condition{
Type: v1alpha1.ClusterConditionReady,
Status: v1.ConditionTrue,
Status: metav1.ConditionTrue,
Reason: clusterReady,
Message: clusterHealthy,
LastTransitionTime: currentTime,
}
newClusterNotReadyCondition := v1.Condition{
newClusterNotReadyCondition := metav1.Condition{
Type: v1alpha1.ClusterConditionReady,
Status: v1.ConditionFalse,
Status: metav1.ConditionFalse,
Reason: clusterNotReady,
Message: clusterUnhealthy,
LastTransitionTime: currentTime,
@@ -248,7 +252,7 @@ func getAPIEnablements(clusterClient *util.ClusterClient) ([]v1alpha1.APIEnablem
func getNodeSummary(clusterClient *util.ClusterClient) (v1alpha1.NodeSummary, error) {
var nodeSummary = v1alpha1.NodeSummary{}
nodeList, err := clusterClient.KubeClient.CoreV1().Nodes().List(context.TODO(), v1.ListOptions{})
nodeList, err := clusterClient.KubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nodeSummary, err
}
@@ -264,7 +268,7 @@ func getNodeSummary(clusterClient *util.ClusterClient) (v1alpha1.NodeSummary, er
allocatable := getClusterAllocatable(nodeList)
podList, err := clusterClient.KubeClient.CoreV1().Pods("").List(context.TODO(), v1.ListOptions{})
podList, err := clusterClient.KubeClient.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nodeSummary, err
}