Merge pull request #1781 from Garrybest/pr_clusterlifecycle
enhance cluster lifecycle management: add taints to clusters that have been unhealthy for a period of time
Commit: be5755daaf
@@ -330,18 +330,19 @@ func registerWithControlPlaneAPIServer(controlPlaneRestConfig, clusterRestConfig
 }

 func generateClusterInControllerPlane(controlPlaneRestConfig *restclient.Config, opts *options.Options, impersonatorSecret corev1.Secret) (*clusterv1alpha1.Cluster, error) {
-	clusterObj := &clusterv1alpha1.Cluster{}
-	clusterObj.Name = opts.ClusterName
-	clusterObj.Spec.SyncMode = clusterv1alpha1.Pull
-	clusterObj.Spec.APIEndpoint = opts.ClusterAPIEndpoint
-	clusterObj.Spec.ProxyURL = opts.ProxyServerAddress
-	clusterObj.Spec.ImpersonatorSecretRef = &clusterv1alpha1.LocalSecretReference{
+	clusterObj := &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: opts.ClusterName}}
+	mutateFunc := func(cluster *clusterv1alpha1.Cluster) {
+		cluster.Spec.SyncMode = clusterv1alpha1.Pull
+		cluster.Spec.APIEndpoint = opts.ClusterAPIEndpoint
+		cluster.Spec.ProxyURL = opts.ProxyServerAddress
+		cluster.Spec.ImpersonatorSecretRef = &clusterv1alpha1.LocalSecretReference{
 			Namespace: impersonatorSecret.Namespace,
 			Name:      impersonatorSecret.Name,
 		}
+	}

 	controlPlaneKarmadaClient := karmadaclientset.NewForConfigOrDie(controlPlaneRestConfig)
-	cluster, err := util.CreateOrUpdateClusterObject(controlPlaneKarmadaClient, clusterObj)
+	cluster, err := util.CreateOrUpdateClusterObject(controlPlaneKarmadaClient, clusterObj, mutateFunc)
 	if err != nil {
 		klog.Errorf("Failed to create cluster(%s) object, error: %v", clusterObj.Name, err)
 		return nil, err
@@ -177,6 +177,7 @@ func startClusterController(ctx controllerscontext.Context) (enabled bool, err e
 		ClusterMonitorPeriod:      opts.ClusterMonitorPeriod.Duration,
 		ClusterMonitorGracePeriod: opts.ClusterMonitorGracePeriod.Duration,
 		ClusterStartupGracePeriod: opts.ClusterStartupGracePeriod.Duration,
+		FailoverEvictionTimeout:   opts.FailoverEvictionTimeout.Duration,
 	}
 	if err := clusterController.SetupWithManager(mgr); err != nil {
 		return false, err
@@ -489,6 +490,7 @@ func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stop
 		ClusterMonitorGracePeriod:         opts.ClusterMonitorGracePeriod,
 		ClusterStartupGracePeriod:         opts.ClusterStartupGracePeriod,
 		ClusterStatusUpdateFrequency:      opts.ClusterStatusUpdateFrequency,
+		FailoverEvictionTimeout:           opts.FailoverEvictionTimeout,
 		ClusterLeaseDuration:              opts.ClusterLeaseDuration,
 		ClusterLeaseRenewIntervalFraction: opts.ClusterLeaseRenewIntervalFraction,
 		ClusterCacheSyncTimeout:           opts.ClusterCacheSyncTimeout,
@@ -38,6 +38,8 @@ type Options struct {
 	// ClusterStatusUpdateFrequency is the frequency that controller computes and report cluster status.
 	// It must work with ClusterMonitorGracePeriod(--cluster-monitor-grace-period) in karmada-controller-manager.
 	ClusterStatusUpdateFrequency metav1.Duration
+	// FailoverEvictionTimeout is the grace period for deleting scheduling result on failed clusters.
+	FailoverEvictionTimeout metav1.Duration
 	// ClusterLeaseDuration is a duration that candidates for a lease need to wait to force acquire it.
 	// This is measure against time of last observed lease RenewTime.
 	ClusterLeaseDuration metav1.Duration
@@ -138,6 +140,8 @@ func (o *Options) AddFlags(flags *pflag.FlagSet, allControllers, disabledByDefau
 		"Specifies the grace period of allowing a running cluster to be unresponsive before marking it unhealthy.")
 	flags.DurationVar(&o.ClusterStartupGracePeriod.Duration, "cluster-startup-grace-period", 60*time.Second,
 		"Specifies the grace period of allowing a cluster to be unresponsive during startup before marking it unhealthy.")
+	flags.DurationVar(&o.FailoverEvictionTimeout.Duration, "failover-eviction-timeout", 5*time.Minute,
+		"Specifies the grace period for deleting scheduling result on failed clusters.")
 	flags.StringVar(&o.SkippedPropagatingAPIs, "skipped-propagating-apis", "", "Semicolon separated resources that should be skipped from propagating in addition to the default skip list(cluster.karmada.io;policy.karmada.io;work.karmada.io). Supported formats are:\n"+
 		"<group> for skip resources with a specific API group(e.g. networking.k8s.io),\n"+
 		"<group>/<version> for skip resources with a specific API version(e.g. networking.k8s.io/v1beta1),\n"+
@@ -2,6 +2,14 @@ package v1alpha1

 const (
 	// TaintClusterUnscheduler will be added when cluster becomes unschedulable
-	// and removed when node becomes scheduable.
+	// and removed when cluster becomes scheduable.
 	TaintClusterUnscheduler = "cluster.karmada.io/unschedulable"
+
+	// TaintClusterNotReady will be added when cluster is not ready
+	// and removed when cluster becomes ready.
+	TaintClusterNotReady = "cluster.karmada.io/not-ready"
+	// TaintClusterUnreachable will be added when cluster becomes unreachable
+	// (corresponding to ClusterConditionReady status ConditionUnknown)
+	// and removed when cluster becomes reachable (ClusterConditionReady status ConditionTrue).
+	TaintClusterUnreachable = "cluster.karmada.io/unreachable"
 )
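As a side note on how these keys are meant to be consumed: a minimal, hypothetical helper (not part of this change; it assumes the clusterv1alpha1 import used above) that checks whether a member cluster currently carries either lifecycle taint could look like this:

	// hasLifecycleTaint is an illustrative sketch only, not code from this commit.
	func hasLifecycleTaint(cluster *clusterv1alpha1.Cluster) bool {
		for _, t := range cluster.Spec.Taints {
			if t.Key == clusterv1alpha1.TaintClusterNotReady || t.Key == clusterv1alpha1.TaintClusterUnreachable {
				return true
			}
		}
		return false
	}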
@@ -23,6 +23,7 @@ import (

 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
 	"github.com/karmada-io/karmada/pkg/util"
+	utilhelper "github.com/karmada-io/karmada/pkg/util/helper"
 	"github.com/karmada-io/karmada/pkg/util/names"
 )

@@ -36,6 +37,21 @@ const (
 	HealthUpdateRetry = 5
 )

+var (
+	// UnreachableTaintTemplate is the taint for when a cluster becomes unreachable.
+	UnreachableTaintTemplate = &corev1.Taint{
+		Key:    clusterv1alpha1.TaintClusterUnreachable,
+		Effect: corev1.TaintEffectNoExecute,
+	}
+
+	// NotReadyTaintTemplate is the taint for when a cluster is not ready for
+	// executing resources.
+	NotReadyTaintTemplate = &corev1.Taint{
+		Key:    clusterv1alpha1.TaintClusterNotReady,
+		Effect: corev1.TaintEffectNoExecute,
+	}
+)
+
 // Controller is to sync Cluster.
 type Controller struct {
 	client.Client // used to operate Cluster resources.
@@ -51,17 +67,63 @@ type Controller struct {
 	ClusterMonitorGracePeriod time.Duration
 	// When cluster is just created, e.g. agent bootstrap or cluster join, we give a longer grace period.
 	ClusterStartupGracePeriod time.Duration
+	// FailoverEvictionTimeout represents the grace period for deleting scheduling result on failed clusters.
+	FailoverEvictionTimeout time.Duration

 	// Per Cluster map stores last observed health together with a local time when it was observed.
-	clusterHealthMap sync.Map
+	clusterHealthMap *clusterHealthMap
 }

+type clusterHealthMap struct {
+	sync.RWMutex
+	clusterHealths map[string]*clusterHealthData
+}
+
+func newClusterHealthMap() *clusterHealthMap {
+	return &clusterHealthMap{
+		clusterHealths: make(map[string]*clusterHealthData),
+	}
+}
+
+// getDeepCopy - returns copy of cluster health data.
+// It prevents data being changed after retrieving it from the map.
+func (n *clusterHealthMap) getDeepCopy(name string) *clusterHealthData {
+	n.RLock()
+	defer n.RUnlock()
+	return n.clusterHealths[name].deepCopy()
+}
+
+func (n *clusterHealthMap) set(name string, data *clusterHealthData) {
+	n.Lock()
+	defer n.Unlock()
+	n.clusterHealths[name] = data
+}
+
+func (n *clusterHealthMap) delete(name string) {
+	n.Lock()
+	defer n.Unlock()
+	delete(n.clusterHealths, name)
+}
+
+type clusterHealthData struct {
+	probeTimestamp           metav1.Time
+	readyTransitionTimestamp metav1.Time
+	status                   *clusterv1alpha1.ClusterStatus
+	lease                    *coordinationv1.Lease
+}
+
+func (n *clusterHealthData) deepCopy() *clusterHealthData {
+	if n == nil {
+		return nil
+	}
+	return &clusterHealthData{
+		probeTimestamp:           n.probeTimestamp,
+		readyTransitionTimestamp: n.readyTransitionTimestamp,
+		status:                   n.status.DeepCopy(),
+		lease:                    n.lease.DeepCopy(),
+	}
+}
+
 // Reconcile performs a full reconciliation for the object referred to by the Request.
 // The Controller will requeue the Request to be processed again if an error is non-nil or
 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
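A quick usage sketch of the new map (illustrative only, assuming the types above plus the usual metav1 import): readers receive a deep copy, so a goroutine can mutate what it got back and then publish it with set() without racing other writers.

	healthMap := newClusterHealthMap()
	healthMap.set("member1", &clusterHealthData{probeTimestamp: metav1.Now()})

	// getDeepCopy hands back a private copy; mutating it never touches the stored entry.
	if health := healthMap.getDeepCopy("member1"); health != nil {
		health.probeTimestamp = metav1.Now()
		healthMap.set("member1", health) // publish the refreshed probe time explicitly
	}

	healthMap.delete("member1") // drop the entry once the cluster is removed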
@@ -92,7 +154,7 @@ func (c *Controller) Start(ctx context.Context) error {

 	// Incorporate the results of cluster health signal pushed from cluster-status-controller to master.
 	go wait.UntilWithContext(ctx, func(ctx context.Context) {
-		if err := c.monitorClusterHealth(); err != nil {
+		if err := c.monitorClusterHealth(ctx); err != nil {
 			klog.Errorf("Error monitoring cluster health: %v", err)
 		}
 	}, c.ClusterMonitorPeriod)
@@ -103,6 +165,7 @@ func (c *Controller) Start(ctx context.Context) error {

 // SetupWithManager creates a controller and register to controller manager.
 func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error {
+	c.clusterHealthMap = newClusterHealthMap()
 	return utilerrors.NewAggregate([]error{
 		controllerruntime.NewControllerManagedBy(mgr).For(&clusterv1alpha1.Cluster{}).Complete(c),
 		mgr.Add(c),
@@ -138,7 +201,7 @@ func (c *Controller) removeCluster(cluster *clusterv1alpha1.Cluster) (controller
 	}

 	// delete the health data from the map explicitly after we removing the cluster.
-	c.clusterHealthMap.Delete(cluster.Name)
+	c.clusterHealthMap.delete(cluster.Name)

 	return c.removeFinalizer(cluster)
 }
@@ -247,25 +310,27 @@ func (c *Controller) createExecutionSpace(cluster *clusterv1alpha1.Cluster) erro
 	return nil
 }

-func (c *Controller) monitorClusterHealth() error {
+func (c *Controller) monitorClusterHealth(ctx context.Context) (err error) {
 	clusterList := &clusterv1alpha1.ClusterList{}
-	if err := c.Client.List(context.TODO(), clusterList); err != nil {
+	if err = c.Client.List(ctx, clusterList); err != nil {
 		return err
 	}

 	clusters := clusterList.Items
 	for i := range clusters {
 		cluster := &clusters[i]
-		if err := wait.PollImmediate(MonitorRetrySleepTime, MonitorRetrySleepTime*HealthUpdateRetry, func() (bool, error) {
+		var observedReadyCondition, currentReadyCondition *metav1.Condition
+		if err = wait.PollImmediate(MonitorRetrySleepTime, MonitorRetrySleepTime*HealthUpdateRetry, func() (bool, error) {
 			// Cluster object may be changed in this function.
-			if err := c.tryUpdateClusterHealth(cluster); err == nil {
+			observedReadyCondition, currentReadyCondition, err = c.tryUpdateClusterHealth(ctx, cluster)
+			if err == nil {
 				return true, nil
 			}
 			clusterName := cluster.Name
-			if err := c.Get(context.TODO(), client.ObjectKey{Name: clusterName}, cluster); err != nil {
+			if err = c.Get(ctx, client.ObjectKey{Name: clusterName}, cluster); err != nil {
 				// If the cluster does not exist any more, we delete the health data from the map.
 				if apierrors.IsNotFound(err) {
-					c.clusterHealthMap.Delete(clusterName)
+					c.clusterHealthMap.delete(clusterName)
 					return true, nil
 				}
 				klog.Errorf("Getting a cluster to retry updating cluster health error: %v", clusterName, err)
@@ -276,6 +341,13 @@ func (c *Controller) monitorClusterHealth() error {
 			klog.Errorf("Update health of Cluster '%v' from Controller error: %v. Skipping.", cluster.Name, err)
 			continue
 		}
+
+		if currentReadyCondition != nil {
+			if err = c.processTaintBaseEviction(ctx, cluster, observedReadyCondition); err != nil {
+				klog.Errorf("Failed to process taint base eviction error: %v. Skipping.", err)
+				continue
+			}
+		}
 	}

 	return nil
@@ -283,15 +355,11 @@ func (c *Controller) monitorClusterHealth() error {

 // tryUpdateClusterHealth checks a given cluster's conditions and tries to update it.
 //nolint:gocyclo
-func (c *Controller) tryUpdateClusterHealth(cluster *clusterv1alpha1.Cluster) error {
+func (c *Controller) tryUpdateClusterHealth(ctx context.Context, cluster *clusterv1alpha1.Cluster) (*metav1.Condition, *metav1.Condition, error) {
 	// Step 1: Get the last cluster heath from `clusterHealthMap`.
-	var clusterHealth *clusterHealthData
-	if value, exists := c.clusterHealthMap.Load(cluster.Name); exists {
-		clusterHealth = value.(*clusterHealthData)
-	}
-
+	clusterHealth := c.clusterHealthMap.getDeepCopy(cluster.Name)
 	defer func() {
-		c.clusterHealthMap.Store(cluster.Name, clusterHealth)
+		c.clusterHealthMap.set(cluster.Name, clusterHealth)
 	}()

 	// Step 2: Get the cluster ready condition.
@@ -314,6 +382,7 @@ func (c *Controller) tryUpdateClusterHealth(cluster *clusterv1alpha1.Cluster) er
 			clusterHealth = &clusterHealthData{
 				status:                   &cluster.Status,
 				probeTimestamp:           cluster.CreationTimestamp,
+				readyTransitionTimestamp: cluster.CreationTimestamp,
 			}
 		}
 	} else {
@@ -331,19 +400,31 @@ func (c *Controller) tryUpdateClusterHealth(cluster *clusterv1alpha1.Cluster) er
 	}

 	// Step 4: Update the clusterHealth if necessary.
-	// If this condition have no difference from last condition, we leave everything as it is.
+	// If this condition has no difference from last condition, we leave everything as it is.
 	// Otherwise, we only update the probeTimestamp.
-	if clusterHealth == nil || !equality.Semantic.DeepEqual(savedCondition, currentReadyCondition) {
+	if clusterHealth == nil {
+		clusterHealth = &clusterHealthData{
+			status:                   cluster.Status.DeepCopy(),
+			probeTimestamp:           metav1.Now(),
+			readyTransitionTimestamp: metav1.Now(),
+		}
+	} else if !equality.Semantic.DeepEqual(savedCondition, currentReadyCondition) {
+		transitionTime := metav1.Now()
+		if savedCondition != nil && currentReadyCondition != nil && savedCondition.LastTransitionTime == currentReadyCondition.LastTransitionTime {
+			transitionTime = clusterHealth.readyTransitionTimestamp
+		}
 		clusterHealth = &clusterHealthData{
 			status:                   cluster.Status.DeepCopy(),
 			probeTimestamp:           metav1.Now(),
 			readyTransitionTimestamp: transitionTime,
 		}
 	}

 	// Always update the probe time if cluster lease is renewed.
 	// Note: If cluster-status-controller never posted the cluster status, but continues renewing the
 	// heartbeat leases, the cluster controller will assume the cluster is healthy and take no action.
 	observedLease := &coordinationv1.Lease{}
-	err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: util.NamespaceClusterLease, Name: cluster.Name}, observedLease)
+	err := c.Client.Get(ctx, client.ObjectKey{Namespace: util.NamespaceClusterLease, Name: cluster.Name}, observedLease)
 	if err == nil && (savedLease == nil || savedLease.Spec.RenewTime.Before(observedLease.Spec.RenewTime)) {
 		clusterHealth.lease = observedLease
 		clusterHealth.probeTimestamp = metav1.Now()
@@ -383,16 +464,49 @@ func (c *Controller) tryUpdateClusterHealth(cluster *clusterv1alpha1.Cluster) er
 		currentReadyCondition = meta.FindStatusCondition(cluster.Status.Conditions, clusterv1alpha1.ClusterConditionReady)

 		if !equality.Semantic.DeepEqual(currentReadyCondition, observedReadyCondition) {
-			if err := c.Status().Update(context.TODO(), cluster); err != nil {
+			if err := c.Status().Update(ctx, cluster); err != nil {
 				klog.Errorf("Error updating cluster %s: %v", cluster.Name, err)
-				return err
+				return observedReadyCondition, currentReadyCondition, err
 			}
 			clusterHealth = &clusterHealthData{
 				status:                   &cluster.Status,
 				probeTimestamp:           clusterHealth.probeTimestamp,
 				readyTransitionTimestamp: metav1.Now(),
 				lease:                    observedLease,
 			}
-			return nil
+			return observedReadyCondition, currentReadyCondition, nil
 		}
 	}
+	return observedReadyCondition, currentReadyCondition, nil
 }
+
+func (c *Controller) processTaintBaseEviction(ctx context.Context, cluster *clusterv1alpha1.Cluster, observedReadyCondition *metav1.Condition) error {
+	decisionTimestamp := metav1.Now()
+	clusterHealth := c.clusterHealthMap.getDeepCopy(cluster.Name)
+	if clusterHealth == nil {
+		return fmt.Errorf("health data doesn't exist for cluster %q", cluster.Name)
+	}
+	// Check eviction timeout against decisionTimestamp
+	switch observedReadyCondition.Status {
+	case metav1.ConditionFalse:
+		if decisionTimestamp.After(clusterHealth.readyTransitionTimestamp.Add(c.FailoverEvictionTimeout)) {
+			// We want to update the taint straight away if Cluster is already tainted with the UnreachableTaint
+			taintToAdd := *NotReadyTaintTemplate
+			if err := utilhelper.UpdateClusterControllerTaint(ctx, c.Client, []*corev1.Taint{&taintToAdd}, []*corev1.Taint{UnreachableTaintTemplate}, cluster); err != nil {
+				klog.ErrorS(err, "Failed to instantly update UnreachableTaint to NotReadyTaint, will try again in the next cycle.", "cluster", cluster.Name)
+			}
+		}
+	case metav1.ConditionUnknown:
+		if decisionTimestamp.After(clusterHealth.probeTimestamp.Add(c.FailoverEvictionTimeout)) {
+			// We want to update the taint straight away if Cluster is already tainted with the UnreachableTaint
+			taintToAdd := *UnreachableTaintTemplate
+			if err := utilhelper.UpdateClusterControllerTaint(ctx, c.Client, []*corev1.Taint{&taintToAdd}, []*corev1.Taint{NotReadyTaintTemplate}, cluster); err != nil {
+				klog.ErrorS(err, "Failed to instantly swap NotReadyTaint to UnreachableTaint, will try again in the next cycle.", "cluster", cluster.Name)
+			}
+		}
+	case metav1.ConditionTrue:
+		if err := utilhelper.UpdateClusterControllerTaint(ctx, c.Client, nil, []*corev1.Taint{NotReadyTaintTemplate, UnreachableTaintTemplate}, cluster); err != nil {
+			klog.ErrorS(err, "Failed to remove taints from cluster, will retry in next iteration.", "cluster", cluster.Name)
+		}
+	}
+	return nil
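To make the eviction timing concrete, here is a hypothetical walk-through of the ConditionUnknown branch above, assuming the default --failover-eviction-timeout of 5 minutes (the timestamps are made up for illustration):

	// The last successful health probe was 6 minutes ago and the Ready condition
	// has been Unknown since then.
	probeTimestamp := metav1.NewTime(time.Now().Add(-6 * time.Minute))
	failoverEvictionTimeout := 5 * time.Minute

	decisionTimestamp := metav1.Now()
	if decisionTimestamp.After(probeTimestamp.Add(failoverEvictionTimeout)) {
		// 6m > 5m, so the controller adds cluster.karmada.io/unreachable:NoExecute
		// and removes cluster.karmada.io/not-ready if it is present.
	}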
@@ -32,6 +32,8 @@ type Options struct {
 	// ClusterStatusUpdateFrequency is the frequency that controller computes and report cluster status.
 	// It must work with ClusterMonitorGracePeriod.
 	ClusterStatusUpdateFrequency metav1.Duration
+	// FailoverEvictionTimeout is the grace period for deleting scheduling result on failed clusters.
+	FailoverEvictionTimeout metav1.Duration
 	// ClusterLeaseDuration is a duration that candidates for a lease need to wait to force acquire it.
 	// This is measure against time of last observed lease RenewTime.
 	ClusterLeaseDuration metav1.Duration
@@ -84,11 +84,14 @@ func (c *Controller) ensureImpersonationSecretForCluster(cluster *clusterv1alpha
 	}

-	if cluster.Spec.ImpersonatorSecretRef == nil {
+	mutateFunc := func(cluster *clusterv1alpha1.Cluster) {
 		cluster.Spec.ImpersonatorSecretRef = &clusterv1alpha1.LocalSecretReference{
 			Namespace: impersonatorSecret.Namespace,
 			Name:      impersonatorSecret.Name,
 		}
-		_, err = util.CreateOrUpdateClusterObject(controlPlaneKarmadaClient, cluster)
 	}
+
+	_, err = util.CreateOrUpdateClusterObject(controlPlaneKarmadaClient, cluster, mutateFunc)
 	if err != nil {
 		return err
 	}
@@ -56,7 +56,7 @@ func CreateClusterObject(controlPlaneClient *karmadaclientset.Clientset, cluster

 // CreateOrUpdateClusterObject create cluster object in karmada control plane,
 // if cluster object has been existed and different from input clusterObj, update it.
-func CreateOrUpdateClusterObject(controlPlaneClient *karmadaclientset.Clientset, clusterObj *clusterv1alpha1.Cluster) (*clusterv1alpha1.Cluster, error) {
+func CreateOrUpdateClusterObject(controlPlaneClient *karmadaclientset.Clientset, clusterObj *clusterv1alpha1.Cluster, mutate func(*clusterv1alpha1.Cluster)) (*clusterv1alpha1.Cluster, error) {
 	cluster, exist, err := GetClusterWithKarmadaClient(controlPlaneClient, clusterObj.Name)
 	if err != nil {
 		return nil, err
@@ -67,8 +67,7 @@ func CreateOrUpdateClusterObject(controlPlaneClient *karmadaclientset.Clientset,
 			klog.Warningf("cluster(%s) already exist and newest", clusterObj.Name)
 			return cluster, nil
 		}

-		cluster.Spec = clusterObj.Spec
+		mutate(cluster)
 		cluster, err = updateCluster(controlPlaneClient, cluster)
 		if err != nil {
 			klog.Warningf("failed to create cluster(%s). error: %v", clusterObj.Name, err)
@@ -77,6 +76,7 @@ func CreateOrUpdateClusterObject(controlPlaneClient *karmadaclientset.Clientset,
 		return cluster, nil
 	}

+	mutate(clusterObj)
 	if cluster, err = createCluster(controlPlaneClient, clusterObj); err != nil {
 		klog.Warningf("failed to create cluster(%s). error: %v", clusterObj.Name, err)
 		return nil, err
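The new signature moves the "what to change" decision to the caller, so create and update share one mutation path. A hedged caller sketch (the karmadaClient variable and the endpoint are illustrative, not from this commit):

	desired := &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "member1"}}
	mutateFunc := func(cluster *clusterv1alpha1.Cluster) {
		cluster.Spec.SyncMode = clusterv1alpha1.Pull
		cluster.Spec.APIEndpoint = "https://member1.example.com:6443" // illustrative endpoint
	}

	// The same mutateFunc is applied whether the Cluster is being created or updated.
	cluster, err := util.CreateOrUpdateClusterObject(karmadaClient, desired, mutateFunc)
	if err != nil {
		return err
	}
	klog.Infof("cluster %s registered", cluster.Name)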
@@ -0,0 +1,62 @@
package helper

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
)

// TaintExists checks if the given taint exists in list of taints. Returns true if exists false otherwise.
func TaintExists(taints []corev1.Taint, taintToFind *corev1.Taint) bool {
	for _, taint := range taints {
		if taint.MatchTaint(taintToFind) {
			return true
		}
	}
	return false
}

// UpdateClusterControllerTaint add and remove some taints.
func UpdateClusterControllerTaint(ctx context.Context, client client.Client, taintsToAdd, taintsToRemove []*corev1.Taint, cluster *clusterv1alpha1.Cluster) error {
	var clusterTaintsToAdd, clusterTaintsToRemove []corev1.Taint
	// Find which taints need to be added.
	for _, taintToAdd := range taintsToAdd {
		if !TaintExists(cluster.Spec.Taints, taintToAdd) {
			clusterTaintsToAdd = append(clusterTaintsToAdd, *taintToAdd)
		}
	}
	// Find which taints need to be removed.
	for _, taintToRemove := range taintsToRemove {
		if TaintExists(cluster.Spec.Taints, taintToRemove) {
			clusterTaintsToRemove = append(clusterTaintsToRemove, *taintToRemove)
		}
	}
	// If no taints need to be added and removed, just return.
	if len(clusterTaintsToAdd) == 0 && len(clusterTaintsToRemove) == 0 {
		return nil
	}

	taints := make([]corev1.Taint, 0, len(cluster.Spec.Taints)+len(clusterTaintsToAdd)-len(clusterTaintsToRemove))
	// Remove taints which need to be removed.
	for i := range cluster.Spec.Taints {
		if !TaintExists(clusterTaintsToRemove, &cluster.Spec.Taints[i]) {
			taints = append(taints, cluster.Spec.Taints[i])
		}
	}

	// Add taints.
	for _, taintToAdd := range clusterTaintsToAdd {
		now := metav1.Now()
		taintToAdd.TimeAdded = &now
		taints = append(taints, taintToAdd)
	}

	cluster = cluster.DeepCopy()
	cluster.Spec.Taints = taints

	return client.Update(ctx, cluster)
}
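A minimal usage sketch of the helper (this mirrors what processTaintBaseEviction does in the cluster controller; ctx, the client c and cluster are assumed to be in scope, and the taint literals are restated here only for illustration):

	// Mark a cluster unreachable and clear any stale not-ready taint in one update.
	unreachable := corev1.Taint{Key: clusterv1alpha1.TaintClusterUnreachable, Effect: corev1.TaintEffectNoExecute}
	notReady := corev1.Taint{Key: clusterv1alpha1.TaintClusterNotReady, Effect: corev1.TaintEffectNoExecute}

	if err := UpdateClusterControllerTaint(ctx, c, []*corev1.Taint{&unreachable}, []*corev1.Taint{&notReady}, cluster); err != nil {
		klog.ErrorS(err, "failed to update cluster taints", "cluster", cluster.Name)
	}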
@@ -0,0 +1,134 @@
package helper

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"

	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
	"github.com/karmada-io/karmada/pkg/util/gclient"
)

var (
	unreachableTaintTemplate = &corev1.Taint{
		Key:    clusterv1alpha1.TaintClusterUnreachable,
		Effect: corev1.TaintEffectNoExecute,
	}

	notReadyTaintTemplate = &corev1.Taint{
		Key:    clusterv1alpha1.TaintClusterNotReady,
		Effect: corev1.TaintEffectNoExecute,
	}
)

func TestUpdateClusterControllerTaint(t *testing.T) {
	type args struct {
		taints         []corev1.Taint
		taintsToAdd    []*corev1.Taint
		taintsToRemove []*corev1.Taint
	}
	tests := []struct {
		name       string
		args       args
		wantTaints []corev1.Taint
		wantErr    bool
	}{
		{
			name: "ready condition from true to false",
			args: args{
				taints:         nil,
				taintsToAdd:    []*corev1.Taint{notReadyTaintTemplate.DeepCopy()},
				taintsToRemove: []*corev1.Taint{unreachableTaintTemplate.DeepCopy()},
			},
			wantTaints: []corev1.Taint{*notReadyTaintTemplate},
			wantErr:    false,
		},
		{
			name: "ready condition from true to unknown",
			args: args{
				taints:         nil,
				taintsToAdd:    []*corev1.Taint{unreachableTaintTemplate.DeepCopy()},
				taintsToRemove: []*corev1.Taint{notReadyTaintTemplate.DeepCopy()},
			},
			wantTaints: []corev1.Taint{*unreachableTaintTemplate},
			wantErr:    false,
		},
		{
			name: "ready condition from false to unknown",
			args: args{
				taints:         []corev1.Taint{*notReadyTaintTemplate},
				taintsToAdd:    []*corev1.Taint{unreachableTaintTemplate.DeepCopy()},
				taintsToRemove: []*corev1.Taint{notReadyTaintTemplate.DeepCopy()},
			},
			wantTaints: []corev1.Taint{*unreachableTaintTemplate},
			wantErr:    false,
		},
		{
			name: "ready condition from false to true",
			args: args{
				taints:         []corev1.Taint{*notReadyTaintTemplate},
				taintsToAdd:    []*corev1.Taint{},
				taintsToRemove: []*corev1.Taint{notReadyTaintTemplate.DeepCopy(), unreachableTaintTemplate.DeepCopy()},
			},
			wantTaints: nil,
			wantErr:    false,
		},
		{
			name: "ready condition from unknown to true",
			args: args{
				taints:         []corev1.Taint{*unreachableTaintTemplate},
				taintsToAdd:    []*corev1.Taint{},
				taintsToRemove: []*corev1.Taint{notReadyTaintTemplate.DeepCopy(), unreachableTaintTemplate.DeepCopy()},
			},
			wantTaints: nil,
			wantErr:    false,
		},
		{
			name: "ready condition from unknown to false",
			args: args{
				taints:         []corev1.Taint{*unreachableTaintTemplate},
				taintsToAdd:    []*corev1.Taint{notReadyTaintTemplate.DeepCopy()},
				taintsToRemove: []*corev1.Taint{unreachableTaintTemplate.DeepCopy()},
			},
			wantTaints: []corev1.Taint{*notReadyTaintTemplate},
			wantErr:    false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			cluster := &clusterv1alpha1.Cluster{
				ObjectMeta: metav1.ObjectMeta{Name: "member"},
				Spec: clusterv1alpha1.ClusterSpec{
					Taints: tt.args.taints,
				},
			}
			c := fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster).Build()

			if err := UpdateClusterControllerTaint(ctx, c, tt.args.taintsToAdd, tt.args.taintsToRemove, cluster); (err != nil) != tt.wantErr {
				t.Errorf("UpdateClusterControllerTaint() error = %v, wantErr %v", err, tt.wantErr)
			}

			if err := c.Get(ctx, client.ObjectKey{Name: cluster.Name}, cluster); err != nil {
				t.Fatalf("Failed to get cluster %s: %v", cluster.Name, err)
			}

			if len(cluster.Spec.Taints) != len(tt.wantTaints) {
				t.Errorf("Cluster gotTaints = %v, want %v", cluster.Spec.Taints, tt.wantTaints)
			}
			for i := range cluster.Spec.Taints {
				if cluster.Spec.Taints[i].Key != tt.wantTaints[i].Key ||
					cluster.Spec.Taints[i].Value != tt.wantTaints[i].Value ||
					cluster.Spec.Taints[i].Effect != tt.wantTaints[i].Effect {
					t.Errorf("Cluster gotTaints = %v, want %v", cluster.Spec.Taints, tt.wantTaints)
				}
			}
		})
	}
}
@@ -77,14 +77,11 @@ func validateClusterTaintEffect(effect *corev1.TaintEffect, allowEmpty bool, fld

 	allErrors := field.ErrorList{}
 	switch *effect {
-	// TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoExecute.
-	case corev1.TaintEffectNoSchedule:
-	// case corev1.TaintEffectNoSchedule, corev1.TaintEffectNoExecute:
+	case corev1.TaintEffectNoSchedule, corev1.TaintEffectNoExecute:
 	default:
 		validValues := []string{
 			string(corev1.TaintEffectNoSchedule),
-			// TODO: Uncomment this block when implement TaintEffectNoExecute.
-			// string(corev1.TaintEffectNoExecute),
+			string(corev1.TaintEffectNoExecute),
 		}
 		allErrors = append(allErrors, field.NotSupported(fldPath, *effect, validValues))
 	}
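With TaintEffectNoExecute now accepted, a Cluster carrying the taint written by the cluster controller passes validation. A hypothetical object for illustration only:

	cluster := &clusterv1alpha1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "member1"},
		Spec: clusterv1alpha1.ClusterSpec{
			Taints: []corev1.Taint{{
				Key:    clusterv1alpha1.TaintClusterUnreachable,
				Effect: corev1.TaintEffectNoExecute, // rejected by validation before this change
			}},
		},
	}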
@@ -1446,6 +1446,7 @@ sigs.k8s.io/controller-runtime/pkg/certwatcher
 sigs.k8s.io/controller-runtime/pkg/client
 sigs.k8s.io/controller-runtime/pkg/client/apiutil
 sigs.k8s.io/controller-runtime/pkg/client/config
+sigs.k8s.io/controller-runtime/pkg/client/fake
 sigs.k8s.io/controller-runtime/pkg/cluster
 sigs.k8s.io/controller-runtime/pkg/config
 sigs.k8s.io/controller-runtime/pkg/config/v1alpha1
@@ -0,0 +1,765 @@
|
|||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilrand "k8s.io/apimachinery/pkg/util/rand"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/testing"
|
||||
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/internal/objectutil"
|
||||
)
|
||||
|
||||
type versionedTracker struct {
|
||||
testing.ObjectTracker
|
||||
scheme *runtime.Scheme
|
||||
}
|
||||
|
||||
type fakeClient struct {
|
||||
tracker versionedTracker
|
||||
scheme *runtime.Scheme
|
||||
restMapper meta.RESTMapper
|
||||
schemeWriteLock sync.Mutex
|
||||
}
|
||||
|
||||
var _ client.WithWatch = &fakeClient{}
|
||||
|
||||
const (
|
||||
maxNameLength = 63
|
||||
randomLength = 5
|
||||
maxGeneratedNameLength = maxNameLength - randomLength
|
||||
)
|
||||
|
||||
// NewFakeClient creates a new fake client for testing.
|
||||
// You can choose to initialize it with a slice of runtime.Object.
|
||||
//
|
||||
// Deprecated: Please use NewClientBuilder instead.
|
||||
func NewFakeClient(initObjs ...runtime.Object) client.WithWatch {
|
||||
return NewClientBuilder().WithRuntimeObjects(initObjs...).Build()
|
||||
}
|
||||
|
||||
// NewFakeClientWithScheme creates a new fake client with the given scheme
|
||||
// for testing.
|
||||
// You can choose to initialize it with a slice of runtime.Object.
|
||||
//
|
||||
// Deprecated: Please use NewClientBuilder instead.
|
||||
func NewFakeClientWithScheme(clientScheme *runtime.Scheme, initObjs ...runtime.Object) client.WithWatch {
|
||||
return NewClientBuilder().WithScheme(clientScheme).WithRuntimeObjects(initObjs...).Build()
|
||||
}
|
||||
|
||||
// NewClientBuilder returns a new builder to create a fake client.
|
||||
func NewClientBuilder() *ClientBuilder {
|
||||
return &ClientBuilder{}
|
||||
}
|
||||
|
||||
// ClientBuilder builds a fake client.
|
||||
type ClientBuilder struct {
|
||||
scheme *runtime.Scheme
|
||||
restMapper meta.RESTMapper
|
||||
initObject []client.Object
|
||||
initLists []client.ObjectList
|
||||
initRuntimeObjects []runtime.Object
|
||||
}
|
||||
|
||||
// WithScheme sets this builder's internal scheme.
|
||||
// If not set, defaults to client-go's global scheme.Scheme.
|
||||
func (f *ClientBuilder) WithScheme(scheme *runtime.Scheme) *ClientBuilder {
|
||||
f.scheme = scheme
|
||||
return f
|
||||
}
|
||||
|
||||
// WithRESTMapper sets this builder's restMapper.
|
||||
// The restMapper is directly set as mapper in the Client. This can be used for example
|
||||
// with a meta.DefaultRESTMapper to provide a static rest mapping.
|
||||
// If not set, defaults to an empty meta.DefaultRESTMapper.
|
||||
func (f *ClientBuilder) WithRESTMapper(restMapper meta.RESTMapper) *ClientBuilder {
|
||||
f.restMapper = restMapper
|
||||
return f
|
||||
}
|
||||
|
||||
// WithObjects can be optionally used to initialize this fake client with client.Object(s).
|
||||
func (f *ClientBuilder) WithObjects(initObjs ...client.Object) *ClientBuilder {
|
||||
f.initObject = append(f.initObject, initObjs...)
|
||||
return f
|
||||
}
|
||||
|
||||
// WithLists can be optionally used to initialize this fake client with client.ObjectList(s).
|
||||
func (f *ClientBuilder) WithLists(initLists ...client.ObjectList) *ClientBuilder {
|
||||
f.initLists = append(f.initLists, initLists...)
|
||||
return f
|
||||
}
|
||||
|
||||
// WithRuntimeObjects can be optionally used to initialize this fake client with runtime.Object(s).
|
||||
func (f *ClientBuilder) WithRuntimeObjects(initRuntimeObjs ...runtime.Object) *ClientBuilder {
|
||||
f.initRuntimeObjects = append(f.initRuntimeObjects, initRuntimeObjs...)
|
||||
return f
|
||||
}
|
||||
|
||||
// Build builds and returns a new fake client.
|
||||
func (f *ClientBuilder) Build() client.WithWatch {
|
||||
if f.scheme == nil {
|
||||
f.scheme = scheme.Scheme
|
||||
}
|
||||
if f.restMapper == nil {
|
||||
f.restMapper = meta.NewDefaultRESTMapper([]schema.GroupVersion{})
|
||||
}
|
||||
|
||||
tracker := versionedTracker{ObjectTracker: testing.NewObjectTracker(f.scheme, scheme.Codecs.UniversalDecoder()), scheme: f.scheme}
|
||||
for _, obj := range f.initObject {
|
||||
if err := tracker.Add(obj); err != nil {
|
||||
panic(fmt.Errorf("failed to add object %v to fake client: %w", obj, err))
|
||||
}
|
||||
}
|
||||
for _, obj := range f.initLists {
|
||||
if err := tracker.Add(obj); err != nil {
|
||||
panic(fmt.Errorf("failed to add list %v to fake client: %w", obj, err))
|
||||
}
|
||||
}
|
||||
for _, obj := range f.initRuntimeObjects {
|
||||
if err := tracker.Add(obj); err != nil {
|
||||
panic(fmt.Errorf("failed to add runtime object %v to fake client: %w", obj, err))
|
||||
}
|
||||
}
|
||||
return &fakeClient{
|
||||
tracker: tracker,
|
||||
scheme: f.scheme,
|
||||
restMapper: f.restMapper,
|
||||
}
|
||||
}
|
||||
|
||||
const trackerAddResourceVersion = "999"
|
||||
|
||||
func (t versionedTracker) Add(obj runtime.Object) error {
|
||||
var objects []runtime.Object
|
||||
if meta.IsListType(obj) {
|
||||
var err error
|
||||
objects, err = meta.ExtractList(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
objects = []runtime.Object{obj}
|
||||
}
|
||||
for _, obj := range objects {
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get accessor for object: %w", err)
|
||||
}
|
||||
if accessor.GetResourceVersion() == "" {
|
||||
// We use a "magic" value of 999 here because this field
|
||||
// is parsed as uint and 0 is already used in Update.
|
||||
// As we can't go lower, go very high instead so this can
|
||||
// be recognized
|
||||
accessor.SetResourceVersion(trackerAddResourceVersion)
|
||||
}
|
||||
|
||||
obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := t.ObjectTracker.Add(obj); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get accessor for object: %v", err)
|
||||
}
|
||||
if accessor.GetName() == "" {
|
||||
return apierrors.NewInvalid(
|
||||
obj.GetObjectKind().GroupVersionKind().GroupKind(),
|
||||
accessor.GetName(),
|
||||
field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")})
|
||||
}
|
||||
if accessor.GetResourceVersion() != "" {
|
||||
return apierrors.NewBadRequest("resourceVersion can not be set for Create requests")
|
||||
}
|
||||
accessor.SetResourceVersion("1")
|
||||
obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := t.ObjectTracker.Create(gvr, obj, ns); err != nil {
|
||||
accessor.SetResourceVersion("")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// convertFromUnstructuredIfNecessary will convert *unstructured.Unstructured for a GVK that is recognized
|
||||
// by the schema into whatever the schema produces with New() for said GVK.
|
||||
// This is required because the tracker unconditionally saves on manipulations, but it's List() implementation
|
||||
// tries to assign whatever it finds into a ListType it gets from schema.New() - Thus we have to ensure
|
||||
// we save as the very same type, otherwise subsequent List requests will fail.
|
||||
func convertFromUnstructuredIfNecessary(s *runtime.Scheme, o runtime.Object) (runtime.Object, error) {
|
||||
u, isUnstructured := o.(*unstructured.Unstructured)
|
||||
if !isUnstructured || !s.Recognizes(u.GroupVersionKind()) {
|
||||
return o, nil
|
||||
}
|
||||
|
||||
typed, err := s.New(u.GroupVersionKind())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("scheme recognizes %s but failed to produce an object for it: %w", u.GroupVersionKind().String(), err)
|
||||
}
|
||||
|
||||
unstructuredSerialized, err := json.Marshal(u)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to serialize %T: %w", unstructuredSerialized, err)
|
||||
}
|
||||
if err := json.Unmarshal(unstructuredSerialized, typed); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal the content of %T into %T: %w", u, typed, err)
|
||||
}
|
||||
|
||||
return typed, nil
|
||||
}
|
||||
|
||||
func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error {
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get accessor for object: %v", err)
|
||||
}
|
||||
|
||||
if accessor.GetName() == "" {
|
||||
return apierrors.NewInvalid(
|
||||
obj.GetObjectKind().GroupVersionKind().GroupKind(),
|
||||
accessor.GetName(),
|
||||
field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")})
|
||||
}
|
||||
|
||||
gvk := obj.GetObjectKind().GroupVersionKind()
|
||||
if gvk.Empty() {
|
||||
gvk, err = apiutil.GVKForObject(obj, t.scheme)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
oldObject, err := t.ObjectTracker.Get(gvr, ns, accessor.GetName())
|
||||
if err != nil {
|
||||
// If the resource is not found and the resource allows create on update, issue a
|
||||
// create instead.
|
||||
if apierrors.IsNotFound(err) && allowsCreateOnUpdate(gvk) {
|
||||
return t.Create(gvr, obj, ns)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
oldAccessor, err := meta.Accessor(oldObject)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the new object does not have the resource version set and it allows unconditional update,
|
||||
// default it to the resource version of the existing resource
|
||||
if accessor.GetResourceVersion() == "" && allowsUnconditionalUpdate(gvk) {
|
||||
accessor.SetResourceVersion(oldAccessor.GetResourceVersion())
|
||||
}
|
||||
if accessor.GetResourceVersion() != oldAccessor.GetResourceVersion() {
|
||||
return apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), errors.New("object was modified"))
|
||||
}
|
||||
if oldAccessor.GetResourceVersion() == "" {
|
||||
oldAccessor.SetResourceVersion("0")
|
||||
}
|
||||
intResourceVersion, err := strconv.ParseUint(oldAccessor.GetResourceVersion(), 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can not convert resourceVersion %q to int: %v", oldAccessor.GetResourceVersion(), err)
|
||||
}
|
||||
intResourceVersion++
|
||||
accessor.SetResourceVersion(strconv.FormatUint(intResourceVersion, 10))
|
||||
if !accessor.GetDeletionTimestamp().IsZero() && len(accessor.GetFinalizers()) == 0 {
|
||||
return t.ObjectTracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName())
|
||||
}
|
||||
obj, err = convertFromUnstructuredIfNecessary(t.scheme, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return t.ObjectTracker.Update(gvr, obj, ns)
|
||||
}
|
||||
|
||||
func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object) error {
|
||||
gvr, err := getGVRFromObject(obj, c.scheme)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o, err := c.tracker.Get(gvr, key.Namespace, key.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gvk, err := apiutil.GVKForObject(obj, c.scheme)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ta, err := meta.TypeAccessor(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ta.SetKind(gvk.Kind)
|
||||
ta.SetAPIVersion(gvk.GroupVersion().String())
|
||||
|
||||
j, err := json.Marshal(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
decoder := scheme.Codecs.UniversalDecoder()
|
||||
zero(obj)
|
||||
_, _, err = decoder.Decode(j, nil, obj)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeClient) Watch(ctx context.Context, list client.ObjectList, opts ...client.ListOption) (watch.Interface, error) {
|
||||
gvk, err := apiutil.GVKForObject(list, c.scheme)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if strings.HasSuffix(gvk.Kind, "List") {
|
||||
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
|
||||
}
|
||||
|
||||
listOpts := client.ListOptions{}
|
||||
listOpts.ApplyOptions(opts)
|
||||
|
||||
gvr, _ := meta.UnsafeGuessKindToResource(gvk)
|
||||
return c.tracker.Watch(gvr, listOpts.Namespace)
|
||||
}
|
||||
|
||||
func (c *fakeClient) List(ctx context.Context, obj client.ObjectList, opts ...client.ListOption) error {
|
||||
gvk, err := apiutil.GVKForObject(obj, c.scheme)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
originalKind := gvk.Kind
|
||||
|
||||
if strings.HasSuffix(gvk.Kind, "List") {
|
||||
gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
|
||||
}
|
||||
|
||||
if _, isUnstructuredList := obj.(*unstructured.UnstructuredList); isUnstructuredList && !c.scheme.Recognizes(gvk) {
|
||||
// We need to register the ListKind with UnstructuredList:
|
||||
// https://github.com/kubernetes/kubernetes/blob/7b2776b89fb1be28d4e9203bdeec079be903c103/staging/src/k8s.io/client-go/dynamic/fake/simple.go#L44-L51
|
||||
c.schemeWriteLock.Lock()
|
||||
c.scheme.AddKnownTypeWithName(gvk.GroupVersion().WithKind(gvk.Kind+"List"), &unstructured.UnstructuredList{})
|
||||
c.schemeWriteLock.Unlock()
|
||||
}
|
||||
|
||||
listOpts := client.ListOptions{}
|
||||
listOpts.ApplyOptions(opts)
|
||||
|
||||
gvr, _ := meta.UnsafeGuessKindToResource(gvk)
|
||||
o, err := c.tracker.List(gvr, gvk, listOpts.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ta, err := meta.TypeAccessor(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ta.SetKind(originalKind)
|
||||
ta.SetAPIVersion(gvk.GroupVersion().String())
|
||||
|
||||
j, err := json.Marshal(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
decoder := scheme.Codecs.UniversalDecoder()
|
||||
zero(obj)
|
||||
_, _, err = decoder.Decode(j, nil, obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if listOpts.LabelSelector != nil {
|
||||
objs, err := meta.ExtractList(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filteredObjs, err := objectutil.FilterWithLabels(objs, listOpts.LabelSelector)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = meta.SetList(obj, filteredObjs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *fakeClient) Scheme() *runtime.Scheme {
|
||||
return c.scheme
|
||||
}
|
||||
|
||||
func (c *fakeClient) RESTMapper() meta.RESTMapper {
|
||||
return c.restMapper
|
||||
}
|
||||
|
||||
func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
|
||||
createOptions := &client.CreateOptions{}
|
||||
createOptions.ApplyOptions(opts)
|
||||
|
||||
for _, dryRunOpt := range createOptions.DryRun {
|
||||
if dryRunOpt == metav1.DryRunAll {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
gvr, err := getGVRFromObject(obj, c.scheme)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if accessor.GetName() == "" && accessor.GetGenerateName() != "" {
|
||||
base := accessor.GetGenerateName()
|
||||
if len(base) > maxGeneratedNameLength {
|
||||
base = base[:maxGeneratedNameLength]
|
||||
}
|
||||
accessor.SetName(fmt.Sprintf("%s%s", base, utilrand.String(randomLength)))
|
||||
}
|
||||
|
||||
return c.tracker.Create(gvr, obj, accessor.GetNamespace())
|
||||
}
|
||||
|
||||
func (c *fakeClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
|
||||
gvr, err := getGVRFromObject(obj, c.scheme)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
delOptions := client.DeleteOptions{}
|
||||
delOptions.ApplyOptions(opts)
|
||||
|
||||
// Check the ResourceVersion if that Precondition was specified.
|
||||
if delOptions.Preconditions != nil && delOptions.Preconditions.ResourceVersion != nil {
|
||||
name := accessor.GetName()
|
||||
dbObj, err := c.tracker.Get(gvr, accessor.GetNamespace(), name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
oldAccessor, err := meta.Accessor(dbObj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
actualRV := oldAccessor.GetResourceVersion()
|
||||
expectRV := *delOptions.Preconditions.ResourceVersion
|
||||
if actualRV != expectRV {
|
||||
msg := fmt.Sprintf(
|
||||
"the ResourceVersion in the precondition (%s) does not match the ResourceVersion in record (%s). "+
|
||||
"The object might have been modified",
|
||||
expectRV, actualRV)
|
||||
return apierrors.NewConflict(gvr.GroupResource(), name, errors.New(msg))
|
||||
}
|
||||
}
|
||||
|
||||
return c.deleteObject(gvr, accessor)
|
||||
}
|
||||
|
||||
func (c *fakeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {
|
||||
gvk, err := apiutil.GVKForObject(obj, c.scheme)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dcOptions := client.DeleteAllOfOptions{}
|
||||
dcOptions.ApplyOptions(opts)
|
||||
|
||||
gvr, _ := meta.UnsafeGuessKindToResource(gvk)
|
||||
o, err := c.tracker.List(gvr, gvk, dcOptions.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
objs, err := meta.ExtractList(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filteredObjs, err := objectutil.FilterWithLabels(objs, dcOptions.LabelSelector)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, o := range filteredObjs {
|
||||
accessor, err := meta.Accessor(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = c.deleteObject(gvr, accessor)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *fakeClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
|
||||
updateOptions := &client.UpdateOptions{}
|
||||
updateOptions.ApplyOptions(opts)
|
||||
|
||||
for _, dryRunOpt := range updateOptions.DryRun {
|
||||
if dryRunOpt == metav1.DryRunAll {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
gvr, err := getGVRFromObject(obj, c.scheme)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.tracker.Update(gvr, obj, accessor.GetNamespace())
|
||||
}
|
||||
|
||||
func (c *fakeClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
|
||||
patchOptions := &client.PatchOptions{}
|
||||
patchOptions.ApplyOptions(opts)
|
||||
|
||||
for _, dryRunOpt := range patchOptions.DryRun {
|
||||
if dryRunOpt == metav1.DryRunAll {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
gvr, err := getGVRFromObject(obj, c.scheme)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
accessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := patch.Data(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
reaction := testing.ObjectReaction(c.tracker)
|
||||
handled, o, err := reaction(testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !handled {
|
||||
panic("tracker could not handle patch method")
|
||||
}
|
||||
|
||||
gvk, err := apiutil.GVKForObject(obj, c.scheme)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ta, err := meta.TypeAccessor(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ta.SetKind(gvk.Kind)
|
||||
ta.SetAPIVersion(gvk.GroupVersion().String())
|
||||
|
||||
j, err := json.Marshal(o)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
decoder := scheme.Codecs.UniversalDecoder()
|
||||
zero(obj)
|
||||
_, _, err = decoder.Decode(j, nil, obj)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *fakeClient) Status() client.StatusWriter {
|
||||
return &fakeStatusWriter{client: c}
|
||||
}
|
||||
|
||||
func (c *fakeClient) deleteObject(gvr schema.GroupVersionResource, accessor metav1.Object) error {
|
||||
old, err := c.tracker.Get(gvr, accessor.GetNamespace(), accessor.GetName())
|
||||
if err == nil {
|
||||
oldAccessor, err := meta.Accessor(old)
|
||||
if err == nil {
|
||||
if len(oldAccessor.GetFinalizers()) > 0 {
|
||||
now := metav1.Now()
|
||||
oldAccessor.SetDeletionTimestamp(&now)
|
||||
return c.tracker.Update(gvr, old, accessor.GetNamespace())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//TODO: implement propagation
|
||||
return c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName())
|
||||
}
|
||||
|
||||
func getGVRFromObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionResource, error) {
|
||||
gvk, err := apiutil.GVKForObject(obj, scheme)
|
||||
if err != nil {
|
||||
return schema.GroupVersionResource{}, err
|
||||
}
|
||||
gvr, _ := meta.UnsafeGuessKindToResource(gvk)
|
||||
return gvr, nil
|
||||
}
|
||||
|
||||
type fakeStatusWriter struct {
|
||||
client *fakeClient
|
||||
}
|
||||
|
||||
func (sw *fakeStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
|
||||
// TODO(droot): This results in full update of the obj (spec + status). Need
|
||||
// a way to update status field only.
|
||||
return sw.client.Update(ctx, obj, opts...)
|
||||
}
|
||||
|
||||
func (sw *fakeStatusWriter) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
|
||||
// TODO(droot): This results in full update of the obj (spec + status). Need
|
||||
// a way to update status field only.
|
||||
return sw.client.Patch(ctx, obj, patch, opts...)
|
||||
}
|
||||
|
||||
func allowsUnconditionalUpdate(gvk schema.GroupVersionKind) bool {
|
||||
switch gvk.Group {
|
||||
case "apps":
|
||||
switch gvk.Kind {
|
||||
case "ControllerRevision", "DaemonSet", "Deployment", "ReplicaSet", "StatefulSet":
|
||||
return true
|
||||
}
|
||||
case "autoscaling":
|
||||
switch gvk.Kind {
|
||||
case "HorizontalPodAutoscaler":
|
||||
return true
|
||||
}
|
||||
case "batch":
|
||||
switch gvk.Kind {
|
||||
case "CronJob", "Job":
|
||||
return true
|
||||
}
|
||||
case "certificates":
|
||||
switch gvk.Kind {
|
||||
case "Certificates":
|
||||
return true
|
||||
}
|
||||
case "flowcontrol":
|
||||
switch gvk.Kind {
|
||||
case "FlowSchema", "PriorityLevelConfiguration":
|
||||
return true
|
||||
}
|
||||
case "networking":
|
||||
switch gvk.Kind {
|
||||
case "Ingress", "IngressClass", "NetworkPolicy":
|
||||
return true
|
||||
}
|
||||
case "policy":
|
||||
switch gvk.Kind {
|
||||
case "PodSecurityPolicy":
|
||||
return true
|
||||
}
|
||||
case "rbac":
|
||||
switch gvk.Kind {
|
||||
case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding":
|
||||
return true
|
||||
}
|
||||
case "scheduling":
|
||||
switch gvk.Kind {
|
||||
case "PriorityClass":
|
||||
return true
|
||||
}
|
||||
case "settings":
|
||||
switch gvk.Kind {
|
||||
case "PodPreset":
|
||||
return true
|
||||
}
|
||||
case "storage":
|
||||
switch gvk.Kind {
|
||||
case "StorageClass":
|
||||
return true
|
||||
}
|
||||
case "":
|
||||
switch gvk.Kind {
|
||||
case "ConfigMap", "Endpoint", "Event", "LimitRange", "Namespace", "Node",
|
||||
"PersistentVolume", "PersistentVolumeClaim", "Pod", "PodTemplate",
|
||||
"ReplicationController", "ResourceQuota", "Secret", "Service",
|
||||
"ServiceAccount", "EndpointSlice":
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func allowsCreateOnUpdate(gvk schema.GroupVersionKind) bool {
|
||||
switch gvk.Group {
|
||||
case "coordination":
|
||||
switch gvk.Kind {
|
||||
case "Lease":
|
||||
return true
|
||||
}
|
||||
case "node":
|
||||
switch gvk.Kind {
|
||||
case "RuntimeClass":
|
||||
return true
|
||||
}
|
||||
case "rbac":
|
||||
switch gvk.Kind {
|
||||
case "ClusterRole", "ClusterRoleBinding", "Role", "RoleBinding":
|
||||
return true
|
||||
}
|
||||
case "":
|
||||
switch gvk.Kind {
|
||||
case "Endpoint", "Event", "LimitRange", "Service":
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// zero zeros the value of a pointer.
|
||||
func zero(x interface{}) {
|
||||
if x == nil {
|
||||
return
|
||||
}
|
||||
res := reflect.ValueOf(x).Elem()
|
||||
res.Set(reflect.Zero(res.Type()))
|
||||
}
|
|
@@ -0,0 +1,39 @@
|
|||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package fake provides a fake client for testing.
|
||||
|
||||
A fake client is backed by its simple object store indexed by GroupVersionResource.
|
||||
You can create a fake client with optional objects.
|
||||
|
||||
client := NewFakeClientWithScheme(scheme, initObjs...) // initObjs is a slice of runtime.Object
|
||||
|
||||
You can invoke the methods defined in the Client interface.
|
||||
|
||||
When in doubt, it's almost always better not to use this package and instead use
|
||||
envtest.Environment with a real client and API server.
|
||||
|
||||
WARNING: ⚠️ Current Limitations / Known Issues with the fake Client ⚠️
|
||||
- This client does not have a way to inject specific errors to test handled vs. unhandled errors.
|
||||
- There is some support for sub resources which can cause issues with tests if you're trying to update
|
||||
e.g. metadata and status in the same reconcile.
|
||||
- No OpenAPI validation is performed when creating or updating objects.
|
||||
- ObjectMeta's `Generation` and `ResourceVersion` don't behave properly, Patch or Update
|
||||
operations that rely on these fields will fail, or give false positives.
|
||||
|
||||
*/
|
||||
package fake
|