add replicas syncer for resources with HPA

Signed-off-by: lxtywypc <lxtywypc@gmail.com>
Author: lxtywypc <lxtywypc@gmail.com>
Date:   2023-09-20 17:29:34 +08:00
Parent: 5c77f4516e
Commit: 5ae8178ec3

5 changed files with 579 additions and 1 deletion

@@ -15,6 +15,7 @@ import (
"k8s.io/client-go/informers"
kubeclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/scale"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/term"
"k8s.io/klog/v2"
@@ -44,6 +45,7 @@ import (
metricsclient "github.com/karmada-io/karmada/pkg/controllers/federatedhpa/metrics"
"github.com/karmada-io/karmada/pkg/controllers/federatedresourcequota"
"github.com/karmada-io/karmada/pkg/controllers/gracefuleviction"
"github.com/karmada-io/karmada/pkg/controllers/hpareplicassyncer"
"github.com/karmada-io/karmada/pkg/controllers/mcs"
"github.com/karmada-io/karmada/pkg/controllers/namespace"
"github.com/karmada-io/karmada/pkg/controllers/status"
@@ -185,7 +187,7 @@ func Run(ctx context.Context, opts *options.Options) error {
var controllers = make(controllerscontext.Initializers)
// controllersDisabledByDefault is the set of controllers which is disabled by default
var controllersDisabledByDefault = sets.New("")
var controllersDisabledByDefault = sets.New("hpaReplicasSyncer")
func init() {
controllers["cluster"] = startClusterController
@@ -205,6 +207,7 @@ func init() {
controllers["applicationFailover"] = startApplicationFailoverController
controllers["federatedHorizontalPodAutoscaler"] = startFederatedHorizontalPodAutoscalerController
controllers["cronFederatedHorizontalPodAutoscaler"] = startCronFederatedHorizontalPodAutoscalerController
controllers["hpaReplicasSyncer"] = startHPAReplicasSyncerController
}
func startClusterController(ctx controllerscontext.Context) (enabled bool, err error) {
@@ -591,6 +594,26 @@ func startCronFederatedHorizontalPodAutoscalerController(ctx controllerscontext.
return true, nil
}
func startHPAReplicasSyncerController(ctx controllerscontext.Context) (enabled bool, err error) {
scaleKindResolver := scale.NewDiscoveryScaleKindResolver(ctx.KubeClientSet.Discovery())
scaleClient, err := scale.NewForConfig(ctx.Mgr.GetConfig(), ctx.Mgr.GetRESTMapper(), dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
if err != nil {
return false, err
}
hpaReplicasSyncer := hpareplicassyncer.HPAReplicasSyncer{
Client: ctx.Mgr.GetClient(),
RESTMapper: ctx.Mgr.GetRESTMapper(),
ScaleClient: scaleClient,
}
err = hpaReplicasSyncer.SetupWithManager(ctx.Mgr)
if err != nil {
return false, err
}
return true, nil
}
// setupControllers initialize controllers and setup one by one.
func setupControllers(mgr controllerruntime.Manager, opts *options.Options, stopChan <-chan struct{}) {
restConfig := mgr.GetConfig()
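Since hpaReplicasSyncer is added to controllersDisabledByDefault, the default "*" selection will not start it; it has to be named explicitly via the controller-manager's --controllers flag. Below is a minimal editor's sketch of that conventional enable/disable logic (the function and variable names are illustrative, not code from this commit):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// isControllerEnabled sketches the usual "--controllers" semantics: an explicit
// name enables a controller, a leading "-" disables it, and "*" enables
// everything except controllers that are disabled by default.
func isControllerEnabled(name string, disabledByDefault sets.Set[string], selectors []string) bool {
	star := false
	for _, s := range selectors {
		switch {
		case s == name:
			return true
		case s == "-"+name:
			return false
		case s == "*":
			star = true
		}
	}
	return star && !disabledByDefault.Has(name)
}

func main() {
	disabled := sets.New("hpaReplicasSyncer")
	fmt.Println(isControllerEnabled("hpaReplicasSyncer", disabled, []string{"*"}))                      // false
	fmt.Println(isControllerEnabled("hpaReplicasSyncer", disabled, []string{"*", "hpaReplicasSyncer"})) // true
}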

@@ -0,0 +1,148 @@
package hpareplicassyncer
import (
"context"
autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/scale"
"k8s.io/klog/v2"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
)
var hpaPredicate = predicate.Funcs{
CreateFunc: func(e event.CreateEvent) bool {
return false
},
UpdateFunc: func(e event.UpdateEvent) bool {
oldHPA, ok := e.ObjectOld.(*autoscalingv2.HorizontalPodAutoscaler)
if !ok {
return false
}
newHPA, ok := e.ObjectNew.(*autoscalingv2.HorizontalPodAutoscaler)
if !ok {
return false
}
return oldHPA.Status.CurrentReplicas != newHPA.Status.CurrentReplicas
},
DeleteFunc: func(e event.DeleteEvent) bool {
return false
},
}
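With this predicate, only HPA updates that change status.currentReplicas reach the reconciler; create and delete events are filtered out. As a quick illustration (an editor's sketch, not part of this change; oldHPA and newHPA are made-up names), predicate.Funcs dispatches update events to the UpdateFunc above:

oldHPA := &autoscalingv2.HorizontalPodAutoscaler{
	Status: autoscalingv2.HorizontalPodAutoscalerStatus{CurrentReplicas: 2},
}
newHPA := oldHPA.DeepCopy()
newHPA.Status.CurrentReplicas = 3

hpaPredicate.Update(event.UpdateEvent{ObjectOld: oldHPA, ObjectNew: newHPA}) // true: currentReplicas changed
hpaPredicate.Update(event.UpdateEvent{ObjectOld: oldHPA, ObjectNew: oldHPA}) // false: nothing relevant changed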
// HPAReplicasSyncer syncs replicas from the status of an HPA to the resource template.
type HPAReplicasSyncer struct {
Client client.Client
RESTMapper meta.RESTMapper
ScaleClient scale.ScalesGetter
}
// SetupWithManager creates a controller and registers it with the controller manager.
func (r *HPAReplicasSyncer) SetupWithManager(mgr controllerruntime.Manager) error {
return controllerruntime.NewControllerManagedBy(mgr).Named("replicas-syncer").
For(&autoscalingv2.HorizontalPodAutoscaler{}, builder.WithPredicates(hpaPredicate)).
Complete(r)
}
// Reconcile performs a full reconciliation for the object referred to by the Request.
// The Controller will requeue the Request to be processed again if an error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *HPAReplicasSyncer) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
klog.V(4).Infof("Reconciling for HPA %s/%s", req.Namespace, req.Name)
hpa := &autoscalingv2.HorizontalPodAutoscaler{}
err := r.Client.Get(ctx, req.NamespacedName, hpa)
if err != nil {
if apierrors.IsNotFound(err) {
return controllerruntime.Result{}, nil
}
return controllerruntime.Result{}, err
}
workloadGR, scale, err := r.getGroupResourceAndScaleForWorkloadFromHPA(ctx, hpa)
if err != nil {
return controllerruntime.Result{}, err
}
err = r.updateScaleIfNeed(ctx, workloadGR, scale.DeepCopy(), hpa)
if err != nil {
return controllerruntime.Result{}, err
}
// TODO(@lxtywypc): Add a finalizer to the HPA and remove it
// when the HPA is being deleted and the replicas have been synced.
return controllerruntime.Result{}, nil
}
// getGroupResourceAndScaleForWorkloadFromHPA parses the GroupResource and gets the Scale
// of the workload declared in spec.scaleTargetRef of the HPA.
func (r *HPAReplicasSyncer) getGroupResourceAndScaleForWorkloadFromHPA(ctx context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler,
) (schema.GroupResource, *autoscalingv1.Scale, error) {
gvk := schema.FromAPIVersionAndKind(hpa.Spec.ScaleTargetRef.APIVersion, hpa.Spec.ScaleTargetRef.Kind)
mapping, err := r.RESTMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
klog.Errorf("Failed to get group resource for resource(kind=%s, %s/%s): %v",
hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, err)
return schema.GroupResource{}, nil, err
}
gr := mapping.Resource.GroupResource()
scale, err := r.ScaleClient.Scales(hpa.Namespace).Get(ctx, gr, hpa.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
// If the scale of workload is not found, skip processing.
return gr, nil, nil
}
klog.Errorf("Failed to get scale for resource(kind=%s, %s/%s): %v",
hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, err)
return schema.GroupResource{}, nil, err
}
return gr, scale, nil
}
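For reference, here is a minimal sketch (not part of the commit; the mapper contents and variable names are illustrative) of how a scaleTargetRef's apiVersion/kind resolves to a GroupResource through the RESTMapper, using the same meta and schema calls as above. The Scale for that GroupResource is then fetched through the ScaleClient, as the function above does:

mapper := meta.NewDefaultRESTMapper(nil)
mapper.Add(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}, meta.RESTScopeNamespace)

gvk := schema.FromAPIVersionAndKind("apps/v1", "Deployment")
mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
	panic(err)
}
gr := mapping.Resource.GroupResource() // {Group: "apps", Resource: "deployments"}, i.e. "deployments.apps"
_ = gr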
// updateScaleIfNeed updates the scale of the workload on the Karmada control plane
// if the replicas declared in the resource template do not match
// the actual replicas in member clusters affected by the HPA.
func (r *HPAReplicasSyncer) updateScaleIfNeed(ctx context.Context, workloadGR schema.GroupResource, scale *autoscalingv1.Scale, hpa *autoscalingv2.HorizontalPodAutoscaler) error {
// If the scale of workload is not found, skip processing.
if scale == nil {
klog.V(4).Infof("Scale of resource(kind=%s, %s/%s) not found, the resource might have been removed, skip",
hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name)
return nil
}
if scale.Spec.Replicas != hpa.Status.CurrentReplicas {
oldReplicas := scale.Spec.Replicas
scale.Spec.Replicas = hpa.Status.CurrentReplicas
_, err := r.ScaleClient.Scales(hpa.Namespace).Update(ctx, workloadGR, scale, metav1.UpdateOptions{})
if err != nil {
klog.Errorf("Failed to try to sync scale for resource(kind=%s, %s/%s) from %d to %d: %v",
hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, oldReplicas, hpa.Status.CurrentReplicas, err)
return err
}
klog.V(4).Infof("Successfully synced scale for resource(kind=%s, %s/%s) from %d to %d",
hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, oldReplicas, hpa.Status.CurrentReplicas)
}
return nil
}

@@ -0,0 +1,325 @@
package hpareplicassyncer
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
scalefake "k8s.io/client-go/scale/fake"
coretesting "k8s.io/client-go/testing"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
workloadv1alpha1 "github.com/karmada-io/karmada/examples/customresourceinterpreter/apis/workload/v1alpha1"
"github.com/karmada-io/karmada/pkg/util/gclient"
)
func TestGetGroupResourceAndScaleForWorkloadFromHPA(t *testing.T) {
deployment := newDeployment("deployment-1", 1)
workload := newWorkload("workload-1", 1)
syncer := newHPAReplicasSyncer(deployment, workload)
cases := []struct {
name string
hpa *autoscalingv2.HorizontalPodAutoscaler
expectedError bool
expectedScale bool
expectedGR schema.GroupResource
}{
{
name: "normal case",
hpa: newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 0),
expectedError: false,
expectedScale: true,
expectedGR: schema.GroupResource{Group: appsv1.SchemeGroupVersion.Group, Resource: "deployments"},
},
{
name: "customized resource case",
hpa: newHPA(workloadv1alpha1.SchemeGroupVersion.String(), "Workload", "workload-1", 0),
expectedError: false,
expectedScale: true,
expectedGR: schema.GroupResource{Group: workloadv1alpha1.SchemeGroupVersion.Group, Resource: "workloads"},
},
{
name: "scale not found",
hpa: newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-2", 0),
expectedError: false,
expectedScale: false,
expectedGR: schema.GroupResource{Group: appsv1.SchemeGroupVersion.Group, Resource: "deployments"},
},
{
name: "resource not registered",
hpa: newHPA("fake/v1", "FakeWorkload", "fake-workload-1", 0),
expectedError: true,
expectedScale: false,
expectedGR: schema.GroupResource{},
},
}
for _, tt := range cases {
t.Run(tt.name, func(t *testing.T) {
gr, scale, err := syncer.getGroupResourceAndScaleForWorkloadFromHPA(context.TODO(), tt.hpa)
if tt.expectedError {
assert.NotEmpty(t, err)
return
}
assert.Empty(t, err)
if tt.expectedScale {
assert.NotEmpty(t, scale)
} else {
assert.Empty(t, scale)
}
assert.Equal(t, tt.expectedGR, gr)
})
}
}
func TestUpdateScaleIfNeed(t *testing.T) {
cases := []struct {
name string
object client.Object
gr schema.GroupResource
scale *autoscalingv1.Scale
hpa *autoscalingv2.HorizontalPodAutoscaler
expectedError bool
}{
{
name: "normal case",
object: newDeployment("deployment-1", 0),
gr: schema.GroupResource{Group: appsv1.SchemeGroupVersion.Group, Resource: "deployments"},
scale: newScale("deployment-1", 0),
hpa: newHPA(appsv1.SchemeGroupVersion.String(), "Deployment", "deployment-1", 3),
expectedError: false,
},
{
name: "custom resource case",
object: newWorkload("workload-1", 0),
gr: schema.GroupResource{Group: workloadv1alpha1.SchemeGroupVersion.Group, Resource: "workloads"},
scale: newScale("workload-1", 0),
hpa: newHPA(workloadv1alpha1.SchemeGroupVersion.String(), "Workload", "workload-1", 3),
expectedError: false,
},
{
name: "scale not found",
object: newDeployment("deployment-1", 0),
gr: schema.GroupResource{Group: "fake", Resource: "fakeworkloads"},
scale: newScale("fake-workload-1", 0),
hpa: newHPA("fake/v1", "FakeWorkload", "fake-workload-1", 3),
expectedError: true,
},
}
for _, tt := range cases {
t.Run(tt.name, func(t *testing.T) {
syncer := newHPAReplicasSyncer(tt.object)
err := syncer.updateScaleIfNeed(context.TODO(), tt.gr, tt.scale, tt.hpa)
if tt.expectedError {
assert.NotEmpty(t, err)
return
}
assert.Empty(t, err)
obj := &unstructured.Unstructured{}
obj.SetAPIVersion(tt.hpa.Spec.ScaleTargetRef.APIVersion)
obj.SetKind(tt.hpa.Spec.ScaleTargetRef.Kind)
err = syncer.Client.Get(context.TODO(), types.NamespacedName{Namespace: tt.scale.Namespace, Name: tt.scale.Name}, obj)
assert.Empty(t, err)
if err != nil {
return
}
scale, err := getScaleFromUnstructured(obj)
assert.Empty(t, err)
if err != nil {
return
}
assert.Equal(t, tt.hpa.Status.CurrentReplicas, scale.Spec.Replicas)
})
}
}
func newHPAReplicasSyncer(objs ...client.Object) *HPAReplicasSyncer {
scheme := gclient.NewSchema()
_ = workloadv1alpha1.AddToScheme(scheme)
fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()
fakeMapper := newMapper()
fakeScaleClient := &scalefake.FakeScaleClient{}
fakeScaleClient.AddReactor("get", "*", reactionFuncForGetting(fakeClient, fakeMapper))
fakeScaleClient.AddReactor("update", "*", reactionFuncForUpdating(fakeClient, fakeMapper))
return &HPAReplicasSyncer{
Client: fakeClient,
RESTMapper: fakeMapper,
ScaleClient: fakeScaleClient,
}
}
func reactionFuncForGetting(c client.Client, mapper meta.RESTMapper) coretesting.ReactionFunc {
return func(action coretesting.Action) (bool, runtime.Object, error) {
getAction, ok := action.(coretesting.GetAction)
if !ok {
return false, nil, fmt.Errorf("Not GET Action!")
}
obj, err := newUnstructured(getAction.GetResource(), mapper)
if err != nil {
return true, nil, err
}
nn := types.NamespacedName{Namespace: getAction.GetNamespace(), Name: getAction.GetName()}
err = c.Get(context.TODO(), nn, obj)
if err != nil {
return true, nil, err
}
scale, err := getScaleFromUnstructured(obj)
return true, scale, err
}
}
func newUnstructured(gvr schema.GroupVersionResource, mapper meta.RESTMapper) (*unstructured.Unstructured, error) {
gvk, err := mapper.KindFor(gvr)
if err != nil {
return nil, err
}
un := &unstructured.Unstructured{}
un.SetGroupVersionKind(gvk)
return un, nil
}
func getScaleFromUnstructured(obj *unstructured.Unstructured) (*autoscalingv1.Scale, error) {
replicas := int32(0)
spec, ok := obj.Object["spec"].(map[string]interface{})
if ok {
replicas = int32(spec["replicas"].(int64))
}
return &autoscalingv1.Scale{
Spec: autoscalingv1.ScaleSpec{
Replicas: replicas,
},
Status: autoscalingv1.ScaleStatus{
Replicas: replicas,
},
}, nil
}
func reactionFuncForUpdating(c client.Client, mapper meta.RESTMapper) coretesting.ReactionFunc {
return func(action coretesting.Action) (bool, runtime.Object, error) {
updateAction, ok := action.(coretesting.UpdateAction)
if !ok {
return false, nil, fmt.Errorf("Not UPDATE Action!")
}
scale, ok := updateAction.GetObject().(*autoscalingv1.Scale)
if !ok {
return false, nil, fmt.Errorf("Not autoscalingv1.Scale Object!")
}
obj, err := newUnstructured(updateAction.GetResource(), mapper)
if err != nil {
return true, nil, err
}
nn := types.NamespacedName{Namespace: scale.Namespace, Name: scale.Name}
err = c.Get(context.TODO(), nn, obj)
if err != nil {
return true, nil, err
}
updateScaleForUnstructured(obj, scale)
return true, scale, c.Update(context.TODO(), obj)
}
}
func updateScaleForUnstructured(obj *unstructured.Unstructured, scale *autoscalingv1.Scale) {
spec, ok := obj.Object["spec"].(map[string]interface{})
if !ok {
spec = map[string]interface{}{}
obj.Object["spec"] = spec
}
spec["replicas"] = scale.Spec.Replicas
}
func newMapper() meta.RESTMapper {
m := meta.NewDefaultRESTMapper([]schema.GroupVersion{})
m.Add(appsv1.SchemeGroupVersion.WithKind("Deployment"), meta.RESTScopeNamespace)
m.Add(workloadv1alpha1.SchemeGroupVersion.WithKind("Workload"), meta.RESTScopeNamespace)
return m
}
func newDeployment(name string, replicas int32) *appsv1.Deployment {
return &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "default",
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
},
}
}
func newWorkload(name string, replicas int32) *workloadv1alpha1.Workload {
return &workloadv1alpha1.Workload{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "default",
},
Spec: workloadv1alpha1.WorkloadSpec{
Replicas: &replicas,
},
}
}
func newHPA(apiVersion, kind, name string, replicas int32) *autoscalingv2.HorizontalPodAutoscaler {
return &autoscalingv2.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "default",
},
Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
APIVersion: apiVersion,
Kind: kind,
Name: name,
},
},
Status: autoscalingv2.HorizontalPodAutoscalerStatus{
CurrentReplicas: replicas,
},
}
}
func newScale(name string, replicas int32) *autoscalingv1.Scale {
return &autoscalingv1.Scale{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "default",
},
Spec: autoscalingv1.ScaleSpec{
Replicas: replicas,
},
}
}

vendor/k8s.io/client-go/scale/fake/client.go (generated, vendored; new file, 81 lines)

@@ -0,0 +1,81 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package fake provides a fake client interface to arbitrary Kubernetes
// APIs that exposes common high level operations and exposes common
// metadata.
package fake
import (
"context"
autoscalingapi "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/scale"
"k8s.io/client-go/testing"
)
// FakeScaleClient provides a fake implementation of scale.ScalesGetter.
type FakeScaleClient struct {
testing.Fake
}
func (f *FakeScaleClient) Scales(namespace string) scale.ScaleInterface {
return &fakeNamespacedScaleClient{
namespace: namespace,
fake: &f.Fake,
}
}
type fakeNamespacedScaleClient struct {
namespace string
fake *testing.Fake
}
func (f *fakeNamespacedScaleClient) Get(ctx context.Context, resource schema.GroupResource, name string, opts metav1.GetOptions) (*autoscalingapi.Scale, error) {
obj, err := f.fake.
Invokes(testing.NewGetSubresourceAction(resource.WithVersion(""), f.namespace, "scale", name), &autoscalingapi.Scale{})
if err != nil {
return nil, err
}
return obj.(*autoscalingapi.Scale), err
}
func (f *fakeNamespacedScaleClient) Update(ctx context.Context, resource schema.GroupResource, scale *autoscalingapi.Scale, opts metav1.UpdateOptions) (*autoscalingapi.Scale, error) {
obj, err := f.fake.
Invokes(testing.NewUpdateSubresourceAction(resource.WithVersion(""), "scale", f.namespace, scale), &autoscalingapi.Scale{})
if err != nil {
return nil, err
}
return obj.(*autoscalingapi.Scale), err
}
func (f *fakeNamespacedScaleClient) Patch(ctx context.Context, gvr schema.GroupVersionResource, name string, pt types.PatchType, patch []byte, opts metav1.PatchOptions) (*autoscalingapi.Scale, error) {
obj, err := f.fake.
Invokes(testing.NewPatchSubresourceAction(gvr, f.namespace, name, pt, patch, "scale"), &autoscalingapi.Scale{})
if err != nil {
return nil, err
}
return obj.(*autoscalingapi.Scale), err
}
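The test file earlier in this commit wires FakeScaleClient to a controller-runtime fake client through reactors; as a smaller, standalone illustration (an editor's sketch with illustrative names, assuming the usual imports for context, runtime, schema, metav1, client-go testing, and this fake package), a reactor can simply hand back a canned Scale:

fakeScales := &fake.FakeScaleClient{}
fakeScales.AddReactor("get", "deployments", func(action testing.Action) (bool, runtime.Object, error) {
	return true, &autoscalingapi.Scale{Spec: autoscalingapi.ScaleSpec{Replicas: 2}}, nil
})

s, err := fakeScales.Scales("default").Get(context.TODO(),
	schema.GroupResource{Group: "apps", Resource: "deployments"}, "demo", metav1.GetOptions{})
if err != nil {
	panic(err)
}
_ = s.Spec.Replicas // 2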

vendor/modules.txt (vendored; 1 line added)

@@ -1391,6 +1391,7 @@ k8s.io/client-go/rest/fake
k8s.io/client-go/rest/watch
k8s.io/client-go/restmapper
k8s.io/client-go/scale
k8s.io/client-go/scale/fake
k8s.io/client-go/scale/scheme
k8s.io/client-go/scale/scheme/appsint
k8s.io/client-go/scale/scheme/appsv1beta1