Remove deprecated ReplicaSchedulingPolicy API

Signed-off-by: Xinzhao Xu <z2d@jifangcheng.com>
This commit is contained in:
Xinzhao Xu 2021-12-24 17:19:18 +08:00
parent 21403e29f6
commit d7b593ce55
21 changed files with 53 additions and 1140 deletions

View File

@ -1,24 +0,0 @@
apiVersion: policy.karmada.io/v1alpha1
kind: ReplicaSchedulingPolicy
metadata:
name: foo
namespace: foons
spec:
resourceSelectors:
- apiVersion: apps/v1
kind: Deployment
namespace: foons
name: deployment-1
totalReplicas: 100
preferences:
staticWeightList:
- targetCluster:
labelSelector:
matchLabels:
location: us
weight: 1
- targetCluster:
labelSelector:
matchLabels:
location: cn
weight: 2

View File

@ -1,20 +0,0 @@
apiVersion: policy.karmada.io/v1alpha1
kind: ReplicaSchedulingPolicy
metadata:
name: foo
namespace: foons
spec:
resourceSelectors:
- apiVersion: apps/v1
kind: Deployment
namespace: foons
name: deployment-1
totalReplicas: 100
preferences:
staticWeightList:
- targetCluster:
clusterNames: [cluster1]
weight: 1
- targetCluster:
clusterNames: [cluster2]
weight: 2

View File

@ -1,268 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.6.2
creationTimestamp: null
name: replicaschedulingpolicies.policy.karmada.io
spec:
group: policy.karmada.io
names:
kind: ReplicaSchedulingPolicy
listKind: ReplicaSchedulingPolicyList
plural: replicaschedulingpolicies
shortNames:
- rsp
singular: replicaschedulingpolicy
scope: Namespaced
versions:
- deprecated: true
name: v1alpha1
schema:
openAPIV3Schema:
description: ReplicaSchedulingPolicy represents the policy that propagates
total number of replicas for deployment.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Spec represents the desired behavior of ReplicaSchedulingPolicy.
properties:
preferences:
description: Preferences describes weight for each cluster or for
each group of cluster.
properties:
dynamicWeight:
description: DynamicWeight specifies the factor to generates dynamic
weight list. If specified, StaticWeightList will be ignored.
enum:
- AvailableReplicas
type: string
staticWeightList:
description: StaticWeightList defines the static cluster weight.
items:
description: StaticClusterWeight defines the static cluster
weight.
properties:
targetCluster:
description: TargetCluster describes the filter to select
clusters.
properties:
clusterNames:
description: ClusterNames is the list of clusters to
be selected.
items:
type: string
type: array
exclude:
description: ExcludedClusters is the list of clusters
to be ignored.
items:
type: string
type: array
fieldSelector:
description: FieldSelector is a filter to select member
clusters by fields. If non-nil and non-empty, only
the clusters match this filter will be selected.
properties:
matchExpressions:
description: A list of field selector requirements.
items:
description: A node selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: The label key that the selector
applies to.
type: string
operator:
description: Represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists, DoesNotExist. Gt, and
Lt.
type: string
values:
description: An array of string values. If
the operator is In or NotIn, the values
array must be non-empty. If the operator
is Exists or DoesNotExist, the values array
must be empty. If the operator is Gt or
Lt, the values array must have a single
element, which will be interpreted as an
integer. This array is replaced during a
strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
type: object
labelSelector:
description: LabelSelector is a filter to select member
clusters by labels. If non-nil and non-empty, only
the clusters match this filter will be selected.
properties:
matchExpressions:
description: matchExpressions is a list of label
selector requirements. The requirements are ANDed.
items:
description: A label selector requirement is a
selector that contains values, a key, and an
operator that relates the key and values.
properties:
key:
description: key is the label key that the
selector applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are
In, NotIn, Exists and DoesNotExist.
type: string
values:
description: values is an array of string
values. If the operator is In or NotIn,
the values array must be non-empty. If the
operator is Exists or DoesNotExist, the
values array must be empty. This array is
replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value}
pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions,
whose key field is "key", the operator is "In",
and the values array contains only "value". The
requirements are ANDed.
type: object
type: object
type: object
weight:
description: Weight expressing the preference to the cluster(s)
specified by 'TargetCluster'.
format: int64
minimum: 1
type: integer
required:
- targetCluster
- weight
type: object
type: array
type: object
resourceSelectors:
description: ResourceSelectors used to select resources.
items:
description: ResourceSelector the resources will be selected.
properties:
apiVersion:
description: APIVersion represents the API version of the target
resources.
type: string
kind:
description: Kind represents the Kind of the target resources.
type: string
labelSelector:
description: A label query over a set of resources. If name
is not empty, labelSelector will be ignored.
properties:
matchExpressions:
description: matchExpressions is a list of label selector
requirements. The requirements are ANDed.
items:
description: A label selector requirement is a selector
that contains values, a key, and an operator that relates
the key and values.
properties:
key:
description: key is the label key that the selector
applies to.
type: string
operator:
description: operator represents a key's relationship
to a set of values. Valid operators are In, NotIn,
Exists and DoesNotExist.
type: string
values:
description: values is an array of string values.
If the operator is In or NotIn, the values array
must be non-empty. If the operator is Exists or
DoesNotExist, the values array must be empty. This
array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
matchLabels:
additionalProperties:
type: string
description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
name:
description: Name of the target resource. Default is empty,
which means selecting all resources.
type: string
namespace:
description: Namespace of the target resource. Default is empty,
which means inherit from the parent object scope.
type: string
required:
- apiVersion
- kind
type: object
type: array
totalReplicas:
description: TotalReplicas represents the total number of replicas
across member clusters. The replicas(spec.replicas) specified for
deployment template will be discarded.
format: int32
type: integer
required:
- preferences
- resourceSelectors
- totalReplicas
type: object
required:
- spec
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

View File

@ -5,7 +5,6 @@ resources:
- bases/policy.karmada.io_clusterpropagationpolicies.yaml
- bases/policy.karmada.io_overridepolicies.yaml
- bases/policy.karmada.io_propagationpolicies.yaml
- bases/policy.karmada.io_replicaschedulingpolicies.yaml
- bases/work.karmada.io_resourcebindings.yaml
- bases/work.karmada.io_clusterresourcebindings.yaml
- bases/work.karmada.io_works.yaml

View File

@ -217,6 +217,49 @@ type ReplicaSchedulingStrategy struct {
WeightPreference *ClusterPreferences `json:"weightPreference,omitempty"`
}
// ClusterPreferences describes the weight for each cluster or for each group of clusters.
type ClusterPreferences struct {
// StaticWeightList defines the static cluster weight.
// +optional
StaticWeightList []StaticClusterWeight `json:"staticWeightList,omitempty"`
// DynamicWeight specifies the factor used to generate the dynamic weight list.
// If specified, StaticWeightList will be ignored.
// +kubebuilder:validation:Enum=AvailableReplicas
// +optional
DynamicWeight DynamicWeightFactor `json:"dynamicWeight,omitempty"`
}
// StaticClusterWeight defines the static cluster weight.
type StaticClusterWeight struct {
// TargetCluster describes the filter to select clusters.
// +required
TargetCluster ClusterAffinity `json:"targetCluster"`
// Weight expresses the preference for the cluster(s) specified by 'TargetCluster'.
// Must be at least 1 (enforced by the Minimum validation below).
// +kubebuilder:validation:Minimum=1
// +required
Weight int64 `json:"weight"`
}
// DynamicWeightFactor represents the weight factor.
// For now, only 'AvailableReplicas' is supported; more factors could be added if there is a need.
type DynamicWeightFactor string
const (
// DynamicWeightByAvailableReplicas represents that the cluster weight list should be generated
// according to available resources (available replicas).
// Example:
// The scheduler selected 3 clusters (A/B/C) and should divide 12 replicas to them.
// Workload:
// Desired replica: 12
// Cluster:
// A: Max available replica: 6
// B: Max available replica: 12
// C: Max available replica: 18
// The weight of cluster A:B:C will be 6:12:18 (equals to 1:2:3). At last, the assignment would be 'A: 2, B: 4, C: 6'.
DynamicWeightByAvailableReplicas DynamicWeightFactor = "AvailableReplicas"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PropagationPolicyList contains a list of PropagationPolicy.

View File

@ -1,85 +0,0 @@
package v1alpha1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:shortName=rsp
// +kubebuilder:deprecatedversion
// ReplicaSchedulingPolicy represents the policy that propagates total number of replicas for deployment.
type ReplicaSchedulingPolicy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// Spec represents the desired behavior of ReplicaSchedulingPolicy.
Spec ReplicaSchedulingSpec `json:"spec"`
}
// ReplicaSchedulingSpec represents the desired behavior of ReplicaSchedulingPolicy.
type ReplicaSchedulingSpec struct {
// ResourceSelectors used to select resources.
// +required
ResourceSelectors []ResourceSelector `json:"resourceSelectors"`
// TotalReplicas represents the total number of replicas across member clusters.
// The replicas(spec.replicas) specified for deployment template will be discarded.
// +required
TotalReplicas int32 `json:"totalReplicas"`
// Preferences describes weight for each cluster or for each group of cluster.
// +required
Preferences ClusterPreferences `json:"preferences"`
}
// ClusterPreferences describes weight for each cluster or for each group of cluster.
type ClusterPreferences struct {
// StaticWeightList defines the static cluster weight.
// +optional
StaticWeightList []StaticClusterWeight `json:"staticWeightList,omitempty"`
// DynamicWeight specifies the factor to generates dynamic weight list.
// If specified, StaticWeightList will be ignored.
// +kubebuilder:validation:Enum=AvailableReplicas
// +optional
DynamicWeight DynamicWeightFactor `json:"dynamicWeight,omitempty"`
}
// StaticClusterWeight defines the static cluster weight.
type StaticClusterWeight struct {
// TargetCluster describes the filter to select clusters.
// +required
TargetCluster ClusterAffinity `json:"targetCluster"`
// Weight expressing the preference to the cluster(s) specified by 'TargetCluster'.
// +kubebuilder:validation:Minimum=1
// +required
Weight int64 `json:"weight"`
}
// DynamicWeightFactor represents the weight factor.
// For now only support 'AvailableReplicas', more factors could be extended if there is a need.
type DynamicWeightFactor string
const (
// DynamicWeightByAvailableReplicas represents the cluster weight list should be generated according to
// available resource (available replicas).
// Example:
// The scheduler selected 3 clusters (A/B/C) and should divide 12 replicas to them.
// Workload:
// Desired replica: 12
// Cluster:
// A: Max available replica: 6
// B: Max available replica: 12
// C: Max available replica: 18
// The weight of cluster A:B:C will be 6:12:18 (equals to 1:2:3). At last, the assignment would be 'A: 2, B: 4, C: 6'.
DynamicWeightByAvailableReplicas DynamicWeightFactor = "AvailableReplicas"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicaSchedulingPolicyList contains a list of ReplicaSchedulingPolicy.
type ReplicaSchedulingPolicyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ReplicaSchedulingPolicy `json:"items"`
}

View File

@ -555,90 +555,6 @@ func (in *PropagationSpec) DeepCopy() *PropagationSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSchedulingPolicy) DeepCopyInto(out *ReplicaSchedulingPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSchedulingPolicy.
func (in *ReplicaSchedulingPolicy) DeepCopy() *ReplicaSchedulingPolicy {
if in == nil {
return nil
}
out := new(ReplicaSchedulingPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicaSchedulingPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSchedulingPolicyList) DeepCopyInto(out *ReplicaSchedulingPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ReplicaSchedulingPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSchedulingPolicyList.
func (in *ReplicaSchedulingPolicyList) DeepCopy() *ReplicaSchedulingPolicyList {
if in == nil {
return nil
}
out := new(ReplicaSchedulingPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ReplicaSchedulingPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSchedulingSpec) DeepCopyInto(out *ReplicaSchedulingSpec) {
*out = *in
if in.ResourceSelectors != nil {
in, out := &in.ResourceSelectors, &out.ResourceSelectors
*out = make([]ResourceSelector, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
in.Preferences.DeepCopyInto(&out.Preferences)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSchedulingSpec.
func (in *ReplicaSchedulingSpec) DeepCopy() *ReplicaSchedulingSpec {
if in == nil {
return nil
}
out := new(ReplicaSchedulingSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReplicaSchedulingStrategy) DeepCopyInto(out *ReplicaSchedulingStrategy) {
*out = *in

View File

@ -50,8 +50,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&OverridePolicyList{},
&PropagationPolicy{},
&PropagationPolicyList{},
&ReplicaSchedulingPolicy{},
&ReplicaSchedulingPolicyList{},
)
// AddToGroupVersion allows the serialization of client types like ListOptions.
v1.AddToGroupVersion(scheme, SchemeGroupVersion)

View File

@ -169,7 +169,6 @@ func (c *ResourceBindingController) SetupWithManager(mgr controllerruntime.Manag
Watches(&source.Kind{Type: &workv1alpha1.Work{}}, handler.EnqueueRequestsFromMapFunc(workFn), workPredicateFn).
Watches(&source.Kind{Type: &policyv1alpha1.OverridePolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())).
Watches(&source.Kind{Type: &policyv1alpha1.ClusterOverridePolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())).
Watches(&source.Kind{Type: &policyv1alpha1.ReplicaSchedulingPolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newReplicaSchedulingPolicyFunc())).
Complete(c)
}
@ -210,32 +209,3 @@ func (c *ResourceBindingController) newOverridePolicyFunc() handler.MapFunc {
return requests
}
}
func (c *ResourceBindingController) newReplicaSchedulingPolicyFunc() handler.MapFunc {
return func(a client.Object) []reconcile.Request {
rspResourceSelectors := a.(*policyv1alpha1.ReplicaSchedulingPolicy).Spec.ResourceSelectors
bindingList := &workv1alpha2.ResourceBindingList{}
if err := c.Client.List(context.TODO(), bindingList); err != nil {
klog.Errorf("Failed to list resourceBindings, error: %v", err)
return nil
}
var requests []reconcile.Request
for _, binding := range bindingList.Items {
workload, err := helper.FetchWorkload(c.DynamicClient, c.InformerManager, c.RESTMapper, binding.Spec.Resource)
if err != nil {
klog.Errorf("Failed to fetch workload for resourceBinding(%s/%s). Error: %v.", binding.Namespace, binding.Name, err)
return nil
}
for _, rs := range rspResourceSelectors {
if util.ResourceMatches(workload, rs) {
klog.V(2).Infof("Enqueue ResourceBinding(%s/%s) as replica scheduling policy(%s/%s) changes.", binding.Namespace, binding.Name, a.GetNamespace(), a.GetName())
requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: binding.Namespace, Name: binding.Name}})
break
}
}
}
return requests
}
}

View File

@ -158,7 +158,6 @@ func (c *ClusterResourceBindingController) SetupWithManager(mgr controllerruntim
Watches(&source.Kind{Type: &workv1alpha1.Work{}}, handler.EnqueueRequestsFromMapFunc(workFn), workPredicateFn).
Watches(&source.Kind{Type: &policyv1alpha1.OverridePolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())).
Watches(&source.Kind{Type: &policyv1alpha1.ClusterOverridePolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())).
Watches(&source.Kind{Type: &policyv1alpha1.ReplicaSchedulingPolicy{}}, handler.EnqueueRequestsFromMapFunc(c.newReplicaSchedulingPolicyFunc())).
Complete(c)
}
@ -199,32 +198,3 @@ func (c *ClusterResourceBindingController) newOverridePolicyFunc() handler.MapFu
return requests
}
}
func (c *ClusterResourceBindingController) newReplicaSchedulingPolicyFunc() handler.MapFunc {
return func(a client.Object) []reconcile.Request {
rspResourceSelectors := a.(*policyv1alpha1.ReplicaSchedulingPolicy).Spec.ResourceSelectors
bindingList := &workv1alpha2.ClusterResourceBindingList{}
if err := c.Client.List(context.TODO(), bindingList); err != nil {
klog.Errorf("Failed to list clusterResourceBindings, error: %v", err)
return nil
}
var requests []reconcile.Request
for _, binding := range bindingList.Items {
workload, err := helper.FetchWorkload(c.DynamicClient, c.InformerManager, c.RESTMapper, binding.Spec.Resource)
if err != nil {
klog.Errorf("Failed to fetch workload for clusterResourceBinding(%s). Error: %v.", binding.Name, err)
return nil
}
for _, rs := range rspResourceSelectors {
if util.ResourceMatches(workload, rs) {
klog.V(2).Infof("Enqueue ClusterResourceBinding(%s) as replica scheduling policy(%s/%s) changes.", binding.Name, a.GetNamespace(), a.GetName())
requests = append(requests, reconcile.Request{NamespacedName: types.NamespacedName{Name: binding.Name}})
break
}
}
}
return requests
}
}

View File

@ -1,9 +1,7 @@
package binding
import (
"context"
"reflect"
"sort"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -14,9 +12,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
"github.com/karmada-io/karmada/pkg/resourceinterpreter"
@ -74,10 +70,7 @@ func ensureWork(
targetClusters = bindingObj.Spec.Clusters
}
hasScheduledReplica, _, desireReplicaInfos, err := getRSPAndReplicaInfos(c, workload, targetClusters)
if err != nil {
return err
}
hasScheduledReplica, desireReplicaInfos := getReplicaInfos(targetClusters)
var jobCompletions []workv1alpha2.TargetCluster
var jobHasCompletions = false
@ -150,18 +143,19 @@ func ensureWork(
return nil
}
func getRSPAndReplicaInfos(c client.Client, workload *unstructured.Unstructured, targetClusters []workv1alpha2.TargetCluster) (bool, *policyv1alpha1.ReplicaSchedulingPolicy, map[string]int64, error) {
func getReplicaInfos(targetClusters []workv1alpha2.TargetCluster) (bool, map[string]int64) {
if helper.HasScheduledReplica(targetClusters) {
return true, nil, transScheduleResultToMap(targetClusters), nil
return true, transScheduleResultToMap(targetClusters)
}
return false, nil
}
referenceRSP, desireReplicaInfos, err := calculateReplicasIfNeeded(c, workload, helper.GetBindingClusterNames(targetClusters))
if err != nil {
klog.Errorf("Failed to get ReplicaSchedulingPolicy for %s/%s/%s, err is: %v", workload.GetKind(), workload.GetNamespace(), workload.GetName(), err)
return false, nil, nil, err
func transScheduleResultToMap(scheduleResult []workv1alpha2.TargetCluster) map[string]int64 {
var desireReplicaInfos = make(map[string]int64, len(scheduleResult))
for _, clusterInfo := range scheduleResult {
desireReplicaInfos[clusterInfo.Name] = int64(clusterInfo.Replicas)
}
return false, referenceRSP, desireReplicaInfos, nil
return desireReplicaInfos
}
func mergeLabel(workload *unstructured.Unstructured, workNamespace string, binding metav1.Object, scope apiextensionsv1.ResourceScope) map[string]string {
@ -221,122 +215,3 @@ func recordAppliedOverrides(cops *overridemanager.AppliedOverrides, ops *overrid
return annotations, nil
}
func transScheduleResultToMap(scheduleResult []workv1alpha2.TargetCluster) map[string]int64 {
var desireReplicaInfos = make(map[string]int64, len(scheduleResult))
for _, clusterInfo := range scheduleResult {
desireReplicaInfos[clusterInfo.Name] = int64(clusterInfo.Replicas)
}
return desireReplicaInfos
}
func calculateReplicasIfNeeded(c client.Client, workload *unstructured.Unstructured, clusterNames []string) (*policyv1alpha1.ReplicaSchedulingPolicy, map[string]int64, error) {
var err error
var referenceRSP *policyv1alpha1.ReplicaSchedulingPolicy
var desireReplicaInfos = make(map[string]int64)
if workload.GetKind() == util.DeploymentKind {
referenceRSP, err = matchReplicaSchedulingPolicy(c, workload)
if err != nil {
return nil, nil, err
}
if referenceRSP != nil {
desireReplicaInfos, err = calculateReplicas(c, referenceRSP, clusterNames)
if err != nil {
klog.Errorf("Failed to get desire replicas for %s/%s/%s, err is: %v", workload.GetKind(), workload.GetNamespace(), workload.GetName(), err)
return nil, nil, err
}
klog.V(4).Infof("DesireReplicaInfos with replica scheduling policies(%s/%s) is %v", referenceRSP.Namespace, referenceRSP.Name, desireReplicaInfos)
}
}
return referenceRSP, desireReplicaInfos, nil
}
func matchReplicaSchedulingPolicy(c client.Client, workload *unstructured.Unstructured) (*policyv1alpha1.ReplicaSchedulingPolicy, error) {
// get all namespace-scoped replica scheduling policies
policyList := &policyv1alpha1.ReplicaSchedulingPolicyList{}
if err := c.List(context.TODO(), policyList, &client.ListOptions{Namespace: workload.GetNamespace()}); err != nil {
klog.Errorf("Failed to list replica scheduling policies from namespace: %s, error: %v", workload.GetNamespace(), err)
return nil, err
}
if len(policyList.Items) == 0 {
return nil, nil
}
matchedPolicies := getMatchedReplicaSchedulingPolicy(policyList.Items, workload)
if len(matchedPolicies) == 0 {
klog.V(2).Infof("No replica scheduling policy for resource: %s/%s", workload.GetNamespace(), workload.GetName())
return nil, nil
}
return &matchedPolicies[0], nil
}
func getMatchedReplicaSchedulingPolicy(policies []policyv1alpha1.ReplicaSchedulingPolicy, resource *unstructured.Unstructured) []policyv1alpha1.ReplicaSchedulingPolicy {
// select policy in which at least one resource selector matches target resource.
resourceMatches := make([]policyv1alpha1.ReplicaSchedulingPolicy, 0)
for _, policy := range policies {
if util.ResourceMatchSelectors(resource, policy.Spec.ResourceSelectors...) {
resourceMatches = append(resourceMatches, policy)
}
}
// Sort by policy names.
sort.Slice(resourceMatches, func(i, j int) bool {
return resourceMatches[i].Name < resourceMatches[j].Name
})
return resourceMatches
}
func calculateReplicas(c client.Client, policy *policyv1alpha1.ReplicaSchedulingPolicy, clusterNames []string) (map[string]int64, error) {
weightSum := int64(0)
matchClusters := make(map[string]int64)
desireReplicaInfos := make(map[string]int64)
// found out clusters matched the given ReplicaSchedulingPolicy
for _, clusterName := range clusterNames {
clusterObj := &clusterv1alpha1.Cluster{}
if err := c.Get(context.TODO(), client.ObjectKey{Name: clusterName}, clusterObj); err != nil {
klog.Errorf("Failed to get member cluster: %s, error: %v", clusterName, err)
return nil, err
}
for _, staticWeightRule := range policy.Spec.Preferences.StaticWeightList {
if util.ClusterMatches(clusterObj, staticWeightRule.TargetCluster) {
weightSum += staticWeightRule.Weight
matchClusters[clusterName] = staticWeightRule.Weight
break
}
}
}
if weightSum == 0 {
return desireReplicaInfos, nil
}
allocatedReplicas := int32(0)
for clusterName, weight := range matchClusters {
desireReplicaInfos[clusterName] = weight * int64(policy.Spec.TotalReplicas) / weightSum
allocatedReplicas += int32(desireReplicaInfos[clusterName])
}
if remainReplicas := policy.Spec.TotalReplicas - allocatedReplicas; remainReplicas > 0 && len(matchClusters) > 0 {
sortedClusters := helper.SortClusterByWeight(matchClusters)
for i := 0; remainReplicas > 0; i++ {
desireReplicaInfos[sortedClusters[i].ClusterName]++
remainReplicas--
if i == len(desireReplicaInfos) {
i = 0
}
}
}
for _, clusterName := range clusterNames {
if _, exist := matchClusters[clusterName]; !exist {
desireReplicaInfos[clusterName] = 0
}
}
return desireReplicaInfos, nil
}

View File

@ -28,10 +28,6 @@ func (c *FakePolicyV1alpha1) PropagationPolicies(namespace string) v1alpha1.Prop
return &FakePropagationPolicies{c, namespace}
}
func (c *FakePolicyV1alpha1) ReplicaSchedulingPolicies(namespace string) v1alpha1.ReplicaSchedulingPolicyInterface {
return &FakeReplicaSchedulingPolicies{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakePolicyV1alpha1) RESTClient() rest.Interface {

View File

@ -1,114 +0,0 @@
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeReplicaSchedulingPolicies implements ReplicaSchedulingPolicyInterface
type FakeReplicaSchedulingPolicies struct {
Fake *FakePolicyV1alpha1
ns string
}
var replicaschedulingpoliciesResource = schema.GroupVersionResource{Group: "policy.karmada.io", Version: "v1alpha1", Resource: "replicaschedulingpolicies"}
var replicaschedulingpoliciesKind = schema.GroupVersionKind{Group: "policy.karmada.io", Version: "v1alpha1", Kind: "ReplicaSchedulingPolicy"}
// Get takes name of the replicaSchedulingPolicy, and returns the corresponding replicaSchedulingPolicy object, and an error if there is any.
func (c *FakeReplicaSchedulingPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ReplicaSchedulingPolicy, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(replicaschedulingpoliciesResource, c.ns, name), &v1alpha1.ReplicaSchedulingPolicy{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.ReplicaSchedulingPolicy), err
}
// List takes label and field selectors, and returns the list of ReplicaSchedulingPolicies that match those selectors.
func (c *FakeReplicaSchedulingPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ReplicaSchedulingPolicyList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(replicaschedulingpoliciesResource, replicaschedulingpoliciesKind, c.ns, opts), &v1alpha1.ReplicaSchedulingPolicyList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1alpha1.ReplicaSchedulingPolicyList{ListMeta: obj.(*v1alpha1.ReplicaSchedulingPolicyList).ListMeta}
for _, item := range obj.(*v1alpha1.ReplicaSchedulingPolicyList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested replicaSchedulingPolicies.
func (c *FakeReplicaSchedulingPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(replicaschedulingpoliciesResource, c.ns, opts))
}
// Create takes the representation of a replicaSchedulingPolicy and creates it. Returns the server's representation of the replicaSchedulingPolicy, and an error, if there is any.
func (c *FakeReplicaSchedulingPolicies) Create(ctx context.Context, replicaSchedulingPolicy *v1alpha1.ReplicaSchedulingPolicy, opts v1.CreateOptions) (result *v1alpha1.ReplicaSchedulingPolicy, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(replicaschedulingpoliciesResource, c.ns, replicaSchedulingPolicy), &v1alpha1.ReplicaSchedulingPolicy{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.ReplicaSchedulingPolicy), err
}
// Update takes the representation of a replicaSchedulingPolicy and updates it. Returns the server's representation of the replicaSchedulingPolicy, and an error, if there is any.
func (c *FakeReplicaSchedulingPolicies) Update(ctx context.Context, replicaSchedulingPolicy *v1alpha1.ReplicaSchedulingPolicy, opts v1.UpdateOptions) (result *v1alpha1.ReplicaSchedulingPolicy, err error) {
	action := testing.NewUpdateAction(replicaschedulingpoliciesResource, c.ns, replicaSchedulingPolicy)
	obj, err := c.Fake.Invokes(action, &v1alpha1.ReplicaSchedulingPolicy{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.ReplicaSchedulingPolicy), err
}
// Delete takes name of the replicaSchedulingPolicy and deletes it. Returns an error if one occurs.
func (c *FakeReplicaSchedulingPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	action := testing.NewDeleteAction(replicaschedulingpoliciesResource, c.ns, name)
	// The returned object is irrelevant for deletes; only the error matters.
	_, err := c.Fake.Invokes(action, &v1alpha1.ReplicaSchedulingPolicy{})
	return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeReplicaSchedulingPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	_, err := c.Fake.
		Invokes(testing.NewDeleteCollectionAction(replicaschedulingpoliciesResource, c.ns, listOpts), &v1alpha1.ReplicaSchedulingPolicyList{})
	return err
}
// Patch applies the patch and returns the patched replicaSchedulingPolicy.
func (c *FakeReplicaSchedulingPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ReplicaSchedulingPolicy, err error) {
	action := testing.NewPatchSubresourceAction(replicaschedulingpoliciesResource, c.ns, name, pt, data, subresources...)
	obj, err := c.Fake.Invokes(action, &v1alpha1.ReplicaSchedulingPolicy{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.ReplicaSchedulingPolicy), err
}

View File

@ -9,5 +9,3 @@ type ClusterPropagationPolicyExpansion interface{}
type OverridePolicyExpansion interface{}
type PropagationPolicyExpansion interface{}
type ReplicaSchedulingPolicyExpansion interface{}

View File

@ -14,7 +14,6 @@ type PolicyV1alpha1Interface interface {
ClusterPropagationPoliciesGetter
OverridePoliciesGetter
PropagationPoliciesGetter
ReplicaSchedulingPoliciesGetter
}
// PolicyV1alpha1Client is used to interact with features provided by the policy.karmada.io group.
@ -38,10 +37,6 @@ func (c *PolicyV1alpha1Client) PropagationPolicies(namespace string) Propagation
return newPropagationPolicies(c, namespace)
}
// ReplicaSchedulingPolicies returns a ReplicaSchedulingPolicyInterface scoped to the given namespace.
func (c *PolicyV1alpha1Client) ReplicaSchedulingPolicies(namespace string) ReplicaSchedulingPolicyInterface {
	return newReplicaSchedulingPolicies(c, namespace)
}
// NewForConfig creates a new PolicyV1alpha1Client for the given config.
func NewForConfig(c *rest.Config) (*PolicyV1alpha1Client, error) {
config := *c

View File

@ -1,162 +0,0 @@
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
scheme "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ReplicaSchedulingPoliciesGetter has a method to return a ReplicaSchedulingPolicyInterface.
// A group's client should implement this interface.
type ReplicaSchedulingPoliciesGetter interface {
	// ReplicaSchedulingPolicies returns a client for ReplicaSchedulingPolicy
	// resources in the given namespace.
	ReplicaSchedulingPolicies(namespace string) ReplicaSchedulingPolicyInterface
}
// ReplicaSchedulingPolicyInterface has methods to work with ReplicaSchedulingPolicy resources.
// All operations are scoped to the namespace the client was constructed with.
type ReplicaSchedulingPolicyInterface interface {
	// Create submits a new policy and returns the server's representation of it.
	Create(ctx context.Context, replicaSchedulingPolicy *v1alpha1.ReplicaSchedulingPolicy, opts v1.CreateOptions) (*v1alpha1.ReplicaSchedulingPolicy, error)
	// Update replaces an existing policy and returns the server's representation of it.
	Update(ctx context.Context, replicaSchedulingPolicy *v1alpha1.ReplicaSchedulingPolicy, opts v1.UpdateOptions) (*v1alpha1.ReplicaSchedulingPolicy, error)
	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ReplicaSchedulingPolicy, error)
	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ReplicaSchedulingPolicyList, error)
	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ReplicaSchedulingPolicy, err error)
	// ReplicaSchedulingPolicyExpansion carries any hand-written extension methods.
	ReplicaSchedulingPolicyExpansion
}
// replicaSchedulingPolicies implements ReplicaSchedulingPolicyInterface
type replicaSchedulingPolicies struct {
	client rest.Interface // REST client for the policy.karmada.io group
	ns     string         // namespace every request is scoped to
}
// newReplicaSchedulingPolicies returns a ReplicaSchedulingPolicies
// client bound to the group's REST client and the given namespace.
func newReplicaSchedulingPolicies(c *PolicyV1alpha1Client, namespace string) *replicaSchedulingPolicies {
	rsp := &replicaSchedulingPolicies{}
	rsp.client = c.RESTClient()
	rsp.ns = namespace
	return rsp
}
// Get takes name of the replicaSchedulingPolicy, and returns the corresponding replicaSchedulingPolicy object, and an error if there is any.
func (c *replicaSchedulingPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ReplicaSchedulingPolicy, err error) {
	out := &v1alpha1.ReplicaSchedulingPolicy{}
	req := c.client.Get().
		Namespace(c.ns).
		Resource("replicaschedulingpolicies").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec)
	err = req.Do(ctx).Into(out)
	return out, err
}
// List takes label and field selectors, and returns the list of ReplicaSchedulingPolicies that match those selectors.
func (c *replicaSchedulingPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ReplicaSchedulingPolicyList, err error) {
	// Honor a client-requested server timeout when one is set.
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	out := &v1alpha1.ReplicaSchedulingPolicyList{}
	req := c.client.Get().
		Namespace(c.ns).
		Resource("replicaschedulingpolicies").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout)
	err = req.Do(ctx).Into(out)
	return out, err
}
// Watch returns a watch.Interface that watches the requested replicaSchedulingPolicies.
func (c *replicaSchedulingPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	// Force watch semantics on the list endpoint.
	opts.Watch = true
	req := c.client.Get().
		Namespace(c.ns).
		Resource("replicaschedulingpolicies").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout)
	return req.Watch(ctx)
}
// Create takes the representation of a replicaSchedulingPolicy and creates it. Returns the server's representation of the replicaSchedulingPolicy, and an error, if there is any.
func (c *replicaSchedulingPolicies) Create(ctx context.Context, replicaSchedulingPolicy *v1alpha1.ReplicaSchedulingPolicy, opts v1.CreateOptions) (result *v1alpha1.ReplicaSchedulingPolicy, err error) {
	created := &v1alpha1.ReplicaSchedulingPolicy{}
	err = c.client.Post().
		Namespace(c.ns).
		Resource("replicaschedulingpolicies").
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(replicaSchedulingPolicy).
		Do(ctx).
		Into(created)
	return created, err
}
// Update takes the representation of a replicaSchedulingPolicy and updates it. Returns the server's representation of the replicaSchedulingPolicy, and an error, if there is any.
func (c *replicaSchedulingPolicies) Update(ctx context.Context, replicaSchedulingPolicy *v1alpha1.ReplicaSchedulingPolicy, opts v1.UpdateOptions) (result *v1alpha1.ReplicaSchedulingPolicy, err error) {
	updated := &v1alpha1.ReplicaSchedulingPolicy{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("replicaschedulingpolicies").
		Name(replicaSchedulingPolicy.Name).
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(replicaSchedulingPolicy).
		Do(ctx).
		Into(updated)
	return updated, err
}
// Delete takes name of the replicaSchedulingPolicy and deletes it. Returns an error if one occurs.
func (c *replicaSchedulingPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	req := c.client.Delete().
		Namespace(c.ns).
		Resource("replicaschedulingpolicies").
		Name(name).
		Body(&opts)
	return req.Do(ctx).Error()
}
// DeleteCollection deletes a collection of objects.
func (c *replicaSchedulingPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	// The list options (selectors, timeout) choose what gets deleted;
	// the delete options travel in the request body.
	var timeout time.Duration
	if listOpts.TimeoutSeconds != nil {
		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
	}
	req := c.client.Delete().
		Namespace(c.ns).
		Resource("replicaschedulingpolicies").
		VersionedParams(&listOpts, scheme.ParameterCodec).
		Timeout(timeout).
		Body(&opts)
	return req.Do(ctx).Error()
}
// Patch applies the patch and returns the patched replicaSchedulingPolicy.
func (c *replicaSchedulingPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ReplicaSchedulingPolicy, err error) {
	patched := &v1alpha1.ReplicaSchedulingPolicy{}
	err = c.client.Patch(pt).
		Namespace(c.ns).
		Resource("replicaschedulingpolicies").
		Name(name).
		SubResource(subresources...).
		VersionedParams(&opts, scheme.ParameterCodec).
		Body(data).
		Do(ctx).
		Into(patched)
	return patched, err
}

View File

@ -57,8 +57,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1alpha1().OverridePolicies().Informer()}, nil
case policyv1alpha1.SchemeGroupVersion.WithResource("propagationpolicies"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1alpha1().PropagationPolicies().Informer()}, nil
case policyv1alpha1.SchemeGroupVersion.WithResource("replicaschedulingpolicies"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1alpha1().ReplicaSchedulingPolicies().Informer()}, nil
// Group=work.karmada.io, Version=v1alpha1
case workv1alpha1.SchemeGroupVersion.WithResource("clusterresourcebindings"):

View File

@ -16,8 +16,6 @@ type Interface interface {
OverridePolicies() OverridePolicyInformer
// PropagationPolicies returns a PropagationPolicyInformer.
PropagationPolicies() PropagationPolicyInformer
// ReplicaSchedulingPolicies returns a ReplicaSchedulingPolicyInformer.
ReplicaSchedulingPolicies() ReplicaSchedulingPolicyInformer
}
type version struct {
@ -50,8 +48,3 @@ func (v *version) OverridePolicies() OverridePolicyInformer {
// PropagationPolicies returns a PropagationPolicyInformer bound to this
// version's shared informer factory, namespace, and list-option tweaks.
func (v *version) PropagationPolicies() PropagationPolicyInformer {
	return &propagationPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// ReplicaSchedulingPolicies returns a ReplicaSchedulingPolicyInformer bound to
// this version's shared informer factory, namespace, and list-option tweaks.
func (v *version) ReplicaSchedulingPolicies() ReplicaSchedulingPolicyInformer {
	return &replicaSchedulingPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

View File

@ -1,74 +0,0 @@
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
time "time"
policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
versioned "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
internalinterfaces "github.com/karmada-io/karmada/pkg/generated/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/karmada-io/karmada/pkg/generated/listers/policy/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// ReplicaSchedulingPolicyInformer provides access to a shared informer and lister for
// ReplicaSchedulingPolicies.
type ReplicaSchedulingPolicyInformer interface {
	// Informer returns the shared index informer that syncs ReplicaSchedulingPolicies.
	Informer() cache.SharedIndexInformer
	// Lister returns a lister backed by the informer's local index.
	Lister() v1alpha1.ReplicaSchedulingPolicyLister
}
// replicaSchedulingPolicyInformer is the default ReplicaSchedulingPolicyInformer implementation.
type replicaSchedulingPolicyInformer struct {
	factory          internalinterfaces.SharedInformerFactory // shared factory that deduplicates informers
	tweakListOptions internalinterfaces.TweakListOptionsFunc   // optional hook to mutate list/watch options
	namespace        string                                    // namespace the informer watches
}
// NewReplicaSchedulingPolicyInformer constructs a new informer for ReplicaSchedulingPolicy type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
// It is equivalent to NewFilteredReplicaSchedulingPolicyInformer with no list-option tweaks.
func NewReplicaSchedulingPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredReplicaSchedulingPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredReplicaSchedulingPolicyInformer constructs a new informer for ReplicaSchedulingPolicy type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredReplicaSchedulingPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	// List/watch through the typed client, letting the caller tweak the
	// options (e.g. selectors) before each request.
	lw := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			if tweakListOptions != nil {
				tweakListOptions(&options)
			}
			return client.PolicyV1alpha1().ReplicaSchedulingPolicies(namespace).List(context.TODO(), options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			if tweakListOptions != nil {
				tweakListOptions(&options)
			}
			return client.PolicyV1alpha1().ReplicaSchedulingPolicies(namespace).Watch(context.TODO(), options)
		},
	}
	return cache.NewSharedIndexInformer(lw, &policyv1alpha1.ReplicaSchedulingPolicy{}, resyncPeriod, indexers)
}
// defaultInformer builds the informer with the standard namespace index;
// it is the constructor handed to the shared factory by Informer.
func (f *replicaSchedulingPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredReplicaSchedulingPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
// Informer returns the factory-shared informer for ReplicaSchedulingPolicy,
// creating it via defaultInformer on first use.
func (f *replicaSchedulingPolicyInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&policyv1alpha1.ReplicaSchedulingPolicy{}, f.defaultInformer)
}
// Lister returns a ReplicaSchedulingPolicyLister reading from the shared informer's indexer.
func (f *replicaSchedulingPolicyInformer) Lister() v1alpha1.ReplicaSchedulingPolicyLister {
	return v1alpha1.NewReplicaSchedulingPolicyLister(f.Informer().GetIndexer())
}

View File

@ -25,11 +25,3 @@ type PropagationPolicyListerExpansion interface{}
// PropagationPolicyNamespaceListerExpansion allows custom methods to be added to
// PropagationPolicyNamespaceLister.
type PropagationPolicyNamespaceListerExpansion interface{}
// ReplicaSchedulingPolicyListerExpansion allows custom methods to be added to
// ReplicaSchedulingPolicyLister.
type ReplicaSchedulingPolicyListerExpansion interface{}
// ReplicaSchedulingPolicyNamespaceListerExpansion allows custom methods to be added to
// ReplicaSchedulingPolicyNamespaceLister.
type ReplicaSchedulingPolicyNamespaceListerExpansion interface{}

View File

@ -1,83 +0,0 @@
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// ReplicaSchedulingPolicyLister helps list ReplicaSchedulingPolicies.
// All objects returned here must be treated as read-only.
type ReplicaSchedulingPolicyLister interface {
	// List lists all ReplicaSchedulingPolicies in the indexer.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*v1alpha1.ReplicaSchedulingPolicy, err error)
	// ReplicaSchedulingPolicies returns an object that can list and get ReplicaSchedulingPolicies.
	ReplicaSchedulingPolicies(namespace string) ReplicaSchedulingPolicyNamespaceLister
	// ReplicaSchedulingPolicyListerExpansion carries any hand-written extension methods.
	ReplicaSchedulingPolicyListerExpansion
}
// replicaSchedulingPolicyLister implements the ReplicaSchedulingPolicyLister interface.
type replicaSchedulingPolicyLister struct {
	indexer cache.Indexer // local cache the lister reads from
}
// NewReplicaSchedulingPolicyLister returns a new ReplicaSchedulingPolicyLister
// reading from the given indexer (typically an informer's local cache).
func NewReplicaSchedulingPolicyLister(indexer cache.Indexer) ReplicaSchedulingPolicyLister {
	return &replicaSchedulingPolicyLister{indexer: indexer}
}
// List lists all ReplicaSchedulingPolicies in the indexer that match the selector.
func (s *replicaSchedulingPolicyLister) List(selector labels.Selector) (ret []*v1alpha1.ReplicaSchedulingPolicy, err error) {
	var out []*v1alpha1.ReplicaSchedulingPolicy
	err = cache.ListAll(s.indexer, selector, func(obj interface{}) {
		out = append(out, obj.(*v1alpha1.ReplicaSchedulingPolicy))
	})
	return out, err
}
// ReplicaSchedulingPolicies returns an object that can list and get ReplicaSchedulingPolicies
// restricted to the given namespace, sharing this lister's indexer.
func (s *replicaSchedulingPolicyLister) ReplicaSchedulingPolicies(namespace string) ReplicaSchedulingPolicyNamespaceLister {
	return replicaSchedulingPolicyNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// ReplicaSchedulingPolicyNamespaceLister helps list and get ReplicaSchedulingPolicies.
// All objects returned here must be treated as read-only.
type ReplicaSchedulingPolicyNamespaceLister interface {
	// List lists all ReplicaSchedulingPolicies in the indexer for a given namespace.
	// Objects returned here must be treated as read-only.
	List(selector labels.Selector) (ret []*v1alpha1.ReplicaSchedulingPolicy, err error)
	// Get retrieves the ReplicaSchedulingPolicy from the indexer for a given namespace and name.
	// Objects returned here must be treated as read-only.
	Get(name string) (*v1alpha1.ReplicaSchedulingPolicy, error)
	// ReplicaSchedulingPolicyNamespaceListerExpansion carries any hand-written extension methods.
	ReplicaSchedulingPolicyNamespaceListerExpansion
}
// replicaSchedulingPolicyNamespaceLister implements the ReplicaSchedulingPolicyNamespaceLister
// interface.
type replicaSchedulingPolicyNamespaceLister struct {
	indexer   cache.Indexer // shared local cache
	namespace string        // namespace all lookups are restricted to
}
// List lists all ReplicaSchedulingPolicies in the indexer for this lister's namespace
// that match the selector.
func (s replicaSchedulingPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ReplicaSchedulingPolicy, err error) {
	var out []*v1alpha1.ReplicaSchedulingPolicy
	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(obj interface{}) {
		out = append(out, obj.(*v1alpha1.ReplicaSchedulingPolicy))
	})
	return out, err
}
// Get retrieves the ReplicaSchedulingPolicy from the indexer for this lister's
// namespace and the given name, or a NotFound error if it is absent.
func (s replicaSchedulingPolicyNamespaceLister) Get(name string) (*v1alpha1.ReplicaSchedulingPolicy, error) {
	key := s.namespace + "/" + name
	obj, exists, err := s.indexer.GetByKey(key)
	switch {
	case err != nil:
		return nil, err
	case !exists:
		return nil, errors.NewNotFound(v1alpha1.Resource("replicaschedulingpolicy"), name)
	}
	return obj.(*v1alpha1.ReplicaSchedulingPolicy), nil
}