add placement for binding

Signed-off-by: Poor12 <shentiecheng@huawei.com>

parent 69ecff65d8
commit 5eff625925
@@ -16651,6 +16651,10 @@
          "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.work.v1alpha2.GracefulEvictionTask"
        }
      },
+     "placement": {
+       "description": "Placement represents the rule for select clusters to propagate resources.",
+       "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.Placement"
+     },
      "propagateDeps": {
        "description": "PropagateDeps tells if relevant resources should be propagated automatically. It is inherited from PropagationPolicy or ClusterPropagationPolicy. default false.",
        "type": "boolean"
@@ -324,6 +324,372 @@ spec:
- reason
|
||||
type: object
|
||||
type: array
|
||||
placement:
|
||||
description: Placement represents the rule for select clusters to
|
||||
propagate resources.
|
||||
properties:
|
||||
clusterAffinity:
|
||||
description: ClusterAffinity represents scheduling restrictions
|
||||
to a certain set of clusters. If not set, any cluster can be
|
||||
scheduling candidate.
|
||||
properties:
|
||||
clusterNames:
|
||||
description: ClusterNames is the list of clusters to be selected.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
exclude:
|
||||
description: ExcludedClusters is the list of clusters to be
|
||||
ignored.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
fieldSelector:
|
||||
description: FieldSelector is a filter to select member clusters
|
||||
by fields. If non-nil and non-empty, only the clusters match
|
||||
this filter will be selected.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: A list of field selector requirements.
|
||||
items:
|
||||
description: A node selector requirement is a selector
|
||||
that contains values, a key, and an operator that
|
||||
relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: The label key that the selector applies
|
||||
to.
|
||||
type: string
|
||||
operator:
|
||||
description: Represents a key's relationship to
|
||||
a set of values. Valid operators are In, NotIn,
|
||||
Exists, DoesNotExist. Gt, and Lt.
|
||||
type: string
|
||||
values:
|
||||
description: An array of string values. If the operator
|
||||
is In or NotIn, the values array must be non-empty.
|
||||
If the operator is Exists or DoesNotExist, the
|
||||
values array must be empty. If the operator is
|
||||
Gt or Lt, the values array must have a single
|
||||
element, which will be interpreted as an integer.
|
||||
This array is replaced during a strategic merge
|
||||
patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
labelSelector:
|
||||
description: LabelSelector is a filter to select member clusters
|
||||
by labels. If non-nil and non-empty, only the clusters match
|
||||
this filter will be selected.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector
|
||||
requirements. The requirements are ANDed.
|
||||
items:
|
||||
description: A label selector requirement is a selector
|
||||
that contains values, a key, and an operator that
|
||||
relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the selector
|
||||
applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: operator represents a key's relationship
|
||||
to a set of values. Valid operators are In, NotIn,
|
||||
Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: values is an array of string values.
|
||||
If the operator is In or NotIn, the values array
|
||||
must be non-empty. If the operator is Exists or
|
||||
DoesNotExist, the values array must be empty.
|
||||
This array is replaced during a strategic merge
|
||||
patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: matchLabels is a map of {key,value} pairs.
|
||||
A single {key,value} in the matchLabels map is equivalent
|
||||
to an element of matchExpressions, whose key field is
|
||||
"key", the operator is "In", and the values array contains
|
||||
only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
clusterTolerations:
|
||||
description: ClusterTolerations represents the tolerations.
|
||||
items:
|
||||
description: The pod this Toleration is attached to tolerates
|
||||
any taint that matches the triple <key,value,effect> using
|
||||
the matching operator <operator>.
|
||||
properties:
|
||||
effect:
|
||||
description: Effect indicates the taint effect to match.
|
||||
Empty means match all taint effects. When specified, allowed
|
||||
values are NoSchedule, PreferNoSchedule and NoExecute.
|
||||
type: string
|
||||
key:
|
||||
description: Key is the taint key that the toleration applies
|
||||
to. Empty means match all taint keys. If the key is empty,
|
||||
operator must be Exists; this combination means to match
|
||||
all values and all keys.
|
||||
type: string
|
||||
operator:
|
||||
description: Operator represents a key's relationship to
|
||||
the value. Valid operators are Exists and Equal. Defaults
|
||||
to Equal. Exists is equivalent to wildcard for value,
|
||||
so that a pod can tolerate all taints of a particular
|
||||
category.
|
||||
type: string
|
||||
tolerationSeconds:
|
||||
description: TolerationSeconds represents the period of
|
||||
time the toleration (which must be of effect NoExecute,
|
||||
otherwise this field is ignored) tolerates the taint.
|
||||
By default, it is not set, which means tolerate the taint
|
||||
forever (do not evict). Zero and negative values will
|
||||
be treated as 0 (evict immediately) by the system.
|
||||
format: int64
|
||||
type: integer
|
||||
value:
|
||||
description: Value is the taint value the toleration matches
|
||||
to. If the operator is Exists, the value should be empty,
|
||||
otherwise just a regular string.
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
replicaScheduling:
|
||||
description: ReplicaScheduling represents the scheduling policy
|
||||
on dealing with the number of replicas when propagating resources
|
||||
that have replicas in spec (e.g. deployments, statefulsets)
|
||||
to member clusters.
|
||||
properties:
|
||||
replicaDivisionPreference:
|
||||
description: ReplicaDivisionPreference determines how the
|
||||
replicas is divided when ReplicaSchedulingType is "Divided".
|
||||
Valid options are Aggregated and Weighted. "Aggregated"
|
||||
divides replicas into clusters as few as possible, while
|
||||
respecting clusters' resource availabilities during the
|
||||
division. "Weighted" divides replicas by weight according
|
||||
to WeightPreference.
|
||||
enum:
|
||||
- Aggregated
|
||||
- Weighted
|
||||
type: string
|
||||
replicaSchedulingType:
|
||||
default: Divided
|
||||
description: ReplicaSchedulingType determines how the replicas
|
||||
is scheduled when karmada propagating a resource. Valid
|
||||
options are Duplicated and Divided. "Duplicated" duplicates
|
||||
the same replicas to each candidate member cluster from
|
||||
resource. "Divided" divides replicas into parts according
|
||||
to number of valid candidate member clusters, and exact
|
||||
replicas for each cluster are determined by ReplicaDivisionPreference.
|
||||
enum:
|
||||
- Duplicated
|
||||
- Divided
|
||||
type: string
|
||||
weightPreference:
|
||||
description: WeightPreference describes weight for each cluster
|
||||
or for each group of cluster If ReplicaDivisionPreference
|
||||
is set to "Weighted", and WeightPreference is not set, scheduler
|
||||
will weight all clusters the same.
|
||||
properties:
|
||||
dynamicWeight:
|
||||
description: DynamicWeight specifies the factor to generates
|
||||
dynamic weight list. If specified, StaticWeightList
|
||||
will be ignored.
|
||||
enum:
|
||||
- AvailableReplicas
|
||||
type: string
|
||||
staticWeightList:
|
||||
description: StaticWeightList defines the static cluster
|
||||
weight.
|
||||
items:
|
||||
description: StaticClusterWeight defines the static
|
||||
cluster weight.
|
||||
properties:
|
||||
targetCluster:
|
||||
description: TargetCluster describes the filter
|
||||
to select clusters.
|
||||
properties:
|
||||
clusterNames:
|
||||
description: ClusterNames is the list of clusters
|
||||
to be selected.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
exclude:
|
||||
description: ExcludedClusters is the list of
|
||||
clusters to be ignored.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
fieldSelector:
|
||||
description: FieldSelector is a filter to select
|
||||
member clusters by fields. If non-nil and
|
||||
non-empty, only the clusters match this filter
|
||||
will be selected.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: A list of field selector requirements.
|
||||
items:
|
||||
description: A node selector requirement
|
||||
is a selector that contains values,
|
||||
a key, and an operator that relates
|
||||
the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: The label key that the
|
||||
selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: Represents a key's relationship
|
||||
to a set of values. Valid operators
|
||||
are In, NotIn, Exists, DoesNotExist.
|
||||
Gt, and Lt.
|
||||
type: string
|
||||
values:
|
||||
description: An array of string values.
|
||||
If the operator is In or NotIn,
|
||||
the values array must be non-empty.
|
||||
If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty.
|
||||
If the operator is Gt or Lt, the
|
||||
values array must have a single
|
||||
element, which will be interpreted
|
||||
as an integer. This array is replaced
|
||||
during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
labelSelector:
|
||||
description: LabelSelector is a filter to select
|
||||
member clusters by labels. If non-nil and
|
||||
non-empty, only the clusters match this filter
|
||||
will be selected.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list
|
||||
of label selector requirements. The requirements
|
||||
are ANDed.
|
||||
items:
|
||||
description: A label selector requirement
|
||||
is a selector that contains values,
|
||||
a key, and an operator that relates
|
||||
the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key
|
||||
that the selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: operator represents a
|
||||
key's relationship to a set of values.
|
||||
Valid operators are In, NotIn, Exists
|
||||
and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: values is an array of
|
||||
string values. If the operator is
|
||||
In or NotIn, the values array must
|
||||
be non-empty. If the operator is
|
||||
Exists or DoesNotExist, the values
|
||||
array must be empty. This array
|
||||
is replaced during a strategic merge
|
||||
patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: matchLabels is a map of {key,value}
|
||||
pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions,
|
||||
whose key field is "key", the operator
|
||||
is "In", and the values array contains
|
||||
only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
weight:
|
||||
description: Weight expressing the preference to
|
||||
the cluster(s) specified by 'TargetCluster'.
|
||||
format: int64
|
||||
minimum: 1
|
||||
type: integer
|
||||
required:
|
||||
- targetCluster
|
||||
- weight
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
type: object
|
||||
spreadConstraints:
|
||||
description: SpreadConstraints represents a list of the scheduling
|
||||
constraints.
|
||||
items:
|
||||
description: SpreadConstraint represents the spread constraints
|
||||
on resources.
|
||||
properties:
|
||||
maxGroups:
|
||||
description: MaxGroups restricts the maximum number of cluster
|
||||
groups to be selected.
|
||||
type: integer
|
||||
minGroups:
|
||||
description: MinGroups restricts the minimum number of cluster
|
||||
groups to be selected. Defaults to 1.
|
||||
type: integer
|
||||
spreadByField:
|
||||
description: 'SpreadByField represents the fields on Karmada
|
||||
cluster API used for dynamically grouping member clusters
|
||||
into different groups. Resources will be spread among
|
||||
different cluster groups. Available fields for spreading
|
||||
are: cluster, region, zone, and provider. SpreadByField
|
||||
should not co-exist with SpreadByLabel. If both SpreadByField
|
||||
and SpreadByLabel are empty, SpreadByField will be set
|
||||
to "cluster" by system.'
|
||||
enum:
|
||||
- cluster
|
||||
- region
|
||||
- zone
|
||||
- provider
|
||||
type: string
|
||||
spreadByLabel:
|
||||
description: SpreadByLabel represents the label key used
|
||||
for grouping member clusters into different groups. Resources
|
||||
will be spread among different cluster groups. SpreadByLabel
|
||||
should not co-exist with SpreadByField.
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
propagateDeps:
|
||||
description: PropagateDeps tells if relevant resources should be propagated
|
||||
automatically. It is inherited from PropagationPolicy or ClusterPropagationPolicy.
default false.
type: boolean
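As a reader aid (not part of this commit), here is a small Go sketch of a Placement that exercises the schema above: a cluster affinity combined with weighted replica division. The cluster names and weights are invented, and the type and constant names follow the policy v1alpha1 package referenced elsewhere in this diff.

package example

import (
	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// examplePlacement mirrors the CRD schema above: select member1/member2 and
// divide replicas by static weights 2:1 (all values are hypothetical).
func examplePlacement() policyv1alpha1.Placement {
	return policyv1alpha1.Placement{
		ClusterAffinity: &policyv1alpha1.ClusterAffinity{
			ClusterNames: []string{"member1", "member2"}, // hypothetical cluster names
		},
		ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
			ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
			ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
			WeightPreference: &policyv1alpha1.ClusterPreferences{
				StaticWeightList: []policyv1alpha1.StaticClusterWeight{
					{TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"member1"}}, Weight: 2},
					{TargetCluster: policyv1alpha1.ClusterAffinity{ClusterNames: []string{"member2"}}, Weight: 1},
				},
			},
		},
	}
}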
@@ -5,6 +5,8 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
+
+	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

const (
@@ -72,6 +74,10 @@ type ResourceBindingSpec struct {
	// +optional
	Clusters []TargetCluster `json:"clusters,omitempty"`

+	// Placement represents the rule for select clusters to propagate resources.
+	// +optional
+	Placement *policyv1alpha1.Placement `json:"placement,omitempty"`
+
	// GracefulEvictionTasks holds the eviction tasks that are expected to perform
	// the eviction in a graceful way.
	// The intended workflow is:
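To illustrate the new field (again only a sketch, not code from this commit): a ResourceBindingSpec can now carry the placement copied from its matching policy, so the scheduler reads it directly from the binding. The workload reference and cluster names below are hypothetical.

package example

import (
	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// exampleBindingSpec shows the placement travelling on the binding itself,
// instead of being looked up from the policy at scheduling time.
func exampleBindingSpec() workv1alpha2.ResourceBindingSpec {
	return workv1alpha2.ResourceBindingSpec{
		Resource: workv1alpha2.ObjectReference{
			APIVersion: "apps/v1",
			Kind:       "Deployment",
			Namespace:  "default",
			Name:       "nginx", // hypothetical workload
		},
		Placement: &policyv1alpha1.Placement{
			ClusterAffinity: &policyv1alpha1.ClusterAffinity{
				ClusterNames: []string{"member1", "member2"}, // hypothetical clusters
			},
		},
	}
}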
@@ -6,6 +6,7 @@
package v1alpha2

import (
+	v1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"

@@ -290,6 +291,11 @@ func (in *ResourceBindingSpec) DeepCopyInto(out *ResourceBindingSpec) {
		*out = make([]TargetCluster, len(*in))
		copy(*out, *in)
	}
+	if in.Placement != nil {
+		in, out := &in.Placement, &out.Placement
+		*out = new(v1alpha1.Placement)
+		(*in).DeepCopyInto(*out)
+	}
	if in.GracefulEvictionTasks != nil {
		in, out := &in.GracefulEvictionTasks, &out.GracefulEvictionTasks
		*out = make([]GracefulEvictionTask, len(*in))
@@ -412,6 +412,7 @@ func (d *ResourceDetector) ApplyPolicy(object *unstructured.Unstructured, object
		bindingCopy.Spec.Replicas = binding.Spec.Replicas
		bindingCopy.Spec.PropagateDeps = binding.Spec.PropagateDeps
		bindingCopy.Spec.SchedulerName = binding.Spec.SchedulerName
+		bindingCopy.Spec.Placement = binding.Spec.Placement
		return nil
	})
	if err != nil {

@@ -486,6 +487,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured,
		bindingCopy.Spec.Replicas = binding.Spec.Replicas
		bindingCopy.Spec.PropagateDeps = binding.Spec.PropagateDeps
		bindingCopy.Spec.SchedulerName = binding.Spec.SchedulerName
+		bindingCopy.Spec.Placement = binding.Spec.Placement
		return nil
	})
	if err != nil {

@@ -528,6 +530,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured,
		bindingCopy.Spec.ReplicaRequirements = binding.Spec.ReplicaRequirements
		bindingCopy.Spec.Replicas = binding.Spec.Replicas
		bindingCopy.Spec.SchedulerName = binding.Spec.SchedulerName
+		bindingCopy.Spec.Placement = binding.Spec.Placement
		return nil
	})
	if err != nil {

@@ -625,6 +628,7 @@ func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructure
		Spec: workv1alpha2.ResourceBindingSpec{
			PropagateDeps: policySpec.PropagateDeps,
			SchedulerName: policySpec.SchedulerName,
+			Placement:     &policySpec.Placement,
			Resource: workv1alpha2.ObjectReference{
				APIVersion: object.GetAPIVersion(),
				Kind:       object.GetKind(),

@@ -664,6 +668,7 @@ func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unst
		Spec: workv1alpha2.ResourceBindingSpec{
			PropagateDeps: policySpec.PropagateDeps,
			SchedulerName: policySpec.SchedulerName,
+			Placement:     &policySpec.Placement,
			Resource: workv1alpha2.ObjectReference{
				APIVersion: object.GetAPIVersion(),
				Kind:       object.GetKind(),
@ -5209,6 +5209,12 @@ func schema_pkg_apis_work_v1alpha2_ResourceBindingSpec(ref common.ReferenceCallb
|
|||
},
|
||||
},
|
||||
},
|
||||
"placement": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Placement represents the rule for select clusters to propagate resources.",
|
||||
Ref: ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Placement"),
|
||||
},
|
||||
},
|
||||
"gracefulEvictionTasks": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "GracefulEvictionTasks holds the eviction tasks that are expected to perform the eviction in a graceful way. The intended workflow is: 1. Once the controller(such as 'taint-manager') decided to evict the resource that\n is referenced by current ResourceBinding or ClusterResourceBinding from a target\n cluster, it removes(or scale down the replicas) the target from Clusters(.spec.Clusters)\n and builds a graceful eviction task.\n2. The scheduler may perform a re-scheduler and probably select a substitute cluster\n to take over the evicting workload(resource).\n3. The graceful eviction controller takes care of the graceful eviction tasks and\n performs the final removal after the workload(resource) is available on the substitute\n cluster or exceed the grace termination period(defaults to 10 minutes).",
|
||||
|
|
@ -5249,7 +5255,7 @@ func schema_pkg_apis_work_v1alpha2_ResourceBindingSpec(ref common.ReferenceCallb
|
|||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.BindingSnapshot", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.GracefulEvictionTask", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.ObjectReference", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.ReplicaRequirements", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.TargetCluster"},
|
||||
"github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Placement", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.BindingSnapshot", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.GracefulEvictionTask", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.ObjectReference", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.ReplicaRequirements", "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2.TargetCluster"},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -19,7 +19,7 @@ import (

// ScheduleAlgorithm is the interface that should be implemented to schedule a resource to the target clusters.
type ScheduleAlgorithm interface {
-	Schedule(context.Context, *policyv1alpha1.Placement, *workv1alpha2.ResourceBindingSpec, *ScheduleAlgorithmOption) (scheduleResult ScheduleResult, err error)
+	Schedule(context.Context, *workv1alpha2.ResourceBindingSpec, *ScheduleAlgorithmOption) (scheduleResult ScheduleResult, err error)
}

// ScheduleAlgorithmOption represents the option for ScheduleAlgorithm.

@@ -52,13 +52,13 @@ func NewGenericScheduler(
	}, nil
}

-func (g *genericScheduler) Schedule(ctx context.Context, placement *policyv1alpha1.Placement, spec *workv1alpha2.ResourceBindingSpec, scheduleAlgorithmOption *ScheduleAlgorithmOption) (result ScheduleResult, err error) {
+func (g *genericScheduler) Schedule(ctx context.Context, spec *workv1alpha2.ResourceBindingSpec, scheduleAlgorithmOption *ScheduleAlgorithmOption) (result ScheduleResult, err error) {
	clusterInfoSnapshot := g.schedulerCache.Snapshot()
	if clusterInfoSnapshot.NumOfClusters() == 0 {
		return result, fmt.Errorf("no clusters available to schedule")
	}

-	feasibleClusters, diagnosis, err := g.findClustersThatFit(ctx, g.scheduleFramework, placement, spec, &clusterInfoSnapshot)
+	feasibleClusters, diagnosis, err := g.findClustersThatFit(ctx, g.scheduleFramework, spec.Placement, spec, &clusterInfoSnapshot)
	if err != nil {
		return result, fmt.Errorf("failed to findClustersThatFit: %v", err)
	}

@@ -72,19 +72,19 @@ func (g *genericScheduler) Schedule(ctx context.Context, placement *policyv1alph
	}
	klog.V(4).Infof("Feasible clusters found: %v", feasibleClusters)

-	clustersScore, err := g.prioritizeClusters(ctx, g.scheduleFramework, placement, spec, feasibleClusters)
+	clustersScore, err := g.prioritizeClusters(ctx, g.scheduleFramework, spec.Placement, spec, feasibleClusters)
	if err != nil {
		return result, fmt.Errorf("failed to prioritizeClusters: %v", err)
	}
	klog.V(4).Infof("Feasible clusters scores: %v", clustersScore)

-	clusters, err := g.selectClusters(clustersScore, placement, spec)
+	clusters, err := g.selectClusters(clustersScore, spec.Placement, spec)
	if err != nil {
		return result, fmt.Errorf("failed to select clusters: %v", err)
	}
	klog.V(4).Infof("Selected clusters: %v", clusters)

-	clustersWithReplicas, err := g.assignReplicas(clusters, placement.ReplicaScheduling, spec)
+	clustersWithReplicas, err := g.assignReplicas(clusters, spec.Placement.ReplicaScheduling, spec)
	if err != nil {
		return result, fmt.Errorf("failed to assignReplicas: %v", err)
	}
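A hedged sketch of how a caller drives the reworked algorithm: because the placement is read from spec.Placement, no separate Placement argument is passed. The helper name and the option value are made up for illustration; the interface, option struct, and result field come from the hunks above.

package example

import (
	"context"

	"k8s.io/klog/v2"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
	"github.com/karmada-io/karmada/pkg/scheduler/core"
)

// scheduleOnce is a hypothetical helper: the binding spec already carries the
// placement, so the algorithm needs only the spec and the option struct.
func scheduleOnce(alg core.ScheduleAlgorithm, rb *workv1alpha2.ResourceBinding) error {
	result, err := alg.Schedule(context.TODO(), &rb.Spec,
		&core.ScheduleAlgorithmOption{EnableEmptyWorkloadPropagation: false})
	if err != nil {
		return err
	}
	klog.V(4).Infof("Suggested clusters: %v", result.SuggestedClusters)
	return nil
}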
@ -34,17 +34,6 @@ func (s *Scheduler) addAllEventHandlers() {
|
|||
klog.Errorf("Failed to add handlers for ResourceBindings: %v", err)
|
||||
}
|
||||
|
||||
policyInformer := s.informerFactory.Policy().V1alpha1().PropagationPolicies().Informer()
|
||||
_, err = policyInformer.AddEventHandler(cache.FilteringResourceEventHandler{
|
||||
FilterFunc: s.policyEventFilter,
|
||||
Handler: cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: s.onPropagationPolicyUpdate,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to add handlers for PropagationPolicies: %v", err)
|
||||
}
|
||||
|
||||
clusterBindingInformer := s.informerFactory.Work().V1alpha2().ClusterResourceBindings().Informer()
|
||||
_, err = clusterBindingInformer.AddEventHandler(cache.FilteringResourceEventHandler{
|
||||
FilterFunc: s.resourceBindingEventFilter,
|
||||
|
|
@ -57,17 +46,6 @@ func (s *Scheduler) addAllEventHandlers() {
|
|||
klog.Errorf("Failed to add handlers for ClusterResourceBindings: %v", err)
|
||||
}
|
||||
|
||||
clusterPolicyInformer := s.informerFactory.Policy().V1alpha1().ClusterPropagationPolicies().Informer()
|
||||
_, err = clusterPolicyInformer.AddEventHandler(cache.FilteringResourceEventHandler{
|
||||
FilterFunc: s.policyEventFilter,
|
||||
Handler: cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: s.onClusterPropagationPolicyUpdate,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to add handlers for ClusterPropagationPolicies: %v", err)
|
||||
}
|
||||
|
||||
memClusterInformer := s.informerFactory.Cluster().V1alpha1().Clusters().Informer()
|
||||
_, err = memClusterInformer.AddEventHandler(
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
|
|
@ -82,9 +60,7 @@ func (s *Scheduler) addAllEventHandlers() {
|
|||
|
||||
// ignore the error here because the informers haven't been started
|
||||
_ = bindingInformer.SetTransform(fedinformer.StripUnusedFields)
|
||||
_ = policyInformer.SetTransform(fedinformer.StripUnusedFields)
|
||||
_ = clusterBindingInformer.SetTransform(fedinformer.StripUnusedFields)
|
||||
_ = clusterPolicyInformer.SetTransform(fedinformer.StripUnusedFields)
|
||||
_ = memClusterInformer.SetTransform(fedinformer.StripUnusedFields)
|
||||
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
|
|
@ -114,17 +90,6 @@ func (s *Scheduler) resourceBindingEventFilter(obj interface{}) bool {
|
|||
util.GetLabelValue(accessor.GetLabels(), policyv1alpha1.ClusterPropagationPolicyLabel) != ""
|
||||
}
|
||||
|
||||
func (s *Scheduler) policyEventFilter(obj interface{}) bool {
|
||||
switch t := obj.(type) {
|
||||
case *policyv1alpha1.PropagationPolicy:
|
||||
return schedulerNameFilter(s.schedulerName, t.Spec.SchedulerName)
|
||||
case *policyv1alpha1.ClusterPropagationPolicy:
|
||||
return schedulerNameFilter(s.schedulerName, t.Spec.SchedulerName)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Scheduler) onResourceBindingAdd(obj interface{}) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(obj)
|
||||
if err != nil {
|
||||
|
|
@ -169,77 +134,6 @@ func (s *Scheduler) onClusterResourceBindingRequeue(clusterResourceBinding *work
|
|||
metrics.CountSchedulerBindings(event)
|
||||
}
|
||||
|
||||
func (s *Scheduler) onPropagationPolicyUpdate(old, cur interface{}) {
|
||||
oldPropagationPolicy := old.(*policyv1alpha1.PropagationPolicy)
|
||||
curPropagationPolicy := cur.(*policyv1alpha1.PropagationPolicy)
|
||||
if equality.Semantic.DeepEqual(oldPropagationPolicy.Spec.Placement, curPropagationPolicy.Spec.Placement) {
|
||||
klog.V(2).Infof("Ignore PropagationPolicy(%s/%s) which placement unchanged.", oldPropagationPolicy.Namespace, oldPropagationPolicy.Name)
|
||||
return
|
||||
}
|
||||
|
||||
selector := labels.SelectorFromSet(labels.Set{
|
||||
policyv1alpha1.PropagationPolicyNamespaceLabel: oldPropagationPolicy.Namespace,
|
||||
policyv1alpha1.PropagationPolicyNameLabel: oldPropagationPolicy.Name,
|
||||
})
|
||||
|
||||
err := s.requeueResourceBindings(selector, metrics.PolicyChanged)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to requeue ResourceBinding, error: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// requeueClusterResourceBindings will retrieve all ClusterResourceBinding objects by the label selector and put them to queue.
|
||||
func (s *Scheduler) requeueClusterResourceBindings(selector labels.Selector, event string) error {
|
||||
referenceClusterResourceBindings, err := s.clusterBindingLister.List(selector)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to list ClusterResourceBinding by selector: %s, error: %v", selector.String(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
for _, clusterResourceBinding := range referenceClusterResourceBindings {
|
||||
s.onClusterResourceBindingRequeue(clusterResourceBinding, event)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// requeueResourceBindings will retrieve all ResourceBinding objects by the label selector and put them to queue.
|
||||
func (s *Scheduler) requeueResourceBindings(selector labels.Selector, event string) error {
|
||||
referenceBindings, err := s.bindingLister.List(selector)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to list ResourceBinding by selector: %s, error: %v", selector.String(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
for _, binding := range referenceBindings {
|
||||
s.onResourceBindingRequeue(binding, event)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Scheduler) onClusterPropagationPolicyUpdate(old, cur interface{}) {
|
||||
oldClusterPropagationPolicy := old.(*policyv1alpha1.ClusterPropagationPolicy)
|
||||
curClusterPropagationPolicy := cur.(*policyv1alpha1.ClusterPropagationPolicy)
|
||||
if equality.Semantic.DeepEqual(oldClusterPropagationPolicy.Spec.Placement, curClusterPropagationPolicy.Spec.Placement) {
|
||||
klog.V(2).Infof("Ignore ClusterPropagationPolicy(%s) which placement unchanged.", oldClusterPropagationPolicy.Name)
|
||||
return
|
||||
}
|
||||
|
||||
selector := labels.SelectorFromSet(labels.Set{
|
||||
policyv1alpha1.ClusterPropagationPolicyLabel: oldClusterPropagationPolicy.Name,
|
||||
})
|
||||
|
||||
err := s.requeueClusterResourceBindings(selector, metrics.PolicyChanged)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to requeue ClusterResourceBinding, error: %v", err)
|
||||
}
|
||||
|
||||
err = s.requeueResourceBindings(selector, metrics.PolicyChanged)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to requeue ResourceBinding, error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scheduler) addCluster(obj interface{}) {
|
||||
cluster, ok := obj.(*clusterv1alpha1.Cluster)
|
||||
if !ok {
|
||||
|
|
@ -273,20 +167,19 @@ func (s *Scheduler) updateCluster(oldObj, newObj interface{}) {
|
|||
case !equality.Semantic.DeepEqual(oldCluster.Labels, newCluster.Labels):
|
||||
fallthrough
|
||||
case !equality.Semantic.DeepEqual(oldCluster.Spec, newCluster.Spec):
|
||||
s.enqueueAffectedPolicy(oldCluster, newCluster)
|
||||
s.enqueueAffectedClusterPolicy(oldCluster, newCluster)
|
||||
s.enqueueAffectedBindings(oldCluster, newCluster)
|
||||
}
|
||||
}
|
||||
|
||||
// enqueueAffectedPolicy find all propagation policies related to the cluster and reschedule the RBs
|
||||
func (s *Scheduler) enqueueAffectedPolicy(oldCluster, newCluster *clusterv1alpha1.Cluster) {
|
||||
policies, _ := s.policyLister.List(labels.Everything())
|
||||
for _, policy := range policies {
|
||||
selector := labels.SelectorFromSet(labels.Set{
|
||||
policyv1alpha1.PropagationPolicyNamespaceLabel: policy.Namespace,
|
||||
policyv1alpha1.PropagationPolicyNameLabel: policy.Name,
|
||||
})
|
||||
affinity := policy.Spec.Placement.ClusterAffinity
|
||||
// enqueueAffectedBinding find all RBs/CRBs related to the cluster and reschedule them
|
||||
func (s *Scheduler) enqueueAffectedBindings(oldCluster, newCluster *clusterv1alpha1.Cluster) {
|
||||
bindings, _ := s.bindingLister.List(labels.Everything())
|
||||
for _, binding := range bindings {
|
||||
placementPtr := binding.Spec.Placement
|
||||
if placementPtr == nil {
|
||||
continue
|
||||
}
|
||||
affinity := placementPtr.ClusterAffinity
|
||||
switch {
|
||||
case affinity == nil:
|
||||
// If no clusters specified, add it to the queue
|
||||
|
|
@ -296,22 +189,17 @@ func (s *Scheduler) enqueueAffectedPolicy(oldCluster, newCluster *clusterv1alpha
|
|||
fallthrough
|
||||
case util.ClusterMatches(oldCluster, *affinity):
|
||||
// If the old cluster manifest match the affinity, add it to the queue, trigger rescheduling
|
||||
err := s.requeueResourceBindings(selector, metrics.ClusterChanged)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to requeue ResourceBinding, error: %v", err)
|
||||
}
|
||||
s.onResourceBindingRequeue(binding, metrics.ClusterChanged)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// enqueueAffectedClusterPolicy find all cluster propagation policies related to the cluster and reschedule the RBs/CRBs
|
||||
func (s *Scheduler) enqueueAffectedClusterPolicy(oldCluster, newCluster *clusterv1alpha1.Cluster) {
|
||||
clusterPolicies, _ := s.clusterPolicyLister.List(labels.Everything())
|
||||
for _, policy := range clusterPolicies {
|
||||
selector := labels.SelectorFromSet(labels.Set{
|
||||
policyv1alpha1.ClusterPropagationPolicyLabel: policy.Name,
|
||||
})
|
||||
affinity := policy.Spec.Placement.ClusterAffinity
|
||||
clusterBindings, _ := s.clusterBindingLister.List(labels.Everything())
|
||||
for _, binding := range clusterBindings {
|
||||
placementPtr := binding.Spec.Placement
|
||||
if placementPtr == nil {
|
||||
continue
|
||||
}
|
||||
affinity := placementPtr.ClusterAffinity
|
||||
switch {
|
||||
case affinity == nil:
|
||||
// If no clusters specified, add it to the queue
|
||||
|
|
@ -321,14 +209,7 @@ func (s *Scheduler) enqueueAffectedClusterPolicy(oldCluster, newCluster *cluster
|
|||
fallthrough
|
||||
case util.ClusterMatches(oldCluster, *affinity):
|
||||
// If the old cluster manifest match the affinity, add it to the queue, trigger rescheduling
|
||||
err := s.requeueClusterResourceBindings(selector, metrics.ClusterChanged)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to requeue ClusterResourceBinding, error: %v", err)
|
||||
}
|
||||
err = s.requeueResourceBindings(selector, metrics.ClusterChanged)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to requeue ResourceBinding, error: %v", err)
|
||||
}
|
||||
s.onClusterResourceBindingRequeue(binding, metrics.ClusterChanged)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -31,7 +31,6 @@ import (
|
|||
karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned"
|
||||
informerfactory "github.com/karmada-io/karmada/pkg/generated/informers/externalversions"
|
||||
clusterlister "github.com/karmada-io/karmada/pkg/generated/listers/cluster/v1alpha1"
|
||||
policylister "github.com/karmada-io/karmada/pkg/generated/listers/policy/v1alpha1"
|
||||
worklister "github.com/karmada-io/karmada/pkg/generated/listers/work/v1alpha2"
|
||||
schedulercache "github.com/karmada-io/karmada/pkg/scheduler/cache"
|
||||
"github.com/karmada-io/karmada/pkg/scheduler/core"
|
||||
|
|
@ -72,9 +71,7 @@ type Scheduler struct {
|
|||
KarmadaClient karmadaclientset.Interface
|
||||
KubeClient kubernetes.Interface
|
||||
bindingLister worklister.ResourceBindingLister
|
||||
policyLister policylister.PropagationPolicyLister
|
||||
clusterBindingLister worklister.ClusterResourceBindingLister
|
||||
clusterPolicyLister policylister.ClusterPropagationPolicyLister
|
||||
clusterLister clusterlister.ClusterLister
|
||||
informerFactory informerfactory.SharedInformerFactory
|
||||
|
||||
|
|
@ -189,9 +186,7 @@ func WithOutOfTreeRegistry(registry runtime.Registry) Option {
|
|||
func NewScheduler(dynamicClient dynamic.Interface, karmadaClient karmadaclientset.Interface, kubeClient kubernetes.Interface, opts ...Option) (*Scheduler, error) {
|
||||
factory := informerfactory.NewSharedInformerFactory(karmadaClient, 0)
|
||||
bindingLister := factory.Work().V1alpha2().ResourceBindings().Lister()
|
||||
policyLister := factory.Policy().V1alpha1().PropagationPolicies().Lister()
|
||||
clusterBindingLister := factory.Work().V1alpha2().ClusterResourceBindings().Lister()
|
||||
clusterPolicyLister := factory.Policy().V1alpha1().ClusterPropagationPolicies().Lister()
|
||||
clusterLister := factory.Cluster().V1alpha1().Clusters().Lister()
|
||||
queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "scheduler-queue")
|
||||
schedulerCache := schedulercache.NewCache(clusterLister)
|
||||
|
|
@ -216,9 +211,7 @@ func NewScheduler(dynamicClient dynamic.Interface, karmadaClient karmadaclientse
|
|||
KarmadaClient: karmadaClient,
|
||||
KubeClient: kubeClient,
|
||||
bindingLister: bindingLister,
|
||||
policyLister: policyLister,
|
||||
clusterBindingLister: clusterBindingLister,
|
||||
clusterPolicyLister: clusterPolicyLister,
|
||||
clusterLister: clusterLister,
|
||||
informerFactory: factory,
|
||||
queue: queue,
|
||||
|
|
@ -275,65 +268,43 @@ func (s *Scheduler) worker() {
|
|||
|
||||
func (s *Scheduler) getPlacement(resourceBinding *workv1alpha2.ResourceBinding) (policyv1alpha1.Placement, string, error) {
|
||||
var placement policyv1alpha1.Placement
|
||||
var clusterPolicyName string
|
||||
var policyName string
|
||||
var policyNamespace string
|
||||
var err error
|
||||
if clusterPolicyName = util.GetLabelValue(resourceBinding.Labels, policyv1alpha1.ClusterPropagationPolicyLabel); clusterPolicyName != "" {
|
||||
var clusterPolicy *policyv1alpha1.ClusterPropagationPolicy
|
||||
clusterPolicy, err = s.clusterPolicyLister.Get(clusterPolicyName)
|
||||
if err != nil {
|
||||
return placement, "", err
|
||||
}
|
||||
|
||||
placement = clusterPolicy.Spec.Placement
|
||||
}
|
||||
|
||||
if policyName = util.GetLabelValue(resourceBinding.Labels, policyv1alpha1.PropagationPolicyNameLabel); policyName != "" {
|
||||
policyNamespace = util.GetLabelValue(resourceBinding.Labels, policyv1alpha1.PropagationPolicyNamespaceLabel)
|
||||
var policy *policyv1alpha1.PropagationPolicy
|
||||
policy, err = s.policyLister.PropagationPolicies(policyNamespace).Get(policyName)
|
||||
if err != nil {
|
||||
return placement, "", err
|
||||
}
|
||||
|
||||
placement = policy.Spec.Placement
|
||||
placementPtr := resourceBinding.Spec.Placement
|
||||
if placementPtr == nil {
|
||||
err = fmt.Errorf("failed to get placement from resourceBinding(%s/%s)", resourceBinding.Namespace, resourceBinding.Name)
|
||||
klog.Error(err)
|
||||
return placement, "", err
|
||||
}
|
||||
|
||||
placement = *placementPtr
|
||||
var placementBytes []byte
|
||||
placementBytes, err = json.Marshal(placement)
|
||||
if err != nil {
|
||||
return placement, "", err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if clusterPolicyName != "" {
|
||||
klog.Errorf("Failed to get placement of clusterPropagationPolicy %s, error: %v", clusterPolicyName, err)
|
||||
} else {
|
||||
klog.Errorf("Failed to get placement of propagationPolicy %s/%s, error: %v", policyNamespace, policyName, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return placement, string(placementBytes), nil
|
||||
}
|
||||
|
||||
func (s *Scheduler) getClusterPlacement(crb *workv1alpha2.ClusterResourceBinding) (policyv1alpha1.Placement, string, error) {
|
||||
var placement policyv1alpha1.Placement
|
||||
policyName := util.GetLabelValue(crb.Labels, policyv1alpha1.ClusterPropagationPolicyLabel)
|
||||
var err error
|
||||
|
||||
policy, err := s.clusterPolicyLister.Get(policyName)
|
||||
placementPtr := crb.Spec.Placement
|
||||
if placementPtr == nil {
|
||||
err = fmt.Errorf("failed to get placement from clusterResourceBinding(%s)", crb.Name)
|
||||
klog.Error(err)
|
||||
return placement, "", err
|
||||
}
|
||||
|
||||
placement = *placementPtr
|
||||
var placementBytes []byte
|
||||
placementBytes, err = json.Marshal(placement)
|
||||
if err != nil {
|
||||
return placement, "", err
|
||||
}
|
||||
|
||||
placement = policy.Spec.Placement
|
||||
placementBytes, err := json.Marshal(placement)
|
||||
if err != nil {
|
||||
klog.Errorf("Failed to marshal placement of propagationPolicy %s/%s, error: %v", policy.Namespace, policy.Name, err)
|
||||
return placement, "", err
|
||||
}
|
||||
return placement, string(placementBytes), nil
|
||||
}
|
||||
|
||||
|
|
@ -397,14 +368,14 @@ func (s *Scheduler) doScheduleBinding(namespace, name string) (err error) {
|
|||
if appliedPlacement := util.GetLabelValue(rb.Annotations, util.PolicyPlacementAnnotation); policyPlacementStr != appliedPlacement {
|
||||
// policy placement changed, need schedule
|
||||
klog.Infof("Start to schedule ResourceBinding(%s/%s) as placement changed", namespace, name)
|
||||
err = s.scheduleResourceBinding(rb)
|
||||
err = s.scheduleResourceBinding(rb, policyPlacementStr)
|
||||
metrics.BindingSchedule(string(ReconcileSchedule), utilmetrics.DurationInSeconds(start), err)
|
||||
return err
|
||||
}
|
||||
if policyPlacement.ReplicaScheduling != nil && util.IsBindingReplicasChanged(&rb.Spec, policyPlacement.ReplicaScheduling) {
|
||||
// binding replicas changed, need reschedule
|
||||
klog.Infof("Reschedule ResourceBinding(%s/%s) as replicas scaled down or scaled up", namespace, name)
|
||||
err = s.scheduleResourceBinding(rb)
|
||||
err = s.scheduleResourceBinding(rb, policyPlacementStr)
|
||||
metrics.BindingSchedule(string(ScaleSchedule), utilmetrics.DurationInSeconds(start), err)
|
||||
return err
|
||||
}
|
||||
|
|
@ -414,7 +385,7 @@ func (s *Scheduler) doScheduleBinding(namespace, name string) (err error) {
|
|||
// Duplicated resources should always be scheduled. Note: non-workload is considered as duplicated
|
||||
// even if scheduling type is divided.
|
||||
klog.V(3).Infof("Start to schedule ResourceBinding(%s/%s) as scheduling type is duplicated", namespace, name)
|
||||
err = s.scheduleResourceBinding(rb)
|
||||
err = s.scheduleResourceBinding(rb, policyPlacementStr)
|
||||
metrics.BindingSchedule(string(ReconcileSchedule), utilmetrics.DurationInSeconds(start), err)
|
||||
return err
|
||||
}
|
||||
|
|
@@ -459,14 +430,14 @@ func (s *Scheduler) doScheduleClusterBinding(name string) (err error) {
 	if appliedPlacement := util.GetLabelValue(crb.Annotations, util.PolicyPlacementAnnotation); policyPlacementStr != appliedPlacement {
 		// policy placement changed, need schedule
 		klog.Infof("Start to schedule ClusterResourceBinding(%s) as placement changed", name)
-		err = s.scheduleClusterResourceBinding(crb)
+		err = s.scheduleClusterResourceBinding(crb, policyPlacementStr)
 		metrics.BindingSchedule(string(ReconcileSchedule), utilmetrics.DurationInSeconds(start), err)
 		return err
 	}
 	if policyPlacement.ReplicaScheduling != nil && util.IsBindingReplicasChanged(&crb.Spec, policyPlacement.ReplicaScheduling) {
 		// binding replicas changed, need reschedule
 		klog.Infof("Reschedule ClusterResourceBinding(%s) as replicas scaled down or scaled up", name)
-		err = s.scheduleClusterResourceBinding(crb)
+		err = s.scheduleClusterResourceBinding(crb, policyPlacementStr)
 		metrics.BindingSchedule(string(ScaleSchedule), utilmetrics.DurationInSeconds(start), err)
 		return err
 	}

@@ -476,7 +447,7 @@ func (s *Scheduler) doScheduleClusterBinding(name string) (err error) {
 		// Duplicated resources should always be scheduled. Note: non-workload is considered as duplicated
 		// even if scheduling type is divided.
 		klog.V(3).Infof("Start to schedule ClusterResourceBinding(%s) as scheduling type is duplicated", name)
-		err = s.scheduleClusterResourceBinding(crb)
+		err = s.scheduleClusterResourceBinding(crb, policyPlacementStr)
 		metrics.BindingSchedule(string(ReconcileSchedule), utilmetrics.DurationInSeconds(start), err)
 		return err
 	}

@@ -485,16 +456,11 @@ func (s *Scheduler) doScheduleClusterBinding(name string) (err error) {
 	return nil
 }

-func (s *Scheduler) scheduleResourceBinding(resourceBinding *workv1alpha2.ResourceBinding) (err error) {
+func (s *Scheduler) scheduleResourceBinding(resourceBinding *workv1alpha2.ResourceBinding, placementStr string) (err error) {
 	klog.V(4).InfoS("Begin scheduling resource binding", "resourceBinding", klog.KObj(resourceBinding))
 	defer klog.V(4).InfoS("End scheduling resource binding", "resourceBinding", klog.KObj(resourceBinding))

-	placement, placementStr, err := s.getPlacement(resourceBinding)
-	if err != nil {
-		return err
-	}
-
-	scheduleResult, err := s.Algorithm.Schedule(context.TODO(), &placement, &resourceBinding.Spec, &core.ScheduleAlgorithmOption{EnableEmptyWorkloadPropagation: s.enableEmptyWorkloadPropagation})
+	scheduleResult, err := s.Algorithm.Schedule(context.TODO(), &resourceBinding.Spec, &core.ScheduleAlgorithmOption{EnableEmptyWorkloadPropagation: s.enableEmptyWorkloadPropagation})
 	var noClusterFit *framework.FitError
 	// in case of no cluster fit, can not return but continue to patch(cleanup) the result.
 	if err != nil && !errors.As(err, &noClusterFit) {
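The hunk above is the core of the change on the namespaced path: scheduleResourceBinding no longer resolves the placement itself, and Algorithm.Schedule drops its separate placement argument, presumably reading the Placement now carried on the binding spec instead. A hedged reconstruction of the whole method after the change is shown below; the closing lines are inferred from the ClusterResourceBinding variant later in this diff and may differ in detail from the real file.

    // Reconstruction for illustration; the tail mirrors the cluster-scoped variant shown further down.
    func (s *Scheduler) scheduleResourceBinding(resourceBinding *workv1alpha2.ResourceBinding, placementStr string) (err error) {
        klog.V(4).InfoS("Begin scheduling resource binding", "resourceBinding", klog.KObj(resourceBinding))
        defer klog.V(4).InfoS("End scheduling resource binding", "resourceBinding", klog.KObj(resourceBinding))

        // No getPlacement call here any more: the caller supplies the serialized
        // placement, and the scheduling algorithm works from the binding spec.
        scheduleResult, err := s.Algorithm.Schedule(context.TODO(), &resourceBinding.Spec, &core.ScheduleAlgorithmOption{EnableEmptyWorkloadPropagation: s.enableEmptyWorkloadPropagation})
        var noClusterFit *framework.FitError
        // When no cluster fits, keep going so the (possibly empty) result can still be patched onto the binding.
        if err != nil && !errors.As(err, &noClusterFit) {
            return err
        }

        klog.V(4).Infof("ResourceBinding %s/%s scheduled to clusters %v", resourceBinding.Namespace, resourceBinding.Name, scheduleResult.SuggestedClusters)
        scheduleErr := s.patchScheduleResultForResourceBinding(resourceBinding, placementStr, scheduleResult.SuggestedClusters)
        return utilerrors.NewAggregate([]error{err, scheduleErr})
    }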
@@ -535,23 +501,11 @@ func (s *Scheduler) patchScheduleResultForResourceBinding(oldBinding *workv1alph
 	return err
 }

-func (s *Scheduler) scheduleClusterResourceBinding(clusterResourceBinding *workv1alpha2.ClusterResourceBinding) (err error) {
+func (s *Scheduler) scheduleClusterResourceBinding(clusterResourceBinding *workv1alpha2.ClusterResourceBinding, placementStr string) (err error) {
 	klog.V(4).InfoS("Begin scheduling cluster resource binding", "clusterResourceBinding", klog.KObj(clusterResourceBinding))
 	defer klog.V(4).InfoS("End scheduling cluster resource binding", "clusterResourceBinding", klog.KObj(clusterResourceBinding))

-	clusterPolicyName := util.GetLabelValue(clusterResourceBinding.Labels, policyv1alpha1.ClusterPropagationPolicyLabel)
-	policy, err := s.clusterPolicyLister.Get(clusterPolicyName)
-	if err != nil {
-		return err
-	}
-
-	placement, err := json.Marshal(policy.Spec.Placement)
-	if err != nil {
-		klog.Errorf("Failed to marshal placement of clusterPropagationPolicy %s, error: %v", policy.Name, err)
-		return err
-	}
-
-	scheduleResult, err := s.Algorithm.Schedule(context.TODO(), &policy.Spec.Placement, &clusterResourceBinding.Spec, &core.ScheduleAlgorithmOption{EnableEmptyWorkloadPropagation: s.enableEmptyWorkloadPropagation})
+	scheduleResult, err := s.Algorithm.Schedule(context.TODO(), &clusterResourceBinding.Spec, &core.ScheduleAlgorithmOption{EnableEmptyWorkloadPropagation: s.enableEmptyWorkloadPropagation})
 	var noClusterFit *framework.FitError
 	// in case of no cluster fit, can not return but continue to patch(cleanup) the result.
 	if err != nil && !errors.As(err, &noClusterFit) {

@@ -560,7 +514,7 @@ func (s *Scheduler) scheduleClusterResourceBinding(clusterResourceBinding *workv
 	}

 	klog.V(4).Infof("ClusterResourceBinding %s scheduled to clusters %v", clusterResourceBinding.Name, scheduleResult.SuggestedClusters)
-	scheduleErr := s.patchScheduleResultForClusterResourceBinding(clusterResourceBinding, string(placement), scheduleResult.SuggestedClusters)
+	scheduleErr := s.patchScheduleResultForClusterResourceBinding(clusterResourceBinding, placementStr, scheduleResult.SuggestedClusters)
 	return utilerrors.NewAggregate([]error{err, scheduleErr})
 }
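On the cluster-scoped path the refactor removes the inline policy lookup and JSON marshaling: previously the method fetched the ClusterPropagationPolicy by label, marshaled its .spec.placement, and used that string when patching the schedule result. The placementStr the method now receives is the same kind of value the removed json.Marshal(policy.Spec.Placement) produced. A small standalone illustration of that serialization is given below; the cluster names are examples and the printed output is indicative only.

    // Standalone example of serializing a Placement the way the removed code did;
    // the printed string has the shape of the value now passed in as placementStr.
    package main

    import (
        "encoding/json"
        "fmt"

        policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
    )

    func main() {
        placement := policyv1alpha1.Placement{
            ClusterAffinity: &policyv1alpha1.ClusterAffinity{
                ClusterNames: []string{"member1", "member2"}, // example cluster names
            },
        }
        raw, err := json.Marshal(placement)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(raw)) // e.g. {"clusterAffinity":{"clusterNames":["member1","member2"]}}
    }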
@@ -325,15 +325,16 @@ var _ = ginkgo.Describe("[AdvancedClusterPropagation] propagation testing", func
 		})
 	})

-	ginkgo.Context("Edit ClusterPropagationPolicy PropagateDeps", func() {
+	ginkgo.Context("Edit ClusterPropagationPolicy fields other than resourceSelector", func() {

 		ginkgo.When("namespace scope resource", func() {
 			var policy *policyv1alpha1.ClusterPropagationPolicy
 			var deployment *appsv1.Deployment
-			var targetMember string
+			var targetMember, updatedMember string

 			ginkgo.BeforeEach(func() {
 				targetMember = framework.ClusterNames()[0]
+				updatedMember = framework.ClusterNames()[1]
 				policyName := deploymentNamePrefix + rand.String(RandomStrLength)

 				deployment = testhelper.NewDeployment(testNamespace, policyName+"01")

@@ -392,6 +393,83 @@ var _ = ginkgo.Describe("[AdvancedClusterPropagation] propagation testing", func
 					return bindings.Items[0].Spec.PropagateDeps == true
 				}, pollTimeout, pollInterval).Should(gomega.Equal(true))
 			})
+
+			ginkgo.It("update policy placement", func() {
+				updatedPlacement := policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{
+						ClusterNames: []string{updatedMember},
+					}}
+				patch := []map[string]interface{}{
+					{
+						"op":    "replace",
+						"path":  "/spec/placement",
+						"value": updatedPlacement,
+					},
+				}
+				framework.PatchClusterPropagationPolicy(karmadaClient, policy.Name, patch, types.JSONPatchType)
+				framework.WaitDeploymentDisappearOnCluster(targetMember, deployment.Namespace, deployment.Name)
+				framework.WaitDeploymentPresentOnClusterFitWith(updatedMember, deployment.Namespace, deployment.Name,
+					func(deployment *appsv1.Deployment) bool { return true })
+			})
+		})
+
+		ginkgo.When("cluster scope resource", func() {
+			var policy *policyv1alpha1.ClusterPropagationPolicy
+			var clusterRole *rbacv1.ClusterRole
+			var targetMember, updatedMember string
+
+			ginkgo.BeforeEach(func() {
+				targetMember = framework.ClusterNames()[0]
+				updatedMember = framework.ClusterNames()[1]
+				policyName := deploymentNamePrefix + rand.String(RandomStrLength)
+
+				clusterRole = testhelper.NewClusterRole(fmt.Sprintf("system:test-%s-01", policyName), nil)
+
+				policy = testhelper.NewClusterPropagationPolicy(policyName, []policyv1alpha1.ResourceSelector{
+					{
+						APIVersion: clusterRole.APIVersion,
+						Kind:       clusterRole.Kind,
+						Name:       clusterRole.Name,
+					}}, policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{
+						ClusterNames: []string{targetMember},
+					},
+				})
+			})
+
+			ginkgo.BeforeEach(func() {
+				framework.CreateClusterPropagationPolicy(karmadaClient, policy)
+				framework.CreateClusterRole(kubeClient, clusterRole)
+				ginkgo.DeferCleanup(func() {
+					framework.RemoveClusterPropagationPolicy(karmadaClient, policy.Name)
+					framework.RemoveClusterRole(kubeClient, clusterRole.Name)
+				})
+
+				framework.WaitClusterRolePresentOnClusterFitWith(targetMember, clusterRole.Name,
+					func(role *rbacv1.ClusterRole) bool {
+						return true
+					})
+			})
+
+			ginkgo.It("update policy placement", func() {
+				updatedPlacement := policyv1alpha1.Placement{
+					ClusterAffinity: &policyv1alpha1.ClusterAffinity{
+						ClusterNames: []string{updatedMember},
+					}}
+				patch := []map[string]interface{}{
+					{
+						"op":    "replace",
+						"path":  "/spec/placement",
+						"value": updatedPlacement,
+					},
+				}
+				framework.PatchClusterPropagationPolicy(karmadaClient, policy.Name, patch, types.JSONPatchType)
+				framework.WaitClusterRoleDisappearOnCluster(targetMember, clusterRole.Name)
+				framework.WaitClusterRolePresentOnClusterFitWith(updatedMember, clusterRole.Name,
+					func(role *rbacv1.ClusterRole) bool {
+						return true
+					})
+			})
 		})
 	})
 })
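Both new e2e cases follow the same pattern: replace /spec/placement on the live policy with a JSON patch, then wait for the resource to disappear from the original member cluster and appear on the new one. The framework helper hides the API call; the sketch below is a rough, assumed equivalent using the generated Karmada clientset (the helper's real internals are not part of this diff, and the versioned clientset import path is an assumption).

    // Hedged sketch, not the framework helper's actual code: applying the same
    // JSON patch through the generated Karmada clientset.
    func patchClusterPropagationPolicyPlacement(client versioned.Interface, name string, patch []map[string]interface{}) error {
        patchBytes, err := json.Marshal(patch)
        if err != nil {
            return err
        }
        // ClusterPropagationPolicy is cluster-scoped, so no namespace is involved.
        _, err = client.PolicyV1alpha1().ClusterPropagationPolicies().Patch(
            context.TODO(), name, types.JSONPatchType, patchBytes, metav1.PatchOptions{})
        return err
    }

Using a JSON patch with a "replace" op works here because the policies under test are created with a placement already set, so the path /spec/placement is guaranteed to exist when the test edits it.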
@@ -939,13 +939,14 @@ var _ = ginkgo.Describe("[AdvancedPropagation] propagation testing", func() {
 		})
 	})

-	ginkgo.Context("Edit PropagationPolicy PropagateDeps", func() {
+	ginkgo.Context("Edit PropagationPolicy fields other than resourceSelectors", func() {
 		var policy *policyv1alpha1.PropagationPolicy
 		var deployment *appsv1.Deployment
-		var targetMember string
+		var targetMember, updatedMember string

 		ginkgo.BeforeEach(func() {
 			targetMember = framework.ClusterNames()[0]
+			updatedMember = framework.ClusterNames()[1]
 			policyNamespace := testNamespace
 			policyName := deploymentNamePrefix + rand.String(RandomStrLength)

@@ -1007,5 +1008,23 @@ var _ = ginkgo.Describe("[AdvancedPropagation] propagation testing", func() {
 			return bindings.Items[0].Spec.PropagateDeps == true
 		}, pollTimeout, pollInterval).Should(gomega.Equal(true))
 		})
+
+		ginkgo.It("update policy placement", func() {
+			updatedPlacement := policyv1alpha1.Placement{
+				ClusterAffinity: &policyv1alpha1.ClusterAffinity{
+					ClusterNames: []string{updatedMember},
+				}}
+			patch := []map[string]interface{}{
+				{
+					"op":    "replace",
+					"path":  "/spec/placement",
+					"value": updatedPlacement,
+				},
+			}
+			framework.PatchPropagationPolicy(karmadaClient, policy.Namespace, policy.Name, patch, types.JSONPatchType)
+			framework.WaitDeploymentDisappearOnCluster(targetMember, deployment.Namespace, deployment.Name)
+			framework.WaitDeploymentPresentOnClusterFitWith(updatedMember, deployment.Namespace, deployment.Name,
+				func(deployment *appsv1.Deployment) bool { return true })
+		})
 	})
 })
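The namespaced PropagationPolicy case mirrors the cluster-scoped one, using framework.PatchPropagationPolicy plus the deployment wait helpers. Those helpers are not part of this diff; in spirit they are a polling assertion along the lines of the hedged sketch below, where memberClient and the fit predicate are illustrative assumptions rather than names from the framework.

    // Hedged sketch of a "present and fits" wait: poll the member cluster until the
    // Deployment exists and the predicate accepts it, within pollTimeout/pollInterval.
    gomega.Eventually(func() bool {
        deploy, err := memberClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{})
        if err != nil {
            return false
        }
        return fit(deploy)
    }, pollTimeout, pollInterval).Should(gomega.BeTrue())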