Compare commits

...

10 Commits

Author SHA1 Message Date
karmada-bot 2a2d22adf4
Merge pull request #30 from RainbowMango/pr_sync_140
Sync APIs from karmada repo based on v1.14.0
2025-06-23 09:35:00 +08:00
RainbowMango 518c40cffc Sync APIs from karmada repo based on v1.14.0
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-06-20 12:36:27 +08:00
karmada-bot 4ff33322f2
Merge pull request #28 from RainbowMango/pr_add_code_of_conduct
Add Karmada code of conduct
2025-03-10 15:59:31 +08:00
RainbowMango 86df91cac5 Add Karmada code of conduct
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-03-10 15:57:31 +08:00
karmada-bot 23fd016454
Merge pull request #26 from RainbowMango/pr_sync_130
Sync APIs from karmada repo based on v1.13.0
2025-03-07 17:08:29 +08:00
RainbowMango 2dd201275c Sync APIs from karmada repo based on v1.13.0
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-03-07 15:43:28 +08:00
karmada-bot 0a396ea23e
Merge pull request #25 from RainbowMango/pr_sync_120
Sync APIs from karmada repo based on v1.12.0
2025-03-06 14:36:28 +08:00
RainbowMango bd61308fb9 Sync APIs from karmada repo based on v1.12.0
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2025-03-04 20:16:48 +08:00
karmada-bot 909667dcc9
Merge pull request #24 from RainbowMango/pr_sync_111
Sync APIs from karmada repo based on v1.11.0
2024-10-12 11:01:22 +08:00
RainbowMango 76d6ebe8b2 Sync APIs from karmada repo based on v1.11.0
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
2024-10-11 11:46:54 +08:00
41 changed files with 1590 additions and 1072 deletions

3
CODE_OF_CONDUCT.md Normal file
View File

@ -0,0 +1,3 @@
# Karmada Community Code of Conduct
Please refer to our [Karmada Community Code of Conduct](https://github.com/karmada-io/community/blob/main/CODE_OF_CONDUCT.md).

View File

@ -1,3 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
@ -20,8 +23,8 @@ package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -43,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

View File

@ -1,9 +1,12 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

View File

@ -1,42 +1,42 @@
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
const (
// FederatedHPAKind is the kind of FederatedHPA in group autoscaling.karmada.io
FederatedHPAKind = "FederatedHPA"
// QuerySourceAnnotationKey is the annotation used in karmada-metrics-adapter to
// record the query source cluster
QuerySourceAnnotationKey = "resource.karmada.io/query-from-cluster"
// ResourceSingularFederatedHPA is singular name of FederatedHPA.
ResourceSingularFederatedHPA = "federatedhpa"
// ResourcePluralFederatedHPA is plural name of FederatedHPA.
ResourcePluralFederatedHPA = "federatedhpas"
// ResourceNamespaceScopedFederatedHPA is the scope of the FederatedHPA
ResourceNamespaceScopedFederatedHPA = true
// ResourceKindCronFederatedHPA is kind name of CronFederatedHPA.
ResourceKindCronFederatedHPA = "CronFederatedHPA"
// ResourceSingularCronFederatedHPA is singular name of CronFederatedHPA.
ResourceSingularCronFederatedHPA = "cronfederatedhpa"
// ResourcePluralCronFederatedHPA is plural name of CronFederatedHPA.
ResourcePluralCronFederatedHPA = "cronfederatedhpas"
// ResourceNamespaceScopedCronFederatedHPA is the scope of the CronFederatedHPA
ResourceNamespaceScopedCronFederatedHPA = true
)
/*
Copyright 2023 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
const (
// FederatedHPAKind is the kind of FederatedHPA in group autoscaling.karmada.io
FederatedHPAKind = "FederatedHPA"
// QuerySourceAnnotationKey is the annotation used in karmada-metrics-adapter to
// record the query source cluster
QuerySourceAnnotationKey = "resource.karmada.io/query-from-cluster"
// ResourceSingularFederatedHPA is singular name of FederatedHPA.
ResourceSingularFederatedHPA = "federatedhpa"
// ResourcePluralFederatedHPA is plural name of FederatedHPA.
ResourcePluralFederatedHPA = "federatedhpas"
// ResourceNamespaceScopedFederatedHPA is the scope of the FederatedHPA
ResourceNamespaceScopedFederatedHPA = true
// ResourceKindCronFederatedHPA is kind name of CronFederatedHPA.
ResourceKindCronFederatedHPA = "CronFederatedHPA"
// ResourceSingularCronFederatedHPA is singular name of CronFederatedHPA.
ResourceSingularCronFederatedHPA = "cronfederatedhpa"
// ResourcePluralCronFederatedHPA is plural name of CronFederatedHPA.
ResourcePluralCronFederatedHPA = "cronfederatedhpas"
// ResourceNamespaceScopedCronFederatedHPA is the scope of the CronFederatedHPA
ResourceNamespaceScopedCronFederatedHPA = true
)

View File

@ -1,3 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
@ -20,8 +23,8 @@ package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -43,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

View File

@ -18,6 +18,7 @@ package install
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"github.com/karmada-io/api/cluster"
@ -27,6 +28,6 @@ import (
// Install registers the API group and adds types to a scheme.
func Install(scheme *runtime.Scheme) {
utilruntime.Must(cluster.AddToScheme(scheme))
utilruntime.Must(clusterv1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(clusterv1alpha1.SchemeGroupVersion))
utilruntime.Must(clusterv1alpha1.Install(scheme))
utilruntime.Must(scheme.SetVersionPriority(schema.GroupVersion{Group: clusterv1alpha1.GroupVersion.Group, Version: clusterv1alpha1.GroupVersion.Version}))
}

View File

@ -1,368 +0,0 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mutation
import (
"fmt"
"math"
"reflect"
"testing"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
clusterapis "github.com/karmada-io/api/cluster"
)
// TestMutateCluster verifies the defaulting applied by MutateCluster:
// taints with the NoExecute effect receive a TimeAdded timestamp, and a
// populated (deprecated) Zone field is mirrored into the Zones list.
func TestMutateCluster(t *testing.T) {
	cases := []struct {
		name    string
		cluster *clusterapis.Cluster
		// check inspects the cluster after mutation and reports any
		// defaulting that did not take effect.
		check func(*clusterapis.Cluster) error
	}{
		{
			name: "test mutate cluster Taints",
			cluster: &clusterapis.Cluster{
				Spec: clusterapis.ClusterSpec{
					Taints: []corev1.Taint{
						{
							Key:    "foo",
							Value:  "abc",
							Effect: corev1.TaintEffectNoSchedule,
						},
						{
							Key:    "bar",
							Effect: corev1.TaintEffectNoExecute,
						},
					},
				},
			},
			check: func(c *clusterapis.Cluster) error {
				// Every NoExecute taint must have been stamped with TimeAdded.
				for i := range c.Spec.Taints {
					if c.Spec.Taints[i].Effect == corev1.TaintEffectNoExecute && c.Spec.Taints[i].TimeAdded == nil {
						return fmt.Errorf("failed to mutate cluster, taints TimeAdded should not be nil")
					}
				}
				return nil
			},
		},
		{
			name: "test mutate cluster Zone",
			cluster: &clusterapis.Cluster{
				Spec: clusterapis.ClusterSpec{
					Zone: "zone1",
				},
			},
			check: func(c *clusterapis.Cluster) error {
				// A non-empty legacy Zone must be propagated into Zones.
				if c.Spec.Zone != "" && len(c.Spec.Zones) == 0 {
					return fmt.Errorf("failed to mutate cluster, zones should not be nil")
				}
				return nil
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			MutateCluster(tc.cluster)
			if err := tc.check(tc.cluster); err != nil {
				t.Error(err)
			}
		})
	}
}
// TestStandardizeClusterResourceModels verifies that
// StandardizeClusterResourceModels normalizes a resource-model list:
// models are sorted by grade, the first range is extended to start at 0,
// and the last range is extended to end at math.MaxInt64.
func TestStandardizeClusterResourceModels(t *testing.T) {
	testCases := map[string]struct {
		models         []clusterapis.ResourceModel
		expectedModels []clusterapis.ResourceModel
	}{
		"sort models": {
			models: []clusterapis.ResourceModel{
				{
					Grade: 2,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(2, resource.DecimalSI),
							Max:  *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
						},
					},
				},
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(0, resource.DecimalSI),
							Max:  *resource.NewQuantity(2, resource.DecimalSI),
						},
					},
				},
			},
			expectedModels: []clusterapis.ResourceModel{
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(0, resource.DecimalSI),
							Max:  *resource.NewQuantity(2, resource.DecimalSI),
						},
					},
				},
				{
					Grade: 2,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(2, resource.DecimalSI),
							Max:  *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
						},
					},
				},
			},
		},
		"start with 0": {
			models: []clusterapis.ResourceModel{
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(1, resource.DecimalSI),
							Max:  *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
						},
					},
				},
			},
			expectedModels: []clusterapis.ResourceModel{
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(0, resource.DecimalSI),
							Max:  *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
						},
					},
				},
			},
		},
		"end with MaxInt64": {
			models: []clusterapis.ResourceModel{
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(0, resource.DecimalSI),
							Max:  *resource.NewQuantity(2, resource.DecimalSI),
						},
					},
				},
			},
			expectedModels: []clusterapis.ResourceModel{
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(0, resource.DecimalSI),
							Max:  *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
						},
					},
				},
			},
		},
	}

	// Run each case as its own subtest so one failure does not abort the
	// remaining cases (the previous version returned on the first mismatch).
	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			StandardizeClusterResourceModels(testCase.models)
			if !reflect.DeepEqual(testCase.models, testCase.expectedModels) {
				t.Errorf("expected resource models %+v, but got %+v", testCase.expectedModels, testCase.models)
			}
		})
	}
}
// TestSetDefaultClusterResourceModels verifies that
// SetDefaultClusterResourceModels populates an empty cluster with the nine
// built-in resource-model grades (0-8) covering CPU and memory, with the
// lowest grade starting at 0 and the highest capped at math.MaxInt64.
func TestSetDefaultClusterResourceModels(t *testing.T) {
	type args struct {
		cluster *clusterapis.Cluster
	}
	tests := []struct {
		name       string
		args       args
		wantModels []clusterapis.ResourceModel
	}{
		{
			name: "test set default Cluster",
			args: args{
				cluster: &clusterapis.Cluster{},
			},
			wantModels: []clusterapis.ResourceModel{
				{
					Grade: 0,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(0, resource.DecimalSI),
							Max:  *resource.NewQuantity(1, resource.DecimalSI),
						},
						{
							Name: corev1.ResourceMemory,
							Min:  *resource.NewQuantity(0, resource.BinarySI),
							Max:  *resource.NewQuantity(4*GB, resource.BinarySI),
						},
					},
				},
				{
					Grade: 1,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(1, resource.DecimalSI),
							Max:  *resource.NewQuantity(2, resource.DecimalSI),
						},
						{
							Name: corev1.ResourceMemory,
							Min:  *resource.NewQuantity(4*GB, resource.BinarySI),
							Max:  *resource.NewQuantity(16*GB, resource.BinarySI),
						},
					},
				},
				{
					Grade: 2,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(2, resource.DecimalSI),
							Max:  *resource.NewQuantity(4, resource.DecimalSI),
						},
						{
							Name: corev1.ResourceMemory,
							Min:  *resource.NewQuantity(16*GB, resource.BinarySI),
							Max:  *resource.NewQuantity(32*GB, resource.BinarySI),
						},
					},
				},
				{
					Grade: 3,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(4, resource.DecimalSI),
							Max:  *resource.NewQuantity(8, resource.DecimalSI),
						},
						{
							Name: corev1.ResourceMemory,
							Min:  *resource.NewQuantity(32*GB, resource.BinarySI),
							Max:  *resource.NewQuantity(64*GB, resource.BinarySI),
						},
					},
				},
				{
					Grade: 4,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(8, resource.DecimalSI),
							Max:  *resource.NewQuantity(16, resource.DecimalSI),
						},
						{
							Name: corev1.ResourceMemory,
							Min:  *resource.NewQuantity(64*GB, resource.BinarySI),
							Max:  *resource.NewQuantity(128*GB, resource.BinarySI),
						},
					},
				},
				{
					Grade: 5,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(16, resource.DecimalSI),
							Max:  *resource.NewQuantity(32, resource.DecimalSI),
						},
						{
							Name: corev1.ResourceMemory,
							Min:  *resource.NewQuantity(128*GB, resource.BinarySI),
							Max:  *resource.NewQuantity(256*GB, resource.BinarySI),
						},
					},
				},
				{
					Grade: 6,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(32, resource.DecimalSI),
							Max:  *resource.NewQuantity(64, resource.DecimalSI),
						},
						{
							Name: corev1.ResourceMemory,
							Min:  *resource.NewQuantity(256*GB, resource.BinarySI),
							Max:  *resource.NewQuantity(512*GB, resource.BinarySI),
						},
					},
				},
				{
					Grade: 7,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(64, resource.DecimalSI),
							Max:  *resource.NewQuantity(128, resource.DecimalSI),
						},
						{
							Name: corev1.ResourceMemory,
							Min:  *resource.NewQuantity(512*GB, resource.BinarySI),
							Max:  *resource.NewQuantity(1024*GB, resource.BinarySI),
						},
					},
				},
				{
					Grade: 8,
					Ranges: []clusterapis.ResourceModelRange{
						{
							Name: corev1.ResourceCPU,
							Min:  *resource.NewQuantity(128, resource.DecimalSI),
							Max:  *resource.NewQuantity(math.MaxInt64, resource.DecimalSI),
						},
						{
							Name: corev1.ResourceMemory,
							Min:  *resource.NewQuantity(1024*GB, resource.BinarySI),
							Max:  *resource.NewQuantity(math.MaxInt64, resource.BinarySI),
						},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		// The assertion runs inside the subtest (the previous version
		// checked outside the t.Run closure with an unnamed *testing.T
		// parameter, so failures were attributed to the parent test and
		// the first mismatch aborted all remaining cases).
		t.Run(tt.name, func(t *testing.T) {
			SetDefaultClusterResourceModels(tt.args.cluster)
			if !reflect.DeepEqual(tt.args.cluster.Spec.ResourceModels, tt.wantModels) {
				t.Errorf("SetDefaultClusterResourceModels expected resourceModels %+v, but got %+v", tt.wantModels, tt.args.cluster.Spec.ResourceModels)
			}
		})
	}
}

View File

@ -28,7 +28,7 @@ import (
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Cluster represents the desire state and status of a member cluster.
// Cluster represents the desired state and status of a member cluster.
type Cluster struct {
metav1.TypeMeta
metav1.ObjectMeta
@ -44,8 +44,8 @@ type Cluster struct {
// ClusterSpec defines the desired state of a member cluster.
type ClusterSpec struct {
// ID is the unique identifier for the cluster.
// It is different from the object uid(.metadata.uid) and typically collected automatically
// from member cluster during the progress of registration.
// It is different from the object uid(.metadata.uid) and is typically collected automatically
// from each member cluster during the process of registration.
//
// The value is collected in order:
// 1. If the registering cluster enabled ClusterProperty API and defined the cluster ID by
@ -63,7 +63,7 @@ type ClusterSpec struct {
// +kubebuilder:validation:Maxlength=128000
ID string `json:"id,omitempty"`
// SyncMode describes how a cluster sync resources from karmada control plane.
// SyncMode describes how a cluster syncs resources from karmada control plane.
// +required
SyncMode ClusterSyncMode
@ -72,14 +72,14 @@ type ClusterSpec struct {
// +optional
APIEndpoint string
// SecretRef represents the secret contains mandatory credentials to access the member cluster.
// SecretRef represents the secret that contains mandatory credentials to access the member cluster.
// The secret should hold credentials as follows:
// - secret.data.token
// - secret.data.caBundle
// +optional
SecretRef *LocalSecretReference
// ImpersonatorSecretRef represents the secret contains the token of impersonator.
// ImpersonatorSecretRef represents the secret that contains the token of impersonator.
// The secret should hold credentials as follows:
// - secret.data.token
// +optional
@ -94,12 +94,12 @@ type ClusterSpec struct {
// ProxyURL is the proxy URL for the cluster.
// If not empty, the karmada control plane will use this proxy to talk to the cluster.
// More details please refer to: https://github.com/kubernetes/client-go/issues/351
// For more details please refer to: https://github.com/kubernetes/client-go/issues/351
// +optional
ProxyURL string
// ProxyHeader is the HTTP header required by proxy server.
// The key in the key-value pair is HTTP header key and value is the associated header payloads.
// The key in the key-value pair is HTTP header key and the value is the associated header payloads.
// For the header with multiple values, the values should be separated by comma(e.g. 'k1': 'v1,v2,v3').
// +optional
ProxyHeader map[string]string
@ -108,12 +108,12 @@ type ClusterSpec struct {
// +optional
Provider string
// Region represents the region of the member cluster locate in.
// Region represents the region in which the member cluster is located.
// +optional
Region string
// Zone represents the zone of the member cluster locate in.
// Deprecated: This filed was never been used by Karmada, and it will not be
// Zone represents the zone in which the member cluster is located.
// Deprecated: This field was never used by Karmada, and it will not be
// removed from v1alpha1 for backward compatibility, use Zones instead.
// +optional
Zone string
@ -126,7 +126,7 @@ type ClusterSpec struct {
// +optional
Zones []string `json:"zones,omitempty"`
// Taints attached to the member cluster.
// Taints are attached to the member cluster.
// Taints on the cluster have the "effect" on
// any resource that does not tolerate the Taint.
// +optional
@ -204,8 +204,8 @@ type ResourceModel struct {
// ResourceModelRange describes the detail of each modeling quota that ranges from min to max.
// Please pay attention, by default, the value of min can be inclusive, and the value of max cannot be inclusive.
// E.g. in an interval, min = 2, max =10 is set, which means the interval [2,10).
// This rule ensure that all intervals have the same meaning. If the last interval is infinite,
// E.g. in an interval, min = 2, max = 10 is set, which means the interval [2,10).
// This rule ensures that all intervals have the same meaning. If the last interval is infinite,
// it is definitely unreachable. Therefore, we define the right interval as the open interval.
// For a valid interval, the value on the right is greater than the value on the left,
// in other words, max must be greater than min.
@ -242,13 +242,13 @@ const (
type ClusterSyncMode string
const (
// Push means that the controller on the karmada control plane will in charge of synchronization.
// The controller watches resources change on karmada control plane then pushes them to member cluster.
// Push means that the controller on the karmada control plane will be in charge of synchronization.
// The controller watches resources change on karmada control plane and then pushes them to member cluster.
Push ClusterSyncMode = "Push"
// Pull means that the controller running on the member cluster will in charge of synchronization.
// The controller, as well known as 'agent', watches resources change on karmada control plane then fetches them
// and applies locally on the member cluster.
// Pull means that the controller running on the member cluster will be in charge of synchronization.
// The controller, also known as 'agent', watches resources change on karmada control plane, then fetches them
// and applies them locally on the member cluster.
Pull ClusterSyncMode = "Pull"
)
@ -258,7 +258,7 @@ type LocalSecretReference struct {
// Namespace is the namespace for the resource being referenced.
Namespace string
// Name is the name of resource being referenced.
// Name is the name of the resource being referenced.
Name string
}
@ -266,6 +266,9 @@ type LocalSecretReference struct {
const (
// ClusterConditionReady means the cluster is healthy and ready to accept workloads.
ClusterConditionReady = "Ready"
// ClusterConditionCompleteAPIEnablements indicates whether the cluster's API enablements(.status.apiEnablements) are complete.
ClusterConditionCompleteAPIEnablements = "CompleteAPIEnablements"
)
// ClusterStatus contains information about the current status of a
@ -275,7 +278,7 @@ type ClusterStatus struct {
// +optional
KubernetesVersion string
// APIEnablements represents the list of APIs installed in the member cluster.
// APIEnablements represents the list of APIs installed on the member cluster.
// +optional
APIEnablements []APIEnablement
@ -366,7 +369,7 @@ type AllocatableModeling struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterList contains a list of member cluster
// ClusterList contains a list of member clusters
type ClusterList struct {
metav1.TypeMeta
metav1.ListMeta

View File

@ -39,7 +39,7 @@ const (
// +kubebuilder:resource:scope="Cluster"
// +kubebuilder:subresource:status
// Cluster represents the desire state and status of a member cluster.
// Cluster represents the desired state and status of a member cluster.
type Cluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
@ -55,8 +55,8 @@ type Cluster struct {
// ClusterSpec defines the desired state of a member cluster.
type ClusterSpec struct {
// ID is the unique identifier for the cluster.
// It is different from the object uid(.metadata.uid) and typically collected automatically
// from member cluster during the progress of registration.
// It is different from the object uid(.metadata.uid) and is typically collected automatically
// from each member cluster during the process of registration.
//
// The value is collected in order:
// 1. If the registering cluster enabled ClusterProperty API and defined the cluster ID by
@ -74,7 +74,7 @@ type ClusterSpec struct {
// +kubebuilder:validation:Maxlength=128000
ID string `json:"id,omitempty"`
// SyncMode describes how a cluster sync resources from karmada control plane.
// SyncMode describes how a cluster syncs resources from karmada control plane.
// +kubebuilder:validation:Enum=Push;Pull
// +required
SyncMode ClusterSyncMode `json:"syncMode"`
@ -84,14 +84,14 @@ type ClusterSpec struct {
// +optional
APIEndpoint string `json:"apiEndpoint,omitempty"`
// SecretRef represents the secret contains mandatory credentials to access the member cluster.
// SecretRef represents the secret that contains mandatory credentials to access the member cluster.
// The secret should hold credentials as follows:
// - secret.data.token
// - secret.data.caBundle
// +optional
SecretRef *LocalSecretReference `json:"secretRef,omitempty"`
// ImpersonatorSecretRef represents the secret contains the token of impersonator.
// ImpersonatorSecretRef represents the secret that contains the token of impersonator.
// The secret should hold credentials as follows:
// - secret.data.token
// +optional
@ -106,12 +106,12 @@ type ClusterSpec struct {
// ProxyURL is the proxy URL for the cluster.
// If not empty, the karmada control plane will use this proxy to talk to the cluster.
// More details please refer to: https://github.com/kubernetes/client-go/issues/351
// For more details please refer to: https://github.com/kubernetes/client-go/issues/351
// +optional
ProxyURL string `json:"proxyURL,omitempty"`
// ProxyHeader is the HTTP header required by proxy server.
// The key in the key-value pair is HTTP header key and value is the associated header payloads.
// The key in the key-value pair is HTTP header key and the value is the associated header payloads.
// For the header with multiple values, the values should be separated by comma(e.g. 'k1': 'v1,v2,v3').
// +optional
ProxyHeader map[string]string `json:"proxyHeader,omitempty"`
@ -120,12 +120,12 @@ type ClusterSpec struct {
// +optional
Provider string `json:"provider,omitempty"`
// Region represents the region of the member cluster locate in.
// Region represents the region in which the member cluster is located.
// +optional
Region string `json:"region,omitempty"`
// Zone represents the zone of the member cluster locate in.
// Deprecated: This filed was never been used by Karmada, and it will not be
// Zone represents the zone in which the member cluster is located.
// Deprecated: This field was never used by Karmada, and it will not be
// removed from v1alpha1 for backward compatibility, use Zones instead.
// +optional
Zone string `json:"zone,omitempty"`
@ -138,7 +138,7 @@ type ClusterSpec struct {
// +optional
Zones []string `json:"zones,omitempty"`
// Taints attached to the member cluster.
// Taints are attached to the member cluster.
// Taints on the cluster have the "effect" on
// any resource that does not tolerate the Taint.
// +optional
@ -216,8 +216,8 @@ type ResourceModel struct {
// ResourceModelRange describes the detail of each modeling quota that ranges from min to max.
// Please pay attention, by default, the value of min can be inclusive, and the value of max cannot be inclusive.
// E.g. in an interval, min = 2, max =10 is set, which means the interval [2,10).
// This rule ensure that all intervals have the same meaning. If the last interval is infinite,
// E.g. in an interval, min = 2, max = 10 is set, which means the interval [2,10).
// This rule ensures that all intervals have the same meaning. If the last interval is infinite,
// it is definitely unreachable. Therefore, we define the right interval as the open interval.
// For a valid interval, the value on the right is greater than the value on the left,
// in other words, max must be greater than min.
@ -254,13 +254,13 @@ const (
type ClusterSyncMode string
const (
// Push means that the controller on the karmada control plane will in charge of synchronization.
// The controller watches resources change on karmada control plane then pushes them to member cluster.
// Push means that the controller on the karmada control plane will be in charge of synchronization.
// The controller watches resources change on karmada control plane and then pushes them to member cluster.
Push ClusterSyncMode = "Push"
// Pull means that the controller running on the member cluster will in charge of synchronization.
// The controller, as well known as 'agent', watches resources change on karmada control plane then fetches them
// and applies locally on the member cluster.
// Pull means that the controller running on the member cluster will be in charge of synchronization.
// The controller, also known as 'agent', watches resources change on karmada control plane, then fetches them
// and applies them locally on the member cluster.
Pull ClusterSyncMode = "Pull"
)
@ -270,7 +270,7 @@ type LocalSecretReference struct {
// Namespace is the namespace for the resource being referenced.
Namespace string `json:"namespace"`
// Name is the name of resource being referenced.
// Name is the name of the resource being referenced.
Name string `json:"name"`
}
@ -278,6 +278,9 @@ type LocalSecretReference struct {
const (
// ClusterConditionReady means the cluster is healthy and ready to accept workloads.
ClusterConditionReady = "Ready"
// ClusterConditionCompleteAPIEnablements indicates whether the cluster's API enablements(.status.apiEnablements) are complete.
ClusterConditionCompleteAPIEnablements = "CompleteAPIEnablements"
)
// ClusterStatus contains information about the current status of a
@ -287,7 +290,7 @@ type ClusterStatus struct {
// +optional
KubernetesVersion string `json:"kubernetesVersion,omitempty"`
// APIEnablements represents the list of APIs installed in the member cluster.
// APIEnablements represents the list of APIs installed on the member cluster.
// +optional
APIEnablements []APIEnablement `json:"apiEnablements,omitempty"`
@ -375,7 +378,7 @@ type AllocatableModeling struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterList contains a list of member cluster
// ClusterList contains a list of member clusters
type ClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`

View File

@ -27,8 +27,6 @@ const (
// (corresponding to ClusterConditionReady status ConditionUnknown)
// and removed when cluster becomes reachable (ClusterConditionReady status ConditionTrue).
TaintClusterUnreachable = "cluster.karmada.io/unreachable"
// TaintClusterTerminating will be added when cluster is terminating.
TaintClusterTerminating = "cluster.karmada.io/terminating"
// CacheSourceAnnotationKey is the annotation that added to a resource to
// represent which cluster it cached from.

View File

@ -1,3 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
@ -20,8 +23,8 @@ package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -43,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

View File

@ -36,6 +36,9 @@ const (
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=resourceinterpretercustomizations,scope="Cluster",shortName=ric,categories={karmada-io}
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:JSONPath=`.spec.target.apiVersion`,name="TARGET-API-VERSION",type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.target.kind`,name="TARGET-KIND",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// ResourceInterpreterCustomization describes the configuration of a specific
// resource for Karmada to get the structure.
@ -322,13 +325,13 @@ type DependencyInterpretation struct {
// luaScript: >
// function GetDependencies(desiredObj)
// dependencies = {}
// if desiredObj.spec.serviceAccountName ~= nil and desiredObj.spec.serviceAccountName ~= "default" then
// serviceAccountName = desiredObj.spec.template.spec.serviceAccountName
// if serviceAccountName ~= nil and serviceAccountName ~= "default" then
// dependency = {}
// dependency.apiVersion = "v1"
// dependency.kind = "ServiceAccount"
// dependency.name = desiredObj.spec.serviceAccountName
// dependency.namespace = desiredObj.namespace
// dependencies[1] = {}
// dependency.name = serviceAccountName
// dependency.namespace = desiredObj.metadata.namespace
// dependencies[1] = dependency
// end
// return dependencies

View File

@ -56,6 +56,24 @@ type ResourceInterpreterWebhook struct {
Name string `json:"name"`
// ClientConfig defines how to communicate with the hook.
// It supports two mutually exclusive configuration modes:
//
// 1. URL - Directly specify the webhook URL with format `scheme://host:port/path`.
// Example: https://webhook.example.com:8443/my-interpreter
//
// 2. Service - Reference a Kubernetes Service that exposes the webhook.
// When using Service reference, Karmada resolves the endpoint through following steps:
// a) First attempts to locate the Service in karmada-apiserver
// b) If found, constructs URL based on Service type:
// - ClusterIP/LoadBalancer/NodePort: Uses ClusterIP with port from Service spec
// (Note: Services with ClusterIP "None" are rejected), Example:
// `https://<cluster ip>:<port>`
// - ExternalName: Uses external DNS name format: `https://<external name>:<port>`
// c) If NOT found in karmada-apiserver, falls back to standard Kubernetes
// service DNS name format: `https://<service>.<namespace>.svc:<port>`
//
// Note: When both URL and Service are specified, the Service reference takes precedence
// and the URL configuration will be ignored.
// +required
ClientConfig admissionregistrationv1.WebhookClientConfig `json:"clientConfig"`
@ -99,7 +117,7 @@ type RuleWithOperations struct {
type InterpreterOperation string
const (
// InterpreterOperationAll indicates math all InterpreterOperation.
// InterpreterOperationAll indicates matching all InterpreterOperation.
InterpreterOperationAll InterpreterOperation = "*"
// InterpreterOperationInterpretReplica indicates that karmada want to figure out the replica declaration of a specific object.

View File

@ -1,3 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
@ -20,8 +23,8 @@ package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -43,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

28
go.mod
View File

@ -1,28 +1,30 @@
module github.com/karmada-io/api
go 1.21.10
go 1.23.8
require (
k8s.io/api v0.29.4
k8s.io/apiextensions-apiserver v0.29.4
k8s.io/apimachinery v0.29.4
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
sigs.k8s.io/controller-runtime v0.17.5
k8s.io/api v0.32.3
k8s.io/apiextensions-apiserver v0.32.3
k8s.io/apimachinery v0.32.3
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.20.4
)
require (
github.com/go-logr/logr v1.4.1 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/text v0.14.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/text v0.24.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/klog/v2 v2.110.1 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)

67
go.sum
View File

@ -1,9 +1,12 @@
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@ -25,16 +28,19 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@ -46,8 +52,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -56,8 +62,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@ -71,26 +77,23 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w=
k8s.io/api v0.29.4/go.mod h1:DetSv0t4FBTcEpfA84NJV3g9a7+rSzlUHk5ADAYHUv0=
k8s.io/apiextensions-apiserver v0.29.4 h1:M7hbuHU/ckbibR7yPbe6DyNWgTFKNmZDbdZKD8q1Smk=
k8s.io/apiextensions-apiserver v0.29.4/go.mod h1:TTDC9fB+0kHY2rogf5hgBR03KBKCwED+GHUsXGpR7SM=
k8s.io/apimachinery v0.29.4 h1:RaFdJiDmuKs/8cm1M6Dh1Kvyh59YQFDcFuFTSmXes6Q=
k8s.io/apimachinery v0.29.4/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y=
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.17.5 h1:1FI9Lm7NiOOmBsgTV36/s2XrEFXnO2C4sbg/Zme72Rw=
sigs.k8s.io/controller-runtime v0.17.5/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY=
k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss=
k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

View File

@ -1,3 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
@ -20,8 +23,8 @@ package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -43,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

View File

@ -1,107 +0,0 @@
package constants
import (
"time"
"k8s.io/apimachinery/pkg/labels"
)
const (
// KubeDefaultRepository defines the default of the k8s image repository
KubeDefaultRepository = "registry.k8s.io"
// KarmadaDefaultRepository defines the default of the karmada image repository
KarmadaDefaultRepository = "docker.io/karmada"
// EtcdDefaultVersion defines the default of the karmada etcd image tag
EtcdDefaultVersion = "3.5.3-0"
// KarmadaDefaultVersion defines the default of the karmada components image tag
KarmadaDefaultVersion = "v1.4.0"
// KubeDefaultVersion defines the default of the karmada apiserver and kubeControllerManager image tag
KubeDefaultVersion = "v1.25.4"
// KarmadaDefaultServiceSubnet defines the default of the subnet used by k8s services.
KarmadaDefaultServiceSubnet = "10.96.0.0/12"
// KarmadaDefaultDNSDomain defines the default of the DNSDomain
KarmadaDefaultDNSDomain = "cluster.local"
// KarmadaOperator defines the name of the karmada operator.
KarmadaOperator = "karmada-operator"
// Etcd defines the name of the built-in etcd cluster component
Etcd = "etcd"
// KarmadaAPIServer defines the name of the karmada-apiserver component
KarmadaAPIServer = "karmada-apiserver"
// KubeAPIServer defines the repository name of the kube apiserver
KubeAPIServer = "kube-apiserver"
// KarmadaAggregatedAPIServer defines the name of the karmada-aggregated-apiserver component
KarmadaAggregatedAPIServer = "karmada-aggregated-apiserver"
// KubeControllerManager defines the name of the kube-controller-manager component
KubeControllerManager = "kube-controller-manager"
// KarmadaControllerManager defines the name of the karmada-controller-manager component
KarmadaControllerManager = "karmada-controller-manager"
// KarmadaScheduler defines the name of the karmada-scheduler component
KarmadaScheduler = "karmada-scheduler"
// KarmadaWebhook defines the name of the karmada-webhook component
KarmadaWebhook = "karmada-webhook"
// KarmadaDescheduler defines the name of the karmada-descheduler component
KarmadaDescheduler = "karmada-descheduler"
// KarmadaSystemNamespace defines the leader selection namespace for karmada components
KarmadaSystemNamespace = "karmada-system"
// KarmadaDataDir defines the karmada data dir
KarmadaDataDir = "/var/lib/karmada"
// EtcdListenClientPort defines the port etcd listen on for client traffic
EtcdListenClientPort = 2379
// EtcdMetricsPort is the port at which to obtain etcd metrics and health status
EtcdMetricsPort = 2381
// EtcdListenPeerPort defines the port etcd listen on for peer traffic
EtcdListenPeerPort = 2380
// KarmadaAPIserverListenClientPort defines the port karmada apiserver listen on for client traffic
KarmadaAPIserverListenClientPort = 5443
// EtcdDataVolumeName defines the name to etcd data volume
EtcdDataVolumeName = "etcd-data"
// CertificateValidity Certificate validity period
CertificateValidity = time.Hour * 24 * 365
// CaCertAndKeyName ca certificate key name
CaCertAndKeyName = "ca"
// EtcdCaCertAndKeyName etcd ca certificate key name
EtcdCaCertAndKeyName = "etcd-ca"
// EtcdServerCertAndKeyName etcd server certificate key name
EtcdServerCertAndKeyName = "etcd-server"
// EtcdClientCertAndKeyName etcd client certificate key name
EtcdClientCertAndKeyName = "etcd-client"
// KarmadaCertAndKeyName karmada certificate key name
KarmadaCertAndKeyName = "karmada"
// ApiserverCertAndKeyName karmada apiserver certificate key name
ApiserverCertAndKeyName = "apiserver"
// FrontProxyCaCertAndKeyName front-proxy-client certificate key name
FrontProxyCaCertAndKeyName = "front-proxy-ca"
// FrontProxyClientCertAndKeyName front-proxy-client certificate key name
FrontProxyClientCertAndKeyName = "front-proxy-client"
// ClusterName karmada cluster name
ClusterName = "karmada-apiserver"
// UserName karmada cluster user name
UserName = "karmada-admin"
// KarmadaAPIserverComponent defines the name of karmada-apiserver component
KarmadaAPIserverComponent = "KarmadaAPIServer"
// KarmadaAggregatedAPIServerComponent defines the name of karmada-aggregated-apiserver component
KarmadaAggregatedAPIServerComponent = "KarmadaAggregatedAPIServer"
// KubeControllerManagerComponent defines the name of kube-controller-manager-component
KubeControllerManagerComponent = "KubeControllerManager"
// KarmadaControllerManagerComponent defines the name of karmada-controller-manager component
KarmadaControllerManagerComponent = "KarmadaControllerManager"
// KarmadaSchedulerComponent defines the name of karmada-scheduler component
KarmadaSchedulerComponent = "KarmadaScheduler"
// KarmadaWebhookComponent defines the name of the karmada-webhook component
KarmadaWebhookComponent = "KarmadaWebhook"
// KarmadaDeschedulerComponent defines the name of the karmada-descheduler component
KarmadaDeschedulerComponent = "KarmadaDescheduler"
// KarmadaOperatorLabelKeyName defines a label key used by all of resources created by karmada operator
KarmadaOperatorLabelKeyName = "app.kubernetes.io/managed-by"
)
var (
// KarmadaOperatorLabel defines the default labels in the resource create by karmada operator
KarmadaOperatorLabel = labels.Set{KarmadaOperatorLabelKeyName: KarmadaOperator}
)

View File

@ -28,6 +28,7 @@ func (image *Image) Name() string {
return fmt.Sprintf("%s:%s", image.ImageRepository, image.ImageTag)
}
// KarmadaInProgressing sets the Karmada condition to Progressing.
func KarmadaInProgressing(karmada *Karmada, conditionType ConditionType, message string) {
karmada.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{
@ -40,6 +41,7 @@ func KarmadaInProgressing(karmada *Karmada, conditionType ConditionType, message
apimeta.SetStatusCondition(&karmada.Status.Conditions, newCondition)
}
// KarmadaCompleted sets the Karmada condition to Completed.
func KarmadaCompleted(karmada *Karmada, conditionType ConditionType, message string) {
karmada.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{
@ -52,6 +54,7 @@ func KarmadaCompleted(karmada *Karmada, conditionType ConditionType, message str
apimeta.SetStatusCondition(&karmada.Status.Conditions, newCondition)
}
// KarmadaFailed sets the Karmada condition to Failed.
func KarmadaFailed(karmada *Karmada, conditionType ConditionType, message string) {
karmada.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{

View File

@ -0,0 +1,65 @@
/*
Copyright 2020 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
const GroupName = "operator.karmada.io"
// GroupVersion specifies the group and the version used to register the objects.
var GroupVersion = v1.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// SchemeGroupVersion is group version used to register these objects
// Deprecated: use GroupVersion instead.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
// localSchemeBuilder will stay in k8s.io/kubernetes.
localSchemeBuilder = &SchemeBuilder
// AddToScheme applies all the stored functions to the scheme
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}
// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Karmada{},
&KarmadaList{},
)
// AddToGroupVersion allows the serialization of client types like ListOptions.
v1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}

View File

@ -24,9 +24,9 @@ import (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:path=karmadas,scope=Namespaced,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].status`,name="Ready",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// +kubebuilder:resource:path=karmadas,scope=Namespaced,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Ready")].status`,name="READY",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// Karmada enables declarative installation of karmada.
type Karmada struct {
@ -44,6 +44,38 @@ type Karmada struct {
Status KarmadaStatus `json:"status,omitempty"`
}
// CRDDownloadPolicy specifies a policy for how the operator will download the Karmada CRD tarball
type CRDDownloadPolicy string
const (
// DownloadAlways instructs the Karmada operator to always download the CRD tarball from a remote location.
DownloadAlways CRDDownloadPolicy = "Always"
// DownloadIfNotPresent instructs the Karmada operator to download the CRDs tarball from a remote location only if it is not yet present in the local cache.
DownloadIfNotPresent CRDDownloadPolicy = "IfNotPresent"
)
// HTTPSource specifies how to download the CRD tarball via either HTTP or HTTPS protocol.
type HTTPSource struct {
// URL specifies the URL of the CRD tarball resource.
URL string `json:"url,omitempty"`
}
// CRDTarball specifies the source from which the Karmada CRD tarball should be downloaded, along with the download policy to use.
type CRDTarball struct {
// HTTPSource specifies how to download the CRD tarball via either HTTP or HTTPS protocol.
// +optional
HTTPSource *HTTPSource `json:"httpSource,omitempty"`
// CRDDownloadPolicy specifies a policy that should be used to download the CRD tarball.
// Valid values are "Always" and "IfNotPresent".
// Defaults to "IfNotPresent".
// +kubebuilder:validation:Enum=Always;IfNotPresent
// +kubebuilder:default=IfNotPresent
// +optional
CRDDownloadPolicy *CRDDownloadPolicy `json:"crdDownloadPolicy,omitempty"`
}
// KarmadaSpec is the specification of the desired behavior of the Karmada.
type KarmadaSpec struct {
// HostCluster represents the cluster where to install the Karmada control plane.
@ -72,6 +104,47 @@ type KarmadaSpec struct {
// More info: https://github.com/karmada-io/karmada/blob/master/pkg/features/features.go
// +optional
FeatureGates map[string]bool `json:"featureGates,omitempty"`
// CRDTarball specifies the source from which the Karmada CRD tarball should be downloaded, along with the download policy to use.
// If not set, the operator will download the tarball from a GitHub release.
// By default, it will download the tarball of the same version as the operator itself.
// For instance, if the operator's version is v1.10.0, the tarball will be downloaded from the following location:
// https://github.com/karmada-io/karmada/releases/download/v1.10.0/crds.tar.gz
// By default, the operator will only attempt to download the tarball if it's not yet present in the local cache.
// +optional
CRDTarball *CRDTarball `json:"crdTarball,omitempty"`
// CustomCertificate specifies the configuration to customize the certificates
// for Karmada components or control the certificate generation process, such as
// the algorithm, validity period, etc.
// Currently, it only supports customizing the CA certificate for limited components.
// +optional
CustomCertificate *CustomCertificate `json:"customCertificate,omitempty"`
// Suspend indicates that the operator should suspend reconciliation
// for this Karmada control plane and all its managed resources.
// Karmada instances for which this field is not explicitly set to `true` will continue to be reconciled as usual.
// +optional
Suspend *bool `json:"suspend,omitempty"`
}
// CustomCertificate holds the configuration for generating the certificate.
type CustomCertificate struct {
// APIServerCACert references a Kubernetes secret containing the CA certificate
// for component karmada-apiserver.
// The secret must contain the following data keys:
// - tls.crt: The TLS certificate.
// - tls.key: The TLS private key.
// If specified, this CA will be used to issue client certificates for
// all components that access the APIServer as clients.
// +optional
APIServerCACert *LocalSecretReference `json:"apiServerCACert,omitempty"`
// LeafCertValidityDays specifies the validity period of leaf certificates (e.g., API Server certificate) in days.
// If not specified, the default validity period of 1 year will be used.
// +kubebuilder:validation:Minimum=1
// +optional
LeafCertValidityDays *int32 `json:"leafCertValidityDays,omitempty"`
}
// ImageRegistry represents an image registry as well as the
@ -198,19 +271,32 @@ type VolumeData struct {
// operator has no knowledge of where certificate files live, and they must be supplied.
type ExternalEtcd struct {
// Endpoints of etcd members. Required for ExternalEtcd.
// +required
Endpoints []string `json:"endpoints"`
// CAData is an SSL Certificate Authority file used to secure etcd communication.
// Required if using a TLS connection.
CAData []byte `json:"caData"`
// Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials.
CAData []byte `json:"caData,omitempty"`
// CertData is an SSL certification file used to secure etcd communication.
// Required if using a TLS connection.
CertData []byte `json:"certData"`
// Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials.
CertData []byte `json:"certData,omitempty"`
// KeyData is an SSL key file used to secure etcd communication.
// Required if using a TLS connection.
KeyData []byte `json:"keyData"`
// Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials.
KeyData []byte `json:"keyData,omitempty"`
// SecretRef references a Kubernetes secret containing the etcd connection credentials.
// The secret must contain the following data keys:
// ca.crt: The Certificate Authority (CA) certificate data.
// tls.crt: The TLS certificate data used for verifying the etcd server's certificate.
// tls.key: The TLS private key.
// Required to configure the connection to an external etcd cluster.
// +required
SecretRef LocalSecretReference `json:"secretRef"`
}
// KarmadaAPIServer holds settings to kube-apiserver component of the kubernetes.
@ -223,11 +309,31 @@ type KarmadaAPIServer struct {
// +optional
ServiceSubnet *string `json:"serviceSubnet,omitempty"`
// ServiceType represents the service type of karmada apiserver.
// it is NodePort by default.
// ServiceType represents the service type of Karmada API server.
// Valid options are: "ClusterIP", "NodePort", "LoadBalancer".
// Defaults to "ClusterIP".
//
// +kubebuilder:default="ClusterIP"
// +kubebuilder:validation:Enum=ClusterIP;NodePort;LoadBalancer
// +optional
ServiceType corev1.ServiceType `json:"serviceType,omitempty"`
// LoadBalancerClass specifies the load balancer implementation class for the Karmada API server.
// This field is applicable only when ServiceType is set to LoadBalancer.
// If specified, the service will be processed by the load balancer implementation that matches the specified class.
// By default, this is not set and the LoadBalancer type of Service uses the cloud provider's default load balancer
// implementation.
// Once set, it cannot be changed. The value must be a label-style identifier, with an optional prefix such as
// "internal-vip" or "example.com/internal-vip".
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class
// +optional
LoadBalancerClass *string `json:"loadBalancerClass,omitempty"`
// ServiceAnnotations is an extra set of annotations for service of karmada apiserver.
// more info: https://github.com/karmada-io/karmada/issues/4634
// +optional
ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
// ExtraArgs is an extra set of flags to pass to the kube-apiserver component or
// override. A key in this map is the flag name as it appears on the command line except
// without leading dash(es).
@ -244,6 +350,24 @@ type KarmadaAPIServer struct {
// +optional
ExtraArgs map[string]string `json:"extraArgs,omitempty"`
// ExtraVolumes specifies a list of extra volumes for the API server's pod
// To fulfil the base functionality required for a functioning control plane, when provisioning a new Karmada instance,
// the operator will automatically attach volumes for the API server pod needed to configure things such as TLS,
// SA token issuance/signing and secured connection to etcd, amongst others. However, given the wealth of options for configurability,
// there are additional features (e.g., encryption at rest and custom AuthN webhook) that can be configured. ExtraVolumes, in conjunction
// with ExtraArgs and ExtraVolumeMounts can be used to fulfil those use cases.
// +optional
ExtraVolumes []corev1.Volume `json:"extraVolumes,omitempty"`
// ExtraVolumeMounts specifies a list of extra volume mounts to be mounted into the API server's container
// To fulfil the base functionality required for a functioning control plane, when provisioning a new Karmada instance,
// the operator will automatically mount volumes into the API server container needed to configure things such as TLS,
// SA token issuance/signing and secured connection to etcd, amongst others. However, given the wealth of options for configurability,
// there are additional features (e.g., encryption at rest and custom AuthN webhook) that can be configured. ExtraVolumeMounts, in conjunction
// with ExtraArgs and ExtraVolumes can be used to fulfil those use cases.
// +optional
ExtraVolumeMounts []corev1.VolumeMount `json:"extraVolumeMounts,omitempty"`
// CertSANs sets extra Subject Alternative Names for the API Server signing cert.
// +optional
CertSANs []string `json:"certSANs,omitempty"`
@ -252,6 +376,12 @@ type KarmadaAPIServer struct {
// More info: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/
// +optional
FeatureGates map[string]bool `json:"featureGates,omitempty"`
// SidecarContainers specifies a list of sidecar containers to be deployed
// within the Karmada API server pod.
// This enables users to integrate auxiliary services such as KMS plugins for configuring encryption at rest.
// +optional
SidecarContainers []corev1.Container `json:"sidecarContainers,omitempty"`
}
// KarmadaAggregatedAPIServer holds settings to karmada-aggregated-apiserver component of the karmada.
@ -519,6 +649,11 @@ type CommonSettings struct {
// Image allows to customize the image used for the component.
Image `json:",inline"`
// ImagePullPolicy defines the policy for pulling the container image.
// If not specified, it defaults to IfNotPresent.
// +optional
ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
// Number of desired pods. This is a pointer to distinguish between explicit
// zero and not specified. Defaults to 1.
// +optional
@ -542,6 +677,12 @@ type CommonSettings struct {
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
// +optional
Resources corev1.ResourceRequirements `json:"resources,omitempty"`
// PriorityClassName specifies the priority class name for the component.
// If not specified, it defaults to "system-node-critical".
// +kubebuilder:default="system-node-critical"
// +optional
PriorityClassName string `json:"priorityClassName,omitempty"`
}
// Image allows to customize the image used for components.
@ -608,6 +749,21 @@ type KarmadaStatus struct {
// Conditions represents the latest available observations of a karmada's current state.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// APIServerService reports the location of the Karmada API server service which
// can be used by third-party applications to discover the Karmada Service, e.g.
// expose the service outside the cluster by Ingress.
// +optional
APIServerService *APIServerService `json:"apiServerService,omitempty"`
}
// APIServerService tells the location of Karmada API server service.
// Currently, it only includes the name of the service. The namespace
// of the service is the same as the namespace of the current Karmada object.
// Third-party applications can combine Name with that namespace to locate
// the Service, e.g. when exposing it outside the cluster via Ingress
// (see KarmadaStatus.APIServerService).
type APIServerService struct {
	// Name represents the name of the Karmada API Server service.
	// +required
	Name string `json:"name"`
}
// LocalSecretReference is a reference to a secret within the enclosing

View File

@ -1,16 +1,74 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto copies the receiver into out. out must be non-nil.
// APIServerService contains only value fields, so a plain assignment
// is a complete deep copy.
func (in *APIServerService) DeepCopyInto(out *APIServerService) {
	*out = *in
}
// DeepCopy returns a newly allocated copy of the receiver.
// A nil receiver yields nil.
func (in *APIServerService) DeepCopy() *APIServerService {
	if in == nil {
		return nil
	}
	clone := new(APIServerService)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto copies the receiver into out. out must be non-nil.
// Pointer fields are re-allocated so the copy shares no memory with
// the receiver.
func (in *CRDTarball) DeepCopyInto(out *CRDTarball) {
	*out = *in
	if in.HTTPSource != nil {
		out.HTTPSource = new(HTTPSource)
		*out.HTTPSource = *in.HTTPSource
	}
	if in.CRDDownloadPolicy != nil {
		out.CRDDownloadPolicy = new(CRDDownloadPolicy)
		*out.CRDDownloadPolicy = *in.CRDDownloadPolicy
	}
}
// DeepCopy returns a fresh CRDTarball that shares no memory with the
// receiver. A nil receiver yields nil.
func (in *CRDTarball) DeepCopy() *CRDTarball {
	if in == nil {
		return nil
	}
	clone := &CRDTarball{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommonSettings) DeepCopyInto(out *CommonSettings) {
*out = *in
@ -48,6 +106,32 @@ func (in *CommonSettings) DeepCopy() *CommonSettings {
return out
}
// DeepCopyInto copies the receiver into out. out must be non-nil.
// The two pointer fields are cloned so the copy is fully independent.
func (in *CustomCertificate) DeepCopyInto(out *CustomCertificate) {
	*out = *in
	if in.APIServerCACert != nil {
		out.APIServerCACert = new(LocalSecretReference)
		*out.APIServerCACert = *in.APIServerCACert
	}
	if in.LeafCertValidityDays != nil {
		out.LeafCertValidityDays = new(int32)
		*out.LeafCertValidityDays = *in.LeafCertValidityDays
	}
}
// DeepCopy returns a newly allocated copy of the receiver.
// A nil receiver yields nil.
func (in *CustomCertificate) DeepCopy() *CustomCertificate {
	if in == nil {
		return nil
	}
	clone := &CustomCertificate{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Etcd) DeepCopyInto(out *Etcd) {
*out = *in
@ -97,6 +181,7 @@ func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) {
*out = make([]byte, len(*in))
copy(*out, *in)
}
out.SecretRef = in.SecretRef
return
}
@ -110,6 +195,22 @@ func (in *ExternalEtcd) DeepCopy() *ExternalEtcd {
return out
}
// DeepCopyInto copies the receiver into out. out must be non-nil.
// HTTPSource contains only value fields, so a plain assignment
// is a complete deep copy.
func (in *HTTPSource) DeepCopyInto(out *HTTPSource) {
	*out = *in
}
// DeepCopy returns a newly allocated copy of the receiver.
// A nil receiver yields nil.
func (in *HTTPSource) DeepCopy() *HTTPSource {
	if in == nil {
		return nil
	}
	clone := new(HTTPSource)
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HostCluster) DeepCopyInto(out *HostCluster) {
*out = *in
@ -205,6 +306,18 @@ func (in *KarmadaAPIServer) DeepCopyInto(out *KarmadaAPIServer) {
*out = new(string)
**out = **in
}
if in.LoadBalancerClass != nil {
in, out := &in.LoadBalancerClass, &out.LoadBalancerClass
*out = new(string)
**out = **in
}
if in.ServiceAnnotations != nil {
in, out := &in.ServiceAnnotations, &out.ServiceAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ExtraArgs != nil {
in, out := &in.ExtraArgs, &out.ExtraArgs
*out = make(map[string]string, len(*in))
@ -212,6 +325,20 @@ func (in *KarmadaAPIServer) DeepCopyInto(out *KarmadaAPIServer) {
(*out)[key] = val
}
}
if in.ExtraVolumes != nil {
in, out := &in.ExtraVolumes, &out.ExtraVolumes
*out = make([]v1.Volume, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExtraVolumeMounts != nil {
in, out := &in.ExtraVolumeMounts, &out.ExtraVolumeMounts
*out = make([]v1.VolumeMount, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.CertSANs != nil {
in, out := &in.CertSANs, &out.CertSANs
*out = make([]string, len(*in))
@ -224,6 +351,13 @@ func (in *KarmadaAPIServer) DeepCopyInto(out *KarmadaAPIServer) {
(*out)[key] = val
}
}
if in.SidecarContainers != nil {
in, out := &in.SidecarContainers, &out.SidecarContainers
*out = make([]v1.Container, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@ -536,6 +670,21 @@ func (in *KarmadaSpec) DeepCopyInto(out *KarmadaSpec) {
(*out)[key] = val
}
}
if in.CRDTarball != nil {
in, out := &in.CRDTarball, &out.CRDTarball
*out = new(CRDTarball)
(*in).DeepCopyInto(*out)
}
if in.CustomCertificate != nil {
in, out := &in.CustomCertificate, &out.CustomCertificate
*out = new(CustomCertificate)
(*in).DeepCopyInto(*out)
}
if in.Suspend != nil {
in, out := &in.Suspend, &out.Suspend
*out = new(bool)
**out = **in
}
return
}
@ -559,11 +708,16 @@ func (in *KarmadaStatus) DeepCopyInto(out *KarmadaStatus) {
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.APIServerService != nil {
in, out := &in.APIServerService, &out.APIServerService
*out = new(APIServerService)
**out = **in
}
return
}
@ -711,17 +865,17 @@ func (in *VolumeData) DeepCopyInto(out *VolumeData) {
*out = *in
if in.VolumeClaim != nil {
in, out := &in.VolumeClaim, &out.VolumeClaim
*out = new(corev1.PersistentVolumeClaimTemplate)
*out = new(v1.PersistentVolumeClaimTemplate)
(*in).DeepCopyInto(*out)
}
if in.HostPath != nil {
in, out := &in.HostPath, &out.HostPath
*out = new(corev1.HostPathVolumeSource)
*out = new(v1.HostPathVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.EmptyDir != nil {
in, out := &in.EmptyDir, &out.EmptyDir
*out = new(corev1.EmptyDirVolumeSource)
*out = new(v1.EmptyDirVolumeSource)
(*in).DeepCopyInto(*out)
}
return

View File

@ -0,0 +1,129 @@
/*
Copyright 2025 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +kubebuilder:resource:path=clustertaintpolicies,scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterTaintPolicy automates taint management on Cluster objects based
// on declarative conditions.
// The system evaluates AddOnConditions to determine when to add taints,
// and RemoveOnConditions to determine when to remove taints.
// AddOnConditions are evaluated before RemoveOnConditions.
// Taints are NEVER automatically removed when the ClusterTaintPolicy is deleted.
type ClusterTaintPolicy struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec represents the desired behavior of ClusterTaintPolicy.
	// +required
	Spec ClusterTaintPolicySpec `json:"spec"`
}
// ClusterTaintPolicySpec represents the desired behavior of ClusterTaintPolicy.
type ClusterTaintPolicySpec struct {
	// TargetClusters specifies the clusters that ClusterTaintPolicy needs
	// to pay attention to.
	// For clusters that no longer match the TargetClusters, the taints
	// will be kept unchanged.
	// If targetClusters is not set, any cluster can be selected.
	// +optional
	TargetClusters *ClusterAffinity `json:"targetClusters,omitempty"`

	// AddOnConditions defines the conditions to match for triggering
	// the controller to add taints on the cluster object.
	// The match conditions are ANDed.
	// If AddOnConditions is empty, no taints will be added.
	// +optional
	AddOnConditions []MatchCondition `json:"addOnConditions,omitempty"`

	// RemoveOnConditions defines the conditions to match for triggering
	// the controller to remove taints from the cluster object.
	// The match conditions are ANDed.
	// If RemoveOnConditions is empty, no taints will be removed.
	// Note: AddOnConditions are evaluated before RemoveOnConditions.
	// +optional
	RemoveOnConditions []MatchCondition `json:"removeOnConditions,omitempty"`

	// Taints specifies the taints that need to be added or removed on
	// the cluster object which match with TargetClusters.
	// If the Taints is modified, the system will process the taints based on
	// the latest value of Taints during the next condition-triggered execution,
	// regardless of whether the taint has been added or removed.
	// +kubebuilder:validation:MinItems=1
	// +required
	Taints []Taint `json:"taints"`
}
// MatchCondition represents the condition match detail of activating the failover
// relevant taints on target clusters.
type MatchCondition struct {
	// ConditionType specifies the ClusterStatus condition type.
	// +required
	ConditionType string `json:"conditionType"`

	// Operator represents a relationship to a set of values.
	// Valid operators are In, NotIn.
	// Declared as an enum so invalid operators are rejected at admission
	// time, consistent with other enum-valued fields in this API group.
	// +kubebuilder:validation:Enum=In;NotIn
	// +required
	Operator MatchConditionOperator `json:"operator"`

	// StatusValues is an array of metav1.ConditionStatus values.
	// The item specifies the ClusterStatus condition status.
	// +required
	StatusValues []metav1.ConditionStatus `json:"statusValues"`
}
// A MatchConditionOperator operator is the set of operators that can be used
// in the match condition. See MatchCondition.Operator for usage.
type MatchConditionOperator string

const (
	// MatchConditionOpIn represents the operator In: the condition matches
	// when the observed status is one of MatchCondition.StatusValues.
	MatchConditionOpIn MatchConditionOperator = "In"
	// MatchConditionOpNotIn represents the operator NotIn: the condition
	// matches when the observed status is not in MatchCondition.StatusValues.
	MatchConditionOpNotIn MatchConditionOperator = "NotIn"
)
// Taint describes the taint that needs to be applied to the cluster.
type Taint struct {
	// Key represents the taint key to be applied to a cluster.
	// +required
	Key string `json:"key"`

	// Effect represents the taint effect to be applied to a cluster.
	// Effect reuses the upstream corev1.TaintEffect type; which effects
	// the taint controller actually honors is not defined here —
	// NOTE(review): confirm against the controller implementation.
	// +required
	Effect corev1.TaintEffect `json:"effect"`

	// Value represents the taint value corresponding to the taint key.
	// +optional
	Value string `json:"value,omitempty"`
}
// +kubebuilder:resource:scope="Cluster"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterTaintPolicyList contains a list of ClusterTaintPolicy
type ClusterTaintPolicyList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items holds the list of ClusterTaintPolicy objects.
	Items []ClusterTaintPolicy `json:"items"`
}

View File

@ -37,6 +37,8 @@ const (
// +kubebuilder:resource:path=federatedresourcequotas,scope=Namespaced,categories={karmada-io}
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:JSONPath=`.status.overall`,name=`OVERALL`,type=string
// +kubebuilder:printcolumn:JSONPath=`.status.overallUsed`,name=`OVERALL_USED`,type=string
// FederatedResourceQuota sets aggregate quota restrictions enforced per namespace across all clusters.
type FederatedResourceQuota struct {
@ -58,9 +60,16 @@ type FederatedResourceQuotaSpec struct {
// +required
Overall corev1.ResourceList `json:"overall"`
// StaticAssignments represents the subset of desired hard limits for each cluster.
// Note: for clusters not present in this list, Karmada will set an empty ResourceQuota to them, which means these
// clusters will have no quotas in the referencing namespace.
// StaticAssignments specifies ResourceQuota settings for specific clusters.
// If non-empty, Karmada will create ResourceQuotas in the corresponding clusters.
// Clusters not listed here or when StaticAssignments is empty will have no ResourceQuotas created.
//
// This field addresses multi-cluster configuration management challenges by allowing centralized
// control over ResourceQuotas across clusters.
//
// Note: The Karmada scheduler currently does NOT use this configuration for scheduling decisions.
// Future updates may integrate it into the scheduling logic.
//
// +optional
StaticAssignments []StaticClusterAssignment `json:"staticAssignments,omitempty"`

View File

@ -101,6 +101,7 @@ type RuleWithCluster struct {
// - ArgsOverrider
// - LabelsOverrider
// - AnnotationsOverrider
// - FieldOverrider
// - Plaintext
type Overriders struct {
// Plaintext represents override rules defined with plaintext overriders.
@ -126,6 +127,13 @@ type Overriders struct {
// AnnotationsOverrider represents the rules dedicated to handling workload annotations
// +optional
AnnotationsOverrider []LabelAnnotationOverrider `json:"annotationsOverrider,omitempty"`
// FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource.
// This allows changing a single field within the resource with multiple operations.
// It is designed to handle structured field values such as those found in ConfigMaps or Secrets.
// The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future.
// +optional
FieldOverrider []FieldOverrider `json:"fieldOverrider,omitempty"`
}
// LabelAnnotationOverrider represents the rules dedicated to handling workload labels/annotations
@ -255,6 +263,65 @@ const (
OverriderOpReplace OverriderOperator = "replace"
)
// FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource.
// This allows changing a single field within the resource with multiple operations.
// It is designed to handle structured field values such as those found in ConfigMaps or Secrets.
// The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future.
// Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously.
type FieldOverrider struct {
	// FieldPath specifies the initial location in the instance document where the operation should take place.
	// The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml"
	// specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml".
	// +required
	FieldPath string `json:"fieldPath"`

	// JSON represents the operations performed on the JSON document specified by the FieldPath.
	// +optional
	JSON []JSONPatchOperation `json:"json,omitempty"`

	// YAML represents the operations performed on the YAML document specified by the FieldPath.
	// NOTE(review): per the type comment above, JSON and YAML are mutually
	// exclusive in a single instance; set only one of them — confirm whether
	// this is enforced by admission validation.
	// +optional
	YAML []YAMLPatchOperation `json:"yaml,omitempty"`
}
// JSONPatchOperation represents a single field modification operation for JSON format.
// The operation shape (path + operator + value) resembles a JSON Patch
// (RFC 6902) operation applied relative to FieldOverrider.FieldPath.
type JSONPatchOperation struct {
	// SubPath specifies the relative location within the initial FieldPath where the operation should take place.
	// The path uses RFC 6901 for navigating into nested structures.
	// +required
	SubPath string `json:"subPath"`

	// Operator indicates the operation on target field.
	// Available operators are: "add", "remove", and "replace".
	// +kubebuilder:validation:Enum=add;remove;replace
	// +required
	Operator OverriderOperator `json:"operator"`

	// Value is the new value to set for the specified field if the operation is "add" or "replace".
	// For "remove" operation, this field is ignored.
	// +optional
	Value apiextensionsv1.JSON `json:"value,omitempty"`
}
// YAMLPatchOperation represents a single field modification operation for YAML format.
// It mirrors JSONPatchOperation field-for-field; the only difference is the
// format of the document addressed by FieldOverrider.FieldPath.
type YAMLPatchOperation struct {
	// SubPath specifies the relative location within the initial FieldPath where the operation should take place.
	// The path uses RFC 6901 for navigating into nested structures.
	// +required
	SubPath string `json:"subPath"`

	// Operator indicates the operation on target field.
	// Available operators are: "add", "remove", and "replace".
	// +kubebuilder:validation:Enum=add;remove;replace
	// +required
	Operator OverriderOperator `json:"operator"`

	// Value is the new value to set for the specified field if the operation is "add" or "replace".
	// For "remove" operation, this field is ignored.
	// +optional
	Value apiextensionsv1.JSON `json:"value,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// OverridePolicyList is a collection of OverridePolicy.

View File

@ -44,6 +44,9 @@ const (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=propagationpolicies,scope=Namespaced,shortName=pp,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.spec.conflictResolution`,name="CONFLICT-RESOLUTION",type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.priority`,name="PRIORITY",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// PropagationPolicy represents the policy that propagates a group of resources to one or more clusters.
type PropagationPolicy struct {
@ -173,6 +176,47 @@ type PropagationSpec struct {
// +kubebuilder:validation:Enum=Lazy
// +optional
ActivationPreference ActivationPreference `json:"activationPreference,omitempty"`
// Suspension declares the policy for suspending different aspects of propagation.
// nil means no suspension. no default values.
// +optional
Suspension *Suspension `json:"suspension,omitempty"`
// PreserveResourcesOnDeletion controls whether resources should be preserved on the
// member clusters when the resource template is deleted.
// If set to true, resources will be preserved on the member clusters.
// Default is false, which means resources will be deleted along with the resource template.
//
// This setting is particularly useful during workload migration scenarios to ensure
// that rollback can occur quickly without affecting the workloads running on the
// member clusters.
//
// Additionally, this setting applies uniformly across all member clusters and will not
// selectively control preservation on only some clusters.
//
// Note: This setting does not apply to the deletion of the policy itself.
// When the policy is deleted, the resource templates and their corresponding
// propagated resources in member clusters will remain unchanged unless explicitly deleted.
//
// +optional
PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"`
// SchedulePriority defines how Karmada should resolve the priority and preemption policy
// for workload scheduling.
//
// This setting is useful for controlling the scheduling behavior of offline workloads.
// By setting a higher or lower priority, users can control which workloads are scheduled first.
// Additionally, it allows specifying a preemption policy where higher-priority workloads can
// preempt lower-priority ones in scenarios of resource contention.
//
// Note: This feature is currently in the alpha stage. The priority-based scheduling functionality is
// controlled by the PriorityBasedScheduling feature gate, and preemption is controlled by the
// PriorityBasedPreemptiveScheduling feature gate. Currently, only priority-based scheduling is
// supported. Preemption functionality is not yet available and will be introduced in future
// releases as the feature matures.
//
// +optional
SchedulePriority *SchedulePriority `json:"schedulePriority,omitempty"`
}
// ResourceSelector the resources will be selected.
@ -207,13 +251,44 @@ type FieldSelector struct {
MatchExpressions []corev1.NodeSelectorRequirement `json:"matchExpressions,omitempty"`
}
// PurgeMode represents that how to deal with the legacy applications on the
// Suspension defines the policy for suspending different aspects of propagation.
type Suspension struct {
	// Dispatching controls whether dispatching should be suspended.
	// nil means not suspend, no default value, only accepts 'true'.
	// Note: true means stop propagating to all clusters. Can not co-exist
	// with DispatchingOnClusters which is used to suspend particular clusters.
	// +optional
	Dispatching *bool `json:"dispatching,omitempty"`

	// DispatchingOnClusters declares a list of clusters to which the dispatching
	// should be suspended.
	// Note: Can not co-exist with Dispatching which is used to suspend all.
	// NOTE(review): the mutual exclusion between Dispatching and
	// DispatchingOnClusters is documented but not expressed in markers here —
	// confirm it is enforced by admission validation.
	// +optional
	DispatchingOnClusters *SuspendClusters `json:"dispatchingOnClusters,omitempty"`
}
// SuspendClusters represents a group of clusters that should be suspended from propagating.
// Note: No plan to introduce the label selector or field selector to select clusters yet, as it
// would make the system unpredictable.
type SuspendClusters struct {
	// ClusterNames is the list of clusters to be selected.
	// Clusters are identified by exact name only (see the note above about
	// deliberately not supporting selectors).
	// +optional
	ClusterNames []string `json:"clusterNames,omitempty"`
}
// PurgeMode represents how to deal with the legacy application on the
// cluster from which the application is migrated.
// Valid options are "Immediately", "Graciously" and "Never".
type PurgeMode string
const (
// Immediately represents that Karmada will immediately evict the legacy
// application.
// application. This is useful in scenarios where an application can not
// tolerate two instances running simultaneously.
// For example, the Flink application supports exactly-once state consistency,
// which means it requires that no two instances of the application are running
// at the same time. During a failover, it is crucial to ensure that the old
// application is removed before creating a new one to avoid duplicate
// processing and maintaining state consistency.
Immediately PurgeMode = "Immediately"
// Graciously represents that Karmada will wait for the application to
// come back to healthy on the new cluster or after a timeout is reached
@ -253,6 +328,7 @@ type ApplicationFailoverBehavior struct {
// cluster from which the application is migrated.
// Valid options are "Immediately", "Graciously" and "Never".
// Defaults to "Graciously".
// +kubebuilder:validation:Enum=Immediately;Graciously;Never
// +kubebuilder:default=Graciously
// +optional
PurgeMode PurgeMode `json:"purgeMode,omitempty"`
@ -265,6 +341,23 @@ type ApplicationFailoverBehavior struct {
// Value must be positive integer.
// +optional
GracePeriodSeconds *int32 `json:"gracePeriodSeconds,omitempty"`
// StatePreservation defines the policy for preserving and restoring state data
// during failover events for stateful applications.
//
// When an application fails over from one cluster to another, this policy enables
// the extraction of critical data from the original resource configuration.
// Upon successful migration, the extracted data is then re-injected into the new
// resource, ensuring that the application can resume operation with its previous
// state intact.
// This is particularly useful for stateful applications where maintaining data
// consistency across failover events is crucial.
// If not specified, means no state data will be preserved.
//
// Note: This requires the StatefulFailoverInjection feature gate to be enabled,
// which is alpha.
// +optional
StatePreservation *StatePreservation `json:"statePreservation,omitempty"`
}
// DecisionConditions represents the decision conditions of performing the failover process.
@ -278,6 +371,41 @@ type DecisionConditions struct {
TolerationSeconds *int32 `json:"tolerationSeconds,omitempty"`
}
// StatePreservation defines the policy for preserving state during failover events.
type StatePreservation struct {
	// Rules contains a list of StatePreservationRule configurations.
	// Each rule specifies a JSONPath expression targeting specific pieces of
	// state data to be preserved during failover events. An AliasLabelName is associated
	// with each rule, serving as a label key when the preserved data is passed
	// to the new cluster.
	// At least one rule should be provided; an empty list preserves nothing.
	// +required
	Rules []StatePreservationRule `json:"rules"`
}
// StatePreservationRule defines a single rule for state preservation.
// It includes a JSONPath expression and an alias name that will be used
// as a label key when passing state information to the new cluster.
type StatePreservationRule struct {
	// AliasLabelName is the name that will be used as a label key when the preserved
	// data is passed to the new cluster. This facilitates the injection of the
	// preserved state back into the application resources during recovery.
	// NOTE(review): since this becomes a label key, the preserved value must
	// fit Kubernetes label-value constraints — confirm how oversized or
	// invalid extracted values are handled.
	// +required
	AliasLabelName string `json:"aliasLabelName"`

	// JSONPath is the JSONPath template used to identify the state data
	// to be preserved from the original resource configuration.
	// The JSONPath syntax follows the Kubernetes specification:
	// https://kubernetes.io/docs/reference/kubectl/jsonpath/
	//
	// Note: The JSONPath expression will start searching from the "status" field of
	// the API resource object by default. For example, to extract the "availableReplicas"
	// from a Deployment, the JSONPath expression should be "{.availableReplicas}", not
	// "{.status.availableReplicas}".
	//
	// +required
	JSONPath string `json:"jsonPath"`
}
// Placement represents the rule for select clusters.
type Placement struct {
// ClusterAffinity represents scheduling restrictions to a certain set of clusters.
@ -549,6 +677,54 @@ const (
LazyActivation ActivationPreference = "Lazy"
)
// SchedulePriority defines how Karmada should resolve the priority and preemption policy
// for workload scheduling.
type SchedulePriority struct {
	// PriorityClassSource specifies where Karmada should look for the PriorityClass definition.
	// Available options:
	// - KubePriorityClass: Uses Kubernetes PriorityClass (scheduling.k8s.io/v1)
	// - PodPriorityClass: Uses PriorityClassName from PodTemplate: PodSpec.PriorityClassName (not yet implemented)
	// - FederatedPriorityClass: Uses Karmada FederatedPriorityClass (not yet implemented)
	//
	// Note: only "KubePriorityClass" is accepted today — the enum below
	// rejects the other sources until they are implemented.
	// +kubebuilder:validation:Enum=KubePriorityClass
	// +required
	PriorityClassSource PriorityClassSource `json:"priorityClassSource"`

	// PriorityClassName specifies which PriorityClass to use. Its behavior depends on PriorityClassSource:
	//
	// Behavior of PriorityClassName:
	//
	// For KubePriorityClass:
	// - When specified: Uses the named Kubernetes PriorityClass.
	//
	// For PodPriorityClass:
	// - Uses PriorityClassName from the PodTemplate.
	// - Not yet implemented.
	//
	// For FederatedPriorityClass:
	// - Not yet implemented.
	//
	// +required
	PriorityClassName string `json:"priorityClassName"`
}
// PriorityClassSource defines the type for PriorityClassSource field.
// Of the constants below, only KubePriorityClass is currently accepted by
// SchedulePriority (see its validation enum).
type PriorityClassSource string

const (
	// FederatedPriorityClass specifies to use Karmada FederatedPriorityClass for priority resolution.
	// This feature is planned for future releases and is currently not implemented.
	FederatedPriorityClass PriorityClassSource = "FederatedPriorityClass"

	// KubePriorityClass specifies to use Kubernetes native PriorityClass (scheduling.k8s.io/v1)
	// for priority resolution. This is the default source.
	KubePriorityClass PriorityClassSource = "KubePriorityClass"

	// PodPriorityClass specifies to use the PriorityClassName defined in the workload's
	// PodTemplate for priority resolution.
	// This feature is currently not implemented.
	PodPriorityClass PriorityClassSource = "PodPriorityClass"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PropagationPolicyList contains a list of PropagationPolicy.
@ -562,6 +738,9 @@ type PropagationPolicyList struct {
// +genclient:nonNamespaced
// +kubebuilder:resource:path=clusterpropagationpolicies,scope="Cluster",shortName=cpp,categories={karmada-io}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:printcolumn:JSONPath=`.spec.conflictResolution`,name="CONFLICT-RESOLUTION",type=string
// +kubebuilder:printcolumn:JSONPath=`.spec.priority`,name="PRIORITY",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// ClusterPropagationPolicy represents the cluster-wide policy that propagates a group of resources to one or more clusters.
// Different with PropagationPolicy that could only propagate resources in its own namespace, ClusterPropagationPolicy

View File

@ -54,18 +54,3 @@ const (
// ClusterPropagationPolicyAnnotation is added to objects to specify associated ClusterPropagationPolicy name.
ClusterPropagationPolicyAnnotation = "clusterpropagationpolicy.karmada.io/name"
)
// TODO(whitewindmills): These deprecated labels will be removed in a future version.
const (
// PropagationPolicyNamespaceLabel is added to objects to specify associated PropagationPolicy namespace.
// Deprecated
PropagationPolicyNamespaceLabel = "propagationpolicy.karmada.io/namespace"
// PropagationPolicyNameLabel is added to objects to specify associated PropagationPolicy's name.
// Deprecated
PropagationPolicyNameLabel = "propagationpolicy.karmada.io/name"
// ClusterPropagationPolicyLabel is added to objects to specify associated ClusterPropagationPolicy.
// Deprecated
ClusterPropagationPolicyLabel = "clusterpropagationpolicy.karmada.io/name"
)

View File

@ -36,6 +36,11 @@ func (in *ApplicationFailoverBehavior) DeepCopyInto(out *ApplicationFailoverBeha
*out = new(int32)
**out = **in
}
if in.StatePreservation != nil {
in, out := &in.StatePreservation, &out.StatePreservation
*out = new(StatePreservation)
(*in).DeepCopyInto(*out)
}
return
}
@ -262,6 +267,106 @@ func (in *ClusterQuotaStatus) DeepCopy() *ClusterQuotaStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTaintPolicy) DeepCopyInto(out *ClusterTaintPolicy) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTaintPolicy.
func (in *ClusterTaintPolicy) DeepCopy() *ClusterTaintPolicy {
if in == nil {
return nil
}
out := new(ClusterTaintPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterTaintPolicy) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTaintPolicyList) DeepCopyInto(out *ClusterTaintPolicyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterTaintPolicy, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTaintPolicyList.
func (in *ClusterTaintPolicyList) DeepCopy() *ClusterTaintPolicyList {
if in == nil {
return nil
}
out := new(ClusterTaintPolicyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterTaintPolicyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterTaintPolicySpec) DeepCopyInto(out *ClusterTaintPolicySpec) {
*out = *in
if in.TargetClusters != nil {
in, out := &in.TargetClusters, &out.TargetClusters
*out = new(ClusterAffinity)
(*in).DeepCopyInto(*out)
}
if in.AddOnConditions != nil {
in, out := &in.AddOnConditions, &out.AddOnConditions
*out = make([]MatchCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.RemoveOnConditions != nil {
in, out := &in.RemoveOnConditions, &out.RemoveOnConditions
*out = make([]MatchCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Taints != nil {
in, out := &in.Taints, &out.Taints
*out = make([]Taint, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTaintPolicySpec.
func (in *ClusterTaintPolicySpec) DeepCopy() *ClusterTaintPolicySpec {
if in == nil {
return nil
}
out := new(ClusterTaintPolicySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommandArgsOverrider) DeepCopyInto(out *CommandArgsOverrider) {
*out = *in
@ -453,6 +558,36 @@ func (in *FederatedResourceQuotaStatus) DeepCopy() *FederatedResourceQuotaStatus
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FieldOverrider) DeepCopyInto(out *FieldOverrider) {
*out = *in
if in.JSON != nil {
in, out := &in.JSON, &out.JSON
*out = make([]JSONPatchOperation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.YAML != nil {
in, out := &in.YAML, &out.YAML
*out = make([]YAMLPatchOperation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldOverrider.
func (in *FieldOverrider) DeepCopy() *FieldOverrider {
if in == nil {
return nil
}
out := new(FieldOverrider)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FieldSelector) DeepCopyInto(out *FieldSelector) {
*out = *in
@ -513,6 +648,23 @@ func (in *ImagePredicate) DeepCopy() *ImagePredicate {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JSONPatchOperation) DeepCopyInto(out *JSONPatchOperation) {
*out = *in
in.Value.DeepCopyInto(&out.Value)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatchOperation.
func (in *JSONPatchOperation) DeepCopy() *JSONPatchOperation {
if in == nil {
return nil
}
out := new(JSONPatchOperation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LabelAnnotationOverrider) DeepCopyInto(out *LabelAnnotationOverrider) {
*out = *in
@ -536,6 +688,27 @@ func (in *LabelAnnotationOverrider) DeepCopy() *LabelAnnotationOverrider {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MatchCondition) DeepCopyInto(out *MatchCondition) {
*out = *in
if in.StatusValues != nil {
in, out := &in.StatusValues, &out.StatusValues
*out = make([]v1.ConditionStatus, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchCondition.
func (in *MatchCondition) DeepCopy() *MatchCondition {
if in == nil {
return nil
}
out := new(MatchCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OverridePolicy) DeepCopyInto(out *OverridePolicy) {
*out = *in
@ -677,6 +850,13 @@ func (in *Overriders) DeepCopyInto(out *Overriders) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.FieldOverrider != nil {
in, out := &in.FieldOverrider, &out.FieldOverrider
*out = make([]FieldOverrider, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
@ -838,6 +1018,21 @@ func (in *PropagationSpec) DeepCopyInto(out *PropagationSpec) {
*out = new(FailoverBehavior)
(*in).DeepCopyInto(*out)
}
if in.Suspension != nil {
in, out := &in.Suspension, &out.Suspension
*out = new(Suspension)
(*in).DeepCopyInto(*out)
}
if in.PreserveResourcesOnDeletion != nil {
in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion
*out = new(bool)
**out = **in
}
if in.SchedulePriority != nil {
in, out := &in.SchedulePriority, &out.SchedulePriority
*out = new(SchedulePriority)
**out = **in
}
return
}
@ -915,6 +1110,22 @@ func (in *RuleWithCluster) DeepCopy() *RuleWithCluster {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SchedulePriority) DeepCopyInto(out *SchedulePriority) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePriority.
func (in *SchedulePriority) DeepCopy() *SchedulePriority {
if in == nil {
return nil
}
out := new(SchedulePriority)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SpreadConstraint) DeepCopyInto(out *SpreadConstraint) {
*out = *in
@ -931,6 +1142,43 @@ func (in *SpreadConstraint) DeepCopy() *SpreadConstraint {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatePreservation) DeepCopyInto(out *StatePreservation) {
*out = *in
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]StatePreservationRule, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatePreservation.
func (in *StatePreservation) DeepCopy() *StatePreservation {
if in == nil {
return nil
}
out := new(StatePreservation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StatePreservationRule) DeepCopyInto(out *StatePreservationRule) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatePreservationRule.
func (in *StatePreservationRule) DeepCopy() *StatePreservationRule {
if in == nil {
return nil
}
out := new(StatePreservationRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StaticClusterAssignment) DeepCopyInto(out *StaticClusterAssignment) {
*out = *in
@ -970,3 +1218,83 @@ func (in *StaticClusterWeight) DeepCopy() *StaticClusterWeight {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuspendClusters) DeepCopyInto(out *SuspendClusters) {
*out = *in
if in.ClusterNames != nil {
in, out := &in.ClusterNames, &out.ClusterNames
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuspendClusters.
func (in *SuspendClusters) DeepCopy() *SuspendClusters {
if in == nil {
return nil
}
out := new(SuspendClusters)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Suspension) DeepCopyInto(out *Suspension) {
*out = *in
if in.Dispatching != nil {
in, out := &in.Dispatching, &out.Dispatching
*out = new(bool)
**out = **in
}
if in.DispatchingOnClusters != nil {
in, out := &in.DispatchingOnClusters, &out.DispatchingOnClusters
*out = new(SuspendClusters)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Suspension.
func (in *Suspension) DeepCopy() *Suspension {
if in == nil {
return nil
}
out := new(Suspension)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Taint) DeepCopyInto(out *Taint) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint.
func (in *Taint) DeepCopy() *Taint {
if in == nil {
return nil
}
out := new(Taint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *YAMLPatchOperation) DeepCopyInto(out *YAMLPatchOperation) {
*out = *in
in.Value.DeepCopyInto(&out.Value)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YAMLPatchOperation.
func (in *YAMLPatchOperation) DeepCopy() *YAMLPatchOperation {
if in == nil {
return nil
}
out := new(YAMLPatchOperation)
in.DeepCopyInto(out)
return out
}

View File

@ -1,3 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
@ -20,8 +23,8 @@ package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -43,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)
@ -62,6 +65,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ClusterOverridePolicyList{},
&ClusterPropagationPolicy{},
&ClusterPropagationPolicyList{},
&ClusterTaintPolicy{},
&ClusterTaintPolicyList{},
&FederatedResourceQuota{},
&FederatedResourceQuotaList{},
&OverridePolicy{},

View File

@ -1,3 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
@ -20,8 +23,8 @@ package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -43,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

View File

@ -18,6 +18,7 @@ package install
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"github.com/karmada-io/api/search"
@ -27,6 +28,6 @@ import (
// Install registers the API group and adds types to a scheme.
func Install(scheme *runtime.Scheme) {
utilruntime.Must(search.AddToScheme(scheme))
utilruntime.Must(searchv1alpha1.AddToScheme(scheme))
utilruntime.Must(scheme.SetVersionPriority(searchv1alpha1.SchemeGroupVersion))
utilruntime.Must(searchv1alpha1.Install(scheme))
utilruntime.Must(scheme.SetVersionPriority(schema.GroupVersion{Group: searchv1alpha1.GroupVersion.Group, Version: searchv1alpha1.GroupVersion.Version}))
}

View File

@ -1,3 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
@ -20,8 +23,8 @@ package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -43,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

View File

@ -36,9 +36,9 @@ const (
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=works,scope=Namespaced,shortName=wk,categories={karmada-io}
// +kubebuilder:printcolumn:JSONPath=`.spec.workload.manifests[*].kind`,name="Workload-Kind",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Applied")].status`,name="Applied",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// +kubebuilder:printcolumn:JSONPath=`.spec.workload.manifests[*].kind`,name="WORKLOAD-KIND",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Applied")].status`,name="APPLIED",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// Work defines a list of resources to be deployed on the member cluster.
type Work struct {
@ -57,6 +57,20 @@ type Work struct {
type WorkSpec struct {
// Workload represents the manifest workload to be deployed on managed cluster.
Workload WorkloadTemplate `json:"workload,omitempty"`
// SuspendDispatching controls whether dispatching should
// be suspended, nil means not suspend.
// Note: true means stop propagating to the corresponding member cluster, and
// does not prevent status collection.
// +optional
SuspendDispatching *bool `json:"suspendDispatching,omitempty"`
// PreserveResourcesOnDeletion controls whether resources should be preserved on the
// member cluster when the Work object is deleted.
// If set to true, resources will be preserved on the member cluster.
// Default is false, which means resources will be deleted along with the Work object.
// +optional
PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"`
}
// WorkloadTemplate represents the manifest workload to be deployed on managed cluster.
@ -146,6 +160,8 @@ const (
// WorkDegraded represents that the current state of Work does not match
// the desired state for a certain period.
WorkDegraded string = "Degraded"
// WorkDispatching represents the dispatching or suspension status of the Work resource
WorkDispatching string = "Dispatching"
)
// ResourceHealth represents that the health status of the reference resource.

View File

@ -381,6 +381,16 @@ func (in *WorkList) DeepCopyObject() runtime.Object {
func (in *WorkSpec) DeepCopyInto(out *WorkSpec) {
*out = *in
in.Workload.DeepCopyInto(&out.Workload)
if in.SuspendDispatching != nil {
in, out := &in.SuspendDispatching, &out.SuspendDispatching
*out = new(bool)
**out = **in
}
if in.PreserveResourcesOnDeletion != nil {
in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion
*out = new(bool)
**out = **in
}
return
}

View File

@ -1,3 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
@ -20,8 +23,8 @@ package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -43,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)

View File

@ -50,9 +50,9 @@ const (
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=resourcebindings,scope=Namespaced,shortName=rb,categories={karmada-io}
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="Scheduled",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="FullyApplied")].status`,name="FullyApplied",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="SCHEDULED",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="FullyApplied")].status`,name="FULLYAPPLIED",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// ResourceBinding represents a binding of a kubernetes resource with a propagation policy.
type ResourceBinding struct {
@ -146,6 +146,23 @@ type ResourceBindingSpec struct {
// It is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC.
// +optional
RescheduleTriggeredAt *metav1.Time `json:"rescheduleTriggeredAt,omitempty"`
// Suspension declares the policy for suspending different aspects of propagation.
// nil means no suspension. no default values.
// +optional
Suspension *Suspension `json:"suspension,omitempty"`
// PreserveResourcesOnDeletion controls whether resources should be preserved on the
// member clusters when the binding object is deleted.
// If set to true, resources will be preserved on the member clusters.
// Default is false, which means resources will be deleted along with the binding object.
// This setting applies to all Work objects created under this binding object.
// +optional
PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"`
// SchedulePriority represents the scheduling priority assigned to workloads.
// +optional
SchedulePriority *SchedulePriority `json:"schedulePriority,omitempty"`
}
// ObjectReference contains enough information to locate the referenced object inside current cluster.
@ -227,6 +244,13 @@ type GracefulEvictionTask struct {
// +required
FromCluster string `json:"fromCluster"`
// PurgeMode represents how to deal with the legacy applications on the
// cluster from which the application is migrated.
// Valid options are "Immediately", "Graciously" and "Never".
// +kubebuilder:validation:Enum=Immediately;Graciously;Never
// +optional
PurgeMode policyv1alpha1.PurgeMode `json:"purgeMode,omitempty"`
// Replicas indicates the number of replicas should be evicted.
// Should be ignored for resource type that doesn't have replica.
// +optional
@ -267,6 +291,11 @@ type GracefulEvictionTask struct {
// +optional
SuppressDeletion *bool `json:"suppressDeletion,omitempty"`
// PreservedLabelState represents the application state information collected from the original cluster,
// and it will be injected into the new cluster in form of application labels.
// +optional
PreservedLabelState map[string]string `json:"preservedLabelState,omitempty"`
// CreationTimestamp is a timestamp representing the server time when this object was
// created.
// Clients should not set this value to avoid the time inconsistency issue.
@ -275,6 +304,9 @@ type GracefulEvictionTask struct {
// Populated by the system. Read-only.
// +optional
CreationTimestamp *metav1.Time `json:"creationTimestamp,omitempty"`
// ClustersBeforeFailover records the clusters where running the application before failover.
ClustersBeforeFailover []string `json:"clustersBeforeFailover,omitempty"`
}
// BindingSnapshot is a snapshot of a ResourceBinding or ClusterResourceBinding.
@ -294,6 +326,31 @@ type BindingSnapshot struct {
Clusters []TargetCluster `json:"clusters,omitempty"`
}
// Suspension defines the policy for suspending dispatching and scheduling.
type Suspension struct {
policyv1alpha1.Suspension `json:",inline"`
// Scheduling controls whether scheduling should be suspended, the scheduler will pause scheduling and not
// process resource binding when the value is true and resume scheduling when it's false or nil.
// This is designed for third-party systems to temporarily pause the scheduling of applications, which enabling
// manage resource allocation, prioritize critical workloads, etc.
// It is expected that third-party systems use an admission webhook to suspend scheduling at the time of
// ResourceBinding creation. Once a ResourceBinding has been scheduled, it cannot be paused afterward, as it may
// lead to ineffective suspension.
// +optional
Scheduling *bool `json:"scheduling,omitempty"`
}
// SchedulePriority represents the scheduling priority assigned to workloads.
type SchedulePriority struct {
// Priority specifies the scheduling priority for the binding.
// Higher values indicate a higher priority.
// If not explicitly set, the default value is 0.
// +kubebuilder:default=0
// +optional
Priority int32 `json:"priority,omitempty"`
}
// ResourceBindingStatus represents the overall status of the strategy as well as the referenced resources.
type ResourceBindingStatus struct {
// SchedulerObservedGeneration is the generation(.metadata.generation) observed by the scheduler.
@ -409,9 +466,9 @@ const (
// +kubebuilder:resource:path=clusterresourcebindings,scope="Cluster",shortName=crb,categories={karmada-io}
// +kubebuilder:subresource:status
// +kubebuilder:storageversion
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="Scheduled",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="FullyApplied")].status`,name="FullyApplied",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Scheduled")].status`,name="SCHEDULED",type=string
// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="FullyApplied")].status`,name="FULLYAPPLIED",type=string
// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="AGE",type=date
// ClusterResourceBinding represents a binding of a kubernetes resource with a ClusterPropagationPolicy.
type ClusterResourceBinding struct {

View File

@ -16,13 +16,18 @@ limitations under the License.
package v1alpha2
import policyv1alpha1 "github.com/karmada-io/api/policy/v1alpha1"
// TaskOptions represents options for GracefulEvictionTasks.
type TaskOptions struct {
producer string
reason string
message string
gracePeriodSeconds *int32
suppressDeletion *bool
purgeMode policyv1alpha1.PurgeMode
producer string
reason string
message string
gracePeriodSeconds *int32
suppressDeletion *bool
preservedLabelState map[string]string
clustersBeforeFailover []string
}
// Option configures a TaskOptions
@ -38,6 +43,13 @@ func NewTaskOptions(opts ...Option) *TaskOptions {
return &options
}
// WithPurgeMode sets the purgeMode for TaskOptions
func WithPurgeMode(purgeMode policyv1alpha1.PurgeMode) Option {
return func(o *TaskOptions) {
o.purgeMode = purgeMode
}
}
// WithProducer sets the producer for TaskOptions
func WithProducer(producer string) Option {
return func(o *TaskOptions) {
@ -73,6 +85,20 @@ func WithSuppressDeletion(suppressDeletion *bool) Option {
}
}
// WithPreservedLabelState sets the preservedLabelState for TaskOptions
func WithPreservedLabelState(preservedLabelState map[string]string) Option {
return func(o *TaskOptions) {
o.preservedLabelState = preservedLabelState
}
}
// WithClustersBeforeFailover sets the clustersBeforeFailover for TaskOptions
func WithClustersBeforeFailover(clustersBeforeFailover []string) Option {
return func(o *TaskOptions) {
o.clustersBeforeFailover = clustersBeforeFailover
}
}
// TargetContains checks if specific cluster present on the target list.
func (s *ResourceBindingSpec) TargetContains(name string) bool {
for i := range s.Clusters {
@ -153,15 +179,37 @@ func (s *ResourceBindingSpec) GracefulEvictCluster(name string, options *TaskOpt
// build eviction task
evictingCluster := evictCluster.DeepCopy()
evictionTask := GracefulEvictionTask{
FromCluster: evictingCluster.Name,
Reason: options.reason,
Message: options.message,
Producer: options.producer,
GracePeriodSeconds: options.gracePeriodSeconds,
SuppressDeletion: options.suppressDeletion,
FromCluster: evictingCluster.Name,
PurgeMode: options.purgeMode,
Reason: options.reason,
Message: options.message,
Producer: options.producer,
GracePeriodSeconds: options.gracePeriodSeconds,
SuppressDeletion: options.suppressDeletion,
PreservedLabelState: options.preservedLabelState,
ClustersBeforeFailover: options.clustersBeforeFailover,
}
if evictingCluster.Replicas > 0 {
evictionTask.Replicas = &evictingCluster.Replicas
}
s.GracefulEvictionTasks = append(s.GracefulEvictionTasks, evictionTask)
}
// SchedulingSuspended tells if the scheduling of ResourceBinding or
// ClusterResourceBinding is suspended.
func (s *ResourceBindingSpec) SchedulingSuspended() bool {
if s == nil || s.Suspension == nil || s.Suspension.Scheduling == nil {
return false
}
return *s.Suspension.Scheduling
}
// SchedulePriorityValue returns the scheduling priority declared
// by '.spec.SchedulePriority.Priority'.
func (s *ResourceBindingSpec) SchedulePriorityValue() int32 {
if s.SchedulePriority == nil {
return 0
}
return s.SchedulePriority.Priority
}

View File

@ -1,371 +0,0 @@
/*
Copyright 2022 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
"reflect"
"testing"
"k8s.io/utils/ptr"
)
func TestResourceBindingSpec_TargetContains(t *testing.T) {
tests := []struct {
Name string
Spec ResourceBindingSpec
ClusterName string
Expect bool
}{
{
Name: "cluster present in target",
Spec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}}},
ClusterName: "m1",
Expect: true,
},
{
Name: "cluster not present in target",
Spec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}}},
ClusterName: "m3",
Expect: false,
},
{
Name: "cluster is empty",
Spec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}}},
ClusterName: "",
Expect: false,
},
{
Name: "target list is empty",
Spec: ResourceBindingSpec{Clusters: []TargetCluster{}},
ClusterName: "m1",
Expect: false,
},
}
for _, test := range tests {
tc := test
t.Run(tc.Name, func(t *testing.T) {
if tc.Spec.TargetContains(tc.ClusterName) != tc.Expect {
t.Fatalf("expect: %v, but got: %v", tc.Expect, tc.Spec.TargetContains(tc.ClusterName))
}
})
}
}
func TestResourceBindingSpec_AssignedReplicasForCluster(t *testing.T) {
tests := []struct {
Name string
Spec ResourceBindingSpec
ClusterName string
ExpectReplicas int32
}{
{
Name: "returns valid replicas in case cluster present",
Spec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}}},
ClusterName: "m1",
ExpectReplicas: 1,
},
{
Name: "returns 0 in case cluster not present",
Spec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}}},
ClusterName: "non-exist",
ExpectReplicas: 0,
},
}
for _, test := range tests {
tc := test
t.Run(tc.Name, func(t *testing.T) {
got := tc.Spec.AssignedReplicasForCluster(tc.ClusterName)
if tc.ExpectReplicas != got {
t.Fatalf("expect: %d, but got: %d", tc.ExpectReplicas, got)
}
})
}
}
func TestResourceBindingSpec_RemoveCluster(t *testing.T) {
tests := []struct {
Name string
InputSpec ResourceBindingSpec
ClusterName string
ExpectSpec ResourceBindingSpec
}{
{
Name: "cluster not exist should do nothing",
InputSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
ClusterName: "no-exist",
ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
},
{
Name: "remove cluster from head",
InputSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
ClusterName: "m1",
ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m2"}, {Name: "m3"}}},
},
{
Name: "remove cluster from middle",
InputSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
ClusterName: "m2",
ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m3"}}},
},
{
Name: "remove cluster from tail",
InputSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}}},
ClusterName: "m3",
ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}}},
},
{
Name: "remove cluster from empty list",
InputSpec: ResourceBindingSpec{Clusters: []TargetCluster{}},
ClusterName: "na",
ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{}},
},
}
for _, test := range tests {
tc := test
t.Run(tc.Name, func(t *testing.T) {
tc.InputSpec.RemoveCluster(tc.ClusterName)
if !reflect.DeepEqual(tc.InputSpec.Clusters, tc.ExpectSpec.Clusters) {
t.Fatalf("expect: %v, but got: %v", tc.ExpectSpec.Clusters, tc.InputSpec.Clusters)
}
})
}
}
// TestResourceBindingSpec_GracefulEvictCluster verifies that GracefulEvictCluster
// removes the named cluster from spec.Clusters and appends a matching
// GracefulEvictionTask recording the replicas that cluster held, while:
//   - leaving the spec untouched when the cluster is not a scheduling target;
//   - appending to (not replacing) any pre-existing eviction tasks;
//   - not enqueuing a duplicate task when one already exists for the cluster.
func TestResourceBindingSpec_GracefulEvictCluster(t *testing.T) {
	tests := []struct {
		Name      string
		InputSpec ResourceBindingSpec
		// EvictEvent carries the cluster/reason/message/producer that are fed
		// into the GracefulEvictCluster call below.
		EvictEvent GracefulEvictionTask
		ExpectSpec ResourceBindingSpec
	}{
		{
			Name: "cluster not exist should do nothing",
			InputSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}},
			},
			EvictEvent: GracefulEvictionTask{FromCluster: "non-exist"},
			ExpectSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1"}, {Name: "m2"}, {Name: "m3"}},
			},
		},
		{
			Name: "evict cluster from head",
			InputSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
			},
			EvictEvent: GracefulEvictionTask{
				FromCluster: "m1",
				Reason:      EvictionReasonTaintUntolerated,
				Message:     "graceful eviction",
				Producer:    EvictionProducerTaintManager,
			},
			ExpectSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
				GracefulEvictionTasks: []GracefulEvictionTask{
					{
						FromCluster: "m1",
						// Replicas is expected to be captured from the evicted
						// target (1), not from the eviction event.
						Replicas: ptr.To[int32](1),
						Reason:   EvictionReasonTaintUntolerated,
						Message:  "graceful eviction",
						Producer: EvictionProducerTaintManager,
					},
				},
			},
		},
		{
			Name: "remove cluster from middle",
			InputSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
			},
			EvictEvent: GracefulEvictionTask{
				FromCluster: "m2",
				Reason:      EvictionReasonTaintUntolerated,
				Message:     "graceful eviction",
				Producer:    EvictionProducerTaintManager,
			},
			ExpectSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m3", Replicas: 3}},
				GracefulEvictionTasks: []GracefulEvictionTask{
					{
						FromCluster: "m2",
						Replicas:    ptr.To[int32](2),
						Reason:      EvictionReasonTaintUntolerated,
						Message:     "graceful eviction",
						Producer:    EvictionProducerTaintManager,
					},
				},
			},
		},
		{
			Name: "remove cluster from tail",
			InputSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
			},
			EvictEvent: GracefulEvictionTask{
				FromCluster: "m3",
				Reason:      EvictionReasonTaintUntolerated,
				Message:     "graceful eviction",
				Producer:    EvictionProducerTaintManager,
			},
			ExpectSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}},
				GracefulEvictionTasks: []GracefulEvictionTask{
					{
						FromCluster: "m3",
						Replicas:    ptr.To[int32](3),
						Reason:      EvictionReasonTaintUntolerated,
						Message:     "graceful eviction",
						Producer:    EvictionProducerTaintManager,
					},
				},
			},
		},
		{
			// A new task must be appended after tasks already queued for
			// other clusters, preserving their order.
			Name: "eviction task should be appended to non-empty tasks",
			InputSpec: ResourceBindingSpec{
				Clusters:              []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}, {Name: "m3", Replicas: 3}},
				GracefulEvictionTasks: []GracefulEvictionTask{{FromCluster: "original-cluster"}},
			},
			EvictEvent: GracefulEvictionTask{
				FromCluster: "m3",
				Reason:      EvictionReasonTaintUntolerated,
				Message:     "graceful eviction",
				Producer:    EvictionProducerTaintManager,
			},
			ExpectSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}},
				GracefulEvictionTasks: []GracefulEvictionTask{
					{
						FromCluster: "original-cluster",
					},
					{
						FromCluster: "m3",
						Replicas:    ptr.To[int32](3),
						Reason:      EvictionReasonTaintUntolerated,
						Message:     "graceful eviction",
						Producer:    EvictionProducerTaintManager,
					},
				},
			},
		},
		{
			Name:       "remove cluster from empty list",
			InputSpec:  ResourceBindingSpec{Clusters: []TargetCluster{}},
			ExpectSpec: ResourceBindingSpec{Clusters: []TargetCluster{}},
		},
		{
			// When a task for the same cluster is already queued, the new
			// event must be ignored: the existing task (message "v1") is
			// expected to survive unchanged and no second task is added.
			Name: "same eviction task should not be appended multiple times",
			InputSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m1", Replicas: 1}, {Name: "m2", Replicas: 2}},
				GracefulEvictionTasks: []GracefulEvictionTask{
					{
						FromCluster: "m1",
						Replicas:    ptr.To[int32](1),
						Reason:      EvictionReasonTaintUntolerated,
						Message:     "graceful eviction v1",
						Producer:    EvictionProducerTaintManager,
					},
				},
			},
			EvictEvent: GracefulEvictionTask{
				FromCluster: "m1",
				Replicas:    ptr.To[int32](1),
				Reason:      EvictionReasonTaintUntolerated,
				Message:     "graceful eviction v2",
				Producer:    EvictionProducerTaintManager,
			},
			ExpectSpec: ResourceBindingSpec{
				Clusters: []TargetCluster{{Name: "m2", Replicas: 2}},
				GracefulEvictionTasks: []GracefulEvictionTask{
					{
						FromCluster: "m1",
						Replicas:    ptr.To[int32](1),
						Reason:      EvictionReasonTaintUntolerated,
						Message:     "graceful eviction v1",
						Producer:    EvictionProducerTaintManager,
					},
				},
			},
		},
	}
	for _, test := range tests {
		tc := test // capture range variable for the subtest closure
		t.Run(tc.Name, func(t *testing.T) {
			// Only producer/reason/message are passed as options; the expected
			// tasks above show Replicas being recorded from the spec itself.
			tc.InputSpec.GracefulEvictCluster(tc.EvictEvent.FromCluster, NewTaskOptions(WithProducer(tc.EvictEvent.Producer), WithReason(tc.EvictEvent.Reason), WithMessage(tc.EvictEvent.Message)))
			if !reflect.DeepEqual(tc.InputSpec.Clusters, tc.ExpectSpec.Clusters) {
				t.Fatalf("expect clusters: %v, but got: %v", tc.ExpectSpec.Clusters, tc.InputSpec.Clusters)
			}
			if !reflect.DeepEqual(tc.InputSpec.GracefulEvictionTasks, tc.ExpectSpec.GracefulEvictionTasks) {
				t.Fatalf("expect tasks: %v, but got: %v", tc.ExpectSpec.GracefulEvictionTasks, tc.InputSpec.GracefulEvictionTasks)
			}
		})
	}
}
// TestResourceBindingSpec_ClusterInGracefulEvictionTasks verifies that
// ClusterInGracefulEvictionTasks reports whether the named cluster currently
// has a pending graceful-eviction task on the spec.
func TestResourceBindingSpec_ClusterInGracefulEvictionTasks(t *testing.T) {
	evictionTasks := []GracefulEvictionTask{
		{
			FromCluster: "member1",
			Producer:    EvictionProducerTaintManager,
			Reason:      EvictionReasonTaintUntolerated,
		},
		{
			FromCluster: "member2",
			Producer:    EvictionProducerTaintManager,
			Reason:      EvictionReasonTaintUntolerated,
		},
	}
	cases := []struct {
		name    string
		spec    ResourceBindingSpec
		cluster string
		want    bool
	}{
		{
			name:    "targetCluster is in the process of eviction",
			spec:    ResourceBindingSpec{GracefulEvictionTasks: evictionTasks},
			cluster: "member1",
			want:    true,
		},
		{
			name:    "targetCluster is not in the process of eviction",
			spec:    ResourceBindingSpec{GracefulEvictionTasks: evictionTasks},
			cluster: "member3",
			want:    false,
		},
	}
	for _, c := range cases {
		c := c // capture range variable for the subtest closure
		t.Run(c.name, func(t *testing.T) {
			if got := c.spec.ClusterInGracefulEvictionTasks(c.cluster); got != c.want {
				t.Errorf("expected: %v, but got: %v", c.want, got)
			}
		})
	}
}

View File

@ -60,6 +60,17 @@ const (
// BindingManagedByLabel is added to ResourceBinding to represent what kind of resource manages this Binding.
BindingManagedByLabel = "binding.karmada.io/managed-by"
// ResourceTemplateGenerationAnnotationKey records the generation of resource template in Karmada APIServer,
// It will be injected into the resource when propagating to member clusters, to denote the specific version of
// the resource template from which the resource is derived. It might be helpful in the following cases:
// 1. Facilitating observation from member clusters to ascertain if the most recent resource template has been
// completely synced.
// 2. The annotation will be synced back to Karmada during the process of syncing resource status,
// by leveraging this annotation, Karmada can infer if the most recent resource template has been completely
// synced on member clusters, then generates accurate observed generation(like Deployment's .status.observedGeneration)
// which might be required by the release system.
ResourceTemplateGenerationAnnotationKey = "resourcetemplate.karmada.io/generation"
)
// Define resource conflict resolution

View File

@ -149,10 +149,22 @@ func (in *GracefulEvictionTask) DeepCopyInto(out *GracefulEvictionTask) {
*out = new(bool)
**out = **in
}
if in.PreservedLabelState != nil {
in, out := &in.PreservedLabelState, &out.PreservedLabelState
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.CreationTimestamp != nil {
in, out := &in.CreationTimestamp, &out.CreationTimestamp
*out = (*in).DeepCopy()
}
if in.ClustersBeforeFailover != nil {
in, out := &in.ClustersBeforeFailover, &out.ClustersBeforeFailover
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -348,6 +360,21 @@ func (in *ResourceBindingSpec) DeepCopyInto(out *ResourceBindingSpec) {
in, out := &in.RescheduleTriggeredAt, &out.RescheduleTriggeredAt
*out = (*in).DeepCopy()
}
if in.Suspension != nil {
in, out := &in.Suspension, &out.Suspension
*out = new(Suspension)
(*in).DeepCopyInto(*out)
}
if in.PreserveResourcesOnDeletion != nil {
in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion
*out = new(bool)
**out = **in
}
if in.SchedulePriority != nil {
in, out := &in.SchedulePriority, &out.SchedulePriority
*out = new(SchedulePriority)
**out = **in
}
return
}
@ -395,6 +422,44 @@ func (in *ResourceBindingStatus) DeepCopy() *ResourceBindingStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SchedulePriority) DeepCopyInto(out *SchedulePriority) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulePriority.
func (in *SchedulePriority) DeepCopy() *SchedulePriority {
if in == nil {
return nil
}
out := new(SchedulePriority)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Suspension) DeepCopyInto(out *Suspension) {
*out = *in
in.Suspension.DeepCopyInto(&out.Suspension)
if in.Scheduling != nil {
in, out := &in.Scheduling, &out.Scheduling
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Suspension.
func (in *Suspension) DeepCopy() *Suspension {
if in == nil {
return nil
}
out := new(Suspension)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TargetCluster) DeepCopyInto(out *TargetCluster) {
*out = *in
@ -424,6 +489,18 @@ func (in *TaskOptions) DeepCopyInto(out *TaskOptions) {
*out = new(bool)
**out = **in
}
if in.preservedLabelState != nil {
in, out := &in.preservedLabelState, &out.preservedLabelState
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.clustersBeforeFailover != nil {
in, out := &in.clustersBeforeFailover, &out.clustersBeforeFailover
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

View File

@ -1,3 +1,6 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Karmada Authors.
@ -20,8 +23,8 @@ package v1alpha2
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName specifies the group name used to register the objects.
@ -43,7 +46,7 @@ var (
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
// Depreciated: use Install instead
// Deprecated: use Install instead
AddToScheme = localSchemeBuilder.AddToScheme
Install = localSchemeBuilder.AddToScheme
)