commit 7395a8bdf5
@@ -11,7 +11,7 @@ global:
 ## - myRegistryKeySecretName
 imagePullSecrets: []

-## #param global.commonLabels Common labels applied to all ressources
+## #param global.commonLabels Common labels applied to all resources
 ## E.g.
 ## commonLabels:
 ## team: infra

@@ -227,7 +227,7 @@ The process of synchronizing `EndpointSlice` from `ServiceProvisionClusters` to
 When creating the Work, in order to facilitate problem investigation, we should add following annotation to record the original `EndpointSlice` information:
 * `endpointslice.karmada.io/work-provision-cluster`: the cluster name of the original `EndpointSlice`.
 Also, we should add the following annotation to the syned `EndpointSlice` record the original information:
-* `endpointslice.karmada.io/endpointslice-generation`: the resoruce generation of the `EndpointSlice`, it could be used to check whether the `EndpointSlice` is the newest version.
+* `endpointslice.karmada.io/endpointslice-generation`: the resource generation of the `EndpointSlice`, it could be used to check whether the `EndpointSlice` is the newest version.
 * `endpointslice.karmada.io/provision-cluster`: the cluster location of the original `EndpointSlice`.
 1. Karmada will sync the `EndpointSlice`'s work to the member clusters.
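
The two annotations in this hunk identify where a synced `EndpointSlice` came from. A minimal sketch of stamping them onto a slice, assuming the keys above; the helper name and the decimal encoding of the generation are illustrative, not taken from the Karmada codebase:

```go
package main

import (
	"strconv"

	discoveryv1 "k8s.io/api/discovery/v1"
)

// annotateSyncedEndpointSlice is a hypothetical helper that records the
// origin of a synced EndpointSlice using the annotations described above.
func annotateSyncedEndpointSlice(eps *discoveryv1.EndpointSlice, provisionCluster string, generation int64) {
	if eps.Annotations == nil {
		eps.Annotations = map[string]string{}
	}
	// The cluster the original EndpointSlice lives in.
	eps.Annotations["endpointslice.karmada.io/provision-cluster"] = provisionCluster
	// The resource generation, used to check whether the synced copy is current.
	eps.Annotations["endpointslice.karmada.io/endpointslice-generation"] = strconv.FormatInt(generation, 10)
}
```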
|
@@ -274,9 +274,9 @@ We should have following Condition in `MultiClusterService`:
 ```go
 MCSServiceAppliedConditionType = "ServiceApplied"

-MCSEndpointSliceCollectedCondtionType = "EndpointSliceCollected"
+MCSEndpointSliceCollectedConditionType = "EndpointSliceCollected"

-MCSEndpointSliceAppliedCondtionType = "EndpointSliceApplied"
+MCSEndpointSliceAppliedConditionType = "EndpointSliceApplied"
 ```

 `MCSServiceAppliedConditionType` is used to record the status of `Service` propagation, for example:
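
A hedged sketch of how one of these condition types might be recorded on the `MultiClusterService` status, using the standard `meta.SetStatusCondition` helper; the reason strings and the shape of the conditions slice are assumptions for illustration:

```go
package main

import (
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const mcsServiceAppliedConditionType = "ServiceApplied"

// setServiceApplied is a hypothetical helper that records Service
// propagation status on a conditions slice; meta.SetStatusCondition
// bumps LastTransitionTime only when the status actually changes.
func setServiceApplied(conditions *[]metav1.Condition, ok bool, message string) {
	status := metav1.ConditionTrue
	reason := "ServiceApplied"
	if !ok {
		status = metav1.ConditionFalse
		reason = "ServiceApplyFailed"
	}
	meta.SetStatusCondition(conditions, metav1.Condition{
		Type:    mcsServiceAppliedConditionType,
		Status:  status,
		Reason:  reason,
		Message: message,
	})
}
```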
|
@@ -361,4 +361,4 @@ Another alternative approach could be to enforce a strict naming convention for
 <!--
 Note: This is a simplified version of kubernetes enhancement proposal template.
 https://github.com/kubernetes/enhancements/tree/3317d4cb548c396a430d1c1ac6625226018adf6a/keps/NNNN-kep-template
 -->

@@ -108,7 +108,7 @@ verify_downloader() {
 return 0
 }

-# Create tempory directory and cleanup when done
+# Create temporary directory and cleanup when done
 setup_tmp() {
 TMP_DIR=$(mktemp -d -t "${INSTALL_CLI_TYPE}"-install.XXXXXXXXXX)
 TMP_METADATA="${TMP_DIR}/${INSTALL_CLI_TYPE}.json"

@@ -381,7 +381,7 @@ function util::wait_pod_ready() {
 }

 # util::wait_apiservice_ready waits for apiservice state becomes Available until timeout.
-# Parmeters:
+# Parameters:
 # - $1: k8s context name, such as "karmada-apiserver"
 # - $2: apiservice label, such as "app=etcd"
 # - $3: time out, such as "200s"

@@ -402,7 +402,7 @@ function util::wait_apiservice_ready() {
 }

 # util::wait_cluster_ready waits for cluster state becomes ready until timeout.
-# Parmeters:
+# Parameters:
 # - $1: context name, such as "karmada-apiserver"
 # - $2: cluster name, such as "member1"
 function util:wait_cluster_ready() {

@@ -444,7 +444,7 @@ function util::kubectl_with_retry() {

 # util::delete_all_clusters deletes all clusters directly
 # util::delete_all_clusters actually do three things: delete cluster、remove kubeconfig、record delete log
-# Parmeters:
+# Parameters:
 # - $1: KUBECONFIG file of host cluster, such as "~/.kube/karmada.config"
 # - $2: KUBECONFIG file of member cluster, such as "~/.kube/members.config"
 # - $3: log file path, such as "/tmp/karmada/"

@@ -466,7 +466,7 @@ function util::delete_all_clusters() {

 # util::create_cluster creates a kubernetes cluster
 # util::create_cluster creates a kind cluster and don't wait for control plane node to be ready.
-# Parmeters:
+# Parameters:
 # - $1: cluster name, such as "host"
 # - $2: KUBECONFIG file, such as "/var/run/host.config"
 # - $3: node docker image to use for booting the cluster, such as "kindest/node:v1.19.1"

@@ -90,7 +90,7 @@ type InitOpt func(o *InitOptions)
 var _ tasks.InitData = &initData{}

 // initData defines all the runtime information used when ruing init workflow;
-// this data is shared across all the tasks tha are included in the workflow.
+// this data is shared across all the tasks that are included in the workflow.
 type initData struct {
 sync.Once
 certs.CertStore

@@ -105,7 +105,7 @@ func runRemoveComponentSubTask(component string, workloadNameFunc util.Namefunc,
 constants.KarmadaOperatorLabel,
 )
 if err != nil {
-return fmt.Errorf("failed to cleanup serivce of component %s, err: %w", component, err)
+return fmt.Errorf("failed to cleanup service of component %s, err: %w", component, err)
 }
 }

@@ -95,7 +95,7 @@ func CreateOrUpdateService(client clientset.Interface, service *corev1.Service)
 // Ignore if the Service is invalid with this error message:
 // Service "apiserver" is invalid: provided Port is already allocated.
 if apierrors.IsInvalid(err) && strings.Contains(err.Error(), errAllocated.Error()) {
-klog.V(2).ErrorS(err, "failed to create or update serivce", "service", klog.KObj(service))
+klog.V(2).ErrorS(err, "failed to create or update service", "service", klog.KObj(service))
 return nil
 }
 return fmt.Errorf("unable to create Service: %v", err)

@@ -128,7 +128,7 @@ func Unpack(file, targetPath string) error {
 }
 outFile.Close()
 default:
-fmt.Printf("uknown type: %v in %s\n", header.Typeflag, header.Name)
+fmt.Printf("unknown type: %v in %s\n", header.Typeflag, header.Name)
 }
 }
 return nil
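
The `default` branch fixed here is the catch-all of a typical `archive/tar` walk. A minimal sketch of that pattern, assuming a plain tar stream and abbreviated error handling:

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// unpackTar walks a tar stream and handles each entry by type,
// printing a notice for unknown type flags as Unpack does above.
func unpackTar(r io.Reader, targetPath string) error {
	tr := tar.NewReader(r)
	for {
		header, err := tr.Next()
		if err == io.EOF {
			return nil // end of archive
		}
		if err != nil {
			return err
		}
		dest := filepath.Join(targetPath, header.Name)
		switch header.Typeflag {
		case tar.TypeDir:
			if err := os.MkdirAll(dest, 0o755); err != nil {
				return err
			}
		case tar.TypeReg:
			outFile, err := os.Create(dest)
			if err != nil {
				return err
			}
			if _, err := io.Copy(outFile, tr); err != nil {
				outFile.Close()
				return err
			}
			outFile.Close()
		default:
			fmt.Printf("unknown type: %v in %s\n", header.Typeflag, header.Name)
		}
	}
}
```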
|
|
|
@@ -34,7 +34,7 @@ import (
 type labelEventKind int

 const (
-// addLabelEvent refer to addding util.RetainReplicasLabel to resource scaled by HPA
+// addLabelEvent refer to adding util.RetainReplicasLabel to resource scaled by HPA
 addLabelEvent labelEventKind = iota
 // deleteLabelEvent refer to deleting util.RetainReplicasLabel from resource scaled by HPA
 deleteLabelEvent

@@ -62,7 +62,7 @@ import (
 )

 const (
-// bindingDependedIdLabelKey is the resoruce id of the independent binding which the attached binding depends on.
+// bindingDependedIdLabelKey is the resource id of the independent binding which the attached binding depends on.
 bindingDependedIdLabelKey = "resourcebinding.karmada.io/depended-id"

 // bindingDependedByLabelKeyPrefix is the prefix to a label key specifying an attached binding referred by which independent binding.

@@ -377,7 +377,7 @@ func (d *DependenciesDistributor) recordDependencies(binding *workv1alpha2.Resou
 klog.Errorf("Failed to marshal dependencies of binding(%s/%s): %v", binding.Namespace, binding.Name, err)
 return err
 }
-depenciesStr := string(dependenciesBytes)
+dependenciesStr := string(dependenciesBytes)

 objectAnnotation := binding.GetAnnotations()
 if objectAnnotation == nil {

@@ -385,11 +385,11 @@ func (d *DependenciesDistributor) recordDependencies(binding *workv1alpha2.Resou
 }

 // dependencies are not updated, no need to update annotation.
-if oldDependencies, exist := objectAnnotation[bindingDependenciesAnnotationKey]; exist && oldDependencies == depenciesStr {
+if oldDependencies, exist := objectAnnotation[bindingDependenciesAnnotationKey]; exist && oldDependencies == dependenciesStr {
 return nil
 }

-objectAnnotation[bindingDependenciesAnnotationKey] = depenciesStr
+objectAnnotation[bindingDependenciesAnnotationKey] = dependenciesStr

 return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
 binding.SetAnnotations(objectAnnotation)
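
The surrounding function persists the annotation under optimistic concurrency. A simplified sketch of that `retry.RetryOnConflict` pattern, assuming a controller-runtime client; a production version would re-fetch the binding inside the closure before reapplying the annotation:

```go
package main

import (
	"context"

	"k8s.io/client-go/util/retry"
	"sigs.k8s.io/controller-runtime/pkg/client"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

// setBindingAnnotation reapplies one annotation and updates the binding,
// retrying on resource-version conflicts.
func setBindingAnnotation(ctx context.Context, c client.Client, binding *workv1alpha2.ResourceBinding, key, value string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		annotations := binding.GetAnnotations()
		if annotations == nil {
			annotations = map[string]string{}
		}
		annotations[key] = value
		binding.SetAnnotations(annotations)
		return c.Update(ctx, binding)
	})
}
```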
|
|
|
@@ -24,7 +24,7 @@ import (
 "github.com/karmada-io/karmada/pkg/karmadactl/addons/search"
 )

-// Install intall the karmada addons process in Addons
+// Install install the karmada addons process in Addons
 func Install() {
 addonsinit.Addons["karmada-descheduler"] = descheduler.AddonDescheduler
 addonsinit.Addons["karmada-metrics-adapter"] = metricsadapter.AddonMetricsAdapter

@@ -190,7 +190,7 @@ func installComponentsOnKarmadaControlPlane(opts *addoninit.CommandAddonsEnableO
 caCertName := fmt.Sprintf("%s.crt", options.CaCertAndKeyName)
 karmadaCerts, err := opts.KubeClientSet.CoreV1().Secrets(opts.Namespace).Get(context.TODO(), options.KarmadaCertsName, metav1.GetOptions{})
 if err != nil {
-return fmt.Errorf("error when getting Secret %s/%s, which is used to fetch CaCert for building APISevice: %+v", opts.Namespace, options.KarmadaCertsName, err)
+return fmt.Errorf("error when getting Secret %s/%s, which is used to fetch CaCert for building APIService: %+v", opts.Namespace, options.KarmadaCertsName, err)
 }

 aaService := &corev1.Service{}

@@ -203,7 +203,7 @@ func installComponentsOnKarmadaControlPlane(opts *addoninit.CommandAddonsEnableO
 caCertName := fmt.Sprintf("%s.crt", options.CaCertAndKeyName)
 karmadaCerts, err := opts.KubeClientSet.CoreV1().Secrets(opts.Namespace).Get(context.TODO(), options.KarmadaCertsName, metav1.GetOptions{})
 if err != nil {
-return fmt.Errorf("error when getting Secret %s/%s, which is used to fetch CaCert for building APISevice: %+v", opts.Namespace, options.KarmadaCertsName, err)
+return fmt.Errorf("error when getting Secret %s/%s, which is used to fetch CaCert for building APIService: %+v", opts.Namespace, options.KarmadaCertsName, err)
 }

 aaService := &corev1.Service{}

@@ -157,7 +157,7 @@ func NewCmdInit(parentCommand string) *cobra.Command {
 flags.Int32VarP(&opts.KubeControllerManagerReplicas, "karmada-kube-controller-manager-replicas", "", 1, "Karmada kube controller manager replica set")
 flags.StringVarP(&opts.KarmadaControllerManagerImage, "karmada-controller-manager-image", "", kubernetes.DefaultKarmadaControllerManagerImage, "Karmada controller manager image")
 flags.Int32VarP(&opts.KarmadaControllerManagerReplicas, "karmada-controller-manager-replicas", "", 1, "Karmada controller manager replica set")
-flags.StringVarP(&opts.KarmadaWebhookImage, "karmada-webhook-image", "", kubernetes.DefualtKarmadaWebhookImage, "Karmada webhook image")
+flags.StringVarP(&opts.KarmadaWebhookImage, "karmada-webhook-image", "", kubernetes.DefaultKarmadaWebhookImage, "Karmada webhook image")
 flags.Int32VarP(&opts.KarmadaWebhookReplicas, "karmada-webhook-replicas", "", 1, "Karmada webhook replica set")
 flags.StringVarP(&opts.KarmadaAggregatedAPIServerImage, "karmada-aggregated-apiserver-image", "", kubernetes.DefaultKarmadaAggregatedAPIServerImage, "Karmada aggregated apiserver image")
 flags.Int32VarP(&opts.KarmadaAggregatedAPIServerReplicas, "karmada-aggregated-apiserver-replicas", "", 1, "Karmada aggregated apiserver replica set")

@@ -93,8 +93,8 @@ var (
 DefaultKarmadaSchedulerImage string
 // DefaultKarmadaControllerManagerImage Karmada controller manager image
 DefaultKarmadaControllerManagerImage string
-// DefualtKarmadaWebhookImage Karmada webhook image
-DefualtKarmadaWebhookImage string
+// DefaultKarmadaWebhookImage Karmada webhook image
+DefaultKarmadaWebhookImage string
 // DefaultKarmadaAggregatedAPIServerImage Karmada aggregated apiserver image
 DefaultKarmadaAggregatedAPIServerImage string
 )

@@ -117,7 +117,7 @@ func init() {
 DefaultInitImage = "docker.io/alpine:3.18.5"
 DefaultKarmadaSchedulerImage = fmt.Sprintf("docker.io/karmada/karmada-scheduler:%s", releaseVer.ReleaseVersion())
 DefaultKarmadaControllerManagerImage = fmt.Sprintf("docker.io/karmada/karmada-controller-manager:%s", releaseVer.ReleaseVersion())
-DefualtKarmadaWebhookImage = fmt.Sprintf("docker.io/karmada/karmada-webhook:%s", releaseVer.ReleaseVersion())
+DefaultKarmadaWebhookImage = fmt.Sprintf("docker.io/karmada/karmada-webhook:%s", releaseVer.ReleaseVersion())
 DefaultKarmadaAggregatedAPIServerImage = fmt.Sprintf("docker.io/karmada/karmada-aggregated-apiserver:%s", releaseVer.ReleaseVersion())
 }

@@ -696,7 +696,7 @@ func (i *CommandInitOption) karmadaControllerManagerImage() string {

 // get karmada-webhook image
 func (i *CommandInitOption) karmadaWebhookImage() string {
-if i.ImageRegistry != "" && i.KarmadaWebhookImage == DefualtKarmadaWebhookImage {
+if i.ImageRegistry != "" && i.KarmadaWebhookImage == DefaultKarmadaWebhookImage {
 return i.ImageRegistry + "/karmada-webhook:" + karmadaRelease
 }
 return i.KarmadaWebhookImage

@@ -96,7 +96,7 @@ func TestFlagsIP(t *testing.T) {
 want []net.IP
 }{
 {
-name: "all ips are vaild",
+name: "all ips are valid",
 ip: "10.0.0.1,10.0.0.2",
 want: []net.IP{
 net.ParseIP("10.0.0.1"),

@@ -104,7 +104,7 @@ func TestFlagsIP(t *testing.T) {
 },
 },
 {
-name: "have invaild ip",
+name: "have invalid ip",
 ip: "10.0.0,10.0.0.2",
 want: []net.IP{
 net.ParseIP("127.0.0.1"),

@@ -123,7 +123,7 @@ func DeCompress(file, targetPath string) error {
 }
 outFile.Close()
 default:
-fmt.Printf("uknown type: %v in %s\n", header.Typeflag, header.Name)
+fmt.Printf("unknown type: %v in %s\n", header.Typeflag, header.Name)
 }
 }
 return nil

@@ -390,7 +390,7 @@ func parseEditedIntoCustomization(file []byte, into *configv1alpha1.ResourceInte
 if currRule == nil {
 return fmt.Errorf("unexpected line %q", line)
 }
-script += string(line) + "\n"
+script += line + "\n"
 }
 }

@@ -155,7 +155,7 @@ type CommandPromoteOption struct {

 // PolicyName is the name of the PropagationPolicy(or ClusterPropagationPolicy),
 // It defaults to the promoting resource name with a random hash suffix.
-// It will be ingnored if AutoCreatePolicy is false.
+// It will be ignored if AutoCreatePolicy is false.
 PolicyName string

 resource.FilenameOptions

@@ -308,7 +308,7 @@ func (o *CommandRegisterOption) Validate() error {
 }

 if !o.BootstrapToken.UnsafeSkipCAVerification && len(o.BootstrapToken.CACertHashes) == 0 {
-return fmt.Errorf("need to varify CACertHashes, or set --discovery-token-unsafe-skip-ca-verification=true")
+return fmt.Errorf("need to verify CACertHashes, or set --discovery-token-unsafe-skip-ca-verification=true")
 }

 if !filepath.IsAbs(o.CACertPath) || !strings.HasSuffix(o.CACertPath, ".crt") {

@@ -592,7 +592,7 @@ func (o *CommandRegisterOption) createSecretAndRBACInMemberCluster(karmadaAgentC
 StringData: map[string]string{KarmadaKubeconfigName: string(configBytes)},
 }

-// cerate karmada-kubeconfig secret to be used by karmada-agent component.
+// create karmada-kubeconfig secret to be used by karmada-agent component.
 if err := cmdutil.CreateOrUpdateSecret(o.memberClusterClient, kubeConfigSecret); err != nil {
 return fmt.Errorf("create secret %s failed: %v", kubeConfigSecret.Name, err)
 }

@@ -47,7 +47,7 @@ var _ Factory = &factoryImpl{}
 type factoryImpl struct {
 cmdutil.Factory

-// kubeConfigFlags holds all the flags specificed by user.
+// kubeConfigFlags holds all the flags specified by user.
 // These flags will be inherited by the member cluster's client.
 kubeConfigFlags *genericclioptions.ConfigFlags
 }

@@ -68,7 +68,7 @@ func (c *CustomMetricsProvider) GetMetricByName(ctx context.Context, name types.
 return nil, err
 }
 metricValueList := &custom_metrics.MetricValueList{}
-metricsChanel := make(chan *custom_metrics.MetricValueList)
+metricsChannel := make(chan *custom_metrics.MetricValueList)
 wg := sync.WaitGroup{}
 for _, cluster := range clusters {
 wg.Add(1)

@@ -79,15 +79,15 @@ func (c *CustomMetricsProvider) GetMetricByName(ctx context.Context, name types.
 klog.Warningf("query %s's %s metric from cluster %s failed, err: %+v", info.GroupResource.String(), info.Metric, clusterName, err)
 return
 }
-metricsChanel <- metrics
+metricsChannel <- metrics
 }(cluster.Name)
 }
 go func() {
 wg.Wait()
-close(metricsChanel)
+close(metricsChannel)
 }()
 for {
-metrics, ok := <-metricsChanel
+metrics, ok := <-metricsChannel
 if !ok {
 break
 }

@@ -119,7 +119,7 @@ func (c *CustomMetricsProvider) GetMetricBySelector(ctx context.Context, namespa
 }
 metricValueList := &custom_metrics.MetricValueList{}
 wg := sync.WaitGroup{}
-metricsChanel := make(chan *custom_metrics.MetricValueList)
+metricsChannel := make(chan *custom_metrics.MetricValueList)
 for _, cluster := range clusters {
 wg.Add(1)
 go func(clusterName string) {

@@ -129,16 +129,16 @@ func (c *CustomMetricsProvider) GetMetricBySelector(ctx context.Context, namespa
 klog.Warningf("query %s's %s metric from cluster %s failed", info.GroupResource.String(), info.Metric, clusterName)
 return
 }
-metricsChanel <- metrics
+metricsChannel <- metrics
 }(cluster.Name)
 }
 go func() {
 wg.Wait()
-close(metricsChanel)
+close(metricsChannel)
 }()
 sameMetrics := make(map[string]custom_metrics.MetricValue)
 for {
-metrics, ok := <-metricsChanel
+metrics, ok := <-metricsChannel
 if !ok {
 break
 }

@@ -114,7 +114,7 @@ func (r *ResourceMetricsProvider) getMetricsParallel(resourceFunc queryResourceF
 }

 // step 2. Query metrics from the filtered target clusters
-metricsChanel := make(chan interface{})
+metricsChannel := make(chan interface{})

 var wg sync.WaitGroup
 for _, clusterName := range targetClusters {

@@ -139,17 +139,17 @@ func (r *ResourceMetricsProvider) getMetricsParallel(resourceFunc queryResourceF

 // If there are multiple metrics with same name, it's ok because it's an array instead of a map.
 // The HPA controller will calculate the average utilization with the array.
-metricsChanel <- metrics
+metricsChannel <- metrics
 }(clusterName)
 }

 go func() {
 wg.Wait()
-close(metricsChanel)
+close(metricsChannel)
 }()

 for {
-data, ok := <-metricsChanel
+data, ok := <-metricsChannel
 if !ok {
 break
 }
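
The renamed channel in these metrics hunks implements a fan-in: one goroutine per cluster sends results to a shared channel, and a closer goroutine gated on the `WaitGroup` lets the receive loop terminate. A generic sketch of the pattern; the `query` function is a placeholder:

```go
package main

import "sync"

// fanIn queries each cluster concurrently and merges the results, as
// the metrics providers above do; query is a caller-supplied function
// that reports whether its result should be included.
func fanIn[T any](clusters []string, query func(cluster string) (T, bool)) []T {
	metricsChannel := make(chan T)
	var wg sync.WaitGroup
	for _, cluster := range clusters {
		wg.Add(1)
		go func(clusterName string) {
			defer wg.Done()
			if m, ok := query(clusterName); ok {
				metricsChannel <- m
			}
		}(cluster)
	}
	// Close the channel once every worker has finished, so the
	// range below terminates.
	go func() {
		wg.Wait()
		close(metricsChannel)
	}()
	var results []T
	for m := range metricsChannel {
		results = append(results, m)
	}
	return results
}
```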
|
@@ -426,7 +426,7 @@ func (p *PodLister) convertToPodPartialData(pod *corev1.Pod, selector string, la
 ret.Annotations = map[string]string{}
 }

-//If user sets this annotation, we need to remove it to avoid parsing wrong next.
+// If user sets this annotation, we need to remove it to avoid parsing wrong next.
 if !labelSelector {
 delete(ret.Annotations, namespaceSpecifiedAnnotation)
 delete(ret.Annotations, labelSelectorAnnotationInternal)

@@ -539,7 +539,7 @@ func (n *NodeLister) List(selector labels.Selector) (ret []*corev1.Node, err err
 nodeTyped.Annotations = map[string]string{}
 }

-//If user sets this annotation, we need to reset it.
+// If user sets this annotation, we need to reset it.
 nodeTyped.Annotations[labelSelectorAnnotationInternal] = selector.String()
 ret = append(ret, nodeTyped)
 }

@@ -587,7 +587,7 @@ func (n *NodeLister) Get(name string) (*corev1.Node, error) {
 nodeTyped.Annotations = map[string]string{}
 }

-//If user sets this annotation, we need to remove it to avoid parsing wrong next.
+// If user sets this annotation, we need to remove it to avoid parsing wrong next.
 delete(nodeTyped.Annotations, labelSelectorAnnotationInternal)
 }

@@ -76,7 +76,7 @@ func VerifyResourceInterpreterContext(uid types.UID, operation configv1alpha1.In
 switch r := interpreterContext.(type) {
 case *configv1alpha1.ResourceInterpreterContext:
 if r.Response == nil {
-return nil, fmt.Errorf("webhook resonse was absent")
+return nil, fmt.Errorf("webhook response was absent")
 }

 if r.Response.UID != uid {

@@ -115,7 +115,7 @@ func (c *Cluster) Connect(ctx context.Context, request framework.ProxyRequest) (
 return h, nil
 }

-// Objects get by client via proxy are edited some fields, different from objets in member clusters.
+// Objects get by client via proxy are edited some fields, different from objects in member clusters.
 // So before update, we shall recover these fields.
 return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
 if err = modifyRequest(req, clusterName); err != nil {

@@ -421,7 +421,7 @@ var _ = framework.SerialDescribe("Karmadactl cordon/uncordon testing", ginkgo.La
 err := createCluster(clusterName, kubeConfigPath, controlPlane, clusterContext)
 gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
 })
-ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() {
+ginkgo.By(fmt.Sprintf("Joining cluster: %s", clusterName), func() {
 cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, "", karmadactlTimeout,
 "join", "--cluster-kubeconfig", kubeConfigPath, "--cluster-context", clusterContext, "--cluster-namespace", "karmada-cluster", clusterName)
 _, err := cmd.ExecOrDie()