diff --git a/artifacts/deploy/karmada-scheduler.yaml b/artifacts/deploy/karmada-scheduler.yaml index 23284c889..c0f003ca4 100644 --- a/artifacts/deploy/karmada-scheduler.yaml +++ b/artifacts/deploy/karmada-scheduler.yaml @@ -29,6 +29,7 @@ spec: - --bind-address=0.0.0.0 - --secure-port=10351 - --failover=true + - --v=4 volumeMounts: - name: kubeconfig subPath: kubeconfig diff --git a/artifacts/deploy/karmada-webhook.yaml b/artifacts/deploy/karmada-webhook.yaml index 55b192174..d615f16a9 100644 --- a/artifacts/deploy/karmada-webhook.yaml +++ b/artifacts/deploy/karmada-webhook.yaml @@ -29,6 +29,7 @@ spec: - --bind-address=0.0.0.0 - --secure-port=8443 - --cert-dir=/var/serving-cert + - --v=4 ports: - containerPort: 8443 volumeMounts: diff --git a/artifacts/deploy/kube-controller-manager.yaml b/artifacts/deploy/kube-controller-manager.yaml index 6aca18a6d..36527330f 100644 --- a/artifacts/deploy/kube-controller-manager.yaml +++ b/artifacts/deploy/kube-controller-manager.yaml @@ -51,7 +51,7 @@ spec: - --service-account-private-key-file=/etc/karmada/pki/karmada.key - --service-cluster-ip-range=10.96.0.0/12 - --use-service-account-credentials=true - - --v=5 + - --v=4 image: k8s.gcr.io/kube-controller-manager:v1.19.1 imagePullPolicy: IfNotPresent name: kube-controller-manager diff --git a/test/e2e/failover_test.go b/test/e2e/failover_test.go index cf935ce85..3be7f4a22 100644 --- a/test/e2e/failover_test.go +++ b/test/e2e/failover_test.go @@ -3,6 +3,7 @@ package e2e import ( "context" "fmt" + "strings" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -208,10 +209,8 @@ func recoverCluster(c client.Client, clusterName string, originalAPIEndpoint str // get the target cluster names from binding information func getTargetClusterNames(deployment *appsv1.Deployment) (targetClusterNames []string, err error) { bindingName := names.GenerateBindingName(deployment.Kind, deployment.Name) - fmt.Printf("deploy kind is %s, name is %s\n", deployment.Kind, deployment.Name) binding := 
&workv1alpha1.ResourceBinding{} - fmt.Printf("collect the target clusters in resource binding\n") err = wait.PollImmediate(pollInterval, pollTimeout, func() (done bool, err error) { err = controlPlaneClient.Get(context.TODO(), client.ObjectKey{Namespace: deployment.Namespace, Name: bindingName}, binding) if err != nil { @@ -220,6 +219,12 @@ func getTargetClusterNames(deployment *appsv1.Deployment) (targetClusterNames [] } return false, err } + + if len(binding.Spec.Clusters) == 0 { + klog.Infof("The ResourceBinding(%s/%s) hasn't been scheduled.", binding.Namespace, binding.Name) + return false, nil + } + return true, nil }) if err != nil { @@ -228,7 +233,7 @@ func getTargetClusterNames(deployment *appsv1.Deployment) (targetClusterNames [] for _, cluster := range binding.Spec.Clusters { targetClusterNames = append(targetClusterNames, cluster.Name) } - fmt.Printf("target clusters in resource binding are %s\n", targetClusterNames) + klog.Infof("The ResourceBinding(%s/%s) schedule result is: %s", binding.Namespace, binding.Name, strings.Join(targetClusterNames, ",")) return targetClusterNames, nil } diff --git a/test/e2e/tainttoleration_test.go b/test/e2e/tainttoleration_test.go index 30cd140f7..740b35367 100644 --- a/test/e2e/tainttoleration_test.go +++ b/test/e2e/tainttoleration_test.go @@ -9,6 +9,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" @@ -59,11 +60,12 @@ var _ = ginkgo.Describe("propagation with taint and toleration testing", func() ginkgo.BeforeEach(func() { ginkgo.By("adding taints to clusters", func() { for _, cluster := range clusterNames { - fmt.Printf("add taints to cluster %v", cluster) clusterObj := &clusterv1alpha1.Cluster{} err := controlPlaneClient.Get(context.TODO(), client.ObjectKey{Name: cluster}, clusterObj) 
gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + // TODO(RainbowMango): This will override the taints that already exist in the cluster. + // Should merge new taint to it and clean up after testing. clusterObj.Spec.Taints = []corev1.Taint{ { Key: tolerationKey, @@ -72,6 +74,9 @@ var _ = ginkgo.Describe("propagation with taint and toleration testing", func() }, } + for _, taint := range clusterObj.Spec.Taints { + klog.Infof("Adding taints(%s) to cluster(%s)", taint.ToString(), clusterObj.Name) + } err = controlPlaneClient.Update(context.TODO(), clusterObj) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) } @@ -100,7 +105,7 @@ var _ = ginkgo.Describe("propagation with taint and toleration testing", func() }) ginkgo.It("deployment with cluster tolerations testing", func() { - ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deploymentNamespace, deploymentName), func() { + ginkgo.By(fmt.Sprintf("creating deployment(%s/%s)", deployment.Namespace, deployment.Name), func() { _, err := kubeClient.AppsV1().Deployments(testNamespace).Create(context.TODO(), deployment, metav1.CreateOptions{}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) })