From 55ee296b13b26b95ad6fdc1de0101757054da102 Mon Sep 17 00:00:00 2001 From: wangyanzhao Date: Wed, 29 Jun 2022 20:29:43 +0800 Subject: [PATCH 1/2] do not switch context when deploy karmada Signed-off-by: wangyanzhao --- hack/deploy-agent-and-estimator.sh | 6 +-- hack/deploy-karmada-agent.sh | 19 ++++--- hack/deploy-karmada-opensearch.sh | 14 ++--- hack/deploy-karmada.sh | 83 +++++++++++++++--------------- hack/deploy-scheduler-estimator.sh | 8 +-- hack/local-up-karmada.sh | 11 ++-- hack/post-run-e2e.sh | 27 ++++------ hack/pre-run-e2e.sh | 31 +++++------ hack/remote-up-karmada.sh | 1 - hack/undeploy-karmada-agent.sh | 15 +++--- hack/undeploy-karmada.sh | 12 ++--- hack/util.sh | 68 +++++++++++++----------- 12 files changed, 139 insertions(+), 156 deletions(-) diff --git a/hack/deploy-agent-and-estimator.sh b/hack/deploy-agent-and-estimator.sh index 352b4c8e7..6c2ee7887 100755 --- a/hack/deploy-agent-and-estimator.sh +++ b/hack/deploy-agent-and-estimator.sh @@ -42,9 +42,9 @@ fi KARMADA_APISERVER_KUBECONFIG=$3 # check context existence -if ! kubectl config use-context "${4}" --kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" > /dev/null 2>&1; +if ! kubectl config get-contexts "${4}" --kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" > /dev/null 2>&1; then - echo -e "ERROR: failed to use context: '${4}' not in ${KARMADA_APISERVER_KUBECONFIG}. \n" + echo -e "ERROR: failed to get context: '${4}' not in ${KARMADA_APISERVER_KUBECONFIG}. \n" usage exit 1 fi @@ -59,7 +59,7 @@ fi MEMBER_CLUSTER_KUBECONFIG=$5 # check context existence -if ! kubectl config use-context "${6}" --kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}" > /dev/null 2>&1; +if ! kubectl config get-contexts "${6}" --kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}" > /dev/null 2>&1; then echo -e "ERROR: failed to get context: '${6}' not in ${MEMBER_CLUSTER_KUBECONFIG}. \n" usage diff --git a/hack/deploy-karmada-agent.sh b/hack/deploy-karmada-agent.sh index bc68cb171..470b8919f 100755 --- a/hack/deploy-karmada-agent.sh +++ b/hack/deploy-karmada-agent.sh @@ -25,9 +25,9 @@ fi KARMADA_APISERVER_KUBECONFIG=$1 # check context existence -if ! kubectl config use-context "${2}" --kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" > /dev/null 2>&1; +if ! kubectl config get-contexts "${2}" --kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" > /dev/null 2>&1; then - echo -e "ERROR: failed to use context: '${2}' not in ${KARMADA_APISERVER_KUBECONFIG}. \n" + echo -e "ERROR: failed to get context: '${2}' not in ${KARMADA_APISERVER_KUBECONFIG}. 
\n" usage exit 1 fi @@ -57,20 +57,19 @@ if [ -n "${KUBECONFIG+x}" ];then CURR_KUBECONFIG=$KUBECONFIG # backup current kubeconfig fi export KUBECONFIG="${MEMBER_CLUSTER_KUBECONFIG}" # switch to member cluster -kubectl config use-context "${MEMBER_CLUSTER_NAME}" AGENT_IMAGE_PULL_POLICY=${IMAGE_PULL_POLICY:-IfNotPresent} # create namespace for karmada agent -kubectl apply -f "${REPO_ROOT}/artifacts/agent/namespace.yaml" +kubectl --context="${MEMBER_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/agent/namespace.yaml" # create service account, cluster role for karmada agent -kubectl apply -f "${REPO_ROOT}/artifacts/agent/serviceaccount.yaml" -kubectl apply -f "${REPO_ROOT}/artifacts/agent/clusterrole.yaml" -kubectl apply -f "${REPO_ROOT}/artifacts/agent/clusterrolebinding.yaml" +kubectl --context="${MEMBER_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/agent/serviceaccount.yaml" +kubectl --context="${MEMBER_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/agent/clusterrole.yaml" +kubectl --context="${MEMBER_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/agent/clusterrolebinding.yaml" # create secret -kubectl create secret generic karmada-kubeconfig --from-file=karmada-kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" -n "${KARMADA_SYSTEM_NAMESPACE}" +kubectl --context="${MEMBER_CLUSTER_NAME}" create secret generic karmada-kubeconfig --from-file=karmada-kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" -n "${KARMADA_SYSTEM_NAMESPACE}" # extract api endpoint of member cluster MEMBER_CLUSTER=$(kubectl config view -o jsonpath='{.contexts[?(@.name == "'${MEMBER_CLUSTER_NAME}'")].context.cluster}') @@ -84,10 +83,10 @@ sed -i'' -e "s/{{member_cluster_name}}/${MEMBER_CLUSTER_NAME}/g" "${TEMP_PATH}"/ sed -i'' -e "s/{{image_pull_policy}}/${AGENT_IMAGE_PULL_POLICY}/g" "${TEMP_PATH}"/karmada-agent.yaml sed -i'' -e "s|{{member_cluster_api_endpoint}}|${MEMBER_CLUSTER_API_ENDPOINT}|g" "${TEMP_PATH}"/karmada-agent.yaml echo -e "Apply dynamic rendered deployment in ${TEMP_PATH}/karmada-agent.yaml.\n" -kubectl apply -f "${TEMP_PATH}"/karmada-agent.yaml +kubectl --context="${MEMBER_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/karmada-agent.yaml # Wait for karmada-etcd to come up before launching the rest of the components. 
-util::wait_pod_ready "${AGENT_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" +util::wait_pod_ready "${MEMBER_CLUSTER_NAME}" "${AGENT_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" # recover the kubeconfig before installing agent if necessary if [ -n "${CURR_KUBECONFIG+x}" ];then diff --git a/hack/deploy-karmada-opensearch.sh b/hack/deploy-karmada-opensearch.sh index adb384c64..cd5c46998 100755 --- a/hack/deploy-karmada-opensearch.sh +++ b/hack/deploy-karmada-opensearch.sh @@ -38,20 +38,16 @@ if [ -n "${KUBECONFIG+x}" ];then CURR_KUBECONFIG=$KUBECONFIG # backup current kubeconfig fi - # switch to host cluster -TEMP_PATH=$(mktemp -d) -cp $HOST_CLUSTER_KUBECONFIG $TEMP_PATH/kubeconfig -export KUBECONFIG="$TEMP_PATH/kubeconfig" -kubectl config use-context "${HOST_CLUSTER_NAME}" +export KUBECONFIG=$HOST_CLUSTER_KUBECONFIG echo "using kubeconfig: "$KUBECONFIG # deploy karmada opensearch -kubectl apply -f "${REPO_ROOT}/artifacts/opensearch/karmada-opensearch.yaml" -kubectl apply -f "${REPO_ROOT}/artifacts/opensearch/karmada-opensearch-dashboards.yaml" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/opensearch/karmada-opensearch.yaml" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/opensearch/karmada-opensearch-dashboards.yaml" # make sure all karmada-opensearch components are ready -util::wait_pod_ready "${KARMADA_OPENSEARCH_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" -util::wait_pod_ready "${KARMADA_OPENSEARCH_DASHBOARDS_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" +util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_OPENSEARCH_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" +util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_OPENSEARCH_DASHBOARDS_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" # recover the kubeconfig before installing opensearch if necessary if [ -n "${CURR_KUBECONFIG+x}" ];then diff --git a/hack/deploy-karmada.sh b/hack/deploy-karmada.sh index f874d4ca7..b25a5a645 100755 --- a/hack/deploy-karmada.sh +++ b/hack/deploy-karmada.sh @@ -61,9 +61,9 @@ if [ -n "${KUBECONFIG+x}" ];then fi export KUBECONFIG="${HOST_CLUSTER_KUBECONFIG}" HOST_CLUSTER_NAME=$2 -if ! kubectl config use-context "${HOST_CLUSTER_NAME}" > /dev/null 2>&1; +if ! kubectl config get-contexts "${HOST_CLUSTER_NAME}" > /dev/null 2>&1; then - echo -e "ERROR: failed to use context: '${HOST_CLUSTER_NAME}' not in ${HOST_CLUSTER_KUBECONFIG}. \n" + echo -e "ERROR: failed to get context: '${HOST_CLUSTER_NAME}' not in ${HOST_CLUSTER_KUBECONFIG}. 
\n" usage recover_kubeconfig exit 1 @@ -109,19 +109,20 @@ function generate_cert_secret { sed -i'' -e "s/{{server_key}}/${KARMADA_KEY}/g" "${TEMP_PATH}"/karmada-webhook-cert-secret-tmp.yaml sed -i'' -e "s/{{server_certificate}}/${KARMADA_CRT}/g" "${TEMP_PATH}"/karmada-webhook-cert-secret-tmp.yaml - kubectl apply -f "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - kubectl apply -f "${TEMP_PATH}"/secret-tmp.yaml - kubectl apply -f "${TEMP_PATH}"/karmada-webhook-cert-secret-tmp.yaml + kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml + kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/secret-tmp.yaml + kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/karmada-webhook-cert-secret-tmp.yaml rm -rf "${TEMP_PATH}" } # install Karmada's APIs function installCRDs() { - local crd_path=$1 + local context_name=$1 + local crd_path=$2 - kubectl apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml" + kubectl --context="${context_name}" apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml" - kubectl apply -k "${crd_path}"/_crds + kubectl --context="${context_name}" apply -k "${crd_path}"/_crds } # Use x.x.x.6 IP address, which is the same CIDR with the node address of the Kind cluster, @@ -144,12 +145,12 @@ util::create_certkey "" "${CERT_DIR}" "etcd-ca" etcd-server etcd-server "" kuber util::create_certkey "" "${CERT_DIR}" "etcd-ca" etcd-client etcd-client "" "*.etcd.karmada-system.svc.cluster.local" "*.karmada-system.svc.cluster.local" "*.karmada-system.svc" "localhost" "127.0.0.1" # create namespace for control plane components -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml" # create service account, cluster role for controller-manager -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/serviceaccount.yaml" -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/clusterrole.yaml" -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/clusterrolebinding.yaml" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/serviceaccount.yaml" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/clusterrole.yaml" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/clusterrolebinding.yaml" KARMADA_CRT=$(base64 "${CERT_DIR}/karmada.crt" | tr -d '\r\n') KARMADA_KEY=$(base64 "${CERT_DIR}/karmada.key" | tr -d '\r\n') @@ -166,10 +167,10 @@ ETCD_CLIENT_KEY=$(base64 "${CERT_DIR}/etcd-client.key" | tr -d '\r\n') generate_cert_secret # deploy karmada etcd -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-etcd.yaml" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-etcd.yaml" # Wait for karmada-etcd to come up before launching the rest of the components. -util::wait_pod_ready "${ETCD_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" +util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${ETCD_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" #KARMADA_APISERVER_SERVICE_TYPE is the service type of karmada API Server, For connectivity, it will be different when # HOST_CLUSTER_TYPE is different. When HOST_CLUSTER_TYPE=local, we will create a ClusterIP type Service. 
And when @@ -192,19 +193,19 @@ TEMP_PATH_APISERVER=$(mktemp -d) cp "${REPO_ROOT}"/artifacts/deploy/karmada-apiserver.yaml "${TEMP_PATH_APISERVER}"/karmada-apiserver.yaml sed -i'' -e "s/{{service_type}}/${KARMADA_APISERVER_SERVICE_TYPE}/g" "${TEMP_PATH_APISERVER}"/karmada-apiserver.yaml echo -e "\nApply dynamic rendered apiserver service in ${TEMP_PATH_APISERVER}/karmada-apiserver.yaml." -kubectl apply -f "${TEMP_PATH_APISERVER}"/karmada-apiserver.yaml +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH_APISERVER}"/karmada-apiserver.yaml # Wait for karmada-apiserver to come up before launching the rest of the components. -util::wait_pod_ready "${APISERVER_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" +util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${APISERVER_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" # get Karmada apiserver IP at remote mode if [ "${HOST_CLUSTER_TYPE}" = "remote" ]; then case $KARMADA_APISERVER_SERVICE_TYPE in ClusterIP) - KARMADA_APISERVER_IP=$(kubectl get pod -l app=karmada-apiserver -n "${KARMADA_SYSTEM_NAMESPACE}" -o=jsonpath='{.items[0].status.podIP}') + KARMADA_APISERVER_IP=$(kubectl --context="${HOST_CLUSTER_NAME}" get pod -l app=karmada-apiserver -n "${KARMADA_SYSTEM_NAMESPACE}" -o=jsonpath='{.items[0].status.podIP}') ;; LoadBalancer) - if util::wait_service_external_ip "karmada-apiserver" "${KARMADA_SYSTEM_NAMESPACE}"; then + if util::wait_service_external_ip "karmada-apiserver" "karmada-apiserver" "${KARMADA_SYSTEM_NAMESPACE}"; then echo "Get service external IP: ${SERVICE_EXTERNAL_IP}, wait to check network connectivity" KARMADA_APISERVER_IP=$(util::get_load_balancer_ip) || KARMADA_APISERVER_IP='' else @@ -227,18 +228,18 @@ fi util::append_client_kubeconfig "${HOST_CLUSTER_KUBECONFIG}" "${CERT_DIR}/karmada.crt" "${CERT_DIR}/karmada.key" "${KARMADA_APISERVER_IP}" "${KARMADA_APISERVER_SECURE_PORT}" karmada-apiserver # deploy kube controller manager -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/kube-controller-manager.yaml" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/kube-controller-manager.yaml" # deploy aggregated-apiserver on host cluster -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-aggregated-apiserver.yaml" -util::wait_pod_ready "${KARMADA_AGGREGATION_APISERVER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-aggregated-apiserver.yaml" +util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_AGGREGATION_APISERVER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" # deploy karmada-search on host cluster -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-search.yaml" -util::wait_pod_ready "${KARMADA_SEARCH_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-search.yaml" +util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_SEARCH_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" # install CRD APIs on karmada apiserver. -if ! kubectl config use-context karmada-apiserver > /dev/null 2>&1; +if ! kubectl config get-contexts "karmada-apiserver" > /dev/null 2>&1; then - echo -e "ERROR: failed to use context: karmada-apiserver not in ${HOST_CLUSTER_KUBECONFIG}." + echo -e "ERROR: failed to get context: karmada-apiserver not in karmada-apiserver." 
recover_kubeconfig exit 1 fi @@ -247,38 +248,36 @@ TEMP_PATH_CRDS=$(mktemp -d) cp -rf "${REPO_ROOT}"/charts/karmada/_crds "${TEMP_PATH_CRDS}" util::fill_cabundle "${ROOT_CA_FILE}" "${TEMP_PATH_CRDS}/_crds/patches/webhook_in_resourcebindings.yaml" util::fill_cabundle "${ROOT_CA_FILE}" "${TEMP_PATH_CRDS}/_crds/patches/webhook_in_clusterresourcebindings.yaml" -installCRDs "${TEMP_PATH_CRDS}" +installCRDs "karmada-apiserver" "${TEMP_PATH_CRDS}" rm -rf "${TEMP_PATH_CRDS}" # deploy webhook configurations on karmada apiserver -util::deploy_webhook_configuration "${ROOT_CA_FILE}" "${REPO_ROOT}/artifacts/deploy/webhook-configuration.yaml" +util::deploy_webhook_configuration "karmada-apiserver" "${ROOT_CA_FILE}" "${REPO_ROOT}/artifacts/deploy/webhook-configuration.yaml" # deploy APIService on karmada apiserver for karmada-aggregated-apiserver -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-aggregated-apiserver-apiservice.yaml" +kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-aggregated-apiserver-apiservice.yaml" # make sure apiservice for v1alpha1.cluster.karmada.io is Available -util::wait_apiservice_ready "${KARMADA_AGGREGATION_APISERVER_LABEL}" +util::wait_apiservice_ready "karmada-apiserver" "${KARMADA_AGGREGATION_APISERVER_LABEL}" # deploy APIService on karmada apiserver for karmada-search -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-search-apiservice.yaml" +kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-search-apiservice.yaml" # make sure apiservice for v1alpha1.search.karmada.io is Available -util::wait_apiservice_ready "${KARMADA_SEARCH_LABEL}" +util::wait_apiservice_ready "karmada-apiserver" "${KARMADA_SEARCH_LABEL}" # deploy cluster proxy rbac for admin -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/cluster-proxy-admin-rbac.yaml" - -kubectl config use-context "${HOST_CLUSTER_NAME}" +kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/cluster-proxy-admin-rbac.yaml" # deploy controller-manager on host cluster -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-controller-manager.yaml" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-controller-manager.yaml" # deploy scheduler on host cluster -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-scheduler.yaml" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-scheduler.yaml" # deploy descheduler on host cluster -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-descheduler.yaml" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-descheduler.yaml" # deploy webhook on host cluster -kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-webhook.yaml" +kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-webhook.yaml" # make sure all karmada control plane components are ready -util::wait_pod_ready "${KARMADA_CONTROLLER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" -util::wait_pod_ready "${KARMADA_SCHEDULER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" -util::wait_pod_ready "${KUBE_CONTROLLER_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" -util::wait_pod_ready "${KARMADA_WEBHOOK_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" +util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_CONTROLLER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" +util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_SCHEDULER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" +util::wait_pod_ready "${HOST_CLUSTER_NAME}" 
"${KUBE_CONTROLLER_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" +util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_WEBHOOK_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}" diff --git a/hack/deploy-scheduler-estimator.sh b/hack/deploy-scheduler-estimator.sh index c9ed47281..327e143e6 100755 --- a/hack/deploy-scheduler-estimator.sh +++ b/hack/deploy-scheduler-estimator.sh @@ -48,18 +48,12 @@ then exit 1 fi MEMBER_CLUSTER_NAME=$4 -TEMP_PATH=$(mktemp -d) -MEMBER_CLUSTER_KUBECONFIG_NAME=`basename ${MEMBER_CLUSTER_KUBECONFIG}` -cp -rf ${MEMBER_CLUSTER_KUBECONFIG} "${TEMP_PATH}"/${MEMBER_CLUSTER_KUBECONFIG_NAME} - -kubectl --kubeconfig="${TEMP_PATH}"/${MEMBER_CLUSTER_KUBECONFIG_NAME} config use-context "${MEMBER_CLUSTER_NAME}" # check whether the kubeconfig secret has been created before if ! kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" get secrets -n karmada-system | grep "${MEMBER_CLUSTER_NAME}-kubeconfig"; then # create secret - kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" create secret generic ${MEMBER_CLUSTER_NAME}-kubeconfig --from-file=${MEMBER_CLUSTER_NAME}-kubeconfig="${TEMP_PATH}"/${MEMBER_CLUSTER_KUBECONFIG_NAME} -n "karmada-system" + kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" create secret generic ${MEMBER_CLUSTER_NAME}-kubeconfig --from-file=${MEMBER_CLUSTER_NAME}-kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}" -n "karmada-system" fi -rm -rf "${TEMP_PATH}" # deploy scheduler estimator TEMP_PATH=$(mktemp -d) diff --git a/hack/local-up-karmada.sh b/hack/local-up-karmada.sh index b93c2d8f6..ec82dafb7 100755 --- a/hack/local-up-karmada.sh +++ b/hack/local-up-karmada.sh @@ -129,10 +129,9 @@ echo "cluster networks connected" #join push mode member clusters export KUBECONFIG="${MAIN_KUBECONFIG}" -kubectl config use-context "${KARMADA_APISERVER_CLUSTER_NAME}" -${KARMADACTL_BIN} join member1 --cluster-kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}" +${KARMADACTL_BIN} join --karmada-context="${KARMADA_APISERVER_CLUSTER_NAME}" member1 --cluster-kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}" "${REPO_ROOT}"/hack/deploy-scheduler-estimator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" "${MEMBER_CLUSTER_KUBECONFIG}" "${MEMBER_CLUSTER_1_NAME}" -${KARMADACTL_BIN} join member2 --cluster-kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}" +${KARMADACTL_BIN} join --karmada-context="${KARMADA_APISERVER_CLUSTER_NAME}" member2 --cluster-kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}" "${REPO_ROOT}"/hack/deploy-scheduler-estimator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" "${MEMBER_CLUSTER_KUBECONFIG}" "${MEMBER_CLUSTER_2_NAME}" # wait until the pull mode cluster ready @@ -143,9 +142,9 @@ kind load docker-image "${REGISTRY}/karmada-agent:${VERSION}" --name="${PULL_MOD "${REPO_ROOT}"/hack/deploy-agent-and-estimator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" "${MAIN_KUBECONFIG}" "${KARMADA_APISERVER_CLUSTER_NAME}" "${MEMBER_CLUSTER_KUBECONFIG}" "${PULL_MODE_CLUSTER_NAME}" # wait all of clusters member1, member2 and member3 status is ready -util:wait_cluster_ready "${MEMBER_CLUSTER_1_NAME}" -util:wait_cluster_ready "${MEMBER_CLUSTER_2_NAME}" -util:wait_cluster_ready "${PULL_MODE_CLUSTER_NAME}" +util:wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${MEMBER_CLUSTER_1_NAME}" +util:wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${MEMBER_CLUSTER_2_NAME}" +util:wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_NAME}" function print_success() { echo -e "$KARMADA_GREETING" diff --git 
a/hack/post-run-e2e.sh b/hack/post-run-e2e.sh index 2fea9b3e3..4cbc69cf1 100755 --- a/hack/post-run-e2e.sh +++ b/hack/post-run-e2e.sh @@ -17,28 +17,23 @@ PULL_MODE_CLUSTER_NAME=${PULL_MODE_CLUSTER_NAME:-"member3"} # delete interpreter webhook example in karmada-host export KUBECONFIG="${MAIN_KUBECONFIG}" -kubectl config use-context "${HOST_CLUSTER_NAME}" -kubectl delete -f "${REPO_ROOT}"/examples/customresourceinterpreter/karmada-interpreter-webhook-example.yaml +kubectl --context="${HOST_CLUSTER_NAME}" delete -f "${REPO_ROOT}"/examples/customresourceinterpreter/karmada-interpreter-webhook-example.yaml # uninstall metallb -kubectl delete configmap config -n metallb-system -kubectl delete -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/metallb.yaml -kubectl delete -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml +kubectl --context="${HOST_CLUSTER_NAME}" delete configmap config -n metallb-system +kubectl --context="${HOST_CLUSTER_NAME}" delete -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/metallb.yaml +kubectl --context="${HOST_CLUSTER_NAME}" delete -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml -kubectl get configmap kube-proxy -n kube-system -o yaml | \ +kubectl --context="${HOST_CLUSTER_NAME}" get configmap kube-proxy -n kube-system -o yaml | \ sed -e "s/strictARP: true/strictARP: false/" | \ -kubectl apply -f - -n kube-system +kubectl --context="${HOST_CLUSTER_NAME}" apply -f - -n kube-system # delete interpreter workload webhook configuration -kubectl config use-context "${KARMADA_APISERVER}" -kubectl delete ResourceInterpreterWebhookConfiguration examples +kubectl --context="${KARMADA_APISERVER}" delete ResourceInterpreterWebhookConfiguration examples # delete interpreter example workload CRD in karamada-apiserver and member clusters -kubectl delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml" +kubectl --context="${KARMADA_APISERVER}" delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml" export KUBECONFIG="${MEMBER_CLUSTER_KUBECONFIG}" -kubectl config use-context "${MEMBER_CLUSTER_1_NAME}" -kubectl delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml" -kubectl config use-context "${MEMBER_CLUSTER_2_NAME}" -kubectl delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml" -kubectl config use-context "${PULL_MODE_CLUSTER_NAME}" -kubectl delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml" +kubectl --context="${MEMBER_CLUSTER_1_NAME}" delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml" +kubectl --context="${MEMBER_CLUSTER_2_NAME}" delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml" +kubectl --context="${PULL_MODE_CLUSTER_NAME}" delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml" diff --git a/hack/pre-run-e2e.sh b/hack/pre-run-e2e.sh index 7e58a9b86..3f64b8812 100755 --- a/hack/pre-run-e2e.sh +++ b/hack/pre-run-e2e.sh @@ -26,18 +26,17 @@ ROOT_CA_FILE=${CERT_DIR}/ca.crt kind load docker-image "${REGISTRY}/karmada-interpreter-webhook-example:${VERSION}" --name="${HOST_CLUSTER_NAME}" export KUBECONFIG="${MAIN_KUBECONFIG}" -kubectl config use-context "${HOST_CLUSTER_NAME}" # Due to we are using kube-proxy 
in IPVS mode, we have to enable strict ARP mode. # refer to https://metallb.universe.tf/installation/#preparation -kubectl get configmap kube-proxy -n kube-system -o yaml | \ +kubectl --context="${HOST_CLUSTER_NAME}" get configmap kube-proxy -n kube-system -o yaml | \ sed -e "s/strictARP: false/strictARP: true/" | \ -kubectl apply -f - -n kube-system +kubectl --context="${HOST_CLUSTER_NAME}" apply -f - -n kube-system # install metallb by manifest, refer to https://metallb.universe.tf/installation/#installation-by-manifest -kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml -kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/metallb.yaml -util::wait_pod_ready metallb metallb-system +kubectl --context="${HOST_CLUSTER_NAME}" apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml +kubectl --context="${HOST_CLUSTER_NAME}" apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/metallb.yaml +util::wait_pod_ready "${HOST_CLUSTER_NAME}" metallb metallb-system # Use x.x.x.6 IP address, which is the same CIDR with the node address of the Kind cluster, # as the loadBalancer service address of component karmada-interpreter-webhook-example. @@ -45,7 +44,7 @@ interpreter_webhook_example_service_external_ip_prefix=$(echo $(util::get_apiser interpreter_webhook_example_service_external_ip_address=${interpreter_webhook_example_service_external_ip_prefix}.6 # config with layer 2 configuration. refer to https://metallb.universe.tf/configuration/#layer-2-configuration -cat < /dev/null 2>&1; +if ! kubectl config get-contexts "${2}" --kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" > /dev/null 2>&1; then - echo -e "ERROR: failed to use context: '${2}' not in ${KARMADA_APISERVER_KUBECONFIG}. \n" + echo -e "ERROR: failed to get context: '${2}' not in ${KARMADA_APISERVER_KUBECONFIG}. \n" usage exit 1 fi @@ -53,22 +53,21 @@ MEMBER_CLUSTER_NAME=$4 source "${REPO_ROOT}"/hack/util.sh # remove the member cluster from karmada control plane -kubectl delete cluster ${MEMBER_CLUSTER_NAME} +kubectl --context="${2}" delete cluster "${MEMBER_CLUSTER_NAME}" # remove agent from the member cluster if [ -n "${KUBECONFIG+x}" ];then CURR_KUBECONFIG=$KUBECONFIG # backup current kubeconfig fi export KUBECONFIG="${MEMBER_CLUSTER_KUBECONFIG}" # switch to member cluster -kubectl config use-context "${MEMBER_CLUSTER_NAME}" # remove namespace of karmada agent -kubectl delete -f "${REPO_ROOT}/artifacts/agent/namespace.yaml" -kubectl delete namespace karmada-cluster +kubectl --context="${MEMBER_CLUSTER_NAME}" delete -f "${REPO_ROOT}/artifacts/agent/namespace.yaml" +kubectl --context="${MEMBER_CLUSTER_NAME}" delete namespace karmada-cluster # remove clusterrole and clusterrolebinding of karmada agent -kubectl delete -f "${REPO_ROOT}/artifacts/agent/clusterrole.yaml" -kubectl delete -f "${REPO_ROOT}/artifacts/agent/clusterrolebinding.yaml" +kubectl --context="${MEMBER_CLUSTER_NAME}" delete -f "${REPO_ROOT}/artifacts/agent/clusterrole.yaml" +kubectl --context="${MEMBER_CLUSTER_NAME}" delete -f "${REPO_ROOT}/artifacts/agent/clusterrolebinding.yaml" # recover the kubeconfig after removing agent if necessary if [ -n "${CURR_KUBECONFIG+x}" ];then diff --git a/hack/undeploy-karmada.sh b/hack/undeploy-karmada.sh index c77052ac3..586190916 100755 --- a/hack/undeploy-karmada.sh +++ b/hack/undeploy-karmada.sh @@ -26,9 +26,9 @@ fi HOST_CLUSTER_KUBECONFIG=$1 # check context existence -if ! 
kubectl config use-context "${2}" --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" > /dev/null 2>&1; +if ! kubectl config get-contexts "${2}" --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" > /dev/null 2>&1; then - echo -e "ERROR: failed to use context: '${2}' not in ${HOST_CLUSTER_KUBECONFIG}. \n" + echo -e "ERROR: failed to get context: '${2}' not in ${HOST_CLUSTER_KUBECONFIG}. \n" usage exit 1 fi @@ -37,18 +37,16 @@ HOST_CLUSTER_NAME=$2 # delete all keys and certificates rm -fr "${HOME}/.karmada" -kubectl config use-context "${HOST_CLUSTER_NAME}" --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" - ETCD_HOST_IP=$(kubectl get pod -l app=etcd -n karmada-system -o jsonpath='{.items[0].status.hostIP}') # clear all in namespace karmada-system -kubectl delete ns karmada-system --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" +kubectl --context="${HOST_CLUSTER_NAME}" delete ns karmada-system --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" # delete clusterroles -kubectl delete clusterrole karmada-controller-manager --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" +kubectl --context="${HOST_CLUSTER_NAME}" delete clusterrole karmada-controller-manager --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" # delete clusterrolebindings -kubectl delete clusterrolebindings karmada-controller-manager --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" +kubectl --context="${HOST_CLUSTER_NAME}" delete clusterrolebindings karmada-controller-manager --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" # clear configs about karmada-apiserver in kubeconfig kubectl config delete-cluster karmada-apiserver --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" diff --git a/hack/util.sh b/hack/util.sh index 0752d27ec..14c6ba01f 100755 --- a/hack/util.sh +++ b/hack/util.sh @@ -313,61 +313,67 @@ function util::wait_file_exist() { } # util::wait_pod_ready waits for pod state becomes ready until timeout. -# Parmeters: -# - $1: pod label, such as "app=etcd" -# - $2: pod namespace, such as "karmada-system" -# - $3: time out, such as "200s" +# Parameters: +# - $1: k8s context name, such as "karmada-apiserver" +# - $2: pod label, such as "app=etcd" +# - $3: pod namespace, such as "karmada-system" +# - $4: time out, such as "200s" function util::wait_pod_ready() { - local pod_label=$1 - local pod_namespace=$2 + local context_name=$1 + local pod_label=$2 + local pod_namespace=$3 echo "wait the $pod_label ready..." set +e - util::kubectl_with_retry wait --for=condition=Ready --timeout=30s pods -l app=${pod_label} -n ${pod_namespace} + util::kubectl_with_retry --context="$context_name" wait --for=condition=Ready --timeout=30s pods -l app=${pod_label} -n ${pod_namespace} ret=$? set -e if [ $ret -ne 0 ];then echo "kubectl describe info:" - kubectl describe pod -l app=${pod_label} -n ${pod_namespace} + kubectl --context="$context_name" describe pod -l app=${pod_label} -n ${pod_namespace} echo "kubectl logs info:" - kubectl logs -l app=${pod_label} -n ${pod_namespace} + kubectl --context="$context_name" logs -l app=${pod_label} -n ${pod_namespace} fi return ${ret} } # util::wait_apiservice_ready waits for apiservice state becomes Available until timeout. # Parmeters: -# - $1: apiservice label, such as "app=etcd" +# - $1: k8s context name, such as "karmada-apiserver" +# - $2: apiservice label, such as "app=etcd" # - $3: time out, such as "200s" function util::wait_apiservice_ready() { - local apiservice_label=$1 + local context_name=$1 + local apiservice_label=$2 echo "wait the $apiservice_label Available..." 
set +e - util::kubectl_with_retry wait --for=condition=Available --timeout=30s apiservices -l app=${apiservice_label} + util::kubectl_with_retry --context="$context_name" wait --for=condition=Available --timeout=30s apiservices -l app=${apiservice_label} ret=$? set -e if [ $ret -ne 0 ];then echo "kubectl describe info:" - kubectl describe apiservices -l app=${apiservice_label} + kubectl --context="$context_name" describe apiservices -l app=${apiservice_label} fi return ${ret} } # util::wait_cluster_ready waits for cluster state becomes ready until timeout. # Parmeters: -# - $1: cluster name, such as "member1" +# - $1: context name, such as "karmada-apiserver" +# - $2: cluster name, such as "member1" function util:wait_cluster_ready() { - local cluster_name=$1 + local context_name=$1 + local cluster_name=$2 echo "wait the cluster $cluster_name onBoard..." set +e - util::kubectl_with_retry wait --for=condition=Ready --timeout=60s clusters ${cluster_name} + util::kubectl_with_retry --context="$context_name" wait --for=condition=Ready --timeout=60s clusters "${cluster_name}" ret=$? set -e - if [ $ret -ne 0 ];then + if [ $ret -ne 0 ]; then echo "kubectl describe info:" - kubectl describe clusters ${cluster_name} + kubectl --context="$context_name" describe clusters "${cluster_name}" fi return ${ret} } @@ -474,20 +480,22 @@ function util::get_apiserver_ip_from_kubeconfig(){ # This function deploys webhook configuration # Parameters: -# - $1: CA file -# - $2: configuration file +# - $1: k8s context name +# - $2: CA file +# - $3: configuration file # Note: # Deprecated: should be removed after helm get on board. function util::deploy_webhook_configuration() { - local ca_file=$1 - local conf=$2 + local context_name=$1 + local ca_file=$2 + local conf=$3 local ca_string=$(cat ${ca_file} | base64 | tr "\n" " "|sed s/[[:space:]]//g) local temp_path=$(mktemp -d) cp -rf "${conf}" "${temp_path}/temp.yaml" sed -i'' -e "s/{{caBundle}}/${ca_string}/g" "${temp_path}/temp.yaml" - kubectl apply -f "${temp_path}/temp.yaml" + kubectl --context="$context_name" apply -f "${temp_path}/temp.yaml" rm -rf "${temp_path}" } @@ -501,18 +509,20 @@ function util::fill_cabundle() { # util::wait_service_external_ip give a service external ip when it is ready, if not, wait until timeout # Parameters: -# - $1: service name in k8s -# - $2: namespace +# - $1: context name in k8s +# - $2: service name in k8s +# - $3: namespace SERVICE_EXTERNAL_IP='' function util::wait_service_external_ip() { - local service_name=$1 - local namespace=$2 + local context_name=$1 + local service_name=$2 + local namespace=$3 local external_ip local tmp for tmp in {1..30}; do set +e - external_host=$(kubectl get service "${service_name}" -n "${namespace}" --template="{{range .status.loadBalancer.ingress}}{{.hostname}} {{end}}" | xargs) - external_ip=$(kubectl get service "${service_name}" -n "${namespace}" --template="{{range .status.loadBalancer.ingress}}{{.ip}} {{end}}" | xargs) + external_host=$(kubectl --context="$context_name" get service "${service_name}" -n "${namespace}" --template="{{range .status.loadBalancer.ingress}}{{.hostname}} {{end}}" | xargs) + external_ip=$(kubectl --context="$context_name" get service "${service_name}" -n "${namespace}" --template="{{range .status.loadBalancer.ingress}}{{.ip}} {{end}}" | xargs) set -e if [[ ! 
-z "$external_host" ]]; then # Compatibility with hostname, such as AWS external_ip=$external_host From 17789e05a94f353e0dda4098098ee46924d4c34b Mon Sep 17 00:00:00 2001 From: yingjinhui Date: Mon, 5 Sep 2022 12:39:01 +0800 Subject: [PATCH 2/2] add karmadaContext to e2e runner. Signed-off-by: yingjinhui --- hack/run-e2e.sh | 2 +- test/e2e/federatedresourcequota_test.go | 6 +++++ test/e2e/framework/cluster.go | 24 ++++++++++++++++- test/e2e/karmadactl_test.go | 36 +++++++++++++++++++++++++ test/e2e/namespace_test.go | 6 +++++ test/e2e/rescheduling_test.go | 9 +++++++ test/e2e/suite_test.go | 4 ++- 7 files changed, 84 insertions(+), 3 deletions(-) diff --git a/hack/run-e2e.sh b/hack/run-e2e.sh index 9e1bd78a8..a0d9152ae 100755 --- a/hack/run-e2e.sh +++ b/hack/run-e2e.sh @@ -39,7 +39,7 @@ export KUBECONFIG=${KARMADA_APISERVER_KUBECONFIG} export PULL_BASED_CLUSTERS=${PULL_BASED_CLUSTERS} set +e -ginkgo -v --race --trace --fail-fast -p --randomize-all ./test/e2e/ +ginkgo -v --race --trace --fail-fast -p --randomize-all ./test/e2e/ -- --karmada-context=karmada-apiserver TESTING_RESULT=$? # Collect logs diff --git a/test/e2e/federatedresourcequota_test.go b/test/e2e/federatedresourcequota_test.go index 9783949f8..0d8f51bb2 100644 --- a/test/e2e/federatedresourcequota_test.go +++ b/test/e2e/federatedresourcequota_test.go @@ -83,6 +83,9 @@ var _ = ginkgo.Describe("FederatedResourceQuota auto-provision testing", func() ginkgo.By(fmt.Sprintf("Unjoinning cluster: %s", clusterName), func() { karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions()) opts := karmadactl.CommandUnjoinOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterNamespace: "karmada-cluster", ClusterName: clusterName, @@ -111,6 +114,9 @@ var _ = ginkgo.Describe("FederatedResourceQuota auto-provision testing", func() ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() { karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions()) opts := karmadactl.CommandJoinOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterNamespace: "karmada-cluster", ClusterName: clusterName, diff --git a/test/e2e/framework/cluster.go b/test/e2e/framework/cluster.go index 79a9c920a..42d731025 100644 --- a/test/e2e/framework/cluster.go +++ b/test/e2e/framework/cluster.go @@ -13,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -188,7 +189,7 @@ func newClusterClientSet(controlPlaneClient client.Client, c *clusterv1alpha1.Cl } clusterConfigPath := pullModeClusters[c.Name] - clusterConfig, err := clientcmd.BuildConfigFromFlags("", clusterConfigPath) + clusterConfig, err := LoadRESTClientConfig(clusterConfigPath, c.Name) if err != nil { return nil, nil, err } @@ -248,3 +249,24 @@ func WaitClusterFitWith(c client.Client, clusterName string, fit func(cluster *c return fit(currentCluster), nil }, pollTimeout, pollInterval).Should(gomega.Equal(true)) } + +// LoadRESTClientConfig creates a rest.Config using the passed kubeconfig. If context is empty, current context in kubeconfig will be used. 
+func LoadRESTClientConfig(kubeconfig string, context string) (*rest.Config, error) { + loader := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig} + loadedConfig, err := loader.Load() + if err != nil { + return nil, err + } + + if context == "" { + context = loadedConfig.CurrentContext + } + klog.Infof("Use context %v", context) + + return clientcmd.NewNonInteractiveClientConfig( + *loadedConfig, + context, + &clientcmd.ConfigOverrides{}, + loader, + ).ClientConfig() +} diff --git a/test/e2e/karmadactl_test.go b/test/e2e/karmadactl_test.go index 8362d3696..f4d1fcf03 100644 --- a/test/e2e/karmadactl_test.go +++ b/test/e2e/karmadactl_test.go @@ -84,6 +84,9 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() { // Step 2, promote namespace used by the deployment from member1 to karmada ginkgo.By(fmt.Sprintf("Promoting namespace %s from member: %s to karmada control plane", deploymentNamespace, member1), func() { namespaceOpts = karmadactl.CommandPromoteOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, Cluster: member1, } args := []string{"namespace", deploymentNamespace} @@ -101,6 +104,9 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() { // Step 3, promote deployment from cluster member1 to karmada ginkgo.By(fmt.Sprintf("Promoting deployment %s from member: %s to karmada", deploymentName, member1), func() { deploymentOpts = karmadactl.CommandPromoteOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, Namespace: deploymentNamespace, Cluster: member1, } @@ -190,6 +196,9 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() { // Step2, promote clusterrole and clusterrolebinding from member1 ginkgo.By(fmt.Sprintf("Promoting clusterrole %s and clusterrolebindings %s from member to karmada", clusterRoleName, clusterRoleBindingName), func() { clusterRoleOpts = karmadactl.CommandPromoteOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, Cluster: member1, } @@ -203,6 +212,9 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() { gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) clusterRoleBindingOpts = karmadactl.CommandPromoteOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, Cluster: member1, } @@ -255,6 +267,9 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() { ginkgo.By(fmt.Sprintf("Promoting namespace %s from member: %s to karmada control plane", serviceNamespace, member1), func() { opts := karmadactl.CommandPromoteOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, Cluster: member1, } args := []string{"namespace", serviceNamespace} @@ -269,6 +284,9 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() { ginkgo.By(fmt.Sprintf("Promoting service %s from member: %s to karmada control plane", serviceName, member1), func() { opts := karmadactl.CommandPromoteOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, Namespace: serviceNamespace, Cluster: member1, } @@ -354,6 +372,9 @@ var _ = framework.SerialDescribe("Karmadactl unjoin testing", ginkgo.Labels{Need ginkgo.BeforeEach(func() { ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() { opts := karmadactl.CommandJoinOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterNamespace: 
"karmada-cluster", ClusterName: clusterName, @@ -398,6 +419,9 @@ var _ = framework.SerialDescribe("Karmadactl unjoin testing", ginkgo.Labels{Need ginkgo.By(fmt.Sprintf("Unjoinning cluster: %s", clusterName), func() { opts := karmadactl.CommandUnjoinOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterNamespace: "karmada-cluster", ClusterName: clusterName, @@ -463,6 +487,9 @@ var _ = framework.SerialDescribe("Karmadactl cordon/uncordon testing", ginkgo.La ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() { karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions()) opts := karmadactl.CommandJoinOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterNamespace: "karmada-cluster", ClusterName: clusterName, @@ -481,6 +508,9 @@ var _ = framework.SerialDescribe("Karmadactl cordon/uncordon testing", ginkgo.La ginkgo.DeferCleanup(func() { ginkgo.By(fmt.Sprintf("Unjoinning cluster: %s", clusterName), func() { opts := karmadactl.CommandUnjoinOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterNamespace: "karmada-cluster", ClusterName: clusterName, @@ -501,6 +531,9 @@ var _ = framework.SerialDescribe("Karmadactl cordon/uncordon testing", ginkgo.La ginkgo.Context("cordon cluster", func() { ginkgo.BeforeEach(func() { opts := karmadactl.CommandCordonOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterName: clusterName, } @@ -536,6 +569,9 @@ var _ = framework.SerialDescribe("Karmadactl cordon/uncordon testing", ginkgo.La ginkgo.It("uncordon cluster", func() { opts := karmadactl.CommandCordonOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterName: clusterName, } diff --git a/test/e2e/namespace_test.go b/test/e2e/namespace_test.go index 171363c4d..a1b5fad5b 100644 --- a/test/e2e/namespace_test.go +++ b/test/e2e/namespace_test.go @@ -85,6 +85,9 @@ var _ = ginkgo.Describe("[namespace auto-provision] namespace auto-provision tes ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() { karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions()) opts := karmadactl.CommandJoinOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterNamespace: "karmada-cluster", ClusterName: clusterName, @@ -100,6 +103,9 @@ var _ = ginkgo.Describe("[namespace auto-provision] namespace auto-provision tes ginkgo.By(fmt.Sprintf("Unjoinning cluster: %s", clusterName), func() { karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions()) opts := karmadactl.CommandUnjoinOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterNamespace: "karmada-cluster", ClusterName: clusterName, diff --git a/test/e2e/rescheduling_test.go b/test/e2e/rescheduling_test.go index 10d6dc9f0..d4ea55a80 100644 --- a/test/e2e/rescheduling_test.go +++ b/test/e2e/rescheduling_test.go @@ -87,6 +87,9 @@ var _ = ginkgo.Describe("[cluster unjoined] reschedule testing", func() { ginkgo.By(fmt.Sprintf("Joinning cluster: %s", newClusterName), func() { karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions()) opts := karmadactl.CommandJoinOption{ + GlobalCommandOptions: 
options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterNamespace: "karmada-cluster", ClusterName: newClusterName, @@ -229,6 +232,9 @@ var _ = ginkgo.Describe("[cluster joined] reschedule testing", func() { ginkgo.By(fmt.Sprintf("Joinning cluster: %s", newClusterName)) karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions()) opts := karmadactl.CommandJoinOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterNamespace: "karmada-cluster", ClusterName: newClusterName, @@ -289,6 +295,9 @@ var _ = ginkgo.Describe("[cluster joined] reschedule testing", func() { ginkgo.By(fmt.Sprintf("Joinning cluster: %s", newClusterName)) karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions()) opts := karmadactl.CommandJoinOption{ + GlobalCommandOptions: options.GlobalCommandOptions{ + KarmadaContext: karmadaContext, + }, DryRun: false, ClusterNamespace: "karmada-cluster", ClusterName: newClusterName, diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index 371d641f7..c37fc29c2 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -68,6 +68,7 @@ var ( ) var ( + karmadaContext string kubeconfig string restConfig *rest.Config karmadaHost string @@ -86,6 +87,7 @@ func init() { // eg. ginkgo -v --race --trace --fail-fast -p --randomize-all ./test/e2e/ -- --poll-interval=5s --pollTimeout=5m flag.DurationVar(&pollInterval, "poll-interval", 5*time.Second, "poll-interval defines the interval time for a poll operation") flag.DurationVar(&pollTimeout, "poll-timeout", 300*time.Second, "poll-timeout defines the time which the poll operation times out") + flag.StringVar(&karmadaContext, "karmada-context", karmadaContext, "Name of the cluster context in control plane kubeconfig file.") } func TestE2E(t *testing.T) { @@ -101,7 +103,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { clusterProvider = cluster.NewProvider() var err error - restConfig, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + restConfig, err = framework.LoadRESTClientConfig(kubeconfig, karmadaContext) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) karmadaHost = restConfig.Host
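Taken together, the two patches apply one convention: nothing mutates the shared kubeconfig with "kubectl config use-context". The shell scripts pass --context= (and, where needed, --kubeconfig=) on every kubectl invocation and verify that a context exists with the read-only "kubectl config get-contexts", the util.sh helpers gain a context-name parameter, karmadactl join/unjoin receive --karmada-context, and the e2e suite threads a --karmada-context flag into GlobalCommandOptions and builds its rest.Config from a named context rather than the kubeconfig's current-context. The following is a minimal sketch of that shell-side convention, not part of the patch itself; the context name "karmada-host", the manifest path, and the pod label are illustrative values reused from the diffs above:

    # Check that the context exists without selecting it (read-only; the
    # caller's current-context is left untouched).
    if ! kubectl config get-contexts "karmada-host" --kubeconfig="${KUBECONFIG}" > /dev/null 2>&1; then
      echo "ERROR: context 'karmada-host' not found in ${KUBECONFIG}" >&2
      exit 1
    fi

    # Every subsequent command names its target explicitly, so scripts that
    # share a kubeconfig cannot race on (or clobber) current-context.
    kubectl --context="karmada-host" apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml"
    kubectl --context="karmada-host" wait --for=condition=Ready --timeout=30s pods -l app=etcd -n karmada-system

The practical payoff is that deploy, undeploy, and e2e scripts can run against the same kubeconfig concurrently, and a user's interactively selected context is never switched behind their back.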