Merge pull request #2474 from ikaven1024/fix_deploy
do not switch context when deploy karmada
commit 71d037be0b
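The whole change follows one pattern: stop mutating the kubeconfig's current context with `kubectl config use-context` and instead name the target context explicitly on every command. A minimal sketch of the before/after shape (KARMADA_CONTEXT and manifest.yaml are illustrative placeholders, not names taken from the scripts):

    # before: switch the shared kubeconfig's current-context, then rely on it implicitly
    kubectl config use-context "${KARMADA_CONTEXT}"
    kubectl apply -f manifest.yaml

    # after: leave the kubeconfig untouched and pass the context to each invocation
    kubectl --context="${KARMADA_CONTEXT}" apply -f manifest.yaml

The same idea is carried through to karmadactl (--karmada-context), to the helper functions in hack/util.sh (context name as the first parameter), and to the e2e suite (a new --karmada-context flag).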
@@ -42,9 +42,9 @@ fi
 KARMADA_APISERVER_KUBECONFIG=$3

 # check context existence
-if ! kubectl config use-context "${4}" --kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" > /dev/null 2>&1;
+if ! kubectl config get-contexts "${4}" --kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" > /dev/null 2>&1;
 then
-echo -e "ERROR: failed to use context: '${4}' not in ${KARMADA_APISERVER_KUBECONFIG}. \n"
+echo -e "ERROR: failed to get context: '${4}' not in ${KARMADA_APISERVER_KUBECONFIG}. \n"
 usage
 exit 1
 fi

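The pre-flight check above recurs in several scripts: `kubectl config get-contexts <name>` exits non-zero when the named context is missing from the given kubeconfig, but unlike `use-context` it is read-only and does not rewrite the file's current-context. A hedged sketch of the check on its own (CONTEXT_NAME and KUBECONFIG_PATH are illustrative names):

    if ! kubectl config get-contexts "${CONTEXT_NAME}" --kubeconfig="${KUBECONFIG_PATH}" > /dev/null 2>&1; then
      echo "ERROR: context '${CONTEXT_NAME}' not found in ${KUBECONFIG_PATH}"
      exit 1
    fi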
@@ -59,7 +59,7 @@ fi
 MEMBER_CLUSTER_KUBECONFIG=$5

 # check context existence
-if ! kubectl config use-context "${6}" --kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}" > /dev/null 2>&1;
+if ! kubectl config get-contexts "${6}" --kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}" > /dev/null 2>&1;
 then
 echo -e "ERROR: failed to get context: '${6}' not in ${MEMBER_CLUSTER_KUBECONFIG}. \n"
 usage

@@ -25,9 +25,9 @@ fi
 KARMADA_APISERVER_KUBECONFIG=$1

 # check context existence
-if ! kubectl config use-context "${2}" --kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" > /dev/null 2>&1;
+if ! kubectl config get-contexts "${2}" --kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" > /dev/null 2>&1;
 then
-echo -e "ERROR: failed to use context: '${2}' not in ${KARMADA_APISERVER_KUBECONFIG}. \n"
+echo -e "ERROR: failed to get context: '${2}' not in ${KARMADA_APISERVER_KUBECONFIG}. \n"
 usage
 exit 1
 fi

@@ -57,20 +57,19 @@ if [ -n "${KUBECONFIG+x}" ];then
 CURR_KUBECONFIG=$KUBECONFIG # backup current kubeconfig
 fi
 export KUBECONFIG="${MEMBER_CLUSTER_KUBECONFIG}" # switch to member cluster
-kubectl config use-context "${MEMBER_CLUSTER_NAME}"

 AGENT_IMAGE_PULL_POLICY=${IMAGE_PULL_POLICY:-IfNotPresent}

 # create namespace for karmada agent
-kubectl apply -f "${REPO_ROOT}/artifacts/agent/namespace.yaml"
+kubectl --context="${MEMBER_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/agent/namespace.yaml"

 # create service account, cluster role for karmada agent
-kubectl apply -f "${REPO_ROOT}/artifacts/agent/serviceaccount.yaml"
-kubectl apply -f "${REPO_ROOT}/artifacts/agent/clusterrole.yaml"
-kubectl apply -f "${REPO_ROOT}/artifacts/agent/clusterrolebinding.yaml"
+kubectl --context="${MEMBER_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/agent/serviceaccount.yaml"
+kubectl --context="${MEMBER_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/agent/clusterrole.yaml"
+kubectl --context="${MEMBER_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/agent/clusterrolebinding.yaml"

 # create secret
-kubectl create secret generic karmada-kubeconfig --from-file=karmada-kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" -n "${KARMADA_SYSTEM_NAMESPACE}"
+kubectl --context="${MEMBER_CLUSTER_NAME}" create secret generic karmada-kubeconfig --from-file=karmada-kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" -n "${KARMADA_SYSTEM_NAMESPACE}"

 # extract api endpoint of member cluster
 MEMBER_CLUSTER=$(kubectl config view -o jsonpath='{.contexts[?(@.name == "'${MEMBER_CLUSTER_NAME}'")].context.cluster}')

@@ -84,10 +83,10 @@ sed -i'' -e "s/{{member_cluster_name}}/${MEMBER_CLUSTER_NAME}/g" "${TEMP_PATH}"/
 sed -i'' -e "s/{{image_pull_policy}}/${AGENT_IMAGE_PULL_POLICY}/g" "${TEMP_PATH}"/karmada-agent.yaml
 sed -i'' -e "s|{{member_cluster_api_endpoint}}|${MEMBER_CLUSTER_API_ENDPOINT}|g" "${TEMP_PATH}"/karmada-agent.yaml
 echo -e "Apply dynamic rendered deployment in ${TEMP_PATH}/karmada-agent.yaml.\n"
-kubectl apply -f "${TEMP_PATH}"/karmada-agent.yaml
+kubectl --context="${MEMBER_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/karmada-agent.yaml

 # Wait for karmada-etcd to come up before launching the rest of the components.
-util::wait_pod_ready "${AGENT_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+util::wait_pod_ready "${MEMBER_CLUSTER_NAME}" "${AGENT_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"

 # recover the kubeconfig before installing agent if necessary
 if [ -n "${CURR_KUBECONFIG+x}" ];then

@@ -38,20 +38,16 @@ if [ -n "${KUBECONFIG+x}" ];then
 CURR_KUBECONFIG=$KUBECONFIG # backup current kubeconfig
 fi

-# switch to host cluster
-TEMP_PATH=$(mktemp -d)
-cp $HOST_CLUSTER_KUBECONFIG $TEMP_PATH/kubeconfig
-export KUBECONFIG="$TEMP_PATH/kubeconfig"
-kubectl config use-context "${HOST_CLUSTER_NAME}"
+export KUBECONFIG=$HOST_CLUSTER_KUBECONFIG
 echo "using kubeconfig: "$KUBECONFIG

 # deploy karmada opensearch
-kubectl apply -f "${REPO_ROOT}/artifacts/opensearch/karmada-opensearch.yaml"
-kubectl apply -f "${REPO_ROOT}/artifacts/opensearch/karmada-opensearch-dashboards.yaml"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/opensearch/karmada-opensearch.yaml"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/opensearch/karmada-opensearch-dashboards.yaml"

 # make sure all karmada-opensearch components are ready
-util::wait_pod_ready "${KARMADA_OPENSEARCH_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
-util::wait_pod_ready "${KARMADA_OPENSEARCH_DASHBOARDS_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_OPENSEARCH_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_OPENSEARCH_DASHBOARDS_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"

 # recover the kubeconfig before installing opensearch if necessary
 if [ -n "${CURR_KUBECONFIG+x}" ];then

@@ -61,9 +61,9 @@ if [ -n "${KUBECONFIG+x}" ];then
 fi
 export KUBECONFIG="${HOST_CLUSTER_KUBECONFIG}"
 HOST_CLUSTER_NAME=$2
-if ! kubectl config use-context "${HOST_CLUSTER_NAME}" > /dev/null 2>&1;
+if ! kubectl config get-contexts "${HOST_CLUSTER_NAME}" > /dev/null 2>&1;
 then
-echo -e "ERROR: failed to use context: '${HOST_CLUSTER_NAME}' not in ${HOST_CLUSTER_KUBECONFIG}. \n"
+echo -e "ERROR: failed to get context: '${HOST_CLUSTER_NAME}' not in ${HOST_CLUSTER_KUBECONFIG}. \n"
 usage
 recover_kubeconfig
 exit 1

@@ -109,19 +109,20 @@ function generate_cert_secret {
 sed -i'' -e "s/{{server_key}}/${KARMADA_KEY}/g" "${TEMP_PATH}"/karmada-webhook-cert-secret-tmp.yaml
 sed -i'' -e "s/{{server_certificate}}/${KARMADA_CRT}/g" "${TEMP_PATH}"/karmada-webhook-cert-secret-tmp.yaml

-kubectl apply -f "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml
-kubectl apply -f "${TEMP_PATH}"/secret-tmp.yaml
-kubectl apply -f "${TEMP_PATH}"/karmada-webhook-cert-secret-tmp.yaml
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/secret-tmp.yaml
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/karmada-webhook-cert-secret-tmp.yaml
 rm -rf "${TEMP_PATH}"
 }

 # install Karmada's APIs
 function installCRDs() {
-local crd_path=$1
+local context_name=$1
+local crd_path=$2

-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml"
+kubectl --context="${context_name}" apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml"

-kubectl apply -k "${crd_path}"/_crds
+kubectl --context="${context_name}" apply -k "${crd_path}"/_crds
 }

 # Use x.x.x.6 IP address, which is the same CIDR with the node address of the Kind cluster,

@@ -144,12 +145,12 @@ util::create_certkey "" "${CERT_DIR}" "etcd-ca" etcd-server etcd-server "" kuber
 util::create_certkey "" "${CERT_DIR}" "etcd-ca" etcd-client etcd-client "" "*.etcd.karmada-system.svc.cluster.local" "*.karmada-system.svc.cluster.local" "*.karmada-system.svc" "localhost" "127.0.0.1"

 # create namespace for control plane components
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml"

 # create service account, cluster role for controller-manager
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/serviceaccount.yaml"
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/clusterrole.yaml"
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/clusterrolebinding.yaml"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/serviceaccount.yaml"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/clusterrole.yaml"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/clusterrolebinding.yaml"

 KARMADA_CRT=$(base64 "${CERT_DIR}/karmada.crt" | tr -d '\r\n')
 KARMADA_KEY=$(base64 "${CERT_DIR}/karmada.key" | tr -d '\r\n')

@@ -166,10 +167,10 @@ ETCD_CLIENT_KEY=$(base64 "${CERT_DIR}/etcd-client.key" | tr -d '\r\n')
 generate_cert_secret

 # deploy karmada etcd
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-etcd.yaml"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-etcd.yaml"

 # Wait for karmada-etcd to come up before launching the rest of the components.
-util::wait_pod_ready "${ETCD_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${ETCD_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"

 #KARMADA_APISERVER_SERVICE_TYPE is the service type of karmada API Server, For connectivity, it will be different when
 # HOST_CLUSTER_TYPE is different. When HOST_CLUSTER_TYPE=local, we will create a ClusterIP type Service. And when

@@ -192,19 +193,19 @@ TEMP_PATH_APISERVER=$(mktemp -d)
 cp "${REPO_ROOT}"/artifacts/deploy/karmada-apiserver.yaml "${TEMP_PATH_APISERVER}"/karmada-apiserver.yaml
 sed -i'' -e "s/{{service_type}}/${KARMADA_APISERVER_SERVICE_TYPE}/g" "${TEMP_PATH_APISERVER}"/karmada-apiserver.yaml
 echo -e "\nApply dynamic rendered apiserver service in ${TEMP_PATH_APISERVER}/karmada-apiserver.yaml."
-kubectl apply -f "${TEMP_PATH_APISERVER}"/karmada-apiserver.yaml
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH_APISERVER}"/karmada-apiserver.yaml

 # Wait for karmada-apiserver to come up before launching the rest of the components.
-util::wait_pod_ready "${APISERVER_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${APISERVER_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"

 # get Karmada apiserver IP at remote mode
 if [ "${HOST_CLUSTER_TYPE}" = "remote" ]; then
 case $KARMADA_APISERVER_SERVICE_TYPE in
 ClusterIP)
-KARMADA_APISERVER_IP=$(kubectl get pod -l app=karmada-apiserver -n "${KARMADA_SYSTEM_NAMESPACE}" -o=jsonpath='{.items[0].status.podIP}')
+KARMADA_APISERVER_IP=$(kubectl --context="${HOST_CLUSTER_NAME}" get pod -l app=karmada-apiserver -n "${KARMADA_SYSTEM_NAMESPACE}" -o=jsonpath='{.items[0].status.podIP}')
 ;;
 LoadBalancer)
-if util::wait_service_external_ip "karmada-apiserver" "${KARMADA_SYSTEM_NAMESPACE}"; then
+if util::wait_service_external_ip "karmada-apiserver" "karmada-apiserver" "${KARMADA_SYSTEM_NAMESPACE}"; then
 echo "Get service external IP: ${SERVICE_EXTERNAL_IP}, wait to check network connectivity"
 KARMADA_APISERVER_IP=$(util::get_load_balancer_ip) || KARMADA_APISERVER_IP=''
 else

@@ -227,18 +228,18 @@ fi
 util::append_client_kubeconfig "${HOST_CLUSTER_KUBECONFIG}" "${CERT_DIR}/karmada.crt" "${CERT_DIR}/karmada.key" "${KARMADA_APISERVER_IP}" "${KARMADA_APISERVER_SECURE_PORT}" karmada-apiserver

 # deploy kube controller manager
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/kube-controller-manager.yaml"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/kube-controller-manager.yaml"
 # deploy aggregated-apiserver on host cluster
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-aggregated-apiserver.yaml"
-util::wait_pod_ready "${KARMADA_AGGREGATION_APISERVER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-aggregated-apiserver.yaml"
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_AGGREGATION_APISERVER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
 # deploy karmada-search on host cluster
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-search.yaml"
-util::wait_pod_ready "${KARMADA_SEARCH_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-search.yaml"
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_SEARCH_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"

 # install CRD APIs on karmada apiserver.
-if ! kubectl config use-context karmada-apiserver > /dev/null 2>&1;
+if ! kubectl config get-contexts "karmada-apiserver" > /dev/null 2>&1;
 then
-echo -e "ERROR: failed to use context: karmada-apiserver not in ${HOST_CLUSTER_KUBECONFIG}."
+echo -e "ERROR: failed to get context: karmada-apiserver not in karmada-apiserver."
 recover_kubeconfig
 exit 1
 fi

@@ -247,38 +248,36 @@ TEMP_PATH_CRDS=$(mktemp -d)
 cp -rf "${REPO_ROOT}"/charts/karmada/_crds "${TEMP_PATH_CRDS}"
 util::fill_cabundle "${ROOT_CA_FILE}" "${TEMP_PATH_CRDS}/_crds/patches/webhook_in_resourcebindings.yaml"
 util::fill_cabundle "${ROOT_CA_FILE}" "${TEMP_PATH_CRDS}/_crds/patches/webhook_in_clusterresourcebindings.yaml"
-installCRDs "${TEMP_PATH_CRDS}"
+installCRDs "karmada-apiserver" "${TEMP_PATH_CRDS}"
 rm -rf "${TEMP_PATH_CRDS}"

 # deploy webhook configurations on karmada apiserver
-util::deploy_webhook_configuration "${ROOT_CA_FILE}" "${REPO_ROOT}/artifacts/deploy/webhook-configuration.yaml"
+util::deploy_webhook_configuration "karmada-apiserver" "${ROOT_CA_FILE}" "${REPO_ROOT}/artifacts/deploy/webhook-configuration.yaml"

 # deploy APIService on karmada apiserver for karmada-aggregated-apiserver
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-aggregated-apiserver-apiservice.yaml"
+kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-aggregated-apiserver-apiservice.yaml"
 # make sure apiservice for v1alpha1.cluster.karmada.io is Available
-util::wait_apiservice_ready "${KARMADA_AGGREGATION_APISERVER_LABEL}"
+util::wait_apiservice_ready "karmada-apiserver" "${KARMADA_AGGREGATION_APISERVER_LABEL}"

 # deploy APIService on karmada apiserver for karmada-search
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-search-apiservice.yaml"
+kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-search-apiservice.yaml"
 # make sure apiservice for v1alpha1.search.karmada.io is Available
-util::wait_apiservice_ready "${KARMADA_SEARCH_LABEL}"
+util::wait_apiservice_ready "karmada-apiserver" "${KARMADA_SEARCH_LABEL}"

 # deploy cluster proxy rbac for admin
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/cluster-proxy-admin-rbac.yaml"
-
-kubectl config use-context "${HOST_CLUSTER_NAME}"
+kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/cluster-proxy-admin-rbac.yaml"

 # deploy controller-manager on host cluster
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-controller-manager.yaml"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-controller-manager.yaml"
 # deploy scheduler on host cluster
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-scheduler.yaml"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-scheduler.yaml"
 # deploy descheduler on host cluster
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-descheduler.yaml"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-descheduler.yaml"
 # deploy webhook on host cluster
-kubectl apply -f "${REPO_ROOT}/artifacts/deploy/karmada-webhook.yaml"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-webhook.yaml"

 # make sure all karmada control plane components are ready
-util::wait_pod_ready "${KARMADA_CONTROLLER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
-util::wait_pod_ready "${KARMADA_SCHEDULER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
-util::wait_pod_ready "${KUBE_CONTROLLER_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
-util::wait_pod_ready "${KARMADA_WEBHOOK_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_CONTROLLER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_SCHEDULER_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KUBE_CONTROLLER_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${KARMADA_WEBHOOK_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"

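A convention worth noting in the hunk above: resources that live on the Karmada control plane (CRDs, webhook configurations, APIServices, cluster-proxy RBAC) are applied through the karmada-apiserver context, while the control-plane components themselves (etcd, apiserver, controller-manager, scheduler, webhook) are applied to the host cluster through ${HOST_CLUSTER_NAME}. Both targets are now explicit on every call rather than implied by whichever context happened to be current, as in these two lines taken from the diff:

    kubectl --context="karmada-apiserver" apply -f "${REPO_ROOT}/artifacts/deploy/cluster-proxy-admin-rbac.yaml"
    kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-webhook.yaml"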
@@ -48,18 +48,12 @@ then
 exit 1
 fi
 MEMBER_CLUSTER_NAME=$4
-TEMP_PATH=$(mktemp -d)
-MEMBER_CLUSTER_KUBECONFIG_NAME=`basename ${MEMBER_CLUSTER_KUBECONFIG}`
-cp -rf ${MEMBER_CLUSTER_KUBECONFIG} "${TEMP_PATH}"/${MEMBER_CLUSTER_KUBECONFIG_NAME}

-kubectl --kubeconfig="${TEMP_PATH}"/${MEMBER_CLUSTER_KUBECONFIG_NAME} config use-context "${MEMBER_CLUSTER_NAME}"

 # check whether the kubeconfig secret has been created before
 if ! kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" get secrets -n karmada-system | grep "${MEMBER_CLUSTER_NAME}-kubeconfig"; then
 # create secret
-kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" create secret generic ${MEMBER_CLUSTER_NAME}-kubeconfig --from-file=${MEMBER_CLUSTER_NAME}-kubeconfig="${TEMP_PATH}"/${MEMBER_CLUSTER_KUBECONFIG_NAME} -n "karmada-system"
+kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" create secret generic ${MEMBER_CLUSTER_NAME}-kubeconfig --from-file=${MEMBER_CLUSTER_NAME}-kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}" -n "karmada-system"
 fi
-rm -rf "${TEMP_PATH}"

 # deploy scheduler estimator
 TEMP_PATH=$(mktemp -d)

@@ -129,10 +129,9 @@ echo "cluster networks connected"

 #join push mode member clusters
 export KUBECONFIG="${MAIN_KUBECONFIG}"
-kubectl config use-context "${KARMADA_APISERVER_CLUSTER_NAME}"
-${KARMADACTL_BIN} join member1 --cluster-kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}"
+${KARMADACTL_BIN} join --karmada-context="${KARMADA_APISERVER_CLUSTER_NAME}" member1 --cluster-kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}"
 "${REPO_ROOT}"/hack/deploy-scheduler-estimator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" "${MEMBER_CLUSTER_KUBECONFIG}" "${MEMBER_CLUSTER_1_NAME}"
-${KARMADACTL_BIN} join member2 --cluster-kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}"
+${KARMADACTL_BIN} join --karmada-context="${KARMADA_APISERVER_CLUSTER_NAME}" member2 --cluster-kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}"
 "${REPO_ROOT}"/hack/deploy-scheduler-estimator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" "${MEMBER_CLUSTER_KUBECONFIG}" "${MEMBER_CLUSTER_2_NAME}"

 # wait until the pull mode cluster ready

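karmadactl gets the analogous treatment: the control-plane context is selected per invocation with --karmada-context instead of by switching the current context of ${MAIN_KUBECONFIG} up front. The resulting invocation shape, taken from the hunk above, is:

    export KUBECONFIG="${MAIN_KUBECONFIG}"
    ${KARMADACTL_BIN} join --karmada-context="${KARMADA_APISERVER_CLUSTER_NAME}" member1 --cluster-kubeconfig="${MEMBER_CLUSTER_KUBECONFIG}"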
@@ -143,9 +142,9 @@ kind load docker-image "${REGISTRY}/karmada-agent:${VERSION}" --name="${PULL_MOD
 "${REPO_ROOT}"/hack/deploy-agent-and-estimator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" "${MAIN_KUBECONFIG}" "${KARMADA_APISERVER_CLUSTER_NAME}" "${MEMBER_CLUSTER_KUBECONFIG}" "${PULL_MODE_CLUSTER_NAME}"

 # wait all of clusters member1, member2 and member3 status is ready
-util:wait_cluster_ready "${MEMBER_CLUSTER_1_NAME}"
-util:wait_cluster_ready "${MEMBER_CLUSTER_2_NAME}"
-util:wait_cluster_ready "${PULL_MODE_CLUSTER_NAME}"
+util:wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${MEMBER_CLUSTER_1_NAME}"
+util:wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${MEMBER_CLUSTER_2_NAME}"
+util:wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_NAME}"

 function print_success() {
 echo -e "$KARMADA_GREETING"

@@ -17,28 +17,23 @@ PULL_MODE_CLUSTER_NAME=${PULL_MODE_CLUSTER_NAME:-"member3"}

 # delete interpreter webhook example in karmada-host
 export KUBECONFIG="${MAIN_KUBECONFIG}"
-kubectl config use-context "${HOST_CLUSTER_NAME}"
-kubectl delete -f "${REPO_ROOT}"/examples/customresourceinterpreter/karmada-interpreter-webhook-example.yaml
+kubectl --context="${HOST_CLUSTER_NAME}" delete -f "${REPO_ROOT}"/examples/customresourceinterpreter/karmada-interpreter-webhook-example.yaml

 # uninstall metallb
-kubectl delete configmap config -n metallb-system
-kubectl delete -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/metallb.yaml
-kubectl delete -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml
+kubectl --context="${HOST_CLUSTER_NAME}" delete configmap config -n metallb-system
+kubectl --context="${HOST_CLUSTER_NAME}" delete -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/metallb.yaml
+kubectl --context="${HOST_CLUSTER_NAME}" delete -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml

-kubectl get configmap kube-proxy -n kube-system -o yaml | \
+kubectl --context="${HOST_CLUSTER_NAME}" get configmap kube-proxy -n kube-system -o yaml | \
 sed -e "s/strictARP: true/strictARP: false/" | \
-kubectl apply -f - -n kube-system
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f - -n kube-system

 # delete interpreter workload webhook configuration
-kubectl config use-context "${KARMADA_APISERVER}"
-kubectl delete ResourceInterpreterWebhookConfiguration examples
+kubectl --context="${KARMADA_APISERVER}" delete ResourceInterpreterWebhookConfiguration examples

 # delete interpreter example workload CRD in karamada-apiserver and member clusters
-kubectl delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
+kubectl --context="${KARMADA_APISERVER}" delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
 export KUBECONFIG="${MEMBER_CLUSTER_KUBECONFIG}"
-kubectl config use-context "${MEMBER_CLUSTER_1_NAME}"
-kubectl delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
-kubectl config use-context "${MEMBER_CLUSTER_2_NAME}"
-kubectl delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
-kubectl config use-context "${PULL_MODE_CLUSTER_NAME}"
-kubectl delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
+kubectl --context="${MEMBER_CLUSTER_1_NAME}" delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
+kubectl --context="${MEMBER_CLUSTER_2_NAME}" delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
+kubectl --context="${PULL_MODE_CLUSTER_NAME}" delete -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"

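One detail in the hunk above worth calling out: when a pipeline reads a resource, edits it, and writes it back, both ends of the pipe need the explicit context, since neither invocation can rely on a current-context any more:

    kubectl --context="${HOST_CLUSTER_NAME}" get configmap kube-proxy -n kube-system -o yaml | \
    sed -e "s/strictARP: true/strictARP: false/" | \
    kubectl --context="${HOST_CLUSTER_NAME}" apply -f - -n kube-system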
@@ -26,18 +26,17 @@ ROOT_CA_FILE=${CERT_DIR}/ca.crt
 kind load docker-image "${REGISTRY}/karmada-interpreter-webhook-example:${VERSION}" --name="${HOST_CLUSTER_NAME}"

 export KUBECONFIG="${MAIN_KUBECONFIG}"
-kubectl config use-context "${HOST_CLUSTER_NAME}"

 # Due to we are using kube-proxy in IPVS mode, we have to enable strict ARP mode.
 # refer to https://metallb.universe.tf/installation/#preparation
-kubectl get configmap kube-proxy -n kube-system -o yaml | \
+kubectl --context="${HOST_CLUSTER_NAME}" get configmap kube-proxy -n kube-system -o yaml | \
 sed -e "s/strictARP: false/strictARP: true/" | \
-kubectl apply -f - -n kube-system
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f - -n kube-system

 # install metallb by manifest, refer to https://metallb.universe.tf/installation/#installation-by-manifest
-kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml
-kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/metallb.yaml
-util::wait_pod_ready metallb metallb-system
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/namespace.yaml
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/metallb.yaml
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" metallb metallb-system

 # Use x.x.x.6 IP address, which is the same CIDR with the node address of the Kind cluster,
 # as the loadBalancer service address of component karmada-interpreter-webhook-example.

@@ -45,7 +44,7 @@ interpreter_webhook_example_service_external_ip_prefix=$(echo $(util::get_apiser
 interpreter_webhook_example_service_external_ip_address=${interpreter_webhook_example_service_external_ip_prefix}.6

 # config with layer 2 configuration. refer to https://metallb.universe.tf/configuration/#layer-2-configuration
-cat <<EOF | kubectl apply -f -
+cat <<EOF | kubectl --context="${HOST_CLUSTER_NAME}" apply -f -
 apiVersion: v1
 kind: ConfigMap
 metadata:

@@ -61,22 +60,18 @@ data:
 EOF

 # deploy interpreter webhook example in karmada-host
-kubectl apply -f "${REPO_ROOT}"/examples/customresourceinterpreter/karmada-interpreter-webhook-example.yaml
-util::wait_pod_ready "${INTERPRETER_WEBHOOK_EXAMPLE_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
+kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}"/examples/customresourceinterpreter/karmada-interpreter-webhook-example.yaml
+util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${INTERPRETER_WEBHOOK_EXAMPLE_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"

 # deploy interpreter workload webhook-configuration.yaml
-kubectl config use-context "${KARMADA_APISERVER}"
 cp -rf "${REPO_ROOT}/examples/customresourceinterpreter/webhook-configuration.yaml" "${REPO_ROOT}/examples/customresourceinterpreter/webhook-configuration-temp.yaml"
 sed -i'' -e "s/{{karmada-interpreter-webhook-example-svc-address}}/${interpreter_webhook_example_service_external_ip_address}/g" "${REPO_ROOT}/examples/customresourceinterpreter/webhook-configuration-temp.yaml"
-util::deploy_webhook_configuration "${ROOT_CA_FILE}" "${REPO_ROOT}/examples/customresourceinterpreter/webhook-configuration-temp.yaml"
+util::deploy_webhook_configuration "${KARMADA_APISERVER}" "${ROOT_CA_FILE}" "${REPO_ROOT}/examples/customresourceinterpreter/webhook-configuration-temp.yaml"
 rm -rf "${REPO_ROOT}/examples/customresourceinterpreter/webhook-configuration-temp.yaml"

 # install interpreter example workload CRD in karamada-apiserver and member clusters
-kubectl apply -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
+kubectl --context="${KARMADA_APISERVER}" apply -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
 export KUBECONFIG="${MEMBER_CLUSTER_KUBECONFIG}"
-kubectl config use-context "${MEMBER_CLUSTER_1_NAME}"
-kubectl apply -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
-kubectl config use-context "${MEMBER_CLUSTER_2_NAME}"
-kubectl apply -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
-kubectl config use-context "${PULL_MODE_CLUSTER_NAME}"
-kubectl apply -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
+kubectl --context="${MEMBER_CLUSTER_1_NAME}" apply -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
+kubectl --context="${MEMBER_CLUSTER_2_NAME}" apply -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"
+kubectl --context="${PULL_MODE_CLUSTER_NAME}" apply -f "${REPO_ROOT}/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml"

@@ -53,7 +53,6 @@ if [[ -n ${CHINA_MAINLAND:-} ]]; then
 fi

 "${SCRIPT_ROOT}"/hack/deploy-karmada.sh "${HOST_CLUSTER_KUBECONFIG}" "${HOST_CLUSTER_NAME}" "remote"
-kubectl config use-context karmada-apiserver --kubeconfig="${HOST_CLUSTER_KUBECONFIG}"

 function print_success() {
 echo -e "$KARMADA_GREETING"

@@ -39,7 +39,7 @@ export KUBECONFIG=${KARMADA_APISERVER_KUBECONFIG}
 export PULL_BASED_CLUSTERS=${PULL_BASED_CLUSTERS}

 set +e
-ginkgo -v --race --trace --fail-fast -p --randomize-all ./test/e2e/
+ginkgo -v --race --trace --fail-fast -p --randomize-all ./test/e2e/ -- --karmada-context=karmada-apiserver
 TESTING_RESULT=$?

 # Collect logs

@@ -25,9 +25,9 @@ fi
 KARMADA_APISERVER_KUBECONFIG=$1

 # check context existence
-if ! kubectl config use-context "${2}" --kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" > /dev/null 2>&1;
+if ! kubectl config get-contexts "${2}" --kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" > /dev/null 2>&1;
 then
-echo -e "ERROR: failed to use context: '${2}' not in ${KARMADA_APISERVER_KUBECONFIG}. \n"
+echo -e "ERROR: failed to get context: '${2}' not in ${KARMADA_APISERVER_KUBECONFIG}. \n"
 usage
 exit 1
 fi

@@ -53,22 +53,21 @@ MEMBER_CLUSTER_NAME=$4
 source "${REPO_ROOT}"/hack/util.sh

 # remove the member cluster from karmada control plane
-kubectl delete cluster ${MEMBER_CLUSTER_NAME}
+kubectl --context="${2}" delete cluster "${MEMBER_CLUSTER_NAME}"

 # remove agent from the member cluster
 if [ -n "${KUBECONFIG+x}" ];then
 CURR_KUBECONFIG=$KUBECONFIG # backup current kubeconfig
 fi
 export KUBECONFIG="${MEMBER_CLUSTER_KUBECONFIG}" # switch to member cluster
-kubectl config use-context "${MEMBER_CLUSTER_NAME}"

 # remove namespace of karmada agent
-kubectl delete -f "${REPO_ROOT}/artifacts/agent/namespace.yaml"
-kubectl delete namespace karmada-cluster
+kubectl --context="${MEMBER_CLUSTER_NAME}" delete -f "${REPO_ROOT}/artifacts/agent/namespace.yaml"
+kubectl --context="${MEMBER_CLUSTER_NAME}" delete namespace karmada-cluster

 # remove clusterrole and clusterrolebinding of karmada agent
-kubectl delete -f "${REPO_ROOT}/artifacts/agent/clusterrole.yaml"
-kubectl delete -f "${REPO_ROOT}/artifacts/agent/clusterrolebinding.yaml"
+kubectl --context="${MEMBER_CLUSTER_NAME}" delete -f "${REPO_ROOT}/artifacts/agent/clusterrole.yaml"
+kubectl --context="${MEMBER_CLUSTER_NAME}" delete -f "${REPO_ROOT}/artifacts/agent/clusterrolebinding.yaml"

 # recover the kubeconfig after removing agent if necessary
 if [ -n "${CURR_KUBECONFIG+x}" ];then

@@ -26,9 +26,9 @@ fi
 HOST_CLUSTER_KUBECONFIG=$1

 # check context existence
-if ! kubectl config use-context "${2}" --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" > /dev/null 2>&1;
+if ! kubectl config get-contexts "${2}" --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" > /dev/null 2>&1;
 then
-echo -e "ERROR: failed to use context: '${2}' not in ${HOST_CLUSTER_KUBECONFIG}. \n"
+echo -e "ERROR: failed to get context: '${2}' not in ${HOST_CLUSTER_KUBECONFIG}. \n"
 usage
 exit 1
 fi

@@ -37,18 +37,16 @@ HOST_CLUSTER_NAME=$2
 # delete all keys and certificates
 rm -fr "${HOME}/.karmada"

-kubectl config use-context "${HOST_CLUSTER_NAME}" --kubeconfig="${HOST_CLUSTER_KUBECONFIG}"
-
 ETCD_HOST_IP=$(kubectl get pod -l app=etcd -n karmada-system -o jsonpath='{.items[0].status.hostIP}')

 # clear all in namespace karmada-system
-kubectl delete ns karmada-system --kubeconfig="${HOST_CLUSTER_KUBECONFIG}"
+kubectl --context="${HOST_CLUSTER_NAME}" delete ns karmada-system --kubeconfig="${HOST_CLUSTER_KUBECONFIG}"

 # delete clusterroles
-kubectl delete clusterrole karmada-controller-manager --kubeconfig="${HOST_CLUSTER_KUBECONFIG}"
+kubectl --context="${HOST_CLUSTER_NAME}" delete clusterrole karmada-controller-manager --kubeconfig="${HOST_CLUSTER_KUBECONFIG}"

 # delete clusterrolebindings
-kubectl delete clusterrolebindings karmada-controller-manager --kubeconfig="${HOST_CLUSTER_KUBECONFIG}"
+kubectl --context="${HOST_CLUSTER_NAME}" delete clusterrolebindings karmada-controller-manager --kubeconfig="${HOST_CLUSTER_KUBECONFIG}"

 # clear configs about karmada-apiserver in kubeconfig
 kubectl config delete-cluster karmada-apiserver --kubeconfig="${HOST_CLUSTER_KUBECONFIG}"

hack/util.sh (68 changed lines)
@@ -313,61 +313,67 @@ function util::wait_file_exist() {
 }

 # util::wait_pod_ready waits for pod state becomes ready until timeout.
-# Parmeters:
-# - $1: pod label, such as "app=etcd"
-# - $2: pod namespace, such as "karmada-system"
-# - $3: time out, such as "200s"
+# Parameters:
+# - $1: k8s context name, such as "karmada-apiserver"
+# - $2: pod label, such as "app=etcd"
+# - $3: pod namespace, such as "karmada-system"
+# - $4: time out, such as "200s"
 function util::wait_pod_ready() {
-local pod_label=$1
-local pod_namespace=$2
+local context_name=$1
+local pod_label=$2
+local pod_namespace=$3

 echo "wait the $pod_label ready..."
 set +e
-util::kubectl_with_retry wait --for=condition=Ready --timeout=30s pods -l app=${pod_label} -n ${pod_namespace}
+util::kubectl_with_retry --context="$context_name" wait --for=condition=Ready --timeout=30s pods -l app=${pod_label} -n ${pod_namespace}
 ret=$?
 set -e
 if [ $ret -ne 0 ];then
 echo "kubectl describe info:"
-kubectl describe pod -l app=${pod_label} -n ${pod_namespace}
+kubectl --context="$context_name" describe pod -l app=${pod_label} -n ${pod_namespace}
 echo "kubectl logs info:"
-kubectl logs -l app=${pod_label} -n ${pod_namespace}
+kubectl --context="$context_name" logs -l app=${pod_label} -n ${pod_namespace}
 fi
 return ${ret}
 }

 # util::wait_apiservice_ready waits for apiservice state becomes Available until timeout.
 # Parmeters:
-# - $1: apiservice label, such as "app=etcd"
+# - $1: k8s context name, such as "karmada-apiserver"
+# - $2: apiservice label, such as "app=etcd"
 # - $3: time out, such as "200s"
 function util::wait_apiservice_ready() {
-local apiservice_label=$1
+local context_name=$1
+local apiservice_label=$2

 echo "wait the $apiservice_label Available..."
 set +e
-util::kubectl_with_retry wait --for=condition=Available --timeout=30s apiservices -l app=${apiservice_label}
+util::kubectl_with_retry --context="$context_name" wait --for=condition=Available --timeout=30s apiservices -l app=${apiservice_label}
 ret=$?
 set -e
 if [ $ret -ne 0 ];then
 echo "kubectl describe info:"
-kubectl describe apiservices -l app=${apiservice_label}
+kubectl --context="$context_name" describe apiservices -l app=${apiservice_label}
 fi
 return ${ret}
 }

 # util::wait_cluster_ready waits for cluster state becomes ready until timeout.
 # Parmeters:
-# - $1: cluster name, such as "member1"
+# - $1: context name, such as "karmada-apiserver"
+# - $2: cluster name, such as "member1"
 function util:wait_cluster_ready() {
-local cluster_name=$1
+local context_name=$1
+local cluster_name=$2

 echo "wait the cluster $cluster_name onBoard..."
 set +e
-util::kubectl_with_retry wait --for=condition=Ready --timeout=60s clusters ${cluster_name}
+util::kubectl_with_retry --context="$context_name" wait --for=condition=Ready --timeout=60s clusters "${cluster_name}"
 ret=$?
 set -e
-if [ $ret -ne 0 ];then
+if [ $ret -ne 0 ]; then
 echo "kubectl describe info:"
-kubectl describe clusters ${cluster_name}
+kubectl --context="$context_name" describe clusters "${cluster_name}"
 fi
 return ${ret}
 }

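Because the context name is now the first positional parameter of these helpers, every caller has to shift its arguments by one. The before/after shape, using a call that appears elsewhere in this diff, is:

    # before
    util::wait_pod_ready "${ETCD_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"
    # after
    util::wait_pod_ready "${HOST_CLUSTER_NAME}" "${ETCD_POD_LABEL}" "${KARMADA_SYSTEM_NAMESPACE}"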
@@ -474,20 +480,22 @@ function util::get_apiserver_ip_from_kubeconfig(){

 # This function deploys webhook configuration
 # Parameters:
-# - $1: CA file
-# - $2: configuration file
+# - $1: k8s context name
+# - $2: CA file
+# - $3: configuration file
 # Note:
 # Deprecated: should be removed after helm get on board.
 function util::deploy_webhook_configuration() {
-local ca_file=$1
-local conf=$2
+local context_name=$1
+local ca_file=$2
+local conf=$3

 local ca_string=$(cat ${ca_file} | base64 | tr "\n" " "|sed s/[[:space:]]//g)

 local temp_path=$(mktemp -d)
 cp -rf "${conf}" "${temp_path}/temp.yaml"
 sed -i'' -e "s/{{caBundle}}/${ca_string}/g" "${temp_path}/temp.yaml"

-kubectl apply -f "${temp_path}/temp.yaml"
+kubectl --context="$context_name" apply -f "${temp_path}/temp.yaml"
 rm -rf "${temp_path}"
 }

@@ -501,18 +509,20 @@ function util::fill_cabundle() {

 # util::wait_service_external_ip give a service external ip when it is ready, if not, wait until timeout
 # Parameters:
-# - $1: service name in k8s
-# - $2: namespace
+# - $1: context name in k8s
+# - $2: service name in k8s
+# - $3: namespace
 SERVICE_EXTERNAL_IP=''
 function util::wait_service_external_ip() {
-local service_name=$1
-local namespace=$2
+local context_name=$1
+local service_name=$2
+local namespace=$3
 local external_ip
 local tmp
 for tmp in {1..30}; do
 set +e
-external_host=$(kubectl get service "${service_name}" -n "${namespace}" --template="{{range .status.loadBalancer.ingress}}{{.hostname}} {{end}}" | xargs)
-external_ip=$(kubectl get service "${service_name}" -n "${namespace}" --template="{{range .status.loadBalancer.ingress}}{{.ip}} {{end}}" | xargs)
+external_host=$(kubectl --context="$context_name" get service "${service_name}" -n "${namespace}" --template="{{range .status.loadBalancer.ingress}}{{.hostname}} {{end}}" | xargs)
+external_ip=$(kubectl --context="$context_name" get service "${service_name}" -n "${namespace}" --template="{{range .status.loadBalancer.ingress}}{{.ip}} {{end}}" | xargs)
 set -e
 if [[ ! -z "$external_host" ]]; then # Compatibility with hostname, such as AWS
 external_ip=$external_host

@@ -83,6 +83,9 @@ var _ = ginkgo.Describe("FederatedResourceQuota auto-provision testing", func()
 ginkgo.By(fmt.Sprintf("Unjoinning cluster: %s", clusterName), func() {
 karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions())
 opts := karmadactl.CommandUnjoinOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterNamespace: "karmada-cluster",
 ClusterName: clusterName,

@@ -111,6 +114,9 @@ var _ = ginkgo.Describe("FederatedResourceQuota auto-provision testing", func()
 ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() {
 karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions())
 opts := karmadactl.CommandJoinOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterNamespace: "karmada-cluster",
 ClusterName: clusterName,

@@ -13,6 +13,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/client-go/dynamic"
 "k8s.io/client-go/kubernetes"
+"k8s.io/client-go/rest"
 "k8s.io/client-go/tools/clientcmd"
 "k8s.io/klog/v2"
 "sigs.k8s.io/controller-runtime/pkg/client"

@@ -188,7 +189,7 @@ func newClusterClientSet(controlPlaneClient client.Client, c *clusterv1alpha1.Cl
 }

 clusterConfigPath := pullModeClusters[c.Name]
-clusterConfig, err := clientcmd.BuildConfigFromFlags("", clusterConfigPath)
+clusterConfig, err := LoadRESTClientConfig(clusterConfigPath, c.Name)
 if err != nil {
 return nil, nil, err
 }

@@ -248,3 +249,24 @@ func WaitClusterFitWith(c client.Client, clusterName string, fit func(cluster *c
 return fit(currentCluster), nil
 }, pollTimeout, pollInterval).Should(gomega.Equal(true))
 }
+
+// LoadRESTClientConfig creates a rest.Config using the passed kubeconfig. If context is empty, current context in kubeconfig will be used.
+func LoadRESTClientConfig(kubeconfig string, context string) (*rest.Config, error) {
+loader := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}
+loadedConfig, err := loader.Load()
+if err != nil {
+return nil, err
+}
+
+if context == "" {
+context = loadedConfig.CurrentContext
+}
+klog.Infof("Use context %v", context)
+
+return clientcmd.NewNonInteractiveClientConfig(
+*loadedConfig,
+context,
+&clientcmd.ConfigOverrides{},
+loader,
+).ClientConfig()
+}

@@ -84,6 +84,9 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() {
 // Step 2, promote namespace used by the deployment from member1 to karmada
 ginkgo.By(fmt.Sprintf("Promoting namespace %s from member: %s to karmada control plane", deploymentNamespace, member1), func() {
 namespaceOpts = karmadactl.CommandPromoteOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 Cluster: member1,
 }
 args := []string{"namespace", deploymentNamespace}

@@ -101,6 +104,9 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() {
 // Step 3, promote deployment from cluster member1 to karmada
 ginkgo.By(fmt.Sprintf("Promoting deployment %s from member: %s to karmada", deploymentName, member1), func() {
 deploymentOpts = karmadactl.CommandPromoteOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 Namespace: deploymentNamespace,
 Cluster: member1,
 }

@@ -190,6 +196,9 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() {
 // Step2, promote clusterrole and clusterrolebinding from member1
 ginkgo.By(fmt.Sprintf("Promoting clusterrole %s and clusterrolebindings %s from member to karmada", clusterRoleName, clusterRoleBindingName), func() {
 clusterRoleOpts = karmadactl.CommandPromoteOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 Cluster: member1,
 }

@@ -203,6 +212,9 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() {
 gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

 clusterRoleBindingOpts = karmadactl.CommandPromoteOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 Cluster: member1,
 }

@@ -255,6 +267,9 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() {

 ginkgo.By(fmt.Sprintf("Promoting namespace %s from member: %s to karmada control plane", serviceNamespace, member1), func() {
 opts := karmadactl.CommandPromoteOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 Cluster: member1,
 }
 args := []string{"namespace", serviceNamespace}

@@ -269,6 +284,9 @@ var _ = ginkgo.Describe("Karmadactl promote testing", func() {

 ginkgo.By(fmt.Sprintf("Promoting service %s from member: %s to karmada control plane", serviceName, member1), func() {
 opts := karmadactl.CommandPromoteOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 Namespace: serviceNamespace,
 Cluster: member1,
 }

@@ -354,6 +372,9 @@ var _ = framework.SerialDescribe("Karmadactl join/unjoin testing", ginkgo.Labels
 ginkgo.BeforeEach(func() {
 ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() {
 opts := karmadactl.CommandJoinOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterNamespace: "karmada-cluster",
 ClusterName: clusterName,

@@ -420,6 +441,9 @@ var _ = framework.SerialDescribe("Karmadactl join/unjoin testing", ginkgo.Labels

 ginkgo.By(fmt.Sprintf("Unjoinning cluster: %s", clusterName), func() {
 opts := karmadactl.CommandUnjoinOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterNamespace: "karmada-cluster",
 ClusterName: clusterName,

@@ -485,6 +509,9 @@ var _ = framework.SerialDescribe("Karmadactl cordon/uncordon testing", ginkgo.La
 ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() {
 karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions())
 opts := karmadactl.CommandJoinOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterNamespace: "karmada-cluster",
 ClusterName: clusterName,

@@ -503,6 +530,9 @@ var _ = framework.SerialDescribe("Karmadactl cordon/uncordon testing", ginkgo.La
 ginkgo.DeferCleanup(func() {
 ginkgo.By(fmt.Sprintf("Unjoinning cluster: %s", clusterName), func() {
 opts := karmadactl.CommandUnjoinOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterNamespace: "karmada-cluster",
 ClusterName: clusterName,

@@ -523,6 +553,9 @@ var _ = framework.SerialDescribe("Karmadactl cordon/uncordon testing", ginkgo.La
 ginkgo.Context("cordon cluster", func() {
 ginkgo.BeforeEach(func() {
 opts := karmadactl.CommandCordonOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterName: clusterName,
 }

@@ -558,6 +591,9 @@ var _ = framework.SerialDescribe("Karmadactl cordon/uncordon testing", ginkgo.La

 ginkgo.It("uncordon cluster", func() {
 opts := karmadactl.CommandCordonOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterName: clusterName,
 }

@@ -85,6 +85,9 @@ var _ = ginkgo.Describe("[namespace auto-provision] namespace auto-provision tes
 ginkgo.By(fmt.Sprintf("Joinning cluster: %s", clusterName), func() {
 karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions())
 opts := karmadactl.CommandJoinOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterNamespace: "karmada-cluster",
 ClusterName: clusterName,

@@ -100,6 +103,9 @@ var _ = ginkgo.Describe("[namespace auto-provision] namespace auto-provision tes
 ginkgo.By(fmt.Sprintf("Unjoinning cluster: %s", clusterName), func() {
 karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions())
 opts := karmadactl.CommandUnjoinOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterNamespace: "karmada-cluster",
 ClusterName: clusterName,

@@ -87,6 +87,9 @@ var _ = ginkgo.Describe("[cluster unjoined] reschedule testing", func() {
 ginkgo.By(fmt.Sprintf("Joinning cluster: %s", newClusterName), func() {
 karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions())
 opts := karmadactl.CommandJoinOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterNamespace: "karmada-cluster",
 ClusterName: newClusterName,

@@ -229,6 +232,9 @@ var _ = ginkgo.Describe("[cluster joined] reschedule testing", func() {
 ginkgo.By(fmt.Sprintf("Joinning cluster: %s", newClusterName))
 karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions())
 opts := karmadactl.CommandJoinOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterNamespace: "karmada-cluster",
 ClusterName: newClusterName,

@@ -289,6 +295,9 @@ var _ = ginkgo.Describe("[cluster joined] reschedule testing", func() {
 ginkgo.By(fmt.Sprintf("Joinning cluster: %s", newClusterName))
 karmadaConfig := karmadactl.NewKarmadaConfig(clientcmd.NewDefaultPathOptions())
 opts := karmadactl.CommandJoinOption{
+GlobalCommandOptions: options.GlobalCommandOptions{
+KarmadaContext: karmadaContext,
+},
 DryRun: false,
 ClusterNamespace: "karmada-cluster",
 ClusterName: newClusterName,

@@ -68,6 +68,7 @@ var (
 )

 var (
+karmadaContext string
 kubeconfig string
 restConfig *rest.Config
 karmadaHost string

@@ -86,6 +87,7 @@ func init() {
 // eg. ginkgo -v --race --trace --fail-fast -p --randomize-all ./test/e2e/ -- --poll-interval=5s --pollTimeout=5m
 flag.DurationVar(&pollInterval, "poll-interval", 5*time.Second, "poll-interval defines the interval time for a poll operation")
 flag.DurationVar(&pollTimeout, "poll-timeout", 300*time.Second, "poll-timeout defines the time which the poll operation times out")
+flag.StringVar(&karmadaContext, "karmada-context", karmadaContext, "Name of the cluster context in control plane kubeconfig file.")
 }

 func TestE2E(t *testing.T) {

@@ -101,7 +103,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {

 clusterProvider = cluster.NewProvider()
 var err error
-restConfig, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
+restConfig, err = framework.LoadRESTClientConfig(kubeconfig, karmadaContext)
 gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

 karmadaHost = restConfig.Host