diff --git a/hack/create-cluster.sh b/hack/create-cluster.sh
new file mode 100755
index 000000000..644646580
--- /dev/null
+++ b/hack/create-cluster.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+function usage() {
+  echo "This script starts a kube cluster by kind."
+  echo "Usage: hack/create-cluster.sh <cluster_name> <kubeconfig>"
+  echo "Example: hack/create-cluster.sh host /root/.kube/host.config"
+}
+
+if [[ $# -ne 2 ]]; then
+  usage
+  exit 1
+fi
+
+CLUSTER_NAME=$1
+if [[ -z "${CLUSTER_NAME}" ]]; then
+  usage
+  exit 1
+fi
+KUBECONFIG=$2
+if [[ -z "${KUBECONFIG}" ]]; then
+  usage
+  exit 1
+fi
+
+# TODO(RainbowMango): pin kind version and install automatically.
+kind create cluster --name "${CLUSTER_NAME}" --kubeconfig="${KUBECONFIG}" --wait=120s
+
+# Kind cluster's context name contains a "kind-" prefix by default.
+# Change context name to cluster name.
+kubectl config rename-context "kind-${CLUSTER_NAME}" "${CLUSTER_NAME}" --kubeconfig="${KUBECONFIG}"
+
+# Kind cluster uses `127.0.0.1` as kube-apiserver endpoint by default, thus kind clusters can't reach each other.
+# So we need to update endpoint with docker IP.
+docker_ip=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "${CLUSTER_NAME}-control-plane")
+kubectl config set-cluster "${CLUSTER_NAME}" --server="https://${docker_ip}:6443" --kubeconfig="${KUBECONFIG}"
diff --git a/hack/delete-cluster.sh b/hack/delete-cluster.sh
new file mode 100755
index 000000000..fac4be7be
--- /dev/null
+++ b/hack/delete-cluster.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+function usage() {
+  echo "This script deletes a kube cluster by kind."
+  echo "Usage: hack/delete-cluster.sh <cluster_name> <kubeconfig>"
+  echo "Example: hack/delete-cluster.sh host /root/.kube/host.config"
+}
+
+if [[ $# -ne 2 ]]; then
+  usage
+  exit 1
+fi
+
+CLUSTER_NAME=$1
+if [[ -z "${CLUSTER_NAME}" ]]; then
+  usage
+  exit 1
+fi
+KUBECONFIG=$2
+if [[ -z "${KUBECONFIG}" ]]; then
+  usage
+  exit 1
+fi
+
+# The context name has been changed when creating clusters by 'create-cluster.sh'.
+# As a result, the context can't be removed by kind when deleting the cluster.
+# So we need to change the context name back and let kind take care of it.
+kubectl config rename-context "${CLUSTER_NAME}" "kind-${CLUSTER_NAME}" --kubeconfig="${KUBECONFIG}"
+
+kind delete cluster --name "${CLUSTER_NAME}" --kubeconfig="${KUBECONFIG}"
diff --git a/hack/deploy-karmada.sh b/hack/deploy-karmada.sh
new file mode 100755
index 000000000..33a672aca
--- /dev/null
+++ b/hack/deploy-karmada.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+function usage() {
+  echo "This script will deploy karmada control plane to a cluster."
+  echo "Usage: hack/deploy-karmada.sh"
+  echo "Example: hack/deploy-karmada.sh"
+}
+
+SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+
+# create namespace for control plane components
+kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/namespace.yaml"
+
+# create service account, cluster role for controller-manager
+kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/serviceaccount.yaml"
+kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/clusterrole.yaml"
+kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/clusterrolebinding.yaml"
+
+# install APIs
+kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/membercluster.karmada.io_memberclusters.yaml"
+kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationpolicies.yaml"
+kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationbindings.yaml"
+kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationworks.yaml"
+
+# deploy controller-manager
+kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/controller-manager.yaml"
diff --git a/hack/undeploy-karmada.sh b/hack/undeploy-karmada.sh
new file mode 100755
index 000000000..a641a780f
--- /dev/null
+++ b/hack/undeploy-karmada.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+function usage() {
+  echo "This script will remove karmada control plane from a cluster."
+  echo "Usage: hack/undeploy-karmada.sh"
+  echo "Example: hack/undeploy-karmada.sh"
+}
+
+SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+
+# delete controller-manager
+kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/controller-manager.yaml"
+
+# delete APIs
+kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/membercluster.karmada.io_memberclusters.yaml"
+kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationpolicies.yaml"
+kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationbindings.yaml"
+kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationworks.yaml"
+
+# delete service account, cluster role
+kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/serviceaccount.yaml"
+kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/clusterrole.yaml"
+kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/clusterrolebinding.yaml"
+
+# delete namespace for control plane components
+kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/namespace.yaml"