Add scripts for create/delete kind cluster and deploy/undeploy control-plane.
RainbowMango 2020-11-26 16:50:41 +08:00 committed by Kevin Wang
parent be32c34b69
commit 6df7f16ee3
4 changed files with 133 additions and 0 deletions

39 hack/create-cluster.sh Executable file

@@ -0,0 +1,39 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
function usage() {
echo "This script starts a kube cluster by kind."
echo "Usage: hack/create-cluster.sh <CLUSTER_NAME> <KUBECONFIG>"
echo "Example: hack/create-cluster.sh host /root/.kube/host.config"
}
if [[ $# -ne 2 ]]; then
usage
exit 1
fi
CLUSTER_NAME=$1
if [[ -z "${CLUSTER_NAME}" ]]; then
usage
exit 1
fi
KUBECONFIG=$2
if [[ -z "${KUBECONFIG}" ]]; then
usage
exit 1
fi
# TODO(RainbowMango): pin kind version and install automatically.
kind create cluster --name "${CLUSTER_NAME}" --kubeconfig="${KUBECONFIG}" --wait=120s
# Kind cluster's context name contains a "kind-" prefix by default.
# Change context name to cluster name.
kubectl config rename-context "kind-${CLUSTER_NAME}" "${CLUSTER_NAME}" --kubeconfig="${KUBECONFIG}"
# A kind cluster uses `127.0.0.1` as the kube-apiserver endpoint by default, so kind clusters can't reach each other.
# We therefore update the endpoint to the control-plane container's docker IP.
docker_ip=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "${CLUSTER_NAME}-control-plane")
kubectl config set-cluster "${CLUSTER_NAME}" --server="https://${docker_ip}:6443" --kubeconfig="${KUBECONFIG}"
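A minimal usage sketch (the cluster names and kubeconfig paths below are examples mirroring the usage text above, not part of this commit): run the script once per cluster, then confirm the renamed context and the rewritten apiserver endpoint.

# Sketch only: create a "host" and a "member" kind cluster with separate kubeconfig files.
hack/create-cluster.sh host /root/.kube/host.config
hack/create-cluster.sh member /root/.kube/member.config
# The context should now be named "host" (not "kind-host") and the server should point at the docker IP.
kubectl config get-contexts --kubeconfig=/root/.kube/host.config
kubectl cluster-info --kubeconfig=/root/.kube/host.config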

34 hack/delete-cluster.sh Executable file

@@ -0,0 +1,34 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
function usage() {
echo "This script delete a kube cluster by kind."
echo "Usage: hack/delete-cluster.sh <CLUSTER_NAME> <KUBECONFIG>"
echo "Example: hack/delete-cluster.sh host /root/.kube/host.config"
}
if [[ $# -ne 2 ]]; then
usage
exit 1
fi
CLUSTER_NAME=$1
if [[ -z "${CLUSTER_NAME}" ]]; then
usage
exit 1
fi
KUBECONFIG=$2
if [[ -z "${KUBECONFIG}" ]]; then
usage
exit 1
fi
# The context name was changed by 'create-cluster.sh' when the cluster was created,
# which means kind can no longer find and remove the context when deleting the cluster.
# So we rename the context back and let kind take care of it.
kubectl config rename-context "${CLUSTER_NAME}" "kind-${CLUSTER_NAME}" --kubeconfig="${KUBECONFIG}"
kind delete cluster --name "${CLUSTER_NAME}" --kubeconfig="${KUBECONFIG}"
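A matching teardown sketch (same illustrative names and paths as above):

# Sketch only: delete the example clusters created earlier.
hack/delete-cluster.sh member /root/.kube/member.config
hack/delete-cluster.sh host /root/.kube/host.config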

30 hack/deploy-karmada.sh Executable file

@@ -0,0 +1,30 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
function usage() {
echo "This script will deploy karmada control plane to a cluster."
echo "Usage: hack/deploy-karmada.sh"
echo "Example: hack/deploy-karmada.sh"
}
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
# create namespace for control plane components
kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/namespace.yaml"
# create service account, cluster role for controller-manager
kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/serviceaccount.yaml"
kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/clusterrole.yaml"
kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/clusterrolebinding.yaml"
# install APIs
kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/membercluster.karmada.io_memberclusters.yaml"
kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationpolicies.yaml"
kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationbindings.yaml"
kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationworks.yaml"
# deploy controller-manager
kubectl create -f "${SCRIPT_ROOT}/artifacts/deploy/controller-manager.yaml"
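Because every kubectl call above omits --kubeconfig, the script deploys into whichever cluster the current kubectl context points at. A minimal sketch (the exported kubeconfig path is an example, not part of this commit):

# Sketch only: point kubectl at the host cluster created earlier, then deploy.
export KUBECONFIG=/root/.kube/host.config
hack/deploy-karmada.sh
# Check that the CRDs and the controller-manager objects were created.
kubectl get crd | grep karmada.io
kubectl get deployments --all-namespaces | grep karmada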

30 hack/undeploy-karmada.sh Executable file

@@ -0,0 +1,30 @@
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
function usage() {
echo "This script will remove karmada control plane from a cluster."
echo "Usage: hack/undeploy-karmada.sh"
echo "Example: hack/undeploy-karmada.sh"
}
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
# delete controller-manager
kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/controller-manager.yaml"
# delete APIs
kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/membercluster.karmada.io_memberclusters.yaml"
kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationpolicies.yaml"
kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationbindings.yaml"
kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/propagationstrategy.karmada.io_propagationworks.yaml"
# delete service account, cluster role
kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/serviceaccount.yaml"
kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/clusterrole.yaml"
kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/clusterrolebinding.yaml"
# delete namespace for control plane components
kubectl delete -f "${SCRIPT_ROOT}/artifacts/deploy/namespace.yaml"
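Undeploy mirrors the deploy script and also acts on the current kubectl context, so a sketch (example path again) is simply:

# Sketch only: remove the control plane from the same cluster.
export KUBECONFIG=/root/.kube/host.config
hack/undeploy-karmada.sh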