Merge pull request #15276 from justinsb/simple_scalability_test_2

Update scalability test so it passes
Kubernetes Prow Robot (2023-03-29 09:25:47 -07:00), committed by GitHub
commit 7b24c4f785
3 changed files with 37 additions and 21 deletions


@@ -31,11 +31,6 @@
-if [[ -z "${WORKSPACE-}" ]]; then
-  WORKSPACE=$(mktemp -dt kops.XXXXXXXXX)
-fi
+if [[ -z "${WORKSPACE-}" ]]; then
+  export WORKSPACE
+  WORKSPACE=$(mktemp -dt kops.XXXXXXXXX)
+fi
 if [[ -z "${NETWORKING-}" ]]; then
   export NETWORKING="calico"
 fi
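
A side note on the new export WORKSPACE line: splitting the export from the command-substitution assignment is the usual fix for ShellCheck SC2155, because export VAR=$(cmd) returns export's own (successful) status and so hides a failure of cmd from set -e. A minimal standalone sketch of the difference (illustrative, not part of the PR):

#!/usr/bin/env bash
set -euo pipefail

# Combined form: export succeeds even if the command substitution fails,
# so set -e never sees the failure (ShellCheck SC2155).
# export WORKSPACE=$(mktemp -dt kops.XXXXXXXXX)

# Split form: the bare assignment carries mktemp's exit status,
# so a mktemp failure aborts the script as intended.
export WORKSPACE
WORKSPACE=$(mktemp -dt kops.XXXXXXXXX)
echo "using ${WORKSPACE}"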


@@ -21,17 +21,23 @@ REPO_ROOT=$(git rev-parse --show-toplevel)
 cd "${REPO_ROOT}"
 cd ..
 WORKSPACE=$(pwd)
-cd "${WORKSPACE}/kops"

 # Create bindir
-BINDIR=${WORKSPACE}/bin
-export PATH=${BINDIR}:${PATH}
+BINDIR="${WORKSPACE}/bin"
+export PATH="${BINDIR}:${PATH}"
+mkdir -p "${BINDIR}"

 # Build kubetest-2 kOps support
+pushd "${WORKSPACE}/kops"
 GOBIN=${BINDIR} make test-e2e-install
+popd

 # Setup our cleanup function; as we allocate resources we set a variable to indicate they should be cleaned up
 function cleanup {
   # shellcheck disable=SC2153
   if [[ "${DELETE_CLUSTER:-}" == "true" ]]; then
-    kubetest2 kops "${KUBETEST2_ARGS}" --down || echo "kubetest2 down failed"
+    kubetest2 kops "${KUBETEST2_ARGS[@]}" --down || echo "kubetest2 down failed"
   fi
 }
 trap cleanup EXIT
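
The cleanup arrangement above follows a common shell idiom: register the EXIT trap early, but gate the expensive teardown behind a flag that is flipped only once resources actually exist. A minimal standalone sketch of the pattern (names illustrative, not kops code):

#!/usr/bin/env bash
set -euo pipefail

DELETE_CLUSTER=""   # nothing to clean up yet

function cleanup {
  if [[ "${DELETE_CLUSTER:-}" == "true" ]]; then
    echo "tearing down cluster..."   # e.g. kubetest2 kops ... --down
  fi
}
trap cleanup EXIT   # runs on normal exit and on set -e failures

# ... once the cluster has been created:
DELETE_CLUSTER="true"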
@@ -43,8 +49,6 @@ if [[ -z "${CLUSTER_NAME:-}" ]]; then
 fi
 echo "CLUSTER_NAME=${CLUSTER_NAME}"

-exit 0

 if [[ -z "${K8S_VERSION:-}" ]]; then
   K8S_VERSION="$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)"
 fi
@@ -80,8 +84,12 @@ fi
 echo "ADMIN_ACCESS=${ADMIN_ACCESS}"

 # cilium does not yet pass conformance tests (shared hostport test)
-create_args="--networking cilium"
-create_args="${create_args} --node-size=c6g.medium --master-size=c6g.xlarge --node-count=100"
+#create_args="--networking cilium"
+create_args="--networking calico"
+# TODO: Use the newer non-DNS mode, more scalable than gossip and generally recommended
+# However, it currently fails two tests (HostPort & OIDC) so need to track that down
+#create_args="--dns none"
+create_args="${create_args} --node-size=c6g.medium --master-size=c6g.xlarge --node-count=101"
 if [[ -n "${ZONES:-}" ]]; then
   create_args="${create_args} --zones=${ZONES}"
 fi
@@ -93,25 +101,29 @@ echo "KOPS_FEATURE_FLAGS=${KOPS_FEATURE_FLAGS}"

 # Note that these arguments for kubetest2
-KUBETEST2_ARGS=""
-KUBETEST2_ARGS="${KUBETEST2_ARGS} -v=2 --cloud-provider=${CLOUD_PROVIDER}"
-KUBETEST2_ARGS="${KUBETEST2_ARGS} --cluster-name=${CLUSTER_NAME:-}"
-KUBETEST2_ARGS="${KUBETEST2_ARGS} --kops-binary-path=${KOPS_BIN}"
-KUBETEST2_ARGS="${KUBETEST2_ARGS} --admin-access=${ADMIN_ACCESS:-}"
-KUBETEST2_ARGS="${KUBETEST2_ARGS} --env=KOPS_FEATURE_FLAGS=${KOPS_FEATURE_FLAGS}"
+KUBETEST2_ARGS=()
+KUBETEST2_ARGS+=("-v=2")
+KUBETEST2_ARGS+=("--cloud-provider=${CLOUD_PROVIDER}")
+KUBETEST2_ARGS+=("--cluster-name=${CLUSTER_NAME:-}")
+KUBETEST2_ARGS+=("--kops-binary-path=${KOPS_BIN}")
+KUBETEST2_ARGS+=("--admin-access=${ADMIN_ACCESS:-}")
+KUBETEST2_ARGS+=("--env=KOPS_FEATURE_FLAGS=${KOPS_FEATURE_FLAGS}")
+
+# More time for bigger clusters
+KUBETEST2_ARGS+=("--validation-wait=30m")

 # The caller can set DELETE_CLUSTER=false to stop us deleting the cluster
 if [[ -z "${DELETE_CLUSTER:-}" ]]; then
   DELETE_CLUSTER="true"
 fi

-kubetest2 kops "${KUBETEST2_ARGS}" \
+kubetest2 kops "${KUBETEST2_ARGS[@]}" \
   --up \
   --kubernetes-version="${K8S_VERSION}" \
   --create-args="${create_args}" \
   --control-plane-size="${KOPS_CONTROL_PLANE_SIZE:-1}"

-kubetest2 kops "${KUBETEST2_ARGS}" \
+kubetest2 kops "${KUBETEST2_ARGS[@]}" \
   --test=kops \
   --kubernetes-version="${K8S_VERSION}" \
   -- \
@@ -121,6 +133,6 @@ kubetest2 kops "${KUBETEST2_ARGS}" \
   --focus-regex="\[Conformance\]"

 if [[ "${DELETE_CLUSTER:-}" == "true" ]]; then
-  kubetest2 kops "${KUBETEST2_ARGS}" --down
+  kubetest2 kops "${KUBETEST2_ARGS[@]}" --down
   DELETE_CLUSTER=false # Don't delete again in trap
 fi
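
The recurring change in this file, turning KUBETEST2_ARGS from a space-joined string into a bash array, is what makes these invocations correct: quoted as "${KUBETEST2_ARGS}", the old string expanded to a single word, so kubetest2 received all the flags glued together as one argument; "${KUBETEST2_ARGS[@]}" expands to one word per element and also preserves values that contain spaces. A quick standalone illustration (not from the PR):

#!/usr/bin/env bash
as_string=" -v=2 --admin-access=1.2.3.4/32"
as_array=(-v=2 "--admin-access=1.2.3.4/32")

printf '<%s>\n' "${as_string}"     # one argument:  < -v=2 --admin-access=1.2.3.4/32>
printf '<%s>\n' "${as_array[@]}"   # two arguments: <-v=2> <--admin-access=1.2.3.4/32>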


@@ -2317,13 +2317,16 @@ func (c *awsCloudImplementation) DefaultInstanceType(cluster *kops.Cluster, ig *
     igZonesSet := sets.NewString(igZones...)

     // TODO: Validate that instance type exists in all AZs, but skip AZs that don't support any VPC stuff
+    var reasons []string
     for _, instanceType := range candidates {
         if strings.HasPrefix(instanceType, "t4g") {
             if imageArch != "arm64" {
+                reasons = append(reasons, fmt.Sprintf("instance type %q does not match image architecture %q", instanceType, imageArch))
                 continue
             }
         } else {
             if imageArch == "arm64" {
+                reasons = append(reasons, fmt.Sprintf("instance type %q does not match image architecture %q", instanceType, imageArch))
                 continue
             }
         }
@@ -2335,10 +2338,16 @@ func (c *awsCloudImplementation) DefaultInstanceType(cluster *kops.Cluster, ig *
         if zones.IsSuperset(igZonesSet) {
             return instanceType, nil
         } else {
+            reasons = append(reasons, fmt.Sprintf("instance type %q is not available in all zones (available in zones %v, need %v)", instanceType, zones, igZones))
             klog.V(2).Infof("can't use instance type %q, available in zones %v but need %v", instanceType, zones, igZones)
         }
     }

+    // Log the detailed reasons why we can't find an instance type
+    klog.Warning("cannot find suitable instance type")
+    for _, reason := range reasons {
+        klog.Warning(" * " + reason)
+    }
+
     return "", fmt.Errorf("could not find a suitable supported instance type for the instance group %q (type %q) in region %q", ig.Name, ig.Spec.Role, c.region)
 }
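
The Go change above is an accumulate-then-report pattern: instead of logging only the most recent rejection at V(2) verbosity, every rejected candidate records a reason, and all reasons are printed as warnings when the search fails. The same shape in shell terms, as a hedged sketch with invented names (only to show the pattern, not kops code):

#!/usr/bin/env bash
set -euo pipefail

required_arch="arm64"                     # illustrative input
candidates=("t3.medium" "m5.large")       # none of these are arm64

reasons=()
for t in "${candidates[@]}"; do
  # Graviton families (t4g, c6g, ...) are arm64; the rest here are x86_64.
  if [[ "${t}" == t4g* || "${t}" == c6g* ]]; then arch="arm64"; else arch="x86_64"; fi
  if [[ "${arch}" != "${required_arch}" ]]; then
    reasons+=("instance type ${t} does not match architecture ${required_arch}")
    continue
  fi
  echo "selected: ${t}"
  exit 0
done

# Search failed: report every collected rejection reason, not just the last one.
echo "cannot find suitable instance type" >&2
for r in "${reasons[@]}"; do
  echo " * ${r}" >&2
done
exit 1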