Merge pull request #10569 from hakman/gp3-cloudformation

Add CF integration test for gp3 volumes
Kubernetes Prow Robot authored on 2021-01-15 02:23:51 -08:00; committed by GitHub
commit b5dc22b800
6 changed files with 1699 additions and 11 deletions


@@ -184,6 +184,7 @@ func TestMinimalCloudformation(t *testing.T) {
// TestMinimalGp3 runs the test on a minimum configuration using gp3 volumes, similar to kops create cluster minimal.example.com --zones us-west-1a
func TestMinimalGp3(t *testing.T) {
newIntegrationTest("minimal.example.com", "minimal-gp3").runTestTerraformAWS(t)
newIntegrationTest("minimal.example.com", "minimal-gp3").runTestCloudformation(t)
}
// TestExistingIAMCloudformation runs the test with existing IAM instance profiles, similar to kops create cluster minimal.example.com --zones us-west-1a
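For context, a hedged sketch of how this new test might be exercised locally, assuming the usual kops layout in which the integration tests live under cmd/kops and the expected-output refresh variable follows kops convention (neither is shown in this diff):

# Assumed invocation; the package path is an assumption, not part of this PR.
go test -v ./cmd/kops/ -run TestMinimalGp3
# Assumed convention for regenerating the expected Terraform/CloudFormation output:
HACK_UPDATE_EXPECTED_IN_PLACE=1 go test -v ./cmd/kops/ -run TestMinimalGp3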

File diff suppressed because it is too large.


@@ -0,0 +1,550 @@
Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/linux/amd64/nodeup
NODEUP_HASH_AMD64=6980fda4fa37bbdc043738cf4ddac6388eb57f561895c69299c1b0ee263d465d
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/linux/arm64/nodeup
NODEUP_HASH_ARM64=dcc7f9f3c180ee76a511627e46da0ac69cdcb518cdf3be348e5ed046d491eb87
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
configOverride: |
disabled_plugins = ["cri"]
logLevel: info
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 19.03.14
encryptionConfig: null
etcdClusters:
events:
version: 3.4.13
main:
version: 3.4.13
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.19.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: true
image: k8s.gcr.io/kube-controller-manager:v1.19.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.19.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.19.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
{}
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- 3f03e5c160a8b658d30b34824a1c00abadbac96e62c4d01bf5c9271a2debc3ab@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/amd64/kubelet
- 79bb0d2f05487ff533999a639c075043c70a0a1ba25c1629eb1eef6ebe3ba70f@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 9f1ec28e357a8f18e9561129239caf9c0807d74756e21cc63637c7fdeaafe847@https://download.docker.com/linux/static/stable/x86_64/docker-19.03.14.tgz
arm64:
- d8fa5a9739ecc387dfcc55afa91ac6f4b0ccd01f1423c423dbd312d787bbb6bf@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/arm64/kubelet
- d4adf1b6b97252025cb2f7febf55daa3f42dc305822e3da133f77fd33071ec2f@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 8350eaa0c0965bb8eb9d45a014f4b6728c985715f56466077dfe6feb271d9518@https://download.docker.com/linux/static/stable/aarch64/docker-19.03.14.tgz
ClusterName: minimal.example.com
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
KubeletConfig:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nodeLabels:
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/minimal.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/minimal.example.com/manifests/etcd/events.yaml
protokubeImage:
amd64:
hash: 7b3c7f6adbda11b1ec740bd6b969c84f249b7eee818af95f2d321963088245a8
name: protokube:1.19.0-alpha.3
sources:
- https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/images/protokube-amd64.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/images-protokube-amd64.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/images/protokube-amd64.tar.gz
arm64:
hash: 69270ca9c1c950be65af40337adfccec0a728930fa3224bb0d2e88f181f39ead
name: protokube:1.19.0-alpha.3
sources:
- https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/images/protokube-arm64.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/images-protokube-arm64.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/images/protokube-arm64.tar.gz
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
Resources.AWSEC2LaunchTemplatenodesminimalexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/linux/amd64/nodeup
NODEUP_HASH_AMD64=6980fda4fa37bbdc043738cf4ddac6388eb57f561895c69299c1b0ee263d465d
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/linux/arm64/nodeup
NODEUP_HASH_ARM64=dcc7f9f3c180ee76a511627e46da0ac69cdcb518cdf3be348e5ed046d491eb87
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
configOverride: |
disabled_plugins = ["cri"]
logLevel: info
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 19.03.14
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.19.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
{}
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- 3f03e5c160a8b658d30b34824a1c00abadbac96e62c4d01bf5c9271a2debc3ab@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/amd64/kubelet
- 79bb0d2f05487ff533999a639c075043c70a0a1ba25c1629eb1eef6ebe3ba70f@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 9f1ec28e357a8f18e9561129239caf9c0807d74756e21cc63637c7fdeaafe847@https://download.docker.com/linux/static/stable/x86_64/docker-19.03.14.tgz
arm64:
- d8fa5a9739ecc387dfcc55afa91ac6f4b0ccd01f1423c423dbd312d787bbb6bf@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/arm64/kubelet
- d4adf1b6b97252025cb2f7febf55daa3f42dc305822e3da133f77fd33071ec2f@https://storage.googleapis.com/kubernetes-release/release/v1.19.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 8350eaa0c0965bb8eb9d45a014f4b6728c985715f56466077dfe6feb271d9518@https://download.docker.com/linux/static/stable/aarch64/docker-19.03.14.tgz
ClusterName: minimal.example.com
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
KubeletConfig:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nodeLabels:
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="


@@ -13,10 +13,14 @@ spec:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
volumeIops: 5000
volumeThroughput: 125
volumeType: gp3
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
volumeType: gp3
name: events
iam: {}
kubelet:
@@ -51,7 +55,7 @@ metadata:
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
machineType: t3.large
rootVolumeType: gp3
maxSize: 2
minSize: 2
@@ -71,7 +75,7 @@ metadata:
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
machineType: m5.large
maxSize: 1
minSize: 1
rootVolumeType: gp3
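The gp3 fields above (volumeType, volumeIops, volumeThroughput, rootVolumeType) map directly onto EBS volume attributes. As an illustration only, an equivalent standalone volume could be created with the AWS CLI, reusing the values of the main etcd member and the 20 GiB size from the Terraform output later in this diff:

# Illustration only; mirrors the gp3 settings of the "main" etcd member, not part of this PR.
aws ec2 create-volume \
  --availability-zone us-test-1a \
  --volume-type gp3 \
  --size 20 \
  --iops 5000 \
  --throughput 125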


@@ -184,6 +184,7 @@ resource "aws_autoscaling_group" "nodes-minimal-example-com" {
resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
iops = 3000
size = 20
tags = {
"KubernetesCluster" = "minimal.example.com"
@@ -192,12 +193,13 @@ resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" {
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
type = "gp2"
type = "gp3"
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
iops = 5000
size = 20
tags = {
"KubernetesCluster" = "minimal.example.com"
@@ -206,7 +208,7 @@ resource "aws_ebs_volume" "us-test-1a-etcd-main-minimal-example-com" {
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
type = "gp2"
type = "gp3"
}
resource "aws_iam_instance_profile" "masters-minimal-example-com" {
@@ -282,15 +284,11 @@ resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
volume_type = "gp3"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-minimal-example-com.id
}
image_id = "ami-12345678"
instance_type = "m3.medium"
instance_type = "m5.large"
key_name = aws_key_pair.kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
@@ -361,7 +359,7 @@ resource "aws_launch_template" "nodes-minimal-example-com" {
name = aws_iam_instance_profile.nodes-minimal-example-com.id
}
image_id = "ami-12345678"
instance_type = "t2.medium"
instance_type = "t3.large"
key_name = aws_key_pair.kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
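Outside the fixtures, a hedged spot check with the AWS CLI could confirm that a real cluster's volumes picked up the gp3 type, IOPS, and throughput (the cluster tag value is reused from the expected output above; not part of this PR):

# Hedged spot check against a live cluster, not part of the test.
aws ec2 describe-volumes \
  --filters Name=tag:KubernetesCluster,Values=minimal.example.com \
  --query 'Volumes[].{Id:VolumeId,Type:VolumeType,Iops:Iops,Throughput:Throughput}' \
  --output table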


@@ -227,12 +227,18 @@ type terraformVolume struct {
}
func (_ *EBSVolume) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *EBSVolume) error {
// TODO: Remove when Terraform gets support for "throughput" with "aws_ebs_volume"
// https://github.com/hashicorp/terraform-provider-aws/pull/16517
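// gp3 volumes have a baseline throughput of 125 MiB/s, so a value at or below that default is
// dropped here instead of being rendered into the Terraform config.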
throughput := e.VolumeThroughput
if fi.Int64Value(e.VolumeThroughput) <= 125 {
throughput = nil
}
tf := &terraformVolume{
AvailabilityZone: e.AvailabilityZone,
Size: e.SizeGB,
Type: e.VolumeType,
Iops: e.VolumeIops,
Throughput: e.VolumeThroughput,
Throughput: throughput,
KmsKeyId: e.KmsKeyId,
Encrypted: e.Encrypted,
Tags: e.Tags,