Merge pull request #11470 from rifelpet/oidc-gossip

Use kubernetes.default for OIDC discovery in gossip clusters
Kubernetes Prow Robot 2021-05-12 16:04:25 -07:00 committed by GitHub
commit 06151727aa
13 changed files with 1522 additions and 0 deletions

View File

@@ -147,6 +147,11 @@ func TestMinimal(t *testing.T) {
newIntegrationTest("minimal.example.com", "minimal").runTestTerraformAWS(t)
}
// TestMinimalGossip runs the test on a minimal gossip configuration
func TestMinimalGossip(t *testing.T) {
newIntegrationTest("minimal.k8s.local", "minimal_gossip").runTestTerraformAWS(t)
}
// TestMinimalGCE runs tests on a minimal GCE configuration
func TestMinimalGCE(t *testing.T) {
newIntegrationTest("minimal-gce.example.com", "minimal_gce").runTestTerraformGCE(t)

View File

@@ -12,6 +12,7 @@ go_library(
deps = [
"//pkg/apis/kops:go_default_library",
"//pkg/apis/kops/model:go_default_library",
"//pkg/dns:go_default_library",
"//pkg/util/stringorslice:go_default_library",
"//pkg/wellknownusers:go_default_library",
"//upup/pkg/fi:go_default_library",

View File

@@ -23,6 +23,7 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/wellknownusers"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/util/pkg/vfs"
@@ -136,6 +137,9 @@ func ServiceAccountIssuer(clusterSpec *kops.ClusterSpec) (string, error) {
return "", fmt.Errorf("locationStore=%q is of unexpected type %T", store, base)
}
} else {
if dns.IsGossipHostname(clusterSpec.MasterInternalName) {
return "https://kubernetes.default", nil
}
if supportsPublicJWKS(clusterSpec) {
return "https://" + clusterSpec.MasterPublicName, nil
}
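Gossip clusters have no routable DNS zone, so neither the public API name nor the internal name can serve the OIDC discovery documents; the new branch above falls back to the in-cluster kubernetes.default service address instead. A minimal sketch of the resulting selection order, assuming dns.IsGossipHostname is essentially a suffix test on .k8s.local and with supportsPublicJWKS reduced to a plain boolean (both simplifications of the real kops code):

package main

import (
	"fmt"
	"strings"
)

// isGossipHostname stands in for kops' dns.IsGossipHostname; the assumption
// here is that gossip clusters are recognized purely by the .k8s.local suffix.
func isGossipHostname(name string) bool {
	return strings.HasSuffix(name, ".k8s.local")
}

// serviceAccountIssuer sketches the fallback order of the hunk above.
func serviceAccountIssuer(internalName, publicName string, publicJWKS bool) string {
	if isGossipHostname(internalName) {
		// no resolvable name exists, so use the in-cluster service
		return "https://kubernetes.default"
	}
	if publicJWKS {
		return "https://" + publicName
	}
	// assumed final fallback: the internal API name
	return "https://" + internalName
}

func main() {
	fmt.Println(serviceAccountIssuer("api.internal.minimal.k8s.local", "api.minimal.k8s.local", false))
	// prints https://kubernetes.default, matching the serviceAccountIssuer
	// value in the expected nodeup user data further down
}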

View File

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}
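The masters and nodes roles share this identical trust policy: it only controls who may assume the role (the EC2 service, i.e. instances launched with the matching instance profile), while the actual permissions come from the role policies that follow. Purely as an illustration of how such a document is consumed (this is not kops' own provisioning path, and the role name is hypothetical), the AWS SDK for Go can create a role from it:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

const trustPolicy = `{
  "Version": "2012-10-17",
  "Statement": [
    {"Effect": "Allow", "Principal": {"Service": "ec2.amazonaws.com"}, "Action": "sts:AssumeRole"}
  ]
}`

func main() {
	svc := iam.New(session.Must(session.NewSession()))
	out, err := svc.CreateRole(&iam.CreateRoleInput{
		RoleName:                 aws.String("example-masters"), // hypothetical name
		AssumeRolePolicyDocument: aws.String(trustPolicy),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Role.Arn))
}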

View File

@@ -0,0 +1,182 @@
{
"Statement": [
{
"Action": [
"ec2:DescribeAccountAttributes",
"ec2:DescribeInstances",
"ec2:DescribeInternetGateways",
"ec2:DescribeRegions",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVolumes"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:CreateVolume",
"ec2:DescribeVolumesModifications",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyVolume"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateRoute",
"ec2:DeleteRoute",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DetachVolume",
"ec2:RevokeSecurityGroupIngress"
],
"Condition": {
"StringEquals": {
"ec2:ResourceTag/KubernetesCluster": "minimal.k8s.local"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "autoscaling:CompleteLifecycleAction",
"Condition": {
"StringEquals": {
"autoscaling:ResourceTag/KubernetesCluster": "minimal.k8s.local"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "autoscaling:DescribeLifecycleHooks",
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "autoscaling:DescribeAutoScalingInstances",
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"ec2:DescribeLaunchTemplateVersions"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup"
],
"Condition": {
"StringEquals": {
"autoscaling:ResourceTag/KubernetesCluster": "minimal.k8s.local"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"autoscaling:CompleteLifecycleAction",
"autoscaling:DescribeAutoScalingInstances"
],
"Condition": {
"StringEquals": {
"autoscaling:ResourceTag/KubernetesCluster": "minimal.k8s.local"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"elasticloadbalancing:AddTags",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ec2:DescribeVpcs",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateTargetGroup",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:ModifyTargetGroup",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"iam:ListServerCertificates",
"iam:GetServerCertificate"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
}
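Note the shape of this masters policy: read-only Describe* actions are allowed against any resource, while the mutating EC2 and autoscaling actions carry a Condition requiring the KubernetesCluster (or autoscaling:ResourceTag) tag to equal minimal.k8s.local, so one cluster's control plane cannot modify resources belonging to another cluster in the same account.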

View File

@@ -0,0 +1,22 @@
{
"Statement": [
{
"Action": [
"ec2:DescribeInstances",
"ec2:DescribeRegions"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "autoscaling:DescribeAutoScalingInstances",
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
}

View File

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@@ -0,0 +1,328 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/nodeup
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/nodeup
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.ipv4.tcp_rmem='4096 12582912 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( "$@" )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo "$1" | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
manageStorageClasses: true
containerRuntime: containerd
containerd:
configOverride: |
version = 2
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".containerd]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
logLevel: info
version: 1.4.4
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
version: 3.4.13
main:
version: 3.4.13
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
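# gossip cluster (.k8s.local): there is no public DNS name to serve OIDC
# discovery from, so the issuer and JWKS URI below use the in-cluster
# kubernetes.default service (see the ServiceAccountIssuer change above)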
serviceAccountIssuer: https://kubernetes.default
serviceAccountJWKSURI: https://kubernetes.default/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: minimal.k8s.local
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
{}
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 96641849cb78a0a119223a427dfdc1ade88412ef791a14193212c8c8e29d447b@https://github.com/containerd/containerd/releases/download/v1.4.4/cri-containerd-cni-1.4.4-linux-amd64.tar.gz
- f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/protokube
- 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/channels
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 998b3b6669335f1a1d8c475fb7c211ed1e41c2ff37275939e2523666ccb7d910@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.6.tgz
- 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/protokube
- 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/channels
ClusterName: minimal.k8s.local
ConfigBase: memfs://clusters.example.com/minimal.k8s.local
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
channels:
- memfs://clusters.example.com/minimal.k8s.local/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/minimal.k8s.local/manifests/etcd/main.yaml
- memfs://clusters.example.com/minimal.k8s.local/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,236 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/nodeup
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/nodeup
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.ipv4.tcp_rmem='4096 12582912 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( "$@" )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo "$1" | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
manageStorageClasses: true
containerRuntime: containerd
containerd:
configOverride: |
version = 2
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".containerd]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
logLevel: info
version: 1.4.4
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
{}
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 96641849cb78a0a119223a427dfdc1ade88412ef791a14193212c8c8e29d447b@https://github.com/containerd/containerd/releases/download/v1.4.4/cri-containerd-cni-1.4.4-linux-amd64.tar.gz
- f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/protokube
- 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/channels
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 998b3b6669335f1a1d8c475fb7c211ed1e41c2ff37275939e2523666ccb7d910@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.6.tgz
- 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/protokube
- 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/channels
ClusterName: minimal.k8s.local
ConfigBase: memfs://clusters.example.com/minimal.k8s.local
InstanceGroupName: nodes
InstanceGroupRole: Node
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
channels:
- memfs://clusters.example.com/minimal.k8s.local/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@@ -0,0 +1,78 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: minimal.k8s.local
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/minimal.k8s.local
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
iam: {}
kubelet:
anonymousAuth: false
kubernetesVersion: v1.21.0
masterInternalName: api.internal.minimal.k8s.local
masterPublicName: api.minimal.k8s.local
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: minimal.k8s.local
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: minimal.k8s.local
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
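Both masterInternalName and masterPublicName in this fixture end in .k8s.local, which is exactly what dns.IsGossipHostname keys on, so running the integration test against this spec is what produces the https://kubernetes.default issuer seen in the expected user data above.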

View File

@@ -0,0 +1,644 @@
locals {
cluster_name = "minimal.k8s.local"
master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-minimal-k8s-local.id]
master_security_group_ids = [aws_security_group.masters-minimal-k8s-local.id]
masters_role_arn = aws_iam_role.masters-minimal-k8s-local.arn
masters_role_name = aws_iam_role.masters-minimal-k8s-local.name
node_autoscaling_group_ids = [aws_autoscaling_group.nodes-minimal-k8s-local.id]
node_security_group_ids = [aws_security_group.nodes-minimal-k8s-local.id]
node_subnet_ids = [aws_subnet.us-test-1a-minimal-k8s-local.id]
nodes_role_arn = aws_iam_role.nodes-minimal-k8s-local.arn
nodes_role_name = aws_iam_role.nodes-minimal-k8s-local.name
region = "us-test-1"
route_table_public_id = aws_route_table.minimal-k8s-local.id
subnet_us-test-1a_id = aws_subnet.us-test-1a-minimal-k8s-local.id
vpc_cidr_block = aws_vpc.minimal-k8s-local.cidr_block
vpc_id = aws_vpc.minimal-k8s-local.id
}
output "cluster_name" {
value = "minimal.k8s.local"
}
output "master_autoscaling_group_ids" {
value = [aws_autoscaling_group.master-us-test-1a-masters-minimal-k8s-local.id]
}
output "master_security_group_ids" {
value = [aws_security_group.masters-minimal-k8s-local.id]
}
output "masters_role_arn" {
value = aws_iam_role.masters-minimal-k8s-local.arn
}
output "masters_role_name" {
value = aws_iam_role.masters-minimal-k8s-local.name
}
output "node_autoscaling_group_ids" {
value = [aws_autoscaling_group.nodes-minimal-k8s-local.id]
}
output "node_security_group_ids" {
value = [aws_security_group.nodes-minimal-k8s-local.id]
}
output "node_subnet_ids" {
value = [aws_subnet.us-test-1a-minimal-k8s-local.id]
}
output "nodes_role_arn" {
value = aws_iam_role.nodes-minimal-k8s-local.arn
}
output "nodes_role_name" {
value = aws_iam_role.nodes-minimal-k8s-local.name
}
output "region" {
value = "us-test-1"
}
output "route_table_public_id" {
value = aws_route_table.minimal-k8s-local.id
}
output "subnet_us-test-1a_id" {
value = aws_subnet.us-test-1a-minimal-k8s-local.id
}
output "vpc_cidr_block" {
value = aws_vpc.minimal-k8s-local.cidr_block
}
output "vpc_id" {
value = aws_vpc.minimal-k8s-local.id
}
provider "aws" {
region = "us-test-1"
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-k8s-local" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.master-us-test-1a-masters-minimal-k8s-local.id
version = aws_launch_template.master-us-test-1a-masters-minimal-k8s-local.latest_version
}
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1a.masters.minimal.k8s.local"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "minimal.k8s.local"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1a.masters.minimal.k8s.local"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
value = "master"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "kubernetes.io/cluster/minimal.k8s.local"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-k8s-local.id]
}
resource "aws_autoscaling_group" "nodes-minimal-k8s-local" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.nodes-minimal-k8s-local.id
version = aws_launch_template.nodes-minimal-k8s-local.latest_version
}
max_size = 2
metrics_granularity = "1Minute"
min_size = 2
name = "nodes.minimal.k8s.local"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "minimal.k8s.local"
}
tag {
key = "Name"
propagate_at_launch = true
value = "nodes.minimal.k8s.local"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
value = "node"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/node"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes"
}
tag {
key = "kubernetes.io/cluster/minimal.k8s.local"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-k8s-local.id]
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-k8s-local" {
availability_zone = "us-test-1a"
encrypted = false
iops = 3000
size = 20
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "us-test-1a.etcd-events.minimal.k8s.local"
"k8s.io/etcd/events" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
throughput = 125
type = "gp3"
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-minimal-k8s-local" {
availability_zone = "us-test-1a"
encrypted = false
iops = 3000
size = 20
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "us-test-1a.etcd-main.minimal.k8s.local"
"k8s.io/etcd/main" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
throughput = 125
type = "gp3"
}
resource "aws_iam_instance_profile" "masters-minimal-k8s-local" {
name = "masters.minimal.k8s.local"
role = aws_iam_role.masters-minimal-k8s-local.name
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "masters.minimal.k8s.local"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
}
resource "aws_iam_instance_profile" "nodes-minimal-k8s-local" {
name = "nodes.minimal.k8s.local"
role = aws_iam_role.nodes-minimal-k8s-local.name
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "nodes.minimal.k8s.local"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
}
resource "aws_iam_role" "masters-minimal-k8s-local" {
assume_role_policy = file("${path.module}/data/aws_iam_role_masters.minimal.k8s.local_policy")
name = "masters.minimal.k8s.local"
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "masters.minimal.k8s.local"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
}
resource "aws_iam_role" "nodes-minimal-k8s-local" {
assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.minimal.k8s.local_policy")
name = "nodes.minimal.k8s.local"
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "nodes.minimal.k8s.local"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
}
resource "aws_iam_role_policy" "masters-minimal-k8s-local" {
name = "masters.minimal.k8s.local"
policy = file("${path.module}/data/aws_iam_role_policy_masters.minimal.k8s.local_policy")
role = aws_iam_role.masters-minimal-k8s-local.name
}
resource "aws_iam_role_policy" "nodes-minimal-k8s-local" {
name = "nodes.minimal.k8s.local"
policy = file("${path.module}/data/aws_iam_role_policy_nodes.minimal.k8s.local_policy")
role = aws_iam_role.nodes-minimal-k8s-local.name
}
resource "aws_internet_gateway" "minimal-k8s-local" {
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "minimal.k8s.local"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
vpc_id = aws_vpc.minimal-k8s-local.id
}
resource "aws_key_pair" "kubernetes-minimal-k8s-local-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.minimal.k8s.local-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = file("${path.module}/data/aws_key_pair_kubernetes.minimal.k8s.local-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "minimal.k8s.local"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
}
resource "aws_launch_template" "master-us-test-1a-masters-minimal-k8s-local" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
encrypted = true
iops = 3000
throughput = 125
volume_size = 64
volume_type = "gp3"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-minimal-k8s-local.id
}
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-minimal-k8s-local-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_put_response_hop_limit = 1
http_tokens = "optional"
}
name = "master-us-test-1a.masters.minimal.k8s.local"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.masters-minimal-k8s-local.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "master-us-test-1a.masters.minimal.k8s.local"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "master-us-test-1a.masters.minimal.k8s.local"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
}
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "master-us-test-1a.masters.minimal.k8s.local"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.minimal.k8s.local_user_data")
}
resource "aws_launch_template" "nodes-minimal-k8s-local" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
encrypted = true
iops = 3000
throughput = 125
volume_size = 128
volume_type = "gp3"
}
}
iam_instance_profile {
name = aws_iam_instance_profile.nodes-minimal-k8s-local.id
}
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = aws_key_pair.kubernetes-minimal-k8s-local-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_put_response_hop_limit = 1
http_tokens = "optional"
}
name = "nodes.minimal.k8s.local"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.nodes-minimal-k8s-local.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "nodes.minimal.k8s.local"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "nodes.minimal.k8s.local"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
}
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "nodes.minimal.k8s.local"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
user_data = filebase64("${path.module}/data/aws_launch_template_nodes.minimal.k8s.local_user_data")
}
resource "aws_route" "route-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.minimal-k8s-local.id
route_table_id = aws_route_table.minimal-k8s-local.id
}
resource "aws_route_table" "minimal-k8s-local" {
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "minimal.k8s.local"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
"kubernetes.io/kops/role" = "public"
}
vpc_id = aws_vpc.minimal-k8s-local.id
}
resource "aws_route_table_association" "us-test-1a-minimal-k8s-local" {
route_table_id = aws_route_table.minimal-k8s-local.id
subnet_id = aws_subnet.us-test-1a-minimal-k8s-local.id
}
resource "aws_security_group" "masters-minimal-k8s-local" {
description = "Security group for masters"
name = "masters.minimal.k8s.local"
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "masters.minimal.k8s.local"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
vpc_id = aws_vpc.minimal-k8s-local.id
}
resource "aws_security_group" "nodes-minimal-k8s-local" {
description = "Security group for nodes"
name = "nodes.minimal.k8s.local"
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "nodes.minimal.k8s.local"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
vpc_id = aws_vpc.minimal-k8s-local.id
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-minimal-k8s-local" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-k8s-local.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-minimal-k8s-local" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-minimal-k8s-local.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-minimal-k8s-local" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-k8s-local.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "from-masters-minimal-k8s-local-egress-all-0to0-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-k8s-local.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-masters-minimal-k8s-local-ingress-all-0to0-masters-minimal-k8s-local" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-k8s-local.id
source_security_group_id = aws_security_group.masters-minimal-k8s-local.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-masters-minimal-k8s-local-ingress-all-0to0-nodes-minimal-k8s-local" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-k8s-local.id
source_security_group_id = aws_security_group.masters-minimal-k8s-local.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-k8s-local-egress-all-0to0-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-k8s-local.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-nodes-minimal-k8s-local-ingress-all-0to0-nodes-minimal-k8s-local" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-k8s-local.id
source_security_group_id = aws_security_group.nodes-minimal-k8s-local.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-k8s-local-ingress-tcp-1to2379-masters-minimal-k8s-local" {
from_port = 1
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-k8s-local.id
source_security_group_id = aws_security_group.nodes-minimal-k8s-local.id
to_port = 2379
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-k8s-local-ingress-tcp-2382to4000-masters-minimal-k8s-local" {
from_port = 2382
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-k8s-local.id
source_security_group_id = aws_security_group.nodes-minimal-k8s-local.id
to_port = 4000
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-k8s-local-ingress-tcp-4003to65535-masters-minimal-k8s-local" {
from_port = 4003
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-k8s-local.id
source_security_group_id = aws_security_group.nodes-minimal-k8s-local.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-k8s-local-ingress-udp-1to65535-masters-minimal-k8s-local" {
from_port = 1
protocol = "udp"
security_group_id = aws_security_group.masters-minimal-k8s-local.id
source_security_group_id = aws_security_group.nodes-minimal-k8s-local.id
to_port = 65535
type = "ingress"
}
resource "aws_subnet" "us-test-1a-minimal-k8s-local" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "us-test-1a.minimal.k8s.local"
"SubnetType" = "Public"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
"kubernetes.io/role/elb" = "1"
}
vpc_id = aws_vpc.minimal-k8s-local.id
}
resource "aws_vpc" "minimal-k8s-local" {
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "minimal.k8s.local"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
}
resource "aws_vpc_dhcp_options" "minimal-k8s-local" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"KubernetesCluster" = "minimal.k8s.local"
"Name" = "minimal.k8s.local"
"kubernetes.io/cluster/minimal.k8s.local" = "owned"
}
}
resource "aws_vpc_dhcp_options_association" "minimal-k8s-local" {
dhcp_options_id = aws_vpc_dhcp_options.minimal-k8s-local.id
vpc_id = aws_vpc.minimal-k8s-local.id
}
terraform {
required_version = ">= 0.12.26"
required_providers {
aws = {
"source" = "hashicorp/aws"
"version" = ">= 3.34.0"
}
}
}
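The generated module reads its IAM policy documents, SSH public key, and instance user data from the adjacent data/ directory via file() and filebase64(), so the usual terraform init / terraform plan / terraform apply cycle must run from a checkout that still contains those files.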