Remove externaldns feature flag

Ole Markus With 2021-06-24 21:22:47 +02:00
parent 41c3ff2aac
commit 0152c23c1e
66 changed files with 10306 additions and 3 deletions

View File

@@ -472,6 +472,16 @@ func TestCCM(t *testing.T) {
runTestTerraformAWS(t)
}
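// TestExternalDNS runs the test on a minimal configuration with the external-dns addon enabled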
func TestExternalDNS(t *testing.T) {
newIntegrationTest("minimal.example.com", "external_dns").
runTestTerraformAWS(t)
}
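// TestExternalDNSIRSA runs the test on a minimal configuration with the external-dns addon enabled and IRSA (IAM Roles for Service Accounts)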
func TestExternalDNSIRSA(t *testing.T) {
newIntegrationTest("minimal.example.com", "external_dns_irsa").
runTestTerraformAWS(t)
}
// TestSharedSubnet runs the test on a configuration with a shared subnet (and VPC)
func TestSharedSubnet(t *testing.T) {
newIntegrationTest("sharedsubnet.example.com", "shared_subnet").

View File

@@ -52,8 +52,6 @@ var (
CacheNodeidentityInfo = new("CacheNodeidentityInfo", Bool(false))
// DNSPreCreate controls whether we pre-create DNS records.
DNSPreCreate = new("DNSPreCreate", Bool(true))
// EnableExternalDNS enables external DNS
EnableExternalDNS = new("EnableExternalDNS", Bool(false))
// EnableSeparateConfigBase allows a config-base that is different from the state store
EnableSeparateConfigBase = new("EnableSeparateConfigBase", Bool(false))
// ExperimentalClusterDNS allows for setting the kubelet dns flag to experimental values.

View File

@@ -0,0 +1,12 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["iam.go"],
importpath = "k8s.io/kops/pkg/model/components/addonmanifests/externaldns",
visibility = ["//visibility:public"],
deps = [
"//pkg/model/iam:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)

View File

@@ -0,0 +1,48 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package externaldns
import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/kops/pkg/model/iam"
)
// ServiceAccount represents the service-account used by external-dns.
// It implements iam.Subject to get AWS IAM permissions.
type ServiceAccount struct {
}
var _ iam.Subject = &ServiceAccount{}
// BuildAWSPolicy generates a custom policy for a ServiceAccount IAM role.
func (r *ServiceAccount) BuildAWSPolicy(b *iam.PolicyBuilder) (*iam.Policy, error) {
p := &iam.Policy{
Version: iam.PolicyDefaultVersion,
}
iam.AddDNSControllerPermissions(b, p)
return p, nil
}
// ServiceAccount returns the kubernetes service account used.
func (r *ServiceAccount) ServiceAccount() (types.NamespacedName, bool) {
return types.NamespacedName{
Namespace: "kube-system",
Name: "external-dns",
}, true
}
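
For context, a sketch of how this Subject might be consumed. The helper below is hypothetical and not part of this commit; it only uses the two methods defined above and the *iam.PolicyBuilder type they already reference (the actual lookup wiring appears in the next hunk).

// buildExternalDNSPolicy is a hypothetical illustration: resolve the
// external-dns subject and render its AWS policy. It assumes the caller
// has already constructed an *iam.PolicyBuilder.
func buildExternalDNSPolicy(b *iam.PolicyBuilder) (*iam.Policy, error) {
	sa := &ServiceAccount{}
	if name, ok := sa.ServiceAccount(); ok {
		_ = name // kube-system/external-dns, per the method above
	}
	return sa.BuildAWSPolicy(b)
}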

View File

@@ -125,6 +125,8 @@ func getWellknownServiceAccount(name string) iam.Subject {
return &nodeterminationhandler.ServiceAccount{}
case "aws-cloud-controller-manager":
return &awscloudcontrollermanager.ServiceAccount{}
case "external-dns":
return &externaldns.ServiceAccount{}
default:
return nil
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,583 @@
Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.6
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
version: 3.4.13
main:
version: 3.4.13
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 6ae4763598c9583f8b50605f19d6c7e9ef93c216706465e73dfc84ee6b63a238@https://github.com/containerd/containerd/releases/download/v1.4.6/cri-containerd-cni-1.4.6-linux-amd64.tar.gz
- f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64
- 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- be8c9a5a06ebec8fb1d36e867cd00fb5777746a9812a0cae2966778ff899c525@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.7.tgz
- 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64
- 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64
AuxConfigHash: /O5IS/dGo83lv2DbWn4k91OYfuOqtO79vjf5pD1DQlI=
CAs:
ca: |
-----BEGIN CERTIFICATE-----
MIIBaDCCARKgAwIBAgIMFoq6Pex4lTCM8fOIMA0GCSqGSIb3DQEBCwUAMBUxEzAR
BgNVBAMTCmt1YmVybmV0ZXMwHhcNMjEwNjE5MjI0MzEwWhcNMzEwNjE5MjI0MzEw
WjAVMRMwEQYDVQQDEwprdWJlcm5ldGVzMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJB
ANiW3hfHTcKnxCig+uWhpVbOfH1pANKmXVSysPKgE80QSU4tZ6m49pAEeIMsvwvD
MaLsb2v6JvXe0qvCmueU+/sCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
EwEB/wQFMAMBAf8wHQYDVR0OBBYEFCOW3hR7ngBsk9aUOlEznWzH494EMA0GCSqG
SIb3DQEBCwUAA0EAVnZzkiku07kQFGAEXzWI6aZnAbzSoClYskEzCBMrOmdadjVp
VWcz76FwFlyd5jhzOJ49eMcVusSotKv2ZGimcA==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBaDCCARKgAwIBAgIMFoq6PeyECsgUTfc2MA0GCSqGSIb3DQEBCwUAMBUxEzAR
BgNVBAMTCmt1YmVybmV0ZXMwHhcNMjEwNjE5MjI0MzEwWhcNMzEwNjE5MjI0MzEw
WjAVMRMwEQYDVQQDEwprdWJlcm5ldGVzMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJB
AKOE64nZbH+GM91AIrqf7HEk4hvzqsZFFtxc+8xir1XC3mI/RhCCrs6AdVRZNZ26
A6uHArhi33c2kHQkCjyLA7sCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
EwEB/wQFMAMBAf8wHQYDVR0OBBYEFIT28RJlG8FTgmvn2YMa3hYX+u1BMA0GCSqG
SIb3DQEBCwUAA0EAKuaE5wKMP26AyfxkWu83iHoTPFtdjabXF0JcyPy0ijQZxfJq
9xc2CkttvgaDtT4H+E/ryQ3iq6kSfEYYPi8c0w==
-----END CERTIFICATE-----
CloudProvider: aws
ClusterName: minimal.example.com
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
KeypairIDs:
ca: "6976381481633145814258938760"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
UpdatePolicy: automatic
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig: |
version = 2
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".containerd]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
etcdManifests:
- memfs://clusters.example.com/minimal.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/minimal.example.com/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
Resources.AWSEC2LaunchTemplatenodesminimalexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 6ae4763598c9583f8b50605f19d6c7e9ef93c216706465e73dfc84ee6b63a238@https://github.com/containerd/containerd/releases/download/v1.4.6/cri-containerd-cni-1.4.6-linux-amd64.tar.gz
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- be8c9a5a06ebec8fb1d36e867cd00fb5777746a9812a0cae2966778ff899c525@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.7.tgz
AuxConfigHash: /O5IS/dGo83lv2DbWn4k91OYfuOqtO79vjf5pD1DQlI=
CAs:
ca: |
-----BEGIN CERTIFICATE-----
MIIBaDCCARKgAwIBAgIMFoq6Pex4lTCM8fOIMA0GCSqGSIb3DQEBCwUAMBUxEzAR
BgNVBAMTCmt1YmVybmV0ZXMwHhcNMjEwNjE5MjI0MzEwWhcNMzEwNjE5MjI0MzEw
WjAVMRMwEQYDVQQDEwprdWJlcm5ldGVzMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJB
ANiW3hfHTcKnxCig+uWhpVbOfH1pANKmXVSysPKgE80QSU4tZ6m49pAEeIMsvwvD
MaLsb2v6JvXe0qvCmueU+/sCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
EwEB/wQFMAMBAf8wHQYDVR0OBBYEFCOW3hR7ngBsk9aUOlEznWzH494EMA0GCSqG
SIb3DQEBCwUAA0EAVnZzkiku07kQFGAEXzWI6aZnAbzSoClYskEzCBMrOmdadjVp
VWcz76FwFlyd5jhzOJ49eMcVusSotKv2ZGimcA==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBaDCCARKgAwIBAgIMFoq6PeyECsgUTfc2MA0GCSqGSIb3DQEBCwUAMBUxEzAR
BgNVBAMTCmt1YmVybmV0ZXMwHhcNMjEwNjE5MjI0MzEwWhcNMzEwNjE5MjI0MzEw
WjAVMRMwEQYDVQQDEwprdWJlcm5ldGVzMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJB
AKOE64nZbH+GM91AIrqf7HEk4hvzqsZFFtxc+8xir1XC3mI/RhCCrs6AdVRZNZ26
A6uHArhi33c2kHQkCjyLA7sCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
EwEB/wQFMAMBAf8wHQYDVR0OBBYEFIT28RJlG8FTgmvn2YMa3hYX+u1BMA0GCSqG
SIb3DQEBCwUAA0EAKuaE5wKMP26AyfxkWu83iHoTPFtdjabXF0JcyPy0ijQZxfJq
9xc2CkttvgaDtT4H+E/ryQ3iq6kSfEYYPi8c0w==
-----END CERTIFICATE-----
CloudProvider: aws
ClusterName: minimal.example.com
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
KeypairIDs: {}
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
UpdatePolicy: automatic
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig: |
version = 2
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".containerd]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -0,0 +1,234 @@
{
"Statement": [
{
"Action": "ec2:AttachVolume",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com",
"aws:ResourceTag/k8s.io/role/master": "1"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/main/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/events/*"
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::placeholder-write-bucket"
]
},
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Effect": "Allow",
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"ec2:CreateAction": [
"CreateVolume",
"CreateSnapshot"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:volume/*",
"arn:aws:ec2:*:*:snapshot/*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"ec2:CreateAction": [
"CreateVolume",
"CreateSnapshot"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:volume/*",
"arn:aws:ec2:*:*:snapshot/*"
]
},
{
"Action": "ec2:DeleteTags",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:volume/*",
"arn:aws:ec2:*:*:snapshot/*"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DescribeAccountAttributes",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVolumes",
"ec2:DescribeVolumesModifications",
"ec2:DescribeVpcs",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:DescribeKey",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:DeleteRoute",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyVolume",
"ec2:RevokeSecurityGroupIngress",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:ModifyTargetGroup",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
],
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:CreateSecurityGroup",
"ec2:CreateVolume",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:CreateTargetGroup"
],
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}

View File

@@ -0,0 +1,40 @@
{
"Statement": [
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/addons/*",
"arn:aws:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/cluster-completed.spec",
"arn:aws:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/igconfig/node/*",
"arn:aws:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/secrets/dockerconfig"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingInstances",
"ec2:DescribeInstances",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}

View File

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@@ -0,0 +1,252 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.9
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
version: 3.4.13
main:
version: 3.4.13
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
NodeupConfigHash: 5StgKu5eTk/zFpcb1W8qO9BWna8InTaJCmpUJg5Cdfk=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,170 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.9
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
NodeupConfigHash: mvvoRjabjJ8D+FkowMWTvnv0Lwjpp4pIZonxFm0ccSQ=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,187 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: minimal.example.com
spec:
api:
dns: {}
authorization:
alwaysAllow: {}
channel: stable
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
cloudProvider: aws
clusterDNSDomain: cluster.local
configBase: memfs://clusters.example.com/minimal.example.com
configStore: memfs://clusters.example.com/minimal.example.com
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.9
dnsZone: Z1AFAKE1ZON3YO
docker:
skipInstall: true
etcdClusters:
- backups:
backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd/main
enableEtcdTLS: true
enableTLSAuth: true
etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
provider: Manager
version: 3.4.13
- backups:
backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd/events
enableEtcdTLS: true
enableTLSAuth: true
etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
provider: Manager
version: 3.4.13
externalDns:
provider: dns-controller
iam:
legacy: false
keyStore: memfs://clusters.example.com/minimal.example.com/pki
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeDNS:
cacheMaxConcurrent: 150
cacheMaxSize: 1000
cpuRequest: 100m
domain: cluster.local
memoryLimit: 170Mi
memoryRequest: 70Mi
nodeLocalDNS:
cpuRequest: 25m
enabled: false
memoryRequest: 5Mi
provider: CoreDNS
replicas: 2
serverIP: 100.64.0.10
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
kubernetesApiAccess:
- 0.0.0.0/0
kubernetesVersion: 1.21.0
masterInternalName: api.internal.minimal.example.com
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
secretStore: memfs://clusters.example.com/minimal.example.com/secrets
serviceClusterIPRange: 100.64.0.0/13
sshAccess:
- 0.0.0.0/0
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
topology:
dns:
type: Public
masters: public
nodes: public

View File

@@ -0,0 +1,4 @@
{
"memberCount": 1,
"etcdVersion": "3.4.13"
}

View File

@@ -0,0 +1,4 @@
{
"memberCount": 1,
"etcdVersion": "3.4.13"
}

View File

@@ -0,0 +1,63 @@
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: etcd-manager-events
name: etcd-manager-events
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd/events
--client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
--dns-suffix=.internal.minimal.example.com --grpc-port=3997 --peer-urls=https://__name__:2381
--quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events
--volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
image: k8s.gcr.io/etcdadm/etcd-manager:3.0.20210707
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-events
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd-events.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@@ -0,0 +1,63 @@
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: etcd-manager-main
name: etcd-manager-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd/main
--client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
--dns-suffix=.internal.minimal.example.com --grpc-port=3996 --peer-urls=https://__name__:2380
--quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main
--volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
image: k8s.gcr.io/etcdadm/etcd-manager:3.0.20210707
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-main
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@@ -0,0 +1,32 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
spec:
containers:
- args:
- --ca-cert=/secrets/ca.crt
- --client-cert=/secrets/client.crt
- --client-key=/secrets/client.key
command:
- /kube-apiserver-healthcheck
image: k8s.gcr.io/kops/kube-apiserver-healthcheck:1.22.0-alpha.2
livenessProbe:
httpGet:
host: 127.0.0.1
path: /.kube-apiserver-healthcheck/healthz
port: 3990
initialDelaySeconds: 5
timeoutSeconds: 5
name: healthcheck
resources: {}
volumeMounts:
- mountPath: /secrets
name: healthcheck-secrets
readOnly: true
volumes:
- hostPath:
path: /etc/kubernetes/kube-apiserver-healthcheck/secrets
type: Directory
name: healthcheck-secrets
status: {}

View File

@@ -0,0 +1,47 @@
kind: Addons
metadata:
creationTimestamp: null
name: bootstrap
spec:
addons:
- id: k8s-1.16
manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml
manifestHash: ad4c88257aa1b1d05c38bfee630eb6cad07b3e258932dafc37b76d7328f3a8ff
name: kops-controller.addons.k8s.io
needsRollingUpdate: control-plane
selector:
k8s-addon: kops-controller.addons.k8s.io
- manifest: core.addons.k8s.io/v1.4.0.yaml
manifestHash: 90f8d3bd227dc1f4fdd46b561ac6bfc9355392477a7eedd6c3e2318614c375d8
name: core.addons.k8s.io
selector:
k8s-addon: core.addons.k8s.io
- id: k8s-1.12
manifest: coredns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 3bf8c29c45f0f7dbbb1671b577f302a19418b55d214f6847ff586f1ee9d1ba71
name: coredns.addons.k8s.io
selector:
k8s-addon: coredns.addons.k8s.io
- id: k8s-1.9
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81
name: kubelet-api.rbac.addons.k8s.io
selector:
k8s-addon: kubelet-api.rbac.addons.k8s.io
- manifest: limit-range.addons.k8s.io/v1.5.0.yaml
manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2
name: limit-range.addons.k8s.io
selector:
k8s-addon: limit-range.addons.k8s.io
- id: k8s-1.12
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 7557767927ab9cbbc5a0bc200c773efd0b6cd31084d2e8dff905e68d5b74e799
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
- id: v1.15.0
manifest: storage-aws.addons.k8s.io/v1.15.0.yaml
manifestHash: 065ae832ddac8d0931e9992d6a76f43a33a36975a38003b34f4c5d86a7d42780
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io

View File

@@ -0,0 +1,9 @@
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: core.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: core.addons.k8s.io
name: kube-system

View File

@@ -0,0 +1,383 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/cluster-service: "true"
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
data:
Corefile: |-
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local. in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: coredns
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
k8s-app: kube-dns
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- kube-dns
topologyKey: kubernetes.io/hostname
weight: 100
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: k8s.gcr.io/coredns/coredns:v1.8.4
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- configMap:
items:
- key: Corefile
path: Corefile
name: coredns
name: config-volume
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: kube-dns
namespace: kube-system
resourceVersion: "0"
spec:
clusterIP: 100.64.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
selector:
k8s-app: kube-dns
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: kube-dns
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: kube-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- replicationcontrollers/scale
verbs:
- get
- update
- apiGroups:
- extensions
- apps
resources:
- deployments/scale
- replicasets/scale
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: coredns-autoscaler
kubernetes.io/cluster-service: "true"
name: coredns-autoscaler
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: coredns-autoscaler
spec:
containers:
- command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
image: k8s.gcr.io/cpa/cluster-proportional-autoscaler:1.8.4
name: autoscaler
resources:
requests:
cpu: 20m
memory: 10Mi
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns-autoscaler
tolerations:
- key: CriticalAddonsOnly
operator: Exists
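For reference, the "linear" mode configured in the autoscaler args above sizes CoreDNS from cluster capacity: replicas = max(ceil(cores/coresPerReplica), ceil(nodes/nodesPerReplica)), raised to 2 on multi-node clusters when preventSinglePointFailure is true. A minimal Go sketch of that published formula (illustrative only, not the autoscaler's actual source):

package main

import (
	"fmt"
	"math"
)

// linearReplicas mirrors the documented "linear" scaling formula:
// the larger of the core-driven and node-driven targets, with a
// floor of 2 on multi-node clusters when preventSPOF is set.
func linearReplicas(cores, nodes int, coresPerReplica, nodesPerReplica float64, preventSPOF bool) int {
	replicas := int(math.Max(
		math.Ceil(float64(cores)/coresPerReplica),
		math.Ceil(float64(nodes)/nodesPerReplica),
	))
	if preventSPOF && nodes > 1 && replicas < 2 {
		replicas = 2
	}
	return replicas
}

func main() {
	// With coresPerReplica=256 and nodesPerReplica=16 as above, a
	// 3-node, 12-core cluster still gets 2 replicas for redundancy.
	fmt.Println(linearReplicas(12, 3, 256, 16, true))
}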


@ -0,0 +1,121 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.22.0-alpha.2
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
strategy:
type: Recreate
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.22.0-alpha.2
spec:
containers:
- command:
- /dns-controller
- --watch-ingress=false
- --dns=aws-route53
- --zone=*/Z1AFAKE1ZON3YO
- --zone=*/*
- -v=2
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: k8s.gcr.io/kops/dns-controller:1.22.0-alpha.2
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
dnsPolicy: Default
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
priorityClassName: system-cluster-critical
serviceAccount: dns-controller
tolerations:
- operator: Exists
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller
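The --zone flags in the dns-controller args above use a name/id spec syntax in which "*" wildcards either side: */Z1AFAKE1ZON3YO permits any zone with that hosted-zone ID, and */* permits all zones. A rough Go sketch of the matching rule (the helper is hypothetical, not dns-controller's actual code):

package main

import (
	"fmt"
	"strings"
)

// zonePermitted reports whether a zone, identified by name and
// hosted-zone ID, is allowed by any "name/id" spec; "*" matches
// either component.
func zonePermitted(name, id string, specs []string) bool {
	name = strings.TrimSuffix(name, ".")
	for _, spec := range specs {
		parts := strings.SplitN(spec, "/", 2)
		if len(parts) != 2 {
			continue // bare name specs would need separate handling
		}
		if (parts[0] == "*" || parts[0] == name) && (parts[1] == "*" || parts[1] == id) {
			return true
		}
	}
	return false
}

func main() {
	specs := []string{"*/Z1AFAKE1ZON3YO"}
	fmt.Println(zonePermitted("minimal.example.com", "Z1AFAKE1ZON3YO", specs)) // true
}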


@ -0,0 +1,204 @@
apiVersion: v1
data:
config.yaml: |
{"cloud":"aws","configBase":"memfs://clusters.example.com/minimal.example.com","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.minimal.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.22.0-alpha.2
name: kops-controller
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kops-controller
template:
metadata:
annotations:
dns.alpha.kubernetes.io/internal: kops-controller.internal.minimal.example.com
labels:
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.22.0-alpha.2
spec:
containers:
- command:
- /kops-controller
- --v=2
- --conf=/etc/kubernetes/kops-controller/config/config.yaml
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: k8s.gcr.io/kops/kops-controller:1.22.0-alpha.2
name: kops-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
volumeMounts:
- mountPath: /etc/kubernetes/kops-controller/config/
name: kops-controller-config
- mountPath: /etc/kubernetes/kops-controller/pki/
name: kops-controller-pki
dnsPolicy: Default
hostNetwork: true
nodeSelector:
kops.k8s.io/kops-controller-pki: ""
node-role.kubernetes.io/master: ""
priorityClassName: system-node-critical
serviceAccount: kops-controller
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
volumes:
- configMap:
name: kops-controller
name: kops-controller-config
- hostPath:
path: /etc/kubernetes/kops-controller/
type: Directory
name: kops-controller-pki
updateStrategy:
type: OnDelete
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
- create
- apiGroups:
- ""
- coordination.k8s.io
resourceNames:
- kops-controller-leader
resources:
- configmaps
- leases
verbs:
- get
- list
- watch
- patch
- update
- delete
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller


@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kubelet-api.rbac.addons.k8s.io
name: kops:system:kubelet-api-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet-api


@ -0,0 +1,15 @@
apiVersion: v1
kind: LimitRange
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: limit-range.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: limit-range.addons.k8s.io
name: limits
namespace: default
spec:
limits:
- defaultRequest:
cpu: 100m
type: Container


@ -0,0 +1,98 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: default
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: gp2
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-ssd-1-17
parameters:
encrypted: "true"
type: gp2
provisioner: kubernetes.io/aws-ebs
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:aws-cloud-provider
subjects:
- kind: ServiceAccount
name: aws-cloud-provider
namespace: kube-system


@ -0,0 +1,267 @@
APIServerConfig:
KubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm
XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ==
-----END RSA PUBLIC KEY-----
-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF
Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ==
-----END RSA PUBLIC KEY-----
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 9911479f86012d6eab7e0f532da8f807a8b0f555ee09ef89367d8c31243073bb@https://github.com/containerd/containerd/releases/download/v1.4.9/cri-containerd-cni-1.4.9-linux-amd64.tar.gz
- f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64
- 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 4eb9d5e2adf718cd7ee59f6951715f3113c9c4ee49c75c9efb9747f2c3457b2b@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.8.tgz
- 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64
- 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64
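Each asset entry above packs a SHA-256 hash and one or more comma-separated download URLs (primary, then mirror) into a single hash@url string. A small sketch of how such an entry could be decomposed (illustrative, not kops' internal parser; the example URLs are made up):

package main

import (
	"fmt"
	"strings"
)

// parseAsset splits a "sha256@url1,url2" entry into its expected
// hash and the ordered list of candidate download URLs.
func parseAsset(entry string) (hash string, urls []string, err error) {
	parts := strings.SplitN(entry, "@", 2)
	if len(parts) != 2 {
		return "", nil, fmt.Errorf("malformed asset entry %q", entry)
	}
	return parts[0], strings.Split(parts[1], ","), nil
}

func main() {
	hash, urls, err := parseAsset("abc123@https://primary.example/nodeup,https://mirror.example/nodeup")
	if err != nil {
		panic(err)
	}
	fmt.Println(hash, urls[0], len(urls)) // hash, primary URL, URL count
}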
CAs:
apiserver-aggregator-ca: |
-----BEGIN CERTIFICATE-----
MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX
DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX
WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk
CzMeMdr4
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX
DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN
QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW
HLtkTXH8
-----END CERTIFICATE-----
etcd-clients-ca: |
-----BEGIN CERTIFICATE-----
MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx
NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep
uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s
x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY
qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx
NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep
uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s
x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E
YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co=
-----END CERTIFICATE-----
etcd-manager-ca-events: |
-----BEGIN CERTIFICATE-----
MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN
MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H
g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6
CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O
sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs
GS/VUw==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN
MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL
DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW
LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE
hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV
cPfVNg==
-----END CERTIFICATE-----
etcd-manager-ca-main: |
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx
MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm
ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx
GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu
Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP
vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP
DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9
t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd
-----END CERTIFICATE-----
etcd-peers-ca-events: |
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx
MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY
xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O
Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB
DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW
03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh
cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI
J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb
-----END CERTIFICATE-----
etcd-peers-ca-main: |
-----BEGIN CERTIFICATE-----
MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3
MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG
SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA
aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf
OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt
uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3
MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG
SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt
naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC
qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K
G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo=
-----END CERTIFICATE-----
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
ClusterName: minimal.example.com
Hooks:
- null
- null
KeypairIDs:
apiserver-aggregator-ca: "6980187172486667078076483355"
etcd-clients-ca: "6979622252718071085282986282"
etcd-manager-ca-events: "6982279354000777253151890266"
etcd-manager-ca-main: "6982279354000936168671127624"
etcd-peers-ca-events: "6982279353999767935825892873"
etcd-peers-ca-main: "6982279353998887468930183660"
kubernetes-ca: "6982820025135291416230495506"
service-account: "2"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
UpdatePolicy: automatic
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.4.9
etcdManifests:
- memfs://clusters.example.com/minimal.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/minimal.example.com/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml
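The evictionHard field in the nodeup config above is a comma-separated list of signal<threshold pairs (for example memory.available<100Mi). A quick Go sketch of splitting it into a map, assuming "<" always separates signal and quantity (illustrative, not the kubelet's parser):

package main

import (
	"fmt"
	"strings"
)

// parseEvictionHard splits "memory.available<100Mi,nodefs.available<10%"
// into signal -> threshold pairs.
func parseEvictionHard(s string) (map[string]string, error) {
	out := map[string]string{}
	for _, pair := range strings.Split(s, ",") {
		parts := strings.SplitN(pair, "<", 2)
		if len(parts) != 2 {
			return nil, fmt.Errorf("malformed threshold %q", pair)
		}
		out[parts[0]] = parts[1]
	}
	return out, nil
}

func main() {
	m, err := parseEvictionHard("memory.available<100Mi,nodefs.available<10%")
	if err != nil {
		panic(err)
	}
	fmt.Println(m["memory.available"]) // 100Mi
}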


@ -0,0 +1,63 @@
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 9911479f86012d6eab7e0f532da8f807a8b0f555ee09ef89367d8c31243073bb@https://github.com/containerd/containerd/releases/download/v1.4.9/cri-containerd-cni-1.4.9-linux-amd64.tar.gz
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 4eb9d5e2adf718cd7ee59f6951715f3113c9c4ee49c75c9efb9747f2c3457b2b@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.8.tgz
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
ClusterName: minimal.example.com
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "6982820025135291416230495506"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
UpdatePolicy: automatic
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.4.9


@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==


@ -0,0 +1,80 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: minimal.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/minimal.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
externalDNS:
provider: "external-dns"
iam: {}
kubelet:
anonymousAuth: false
kubernetesVersion: v1.21.0
masterInternalName: api.internal.minimal.example.com
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: minimal.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: minimal.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
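The externalDNS.provider field in the Cluster spec above is how this commit's change surfaces: with the feature flag removed, choosing external-dns over the default dns-controller becomes plain configuration. A hypothetical sketch of the selection logic (names illustrative, not kops' actual code):

package main

import "fmt"

// dnsAddonFor picks the DNS addon name from the cluster spec's
// externalDNS.provider value, defaulting to dns-controller.
func dnsAddonFor(provider string) string {
	switch provider {
	case "external-dns":
		return "external-dns.addons.k8s.io"
	default:
		return "dns-controller.addons.k8s.io"
	}
}

func main() {
	fmt.Println(dnsAddonFor("external-dns"))
}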


@ -0,0 +1,799 @@
locals {
cluster_name = "minimal.example.com"
master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id]
master_security_group_ids = [aws_security_group.masters-minimal-example-com.id]
masters_role_arn = aws_iam_role.masters-minimal-example-com.arn
masters_role_name = aws_iam_role.masters-minimal-example-com.name
node_autoscaling_group_ids = [aws_autoscaling_group.nodes-minimal-example-com.id]
node_security_group_ids = [aws_security_group.nodes-minimal-example-com.id]
node_subnet_ids = [aws_subnet.us-test-1a-minimal-example-com.id]
nodes_role_arn = aws_iam_role.nodes-minimal-example-com.arn
nodes_role_name = aws_iam_role.nodes-minimal-example-com.name
region = "us-test-1"
route_table_public_id = aws_route_table.minimal-example-com.id
subnet_us-test-1a_id = aws_subnet.us-test-1a-minimal-example-com.id
vpc_cidr_block = aws_vpc.minimal-example-com.cidr_block
vpc_id = aws_vpc.minimal-example-com.id
}
output "cluster_name" {
value = "minimal.example.com"
}
output "master_autoscaling_group_ids" {
value = [aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id]
}
output "master_security_group_ids" {
value = [aws_security_group.masters-minimal-example-com.id]
}
output "masters_role_arn" {
value = aws_iam_role.masters-minimal-example-com.arn
}
output "masters_role_name" {
value = aws_iam_role.masters-minimal-example-com.name
}
output "node_autoscaling_group_ids" {
value = [aws_autoscaling_group.nodes-minimal-example-com.id]
}
output "node_security_group_ids" {
value = [aws_security_group.nodes-minimal-example-com.id]
}
output "node_subnet_ids" {
value = [aws_subnet.us-test-1a-minimal-example-com.id]
}
output "nodes_role_arn" {
value = aws_iam_role.nodes-minimal-example-com.arn
}
output "nodes_role_name" {
value = aws_iam_role.nodes-minimal-example-com.name
}
output "region" {
value = "us-test-1"
}
output "route_table_public_id" {
value = aws_route_table.minimal-example-com.id
}
output "subnet_us-test-1a_id" {
value = aws_subnet.us-test-1a-minimal-example-com.id
}
output "vpc_cidr_block" {
value = aws_vpc.minimal-example-com.cidr_block
}
output "vpc_id" {
value = aws_vpc.minimal-example-com.id
}
provider "aws" {
region = "us-test-1"
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.master-us-test-1a-masters-minimal-example-com.id
version = aws_launch_template.master-us-test-1a-masters-minimal-example-com.latest_version
}
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1a.masters.minimal.example.com"
protect_from_scale_in = false
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "minimal.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1a.masters.minimal.example.com"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
value = "master"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "kubernetes.io/cluster/minimal.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_group" "nodes-minimal-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.nodes-minimal-example-com.id
version = aws_launch_template.nodes-minimal-example-com.latest_version
}
max_size = 2
metrics_granularity = "1Minute"
min_size = 2
name = "nodes.minimal.example.com"
protect_from_scale_in = false
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "minimal.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "nodes.minimal.example.com"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
value = "node"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/node"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes"
}
tag {
key = "kubernetes.io/cluster/minimal.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
iops = 3000
size = 20
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "us-test-1a.etcd-events.minimal.example.com"
"k8s.io/etcd/events" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
throughput = 125
type = "gp3"
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
iops = 3000
size = 20
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "us-test-1a.etcd-main.minimal.example.com"
"k8s.io/etcd/main" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
throughput = 125
type = "gp3"
}
resource "aws_iam_instance_profile" "masters-minimal-example-com" {
name = "masters.minimal.example.com"
role = aws_iam_role.masters-minimal-example-com.name
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "masters.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_instance_profile" "nodes-minimal-example-com" {
name = "nodes.minimal.example.com"
role = aws_iam_role.nodes-minimal-example-com.name
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_role" "masters-minimal-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_masters.minimal.example.com_policy")
name = "masters.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "masters.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_role" "nodes-minimal-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.minimal.example.com_policy")
name = "nodes.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_role_policy" "masters-minimal-example-com" {
name = "masters.minimal.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_masters.minimal.example.com_policy")
role = aws_iam_role.masters-minimal-example-com.name
}
resource "aws_iam_role_policy" "nodes-minimal-example-com" {
name = "nodes.minimal.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_nodes.minimal.example.com_policy")
role = aws_iam_role.nodes-minimal-example-com.name
}
resource "aws_internet_gateway" "minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_key_pair" "kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = file("${path.module}/data/aws_key_pair_kubernetes.minimal.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
encrypted = true
iops = 3000
throughput = 125
volume_size = 64
volume_type = "gp3"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-minimal-example-com.id
}
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_put_response_hop_limit = 1
http_tokens = "optional"
}
monitoring {
enabled = false
}
name = "master-us-test-1a.masters.minimal.example.com"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
ipv6_address_count = 0
security_groups = [aws_security_group.masters-minimal-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.minimal.example.com_user_data")
}
resource "aws_launch_template" "nodes-minimal-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
encrypted = true
iops = 3000
throughput = 125
volume_size = 128
volume_type = "gp3"
}
}
iam_instance_profile {
name = aws_iam_instance_profile.nodes-minimal-example-com.id
}
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = aws_key_pair.kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_put_response_hop_limit = 1
http_tokens = "optional"
}
monitoring {
enabled = false
}
name = "nodes.minimal.example.com"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
ipv6_address_count = 0
security_groups = [aws_security_group.nodes-minimal-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
user_data = filebase64("${path.module}/data/aws_launch_template_nodes.minimal.example.com_user_data")
}
resource "aws_route" "route-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.minimal-example-com.id
route_table_id = aws_route_table.minimal-example-com.id
}
resource "aws_route" "route-__--0" {
destination_ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.minimal-example-com.id
route_table_id = aws_route_table.minimal-example-com.id
}
resource "aws_route_table" "minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
"kubernetes.io/kops/role" = "public"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_route_table_association" "us-test-1a-minimal-example-com" {
route_table_id = aws_route_table.minimal-example-com.id
subnet_id = aws_subnet.us-test-1a-minimal-example-com.id
}
resource "aws_s3_bucket_object" "cluster-completed-spec" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_cluster-completed.spec_content")
key = "clusters.example.com/minimal.example.com/cluster-completed.spec"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "etcd-cluster-spec-events" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_etcd-cluster-spec-events_content")
key = "clusters.example.com/minimal.example.com/backups/etcd/events/control/etcd-cluster-spec"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "etcd-cluster-spec-main" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_etcd-cluster-spec-main_content")
key = "clusters.example.com/minimal.example.com/backups/etcd/main/control/etcd-cluster-spec"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "kops-version-txt" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_kops-version.txt_content")
key = "clusters.example.com/minimal.example.com/kops-version.txt"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "manifests-etcdmanager-events" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_manifests-etcdmanager-events_content")
key = "clusters.example.com/minimal.example.com/manifests/etcd/events.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "manifests-etcdmanager-main" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_manifests-etcdmanager-main_content")
key = "clusters.example.com/minimal.example.com/manifests/etcd/main.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "manifests-static-kube-apiserver-healthcheck" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_manifests-static-kube-apiserver-healthcheck_content")
key = "clusters.example.com/minimal.example.com/manifests/static/kube-apiserver-healthcheck.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-bootstrap" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content")
key = "clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-core-addons-k8s-io" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-core.addons.k8s.io_content")
key = "clusters.example.com/minimal.example.com/addons/core.addons.k8s.io/v1.4.0.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-coredns-addons-k8s-io-k8s-1-12" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content")
key = "clusters.example.com/minimal.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content")
key = "clusters.example.com/minimal.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content")
key = "clusters.example.com/minimal.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content")
key = "clusters.example.com/minimal.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-limit-range-addons-k8s-io" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-limit-range.addons.k8s.io_content")
key = "clusters.example.com/minimal.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content")
key = "clusters.example.com/minimal.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "nodeupconfig-master-us-test-1a" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_nodeupconfig-master-us-test-1a_content")
key = "clusters.example.com/minimal.example.com/igconfig/master/master-us-test-1a/nodeupconfig.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "nodeupconfig-nodes" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_nodeupconfig-nodes_content")
key = "clusters.example.com/minimal.example.com/igconfig/node/nodes/nodeupconfig.yaml"
server_side_encryption = "AES256"
}
resource "aws_security_group" "masters-minimal-example-com" {
description = "Security group for masters"
name = "masters.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "masters.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_security_group" "nodes-minimal-example-com" {
description = "Security group for nodes"
name = "nodes.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-minimal-example-com" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-minimal-example-com" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-minimal-example-com" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-egress-all-0to0-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-egress-all-0to0-__--0" {
from_port = 0
ipv6_cidr_blocks = ["::/0"]
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-ingress-all-0to0-masters-minimal-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-ingress-all-0to0-nodes-minimal-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
source_security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-egress-all-0to0-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-egress-all-0to0-__--0" {
from_port = 0
ipv6_cidr_blocks = ["::/0"]
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-all-0to0-nodes-minimal-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-1to2379-masters-minimal-example-com" {
from_port = 1
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 2379
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-2382to4000-masters-minimal-example-com" {
from_port = 2382
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 4000
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-4003to65535-masters-minimal-example-com" {
from_port = 4003
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1to65535-masters-minimal-example-com" {
from_port = 1
protocol = "udp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_subnet" "us-test-1a-minimal-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "us-test-1a.minimal.example.com"
"SubnetType" = "Public"
"kubernetes.io/cluster/minimal.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_vpc" "minimal-example-com" {
assign_generated_ipv6_cidr_block = true
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_vpc_dhcp_options" "minimal-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_vpc_dhcp_options_association" "minimal-example-com" {
dhcp_options_id = aws_vpc_dhcp_options.minimal-example-com.id
vpc_id = aws_vpc.minimal-example-com.id
}
terraform {
required_version = ">= 0.12.26"
required_providers {
aws = {
"source" = "hashicorp/aws"
"version" = ">= 3.34.0"
}
}
}

File diff suppressed because it is too large


@ -0,0 +1,583 @@
Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
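# Raise kernel network buffer ceilings for high-throughput traffic;
# "|| true" tolerates images where these sysctls are unavailable.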
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
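# Usage sketch (hypothetical values): fetch a file, trying each mirror in the
# comma-separated URL list until the SHA256 matches:
#   download-or-bust kubectl "<expected-sha256>" "https://mirror-a/kubectl,https://mirror-b/kubectl"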
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.6
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
version: 3.4.13
main:
version: 3.4.13
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 6ae4763598c9583f8b50605f19d6c7e9ef93c216706465e73dfc84ee6b63a238@https://github.com/containerd/containerd/releases/download/v1.4.6/cri-containerd-cni-1.4.6-linux-amd64.tar.gz
- f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64
- 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- be8c9a5a06ebec8fb1d36e867cd00fb5777746a9812a0cae2966778ff899c525@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.7.tgz
- 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64
- 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64
AuxConfigHash: /O5IS/dGo83lv2DbWn4k91OYfuOqtO79vjf5pD1DQlI=
CAs:
ca: |
-----BEGIN CERTIFICATE-----
MIIBaDCCARKgAwIBAgIMFoq6Pex4lTCM8fOIMA0GCSqGSIb3DQEBCwUAMBUxEzAR
BgNVBAMTCmt1YmVybmV0ZXMwHhcNMjEwNjE5MjI0MzEwWhcNMzEwNjE5MjI0MzEw
WjAVMRMwEQYDVQQDEwprdWJlcm5ldGVzMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJB
ANiW3hfHTcKnxCig+uWhpVbOfH1pANKmXVSysPKgE80QSU4tZ6m49pAEeIMsvwvD
MaLsb2v6JvXe0qvCmueU+/sCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
EwEB/wQFMAMBAf8wHQYDVR0OBBYEFCOW3hR7ngBsk9aUOlEznWzH494EMA0GCSqG
SIb3DQEBCwUAA0EAVnZzkiku07kQFGAEXzWI6aZnAbzSoClYskEzCBMrOmdadjVp
VWcz76FwFlyd5jhzOJ49eMcVusSotKv2ZGimcA==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBaDCCARKgAwIBAgIMFoq6PeyECsgUTfc2MA0GCSqGSIb3DQEBCwUAMBUxEzAR
BgNVBAMTCmt1YmVybmV0ZXMwHhcNMjEwNjE5MjI0MzEwWhcNMzEwNjE5MjI0MzEw
WjAVMRMwEQYDVQQDEwprdWJlcm5ldGVzMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJB
AKOE64nZbH+GM91AIrqf7HEk4hvzqsZFFtxc+8xir1XC3mI/RhCCrs6AdVRZNZ26
A6uHArhi33c2kHQkCjyLA7sCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
EwEB/wQFMAMBAf8wHQYDVR0OBBYEFIT28RJlG8FTgmvn2YMa3hYX+u1BMA0GCSqG
SIb3DQEBCwUAA0EAKuaE5wKMP26AyfxkWu83iHoTPFtdjabXF0JcyPy0ijQZxfJq
9xc2CkttvgaDtT4H+E/ryQ3iq6kSfEYYPi8c0w==
-----END CERTIFICATE-----
CloudProvider: aws
ClusterName: minimal.example.com
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
KeypairIDs:
ca: "6976381481633145814258938760"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
UpdatePolicy: automatic
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig: |
version = 2
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".containerd]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
etcdManifests:
- memfs://clusters.example.com/minimal.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/minimal.example.com/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
Resources.AWSEC2LaunchTemplatenodesminimalexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.6
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 6ae4763598c9583f8b50605f19d6c7e9ef93c216706465e73dfc84ee6b63a238@https://github.com/containerd/containerd/releases/download/v1.4.6/cri-containerd-cni-1.4.6-linux-amd64.tar.gz
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- be8c9a5a06ebec8fb1d36e867cd00fb5777746a9812a0cae2966778ff899c525@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.7.tgz
AuxConfigHash: /O5IS/dGo83lv2DbWn4k91OYfuOqtO79vjf5pD1DQlI=
CAs:
ca: |
-----BEGIN CERTIFICATE-----
MIIBaDCCARKgAwIBAgIMFoq6Pex4lTCM8fOIMA0GCSqGSIb3DQEBCwUAMBUxEzAR
BgNVBAMTCmt1YmVybmV0ZXMwHhcNMjEwNjE5MjI0MzEwWhcNMzEwNjE5MjI0MzEw
WjAVMRMwEQYDVQQDEwprdWJlcm5ldGVzMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJB
ANiW3hfHTcKnxCig+uWhpVbOfH1pANKmXVSysPKgE80QSU4tZ6m49pAEeIMsvwvD
MaLsb2v6JvXe0qvCmueU+/sCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
EwEB/wQFMAMBAf8wHQYDVR0OBBYEFCOW3hR7ngBsk9aUOlEznWzH494EMA0GCSqG
SIb3DQEBCwUAA0EAVnZzkiku07kQFGAEXzWI6aZnAbzSoClYskEzCBMrOmdadjVp
VWcz76FwFlyd5jhzOJ49eMcVusSotKv2ZGimcA==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBaDCCARKgAwIBAgIMFoq6PeyECsgUTfc2MA0GCSqGSIb3DQEBCwUAMBUxEzAR
BgNVBAMTCmt1YmVybmV0ZXMwHhcNMjEwNjE5MjI0MzEwWhcNMzEwNjE5MjI0MzEw
WjAVMRMwEQYDVQQDEwprdWJlcm5ldGVzMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJB
AKOE64nZbH+GM91AIrqf7HEk4hvzqsZFFtxc+8xir1XC3mI/RhCCrs6AdVRZNZ26
A6uHArhi33c2kHQkCjyLA7sCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
EwEB/wQFMAMBAf8wHQYDVR0OBBYEFIT28RJlG8FTgmvn2YMa3hYX+u1BMA0GCSqG
SIb3DQEBCwUAA0EAKuaE5wKMP26AyfxkWu83iHoTPFtdjabXF0JcyPy0ijQZxfJq
9xc2CkttvgaDtT4H+E/ryQ3iq6kSfEYYPi8c0w==
-----END CERTIFICATE-----
CloudProvider: aws
ClusterName: minimal.example.com
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
KeypairIDs: {}
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
UpdatePolicy: automatic
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig: |
version = 2
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".containerd]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}
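This trust policy is what lets EC2 instances assume the role. A minimal sketch of creating such a role by hand (role and file names hypothetical; kops manages this itself):

aws iam create-role \
  --role-name masters.minimal.example.com \
  --assume-role-policy-document file://trust-policy.json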

View File

@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -0,0 +1,234 @@
{
"Statement": [
{
"Action": "ec2:AttachVolume",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com",
"aws:ResourceTag/k8s.io/role/master": "1"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/main/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/events/*"
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::placeholder-write-bucket"
]
},
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Effect": "Allow",
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"ec2:CreateAction": [
"CreateVolume",
"CreateSnapshot"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:volume/*",
"arn:aws:ec2:*:*:snapshot/*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"ec2:CreateAction": [
"CreateVolume",
"CreateSnapshot"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:volume/*",
"arn:aws:ec2:*:*:snapshot/*"
]
},
{
"Action": "ec2:DeleteTags",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws:ec2:*:*:volume/*",
"arn:aws:ec2:*:*:snapshot/*"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DescribeAccountAttributes",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVolumes",
"ec2:DescribeVolumesModifications",
"ec2:DescribeVpcs",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:DescribeKey",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:DeleteRoute",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyVolume",
"ec2:RevokeSecurityGroupIngress",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:ModifyTargetGroup",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
],
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:CreateSecurityGroup",
"ec2:CreateVolume",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:CreateTargetGroup"
],
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}
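The statement list above is attached to the master role as an inline policy. A hedged sketch of doing the same manually (names hypothetical; kops normally reconciles this on its own):

aws iam put-role-policy \
  --role-name masters.minimal.example.com \
  --policy-name masters.minimal.example.com \
  --policy-document file://master-policy.json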

View File

@ -0,0 +1,40 @@
{
"Statement": [
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/addons/*",
"arn:aws:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/cluster-completed.spec",
"arn:aws:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/igconfig/node/*",
"arn:aws:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/secrets/dockerconfig"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingInstances",
"ec2:DescribeInstances",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -0,0 +1,252 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.9
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
version: 3.4.13
main:
version: 3.4.13
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
NodeupConfigHash: 5StgKu5eTk/zFpcb1W8qO9BWna8InTaJCmpUJg5Cdfk=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -0,0 +1,170 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.9
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
NodeupConfigHash: mvvoRjabjJ8D+FkowMWTvnv0Lwjpp4pIZonxFm0ccSQ=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -0,0 +1,187 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: minimal.example.com
spec:
api:
dns: {}
authorization:
alwaysAllow: {}
channel: stable
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
cloudProvider: aws
clusterDNSDomain: cluster.local
configBase: memfs://clusters.example.com/minimal.example.com
configStore: memfs://clusters.example.com/minimal.example.com
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.9
dnsZone: Z1AFAKE1ZON3YO
docker:
skipInstall: true
etcdClusters:
- backups:
backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd/main
enableEtcdTLS: true
enableTLSAuth: true
etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
provider: Manager
version: 3.4.13
- backups:
backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd/events
enableEtcdTLS: true
enableTLSAuth: true
etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
provider: Manager
version: 3.4.13
externalDns:
provider: dns-controller
iam:
legacy: false
keyStore: memfs://clusters.example.com/minimal.example.com/pki
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeDNS:
cacheMaxConcurrent: 150
cacheMaxSize: 1000
cpuRequest: 100m
domain: cluster.local
memoryLimit: 170Mi
memoryRequest: 70Mi
nodeLocalDNS:
cpuRequest: 25m
enabled: false
memoryRequest: 5Mi
provider: CoreDNS
replicas: 2
serverIP: 100.64.0.10
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
kubernetesApiAccess:
- 0.0.0.0/0
kubernetesVersion: 1.21.0
masterInternalName: api.internal.minimal.example.com
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
secretStore: memfs://clusters.example.com/minimal.example.com/secrets
serviceClusterIPRange: 100.64.0.0/13
sshAccess:
- 0.0.0.0/0
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
topology:
dns:
type: Public
masters: public
nodes: public
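Note that the cluster spec above now carries externalDns with the dns-controller provider directly, with no feature flag involved. A quick way to confirm what a live cluster is configured with (sketch; assumes kops and cluster access):

kops get cluster minimal.example.com -o yaml | grep -A 1 externalDns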

View File

@ -0,0 +1,4 @@
{
"memberCount": 1,
"etcdVersion": "3.4.13"
}

View File

@ -0,0 +1,4 @@
{
"memberCount": 1,
"etcdVersion": "3.4.13"
}

View File

@ -0,0 +1,63 @@
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: etcd-manager-events
name: etcd-manager-events
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd/events
--client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
--dns-suffix=.internal.minimal.example.com --grpc-port=3997 --peer-urls=https://__name__:2381
--quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events
--volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
image: k8s.gcr.io/etcdadm/etcd-manager:3.0.20210707
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-events
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd-events.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@ -0,0 +1,63 @@
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: etcd-manager-main
name: etcd-manager-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd/main
--client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
--dns-suffix=.internal.minimal.example.com --grpc-port=3996 --peer-urls=https://__name__:2380
--quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main
--volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
image: k8s.gcr.io/etcdadm/etcd-manager:3.0.20210707
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-main
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@ -0,0 +1,32 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
spec:
containers:
- args:
- --ca-cert=/secrets/ca.crt
- --client-cert=/secrets/client.crt
- --client-key=/secrets/client.key
command:
- /kube-apiserver-healthcheck
image: k8s.gcr.io/kops/kube-apiserver-healthcheck:1.22.0-alpha.2
livenessProbe:
httpGet:
host: 127.0.0.1
path: /.kube-apiserver-healthcheck/healthz
port: 3990
initialDelaySeconds: 5
timeoutSeconds: 5
name: healthcheck
resources: {}
volumeMounts:
- mountPath: /secrets
name: healthcheck-secrets
readOnly: true
volumes:
- hostPath:
path: /etc/kubernetes/kube-apiserver-healthcheck/secrets
type: Directory
name: healthcheck-secrets
status: {}
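The liveness probe above goes through the healthcheck sidecar on port 3990. A sketch of probing it by hand from the master, with the path and port taken from the manifest:

curl -s http://127.0.0.1:3990/.kube-apiserver-healthcheck/healthz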

View File

@ -0,0 +1,47 @@
kind: Addons
metadata:
creationTimestamp: null
name: bootstrap
spec:
addons:
- id: k8s-1.16
manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml
manifestHash: ad4c88257aa1b1d05c38bfee630eb6cad07b3e258932dafc37b76d7328f3a8ff
name: kops-controller.addons.k8s.io
needsRollingUpdate: control-plane
selector:
k8s-addon: kops-controller.addons.k8s.io
- manifest: core.addons.k8s.io/v1.4.0.yaml
manifestHash: 90f8d3bd227dc1f4fdd46b561ac6bfc9355392477a7eedd6c3e2318614c375d8
name: core.addons.k8s.io
selector:
k8s-addon: core.addons.k8s.io
- id: k8s-1.12
manifest: coredns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 3bf8c29c45f0f7dbbb1671b577f302a19418b55d214f6847ff586f1ee9d1ba71
name: coredns.addons.k8s.io
selector:
k8s-addon: coredns.addons.k8s.io
- id: k8s-1.9
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81
name: kubelet-api.rbac.addons.k8s.io
selector:
k8s-addon: kubelet-api.rbac.addons.k8s.io
- manifest: limit-range.addons.k8s.io/v1.5.0.yaml
manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2
name: limit-range.addons.k8s.io
selector:
k8s-addon: limit-range.addons.k8s.io
- id: k8s-1.12
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 7557767927ab9cbbc5a0bc200c773efd0b6cd31084d2e8dff905e68d5b74e799
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
- id: v1.15.0
manifest: storage-aws.addons.k8s.io/v1.15.0.yaml
manifestHash: 065ae832ddac8d0931e9992d6a76f43a33a36975a38003b34f4c5d86a7d42780
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: core.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: core.addons.k8s.io
name: kube-system

View File

@ -0,0 +1,383 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/cluster-service: "true"
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
data:
Corefile: |-
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local. in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: coredns
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
k8s-app: kube-dns
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- kube-dns
topologyKey: kubernetes.io/hostname
weight: 100
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: k8s.gcr.io/coredns/coredns:v1.8.4
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- configMap:
items:
- key: Corefile
path: Corefile
name: coredns
name: config-volume
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: kube-dns
namespace: kube-system
resourceVersion: "0"
spec:
clusterIP: 100.64.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
selector:
k8s-app: kube-dns
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: kube-dns
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: kube-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- replicationcontrollers/scale
verbs:
- get
- update
- apiGroups:
- extensions
- apps
resources:
- deployments/scale
- replicasets/scale
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: coredns-autoscaler
kubernetes.io/cluster-service: "true"
name: coredns-autoscaler
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: coredns-autoscaler
spec:
containers:
- command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
image: k8s.gcr.io/cpa/cluster-proportional-autoscaler:1.8.4
name: autoscaler
resources:
requests:
cpu: 20m
memory: 10Mi
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns-autoscaler
tolerations:
- key: CriticalAddonsOnly
operator: Exists

View File

@ -0,0 +1,121 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.22.0-alpha.2
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
strategy:
type: Recreate
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.22.0-alpha.2
spec:
containers:
- command:
- /dns-controller
- --watch-ingress=false
- --dns=aws-route53
- --zone=*/Z1AFAKE1ZON3YO
- --zone=*/*
- -v=2
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: k8s.gcr.io/kops/dns-controller:1.22.0-alpha.2
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
dnsPolicy: Default
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
priorityClassName: system-cluster-critical
serviceAccount: dns-controller
tolerations:
- operator: Exists
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller
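Once the addon is applied, the deployment can be verified in the usual way; a sketch (assumes kubectl access to the cluster):

kubectl -n kube-system get deploy dns-controller
kubectl -n kube-system logs deploy/dns-controller | head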

View File

@ -0,0 +1,204 @@
apiVersion: v1
data:
config.yaml: |
{"cloud":"aws","configBase":"memfs://clusters.example.com/minimal.example.com","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.minimal.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.22.0-alpha.2
name: kops-controller
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kops-controller
template:
metadata:
annotations:
dns.alpha.kubernetes.io/internal: kops-controller.internal.minimal.example.com
labels:
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.22.0-alpha.2
spec:
containers:
- command:
- /kops-controller
- --v=2
- --conf=/etc/kubernetes/kops-controller/config/config.yaml
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: k8s.gcr.io/kops/kops-controller:1.22.0-alpha.2
name: kops-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
volumeMounts:
- mountPath: /etc/kubernetes/kops-controller/config/
name: kops-controller-config
- mountPath: /etc/kubernetes/kops-controller/pki/
name: kops-controller-pki
dnsPolicy: Default
hostNetwork: true
nodeSelector:
kops.k8s.io/kops-controller-pki: ""
node-role.kubernetes.io/master: ""
priorityClassName: system-node-critical
serviceAccount: kops-controller
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
volumes:
- configMap:
name: kops-controller
name: kops-controller-config
- hostPath:
path: /etc/kubernetes/kops-controller/
type: Directory
name: kops-controller-pki
updateStrategy:
type: OnDelete
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
- create
- apiGroups:
- ""
- coordination.k8s.io
resourceNames:
- kops-controller-leader
resources:
- configmaps
- leases
verbs:
- get
- list
- watch
- patch
- update
- delete
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller

@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kubelet-api.rbac.addons.k8s.io
name: kops:system:kubelet-api-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet-api

@ -0,0 +1,15 @@
apiVersion: v1
kind: LimitRange
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: limit-range.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: limit-range.addons.k8s.io
name: limits
namespace: default
spec:
limits:
- defaultRequest:
cpu: 100m
type: Container

@ -0,0 +1,98 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: default
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: gp2
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-ssd-1-17
parameters:
encrypted: "true"
type: gp2
provisioner: kubernetes.io/aws-ebs
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:aws-cloud-provider
subjects:
- kind: ServiceAccount
name: aws-cloud-provider
namespace: kube-system

@ -0,0 +1,267 @@
APIServerConfig:
KubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm
XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ==
-----END RSA PUBLIC KEY-----
-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF
Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ==
-----END RSA PUBLIC KEY-----
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 9911479f86012d6eab7e0f532da8f807a8b0f555ee09ef89367d8c31243073bb@https://github.com/containerd/containerd/releases/download/v1.4.9/cri-containerd-cni-1.4.9-linux-amd64.tar.gz
- f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64
- 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 4eb9d5e2adf718cd7ee59f6951715f3113c9c4ee49c75c9efb9747f2c3457b2b@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.8.tgz
- 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64
- 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64
CAs:
apiserver-aggregator-ca: |
-----BEGIN CERTIFICATE-----
MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX
DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX
WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk
CzMeMdr4
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX
DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN
QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW
HLtkTXH8
-----END CERTIFICATE-----
etcd-clients-ca: |
-----BEGIN CERTIFICATE-----
MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx
NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep
uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s
x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY
qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx
NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep
uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s
x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E
YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co=
-----END CERTIFICATE-----
etcd-manager-ca-events: |
-----BEGIN CERTIFICATE-----
MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN
MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H
g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6
CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O
sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs
GS/VUw==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN
MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL
DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW
LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE
hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV
cPfVNg==
-----END CERTIFICATE-----
etcd-manager-ca-main: |
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx
MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm
ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx
GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu
Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP
vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP
DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9
t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd
-----END CERTIFICATE-----
etcd-peers-ca-events: |
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx
MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY
xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O
Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB
DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW
03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh
cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI
J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb
-----END CERTIFICATE-----
etcd-peers-ca-main: |
-----BEGIN CERTIFICATE-----
MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3
MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG
SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA
aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf
OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt
uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3
MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG
SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt
naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC
qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K
G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo=
-----END CERTIFICATE-----
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
ClusterName: minimal.example.com
Hooks:
- null
- null
KeypairIDs:
apiserver-aggregator-ca: "6980187172486667078076483355"
etcd-clients-ca: "6979622252718071085282986282"
etcd-manager-ca-events: "6982279354000777253151890266"
etcd-manager-ca-main: "6982279354000936168671127624"
etcd-peers-ca-events: "6982279353999767935825892873"
etcd-peers-ca-main: "6982279353998887468930183660"
kubernetes-ca: "6982820025135291416230495506"
service-account: "2"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
UpdatePolicy: automatic
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.4.9
etcdManifests:
- memfs://clusters.example.com/minimal.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/minimal.example.com/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml

@ -0,0 +1,63 @@
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 9911479f86012d6eab7e0f532da8f807a8b0f555ee09ef89367d8c31243073bb@https://github.com/containerd/containerd/releases/download/v1.4.9/cri-containerd-cni-1.4.9-linux-amd64.tar.gz
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 4eb9d5e2adf718cd7ee59f6951715f3113c9c4ee49c75c9efb9747f2c3457b2b@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.8.tgz
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
ClusterName: minimal.example.com
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "6982820025135291416230495506"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
UpdatePolicy: automatic
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.4.9

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

@ -0,0 +1,81 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: minimal.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/minimal.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
externalDNS:
provider: "external-dns"
iam:
useServiceAccountsExternalPermissions: true
kubelet:
anonymousAuth: false
kubernetesVersion: v1.21.0
masterInternalName: api.internal.minimal.example.com
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: minimal.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: minimal.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a

@ -0,0 +1,799 @@
locals {
cluster_name = "minimal.example.com"
master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id]
master_security_group_ids = [aws_security_group.masters-minimal-example-com.id]
masters_role_arn = aws_iam_role.masters-minimal-example-com.arn
masters_role_name = aws_iam_role.masters-minimal-example-com.name
node_autoscaling_group_ids = [aws_autoscaling_group.nodes-minimal-example-com.id]
node_security_group_ids = [aws_security_group.nodes-minimal-example-com.id]
node_subnet_ids = [aws_subnet.us-test-1a-minimal-example-com.id]
nodes_role_arn = aws_iam_role.nodes-minimal-example-com.arn
nodes_role_name = aws_iam_role.nodes-minimal-example-com.name
region = "us-test-1"
route_table_public_id = aws_route_table.minimal-example-com.id
subnet_us-test-1a_id = aws_subnet.us-test-1a-minimal-example-com.id
vpc_cidr_block = aws_vpc.minimal-example-com.cidr_block
vpc_id = aws_vpc.minimal-example-com.id
}
output "cluster_name" {
value = "minimal.example.com"
}
output "master_autoscaling_group_ids" {
value = [aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id]
}
output "master_security_group_ids" {
value = [aws_security_group.masters-minimal-example-com.id]
}
output "masters_role_arn" {
value = aws_iam_role.masters-minimal-example-com.arn
}
output "masters_role_name" {
value = aws_iam_role.masters-minimal-example-com.name
}
output "node_autoscaling_group_ids" {
value = [aws_autoscaling_group.nodes-minimal-example-com.id]
}
output "node_security_group_ids" {
value = [aws_security_group.nodes-minimal-example-com.id]
}
output "node_subnet_ids" {
value = [aws_subnet.us-test-1a-minimal-example-com.id]
}
output "nodes_role_arn" {
value = aws_iam_role.nodes-minimal-example-com.arn
}
output "nodes_role_name" {
value = aws_iam_role.nodes-minimal-example-com.name
}
output "region" {
value = "us-test-1"
}
output "route_table_public_id" {
value = aws_route_table.minimal-example-com.id
}
output "subnet_us-test-1a_id" {
value = aws_subnet.us-test-1a-minimal-example-com.id
}
output "vpc_cidr_block" {
value = aws_vpc.minimal-example-com.cidr_block
}
output "vpc_id" {
value = aws_vpc.minimal-example-com.id
}
provider "aws" {
region = "us-test-1"
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.master-us-test-1a-masters-minimal-example-com.id
version = aws_launch_template.master-us-test-1a-masters-minimal-example-com.latest_version
}
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1a.masters.minimal.example.com"
protect_from_scale_in = false
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "minimal.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1a.masters.minimal.example.com"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
value = "master"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "kubernetes.io/cluster/minimal.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_group" "nodes-minimal-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.nodes-minimal-example-com.id
version = aws_launch_template.nodes-minimal-example-com.latest_version
}
max_size = 2
metrics_granularity = "1Minute"
min_size = 2
name = "nodes.minimal.example.com"
protect_from_scale_in = false
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "minimal.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "nodes.minimal.example.com"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
value = "node"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/node"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes"
}
tag {
key = "kubernetes.io/cluster/minimal.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
iops = 3000
size = 20
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "us-test-1a.etcd-events.minimal.example.com"
"k8s.io/etcd/events" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
throughput = 125
type = "gp3"
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = false
iops = 3000
size = 20
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "us-test-1a.etcd-main.minimal.example.com"
"k8s.io/etcd/main" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
throughput = 125
type = "gp3"
}
resource "aws_iam_instance_profile" "masters-minimal-example-com" {
name = "masters.minimal.example.com"
role = aws_iam_role.masters-minimal-example-com.name
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "masters.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_instance_profile" "nodes-minimal-example-com" {
name = "nodes.minimal.example.com"
role = aws_iam_role.nodes-minimal-example-com.name
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_role" "masters-minimal-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_masters.minimal.example.com_policy")
name = "masters.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "masters.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_role" "nodes-minimal-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.minimal.example.com_policy")
name = "nodes.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_role_policy" "masters-minimal-example-com" {
name = "masters.minimal.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_masters.minimal.example.com_policy")
role = aws_iam_role.masters-minimal-example-com.name
}
resource "aws_iam_role_policy" "nodes-minimal-example-com" {
name = "nodes.minimal.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_nodes.minimal.example.com_policy")
role = aws_iam_role.nodes-minimal-example-com.name
}
resource "aws_internet_gateway" "minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_key_pair" "kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = file("${path.module}/data/aws_key_pair_kubernetes.minimal.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
encrypted = true
iops = 3000
throughput = 125
volume_size = 64
volume_type = "gp3"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-minimal-example-com.id
}
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_put_response_hop_limit = 1
http_tokens = "optional"
}
monitoring {
enabled = false
}
name = "master-us-test-1a.masters.minimal.example.com"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
ipv6_address_count = 0
security_groups = [aws_security_group.masters-minimal-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.minimal.example.com_user_data")
}
resource "aws_launch_template" "nodes-minimal-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
encrypted = true
iops = 3000
throughput = 125
volume_size = 128
volume_type = "gp3"
}
}
iam_instance_profile {
name = aws_iam_instance_profile.nodes-minimal-example-com.id
}
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = aws_key_pair.kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_put_response_hop_limit = 1
http_tokens = "optional"
}
monitoring {
enabled = false
}
name = "nodes.minimal.example.com"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
ipv6_address_count = 0
security_groups = [aws_security_group.nodes-minimal-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
user_data = filebase64("${path.module}/data/aws_launch_template_nodes.minimal.example.com_user_data")
}
resource "aws_route" "route-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.minimal-example-com.id
route_table_id = aws_route_table.minimal-example-com.id
}
resource "aws_route" "route-__--0" {
destination_ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.minimal-example-com.id
route_table_id = aws_route_table.minimal-example-com.id
}
resource "aws_route_table" "minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
"kubernetes.io/kops/role" = "public"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_route_table_association" "us-test-1a-minimal-example-com" {
route_table_id = aws_route_table.minimal-example-com.id
subnet_id = aws_subnet.us-test-1a-minimal-example-com.id
}
resource "aws_s3_bucket_object" "cluster-completed-spec" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_cluster-completed.spec_content")
key = "clusters.example.com/minimal.example.com/cluster-completed.spec"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "etcd-cluster-spec-events" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_etcd-cluster-spec-events_content")
key = "clusters.example.com/minimal.example.com/backups/etcd/events/control/etcd-cluster-spec"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "etcd-cluster-spec-main" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_etcd-cluster-spec-main_content")
key = "clusters.example.com/minimal.example.com/backups/etcd/main/control/etcd-cluster-spec"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "kops-version-txt" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_kops-version.txt_content")
key = "clusters.example.com/minimal.example.com/kops-version.txt"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "manifests-etcdmanager-events" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_manifests-etcdmanager-events_content")
key = "clusters.example.com/minimal.example.com/manifests/etcd/events.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "manifests-etcdmanager-main" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_manifests-etcdmanager-main_content")
key = "clusters.example.com/minimal.example.com/manifests/etcd/main.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "manifests-static-kube-apiserver-healthcheck" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_manifests-static-kube-apiserver-healthcheck_content")
key = "clusters.example.com/minimal.example.com/manifests/static/kube-apiserver-healthcheck.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-bootstrap" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-bootstrap_content")
key = "clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-core-addons-k8s-io" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-core.addons.k8s.io_content")
key = "clusters.example.com/minimal.example.com/addons/core.addons.k8s.io/v1.4.0.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-coredns-addons-k8s-io-k8s-1-12" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content")
key = "clusters.example.com/minimal.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content")
key = "clusters.example.com/minimal.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content")
key = "clusters.example.com/minimal.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content")
key = "clusters.example.com/minimal.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-limit-range-addons-k8s-io" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-limit-range.addons.k8s.io_content")
key = "clusters.example.com/minimal.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "minimal-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_minimal.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content")
key = "clusters.example.com/minimal.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "nodeupconfig-master-us-test-1a" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_nodeupconfig-master-us-test-1a_content")
key = "clusters.example.com/minimal.example.com/igconfig/master/master-us-test-1a/nodeupconfig.yaml"
server_side_encryption = "AES256"
}
resource "aws_s3_bucket_object" "nodeupconfig-nodes" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_bucket_object_nodeupconfig-nodes_content")
key = "clusters.example.com/minimal.example.com/igconfig/node/nodes/nodeupconfig.yaml"
server_side_encryption = "AES256"
}
resource "aws_security_group" "masters-minimal-example-com" {
description = "Security group for masters"
name = "masters.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "masters.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_security_group" "nodes-minimal-example-com" {
description = "Security group for nodes"
name = "nodes.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-minimal-example-com" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-minimal-example-com" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-minimal-example-com" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-egress-all-0to0-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-egress-all-0to0-__--0" {
from_port = 0
ipv6_cidr_blocks = ["::/0"]
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-ingress-all-0to0-masters-minimal-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-ingress-all-0to0-nodes-minimal-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
source_security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-egress-all-0to0-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-egress-all-0to0-__--0" {
from_port = 0
ipv6_cidr_blocks = ["::/0"]
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-all-0to0-nodes-minimal-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-1to2379-masters-minimal-example-com" {
from_port = 1
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 2379
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-2382to4000-masters-minimal-example-com" {
from_port = 2382
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 4000
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-4003to65535-masters-minimal-example-com" {
from_port = 4003
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1to65535-masters-minimal-example-com" {
from_port = 1
protocol = "udp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_subnet" "us-test-1a-minimal-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "us-test-1a.minimal.example.com"
"SubnetType" = "Public"
"kubernetes.io/cluster/minimal.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_vpc" "minimal-example-com" {
assign_generated_ipv6_cidr_block = true
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_vpc_dhcp_options" "minimal-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_vpc_dhcp_options_association" "minimal-example-com" {
dhcp_options_id = aws_vpc_dhcp_options.minimal-example-com.id
vpc_id = aws_vpc.minimal-example-com.id
}
terraform {
required_version = ">= 0.12.26"
required_providers {
aws = {
"source" = "hashicorp/aws"
"version" = ">= 3.34.0"
}
}
}

@ -22,6 +22,7 @@ go_library(
"//pkg/model/components/addonmanifests/awsloadbalancercontroller:go_default_library",
"//pkg/model/components/addonmanifests/clusterautoscaler:go_default_library",
"//pkg/model/components/addonmanifests/dnscontroller:go_default_library",
"//pkg/model/components/addonmanifests/externaldns:go_default_library",
"//pkg/model/components/addonmanifests/nodeterminationhandler:go_default_library",
"//pkg/model/iam:go_default_library",
"//pkg/templates:go_default_library",

@ -34,6 +34,7 @@ import (
"k8s.io/kops/pkg/model/components/addonmanifests/awsloadbalancercontroller"
"k8s.io/kops/pkg/model/components/addonmanifests/clusterautoscaler"
"k8s.io/kops/pkg/model/components/addonmanifests/dnscontroller"
"k8s.io/kops/pkg/model/components/addonmanifests/externaldns"
"k8s.io/kops/pkg/model/components/addonmanifests/nodeterminationhandler"
"k8s.io/kops/pkg/model/iam"
"k8s.io/kops/pkg/templates"
@ -462,6 +463,10 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann
Id: id,
})
}
if b.UseServiceAccountExternalPermissions() {
serviceAccountRoles = append(serviceAccountRoles, &externaldns.ServiceAccount{})
}
}
}

@ -567,6 +567,7 @@ func (tf *TemplateFunctions) KopsControllerArgv() ([]string, error) {
func (tf *TemplateFunctions) ExternalDNSArgv() ([]string, error) {
cluster := tf.Cluster
externalDNS := tf.Cluster.Spec.ExternalDNS
var argv []string
@ -584,13 +585,18 @@ func (tf *TemplateFunctions) ExternalDNSArgv() ([]string, error) {
}
argv = append(argv, "--events")
argv = append(argv, "--source=ingress")
if fi.BoolValue(externalDNS.WatchIngress) {
argv = append(argv, "--source=ingress")
}
argv = append(argv, "--source=pod")
argv = append(argv, "--source=service")
argv = append(argv, "--compatibility=kops-dns-controller")
argv = append(argv, "--registry=txt")
argv = append(argv, "--txt-owner-id=kops-"+tf.ClusterName())
argv = append(argv, "--zone-id-filter="+tf.Cluster.Spec.DNSZone)
if externalDNS.WatchNamespace != "" {
argv = append(argv, "--namespace="+externalDNS.WatchNamespace)
}
return argv, nil
}
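
As a reading aid (not part of the commit), here is a minimal, self-contained Go sketch of the flag assembly that the ExternalDNSArgv diff above performs. ExternalDNSConfig and buildArgs are simplified stand-ins for the real kops API types and template function, and the sample cluster name and zone ID are taken from the fixtures above; it shows that --source=ingress is only emitted when ingress watching is explicitly enabled, and that the namespace filter is optional.

package main

import (
	"fmt"
	"strings"
)

// ExternalDNSConfig is a simplified stand-in for the kops ExternalDNS spec
// fields referenced in the diff above.
type ExternalDNSConfig struct {
	WatchIngress   *bool
	WatchNamespace string
}

// buildArgs mirrors the flag assembly in the new ExternalDNSArgv body:
// ingress is only watched when explicitly enabled, and a namespace
// filter is appended only when one is configured.
func buildArgs(clusterName, dnsZone string, cfg ExternalDNSConfig) []string {
	argv := []string{"--events"}
	if cfg.WatchIngress != nil && *cfg.WatchIngress {
		argv = append(argv, "--source=ingress")
	}
	argv = append(argv,
		"--source=pod",
		"--source=service",
		"--compatibility=kops-dns-controller",
		"--registry=txt",
		"--txt-owner-id=kops-"+clusterName,
		"--zone-id-filter="+dnsZone,
	)
	if cfg.WatchNamespace != "" {
		argv = append(argv, "--namespace="+cfg.WatchNamespace)
	}
	return argv
}

func main() {
	watch := false
	args := buildArgs("minimal.example.com", "Z1AFAKE1ZON3YO", ExternalDNSConfig{
		WatchIngress: &watch,
	})
	// Prints the flags external-dns would receive with ingress watching disabled.
	fmt.Println(strings.Join(args, " "))
}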