Merge pull request #9409 from rifelpet/integration-test-reduce

Fold multiple integration test cases into the complex test case
Kubernetes Prow Robot authored 2020-06-19 20:44:40 -07:00, committed by GitHub
commit 80e1da9dd2
58 changed files with 239 additions and 11747 deletions

@@ -131,11 +131,6 @@ func TestMinimalGCE(t *testing.T) {
newIntegrationTest("minimal-gce.example.com", "minimal_gce").runTestTerraformGCE(t)
}
// TestRestrictAccess runs the test on a simple SG configuration, similar to kops create cluster minimal.example.com --ssh-access=$(IPS) --admin-access=$(IPS) --master-count=3
func TestRestrictAccess(t *testing.T) {
newIntegrationTest("restrictaccess.example.com", "restrict_access").runTestTerraformAWS(t)
}
// TestHA runs the test on a simple HA configuration, similar to kops create cluster minimal.example.com --zones us-west-1a,us-west-1b,us-west-1c --master-count=3
func TestHA(t *testing.T) {
newIntegrationTest("ha.example.com", "ha").withZones(3).runTestTerraformAWS(t)
@@ -149,9 +144,9 @@ func TestHighAvailabilityGCE(t *testing.T) {
// TestComplex runs the test on a more complex configuration, intended to hit more of the edge cases
func TestComplex(t *testing.T) {
newIntegrationTest("complex.example.com", "complex").runTestTerraformAWS(t)
newIntegrationTest("complex.example.com", "complex").runTestCloudformation(t)
newIntegrationTest("complex.example.com", "complex").withVersion("legacy-v1alpha2").runTestTerraformAWS(t)
newIntegrationTest("complex.example.com", "complex").withoutSSHKey().runTestTerraformAWS(t)
newIntegrationTest("complex.example.com", "complex").withoutSSHKey().runTestCloudformation(t)
newIntegrationTest("complex.example.com", "complex").withoutSSHKey().withVersion("legacy-v1alpha2").runTestTerraformAWS(t)
}
// TestExternalPolicies tests external policies output
@@ -159,16 +154,6 @@ func TestExternalPolicies(t *testing.T) {
newIntegrationTest("externalpolicies.example.com", "externalpolicies").runTestTerraformAWS(t)
}
func TestNoSSHKey(t *testing.T) {
newIntegrationTest("nosshkey.example.com", "nosshkey").withoutSSHKey().runTestTerraformAWS(t)
newIntegrationTest("nosshkey.example.com", "nosshkey-cloudformation").withoutSSHKey().runTestCloudformation(t)
}
// TestCrossZone tests that the cross zone setting on the API ELB is set properly
func TestCrossZone(t *testing.T) {
newIntegrationTest("crosszone.example.com", "api_elb_cross_zone").runTestTerraformAWS(t)
}
// TestMinimalCloudformation runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
func TestMinimalCloudformation(t *testing.T) {
newIntegrationTest("minimal.example.com", "minimal-cloudformation").runTestCloudformation(t)
@@ -185,11 +170,6 @@ func TestExistingSG(t *testing.T) {
newIntegrationTest("existingsg.example.com", "existing_sg").withZones(3).runTestTerraformAWS(t)
}
// TestAdditionalUserData runs the test on passing additional user-data to an instance at bootstrap.
func TestAdditionalUserData(t *testing.T) {
newIntegrationTest("additionaluserdata.example.com", "additional_user-data").runTestCloudformation(t)
}
// TestBastionAdditionalUserData runs the test on passing additional user-data to a bastion instance group
func TestBastionAdditionalUserData(t *testing.T) {
newIntegrationTest("bastionuserdata.example.com", "bastionadditional_user-data").withPrivate().withBastionUserData().runTestTerraformAWS(t)
@@ -286,12 +266,6 @@ func TestExistingIAM(t *testing.T) {
newIntegrationTest("existing-iam.example.com", "existing_iam").withZones(3).withoutPolicies().withLifecycleOverrides(lifecycleOverrides).runTestTerraformAWS(t)
}
// TestAdditionalCIDR runs the test on a configuration with a shared VPC
func TestAdditionalCIDR(t *testing.T) {
newIntegrationTest("additionalcidr.example.com", "additional_cidr").withVersion("v1alpha3").withZones(3).runTestTerraformAWS(t)
newIntegrationTest("additionalcidr.example.com", "additional_cidr").runTestCloudformation(t)
}
// TestPhaseNetwork tests the output of tf for the network phase
func TestPhaseNetwork(t *testing.T) {
newIntegrationTest("lifecyclephases.example.com", "lifecycle_phases").runTestPhase(t, cloudup.PhaseNetwork)

@@ -1,537 +0,0 @@
Resources.AWSEC2LaunchTemplatemasterustest1bmastersadditionalcidrexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: additionalcidr.example.com
configureCloudRoutes: true
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: additionalcidr.example.com
ConfigBase: memfs://clusters.example.com/additionalcidr.example.com
InstanceGroupName: master-us-test-1b
InstanceGroupRole: Master
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/additionalcidr.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/additionalcidr.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/additionalcidr.example.com/manifests/etcd/events.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
Resources.AWSEC2LaunchTemplatenodesadditionalcidrexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: additionalcidr.example.com
ConfigBase: memfs://clusters.example.com/additionalcidr.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/additionalcidr.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

@@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

@@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

@@ -1,102 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
"ec2:DescribeLaunchTemplateVersions"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"elasticloadbalancing:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"iam:ListServerCertificates",
"iam:GetServerCertificate"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Effect": "Allow",
"Action": [
"route53:GetChange"
],
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": [
"*"
]
}
]
}

@@ -1,68 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"ec2:DescribeRegions"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Effect": "Allow",
"Action": [
"route53:GetChange"
],
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": [
"*"
]
}
]
}
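These IAM policy documents, like the user-data files before them, existed only as golden fixtures: the integration test renders a cluster and compares each generated file byte-for-byte against the checked-in copy, so deleting a scenario means deleting its whole fixture tree. A simplified sketch of that comparison step (kops' real helpers differ; this is illustrative):

package integration

import (
	"os"
	"path/filepath"
	"testing"
)

// assertMatchesGolden compares generated output against a checked-in
// fixture and reports both contents on mismatch — the role the deleted
// IAM policy and user-data files played for their test cases.
func assertMatchesGolden(t *testing.T, goldenDir, name string, actual []byte) {
	t.Helper()
	want, err := os.ReadFile(filepath.Join(goldenDir, name))
	if err != nil {
		t.Fatalf("reading golden file %s: %v", name, err)
	}
	if string(want) != string(actual) {
		t.Errorf("output for %s differs from golden file:\n--- want ---\n%s\n--- got ---\n%s",
			name, want, actual)
	}
}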

@@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

@@ -1,314 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 3
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: additionalcidr.example.com
configureCloudRoutes: true
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: additionalcidr.example.com
ConfigBase: memfs://clusters.example.com/additionalcidr.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/additionalcidr.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/additionalcidr.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/additionalcidr.example.com/manifests/etcd/events.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

@@ -1,314 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 3
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: additionalcidr.example.com
configureCloudRoutes: true
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: additionalcidr.example.com
ConfigBase: memfs://clusters.example.com/additionalcidr.example.com
InstanceGroupName: master-us-test-1b
InstanceGroupRole: Master
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/additionalcidr.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/additionalcidr.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/additionalcidr.example.com/manifests/etcd/events.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

@@ -1,314 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 3
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: additionalcidr.example.com
configureCloudRoutes: true
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: additionalcidr.example.com
ConfigBase: memfs://clusters.example.com/additionalcidr.example.com
InstanceGroupName: master-us-test-1c
InstanceGroupRole: Master
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/additionalcidr.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/additionalcidr.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/additionalcidr.example.com/manifests/etcd/events.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

@@ -1,221 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: additionalcidr.example.com
ConfigBase: memfs://clusters.example.com/additionalcidr.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/additionalcidr.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

@@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -1,85 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: additionalcidr.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/additionalcidr.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1b
name: us-test-1b
name: main
- etcdMembers:
- instanceGroup: master-us-test-1b
name: us-test-1b
name: events
kubelet:
anonymousAuth: false
kubernetesVersion: v1.14.0
masterInternalName: api.internal.additionalcidr.example.com
masterPublicName: api.additionalcidr.example.com
networkCIDR: 10.0.0.0/16
additionalNetworkCIDRs:
- 10.1.0.0/16
networking:
kubenet: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: public
nodes: public
subnets:
- cidr: 10.0.1.0/24
name: us-test-1a
type: Public
zone: us-test-1a
- cidr: 10.1.1.0/24
name: us-test-1b
type: Public
zone: us-test-1b
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: additionalcidr.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1b
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1b
labels:
kops.k8s.io/cluster: additionalcidr.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1b

View File

@ -1,133 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: additionalcidr.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/additionalcidr.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
- instanceGroup: master-us-test-1b
name: us-test-1b
- instanceGroup: master-us-test-1c
name: us-test-1c
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
- instanceGroup: master-us-test-1b
name: us-test-1b
- instanceGroup: master-us-test-1c
name: us-test-1c
name: events
kubelet:
anonymousAuth: false
kubernetesVersion: v1.14.0
masterInternalName: api.internal.additionalcidr.example.com
masterPublicName: api.additionalcidr.example.com
networkCIDR: 10.0.0.0/16
additionalNetworkCIDRs:
- 10.1.0.0/16
networking:
kubenet: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: public
nodes: public
subnets:
- cidr: 10.0.1.0/24
name: us-test-1a
type: Public
zone: us-test-1a
- cidr: 10.1.1.0/24
name: us-test-1b
type: Public
zone: us-test-1b
- cidr: 10.1.2.0/24
name: us-test-1c
type: Public
zone: us-test-1c
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: additionalcidr.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1b
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: additionalcidr.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1b
labels:
kops.k8s.io/cluster: additionalcidr.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1b
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1c
labels:
kops.k8s.io/cluster: additionalcidr.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1c
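
A spec like the one above is roughly what kops create cluster emits for a three-master kubenet cluster; additionalNetworkCIDRs has no dedicated flag, so the second CIDR would be added by hand afterwards. A sketch, not the exact commands the fixture was generated with:

kops create cluster additionalcidr.example.com \
  --zones us-test-1a,us-test-1b,us-test-1c \
  --master-zones us-test-1a,us-test-1b,us-test-1c \
  --networking kubenet
# additionalNetworkCIDRs is spec-only; add "additionalNetworkCIDRs: [10.1.0.0/16]" under spec
kops edit cluster additionalcidr.example.com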

View File

@ -1,808 +0,0 @@
locals {
cluster_name = "additionalcidr.example.com"
master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-additionalcidr-example-com.id, aws_autoscaling_group.master-us-test-1b-masters-additionalcidr-example-com.id, aws_autoscaling_group.master-us-test-1c-masters-additionalcidr-example-com.id]
master_security_group_ids = [aws_security_group.masters-additionalcidr-example-com.id, aws_security_group.masters-additionalcidr-example-com.id, aws_security_group.masters-additionalcidr-example-com.id]
masters_role_arn = aws_iam_role.masters-additionalcidr-example-com.arn
masters_role_name = aws_iam_role.masters-additionalcidr-example-com.name
node_autoscaling_group_ids = [aws_autoscaling_group.nodes-additionalcidr-example-com.id]
node_security_group_ids = [aws_security_group.nodes-additionalcidr-example-com.id]
node_subnet_ids = [aws_subnet.us-test-1b-additionalcidr-example-com.id]
nodes_role_arn = aws_iam_role.nodes-additionalcidr-example-com.arn
nodes_role_name = aws_iam_role.nodes-additionalcidr-example-com.name
region = "us-test-1"
route_table_public_id = aws_route_table.additionalcidr-example-com.id
subnet_us-test-1a_id = aws_subnet.us-test-1a-additionalcidr-example-com.id
subnet_us-test-1b_id = aws_subnet.us-test-1b-additionalcidr-example-com.id
subnet_us-test-1c_id = aws_subnet.us-test-1c-additionalcidr-example-com.id
vpc_cidr_block = aws_vpc.additionalcidr-example-com.cidr_block
vpc_id = aws_vpc.additionalcidr-example-com.id
}
output "cluster_name" {
value = "additionalcidr.example.com"
}
output "master_autoscaling_group_ids" {
value = [aws_autoscaling_group.master-us-test-1a-masters-additionalcidr-example-com.id, aws_autoscaling_group.master-us-test-1b-masters-additionalcidr-example-com.id, aws_autoscaling_group.master-us-test-1c-masters-additionalcidr-example-com.id]
}
output "master_security_group_ids" {
value = [aws_security_group.masters-additionalcidr-example-com.id, aws_security_group.masters-additionalcidr-example-com.id, aws_security_group.masters-additionalcidr-example-com.id]
}
output "masters_role_arn" {
value = aws_iam_role.masters-additionalcidr-example-com.arn
}
output "masters_role_name" {
value = aws_iam_role.masters-additionalcidr-example-com.name
}
output "node_autoscaling_group_ids" {
value = [aws_autoscaling_group.nodes-additionalcidr-example-com.id]
}
output "node_security_group_ids" {
value = [aws_security_group.nodes-additionalcidr-example-com.id]
}
output "node_subnet_ids" {
value = [aws_subnet.us-test-1b-additionalcidr-example-com.id]
}
output "nodes_role_arn" {
value = aws_iam_role.nodes-additionalcidr-example-com.arn
}
output "nodes_role_name" {
value = aws_iam_role.nodes-additionalcidr-example-com.name
}
output "region" {
value = "us-test-1"
}
output "route_table_public_id" {
value = aws_route_table.additionalcidr-example-com.id
}
output "subnet_us-test-1a_id" {
value = aws_subnet.us-test-1a-additionalcidr-example-com.id
}
output "subnet_us-test-1b_id" {
value = aws_subnet.us-test-1b-additionalcidr-example-com.id
}
output "subnet_us-test-1c_id" {
value = aws_subnet.us-test-1c-additionalcidr-example-com.id
}
output "vpc_cidr_block" {
value = aws_vpc.additionalcidr-example-com.cidr_block
}
output "vpc_id" {
value = aws_vpc.additionalcidr-example-com.id
}
provider "aws" {
region = "us-test-1"
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-additionalcidr-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.master-us-test-1a-masters-additionalcidr-example-com.id
version = aws_launch_template.master-us-test-1a-masters-additionalcidr-example-com.latest_version
}
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1a.masters.additionalcidr.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "additionalcidr.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1a.masters.additionalcidr.example.com"
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "kubernetes.io/cluster/additionalcidr.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-additionalcidr-example-com.id]
}
resource "aws_autoscaling_group" "master-us-test-1b-masters-additionalcidr-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.master-us-test-1b-masters-additionalcidr-example-com.id
version = aws_launch_template.master-us-test-1b-masters-additionalcidr-example-com.latest_version
}
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1b.masters.additionalcidr.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "additionalcidr.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1b.masters.additionalcidr.example.com"
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1b"
}
tag {
key = "kubernetes.io/cluster/additionalcidr.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1b-additionalcidr-example-com.id]
}
resource "aws_autoscaling_group" "master-us-test-1c-masters-additionalcidr-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.master-us-test-1c-masters-additionalcidr-example-com.id
version = aws_launch_template.master-us-test-1c-masters-additionalcidr-example-com.latest_version
}
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1c.masters.additionalcidr.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "additionalcidr.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1c.masters.additionalcidr.example.com"
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1c"
}
tag {
key = "kubernetes.io/cluster/additionalcidr.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1c-additionalcidr-example-com.id]
}
resource "aws_autoscaling_group" "nodes-additionalcidr-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.nodes-additionalcidr-example-com.id
version = aws_launch_template.nodes-additionalcidr-example-com.latest_version
}
max_size = 2
metrics_granularity = "1Minute"
min_size = 2
name = "nodes.additionalcidr.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "additionalcidr.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "nodes.additionalcidr.example.com"
}
tag {
key = "k8s.io/role/node"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes"
}
tag {
key = "kubernetes.io/cluster/additionalcidr.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1b-additionalcidr-example-com.id]
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-additionalcidr-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "us-test-1a.etcd-events.additionalcidr.example.com"
"k8s.io/etcd/events" = "us-test-1a/us-test-1a,us-test-1b,us-test-1c"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
type = "gp2"
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-additionalcidr-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "us-test-1a.etcd-main.additionalcidr.example.com"
"k8s.io/etcd/main" = "us-test-1a/us-test-1a,us-test-1b,us-test-1c"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
type = "gp2"
}
resource "aws_ebs_volume" "us-test-1b-etcd-events-additionalcidr-example-com" {
availability_zone = "us-test-1b"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "us-test-1b.etcd-events.additionalcidr.example.com"
"k8s.io/etcd/events" = "us-test-1b/us-test-1a,us-test-1b,us-test-1c"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
type = "gp2"
}
resource "aws_ebs_volume" "us-test-1b-etcd-main-additionalcidr-example-com" {
availability_zone = "us-test-1b"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "us-test-1b.etcd-main.additionalcidr.example.com"
"k8s.io/etcd/main" = "us-test-1b/us-test-1a,us-test-1b,us-test-1c"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
type = "gp2"
}
resource "aws_ebs_volume" "us-test-1c-etcd-events-additionalcidr-example-com" {
availability_zone = "us-test-1c"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "us-test-1c.etcd-events.additionalcidr.example.com"
"k8s.io/etcd/events" = "us-test-1c/us-test-1a,us-test-1b,us-test-1c"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
type = "gp2"
}
resource "aws_ebs_volume" "us-test-1c-etcd-main-additionalcidr-example-com" {
availability_zone = "us-test-1c"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "us-test-1c.etcd-main.additionalcidr.example.com"
"k8s.io/etcd/main" = "us-test-1c/us-test-1a,us-test-1b,us-test-1c"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
type = "gp2"
}
resource "aws_iam_instance_profile" "masters-additionalcidr-example-com" {
name = "masters.additionalcidr.example.com"
role = aws_iam_role.masters-additionalcidr-example-com.name
}
resource "aws_iam_instance_profile" "nodes-additionalcidr-example-com" {
name = "nodes.additionalcidr.example.com"
role = aws_iam_role.nodes-additionalcidr-example-com.name
}
resource "aws_iam_role_policy" "masters-additionalcidr-example-com" {
name = "masters.additionalcidr.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_masters.additionalcidr.example.com_policy")
role = aws_iam_role.masters-additionalcidr-example-com.name
}
resource "aws_iam_role_policy" "nodes-additionalcidr-example-com" {
name = "nodes.additionalcidr.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_nodes.additionalcidr.example.com_policy")
role = aws_iam_role.nodes-additionalcidr-example-com.name
}
resource "aws_iam_role" "masters-additionalcidr-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_masters.additionalcidr.example.com_policy")
name = "masters.additionalcidr.example.com"
}
resource "aws_iam_role" "nodes-additionalcidr-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.additionalcidr.example.com_policy")
name = "nodes.additionalcidr.example.com"
}
resource "aws_internet_gateway" "additionalcidr-example-com" {
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "additionalcidr.example.com"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
vpc_id = aws_vpc.additionalcidr-example-com.id
}
resource "aws_key_pair" "kubernetes-additionalcidr-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.additionalcidr.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = file("${path.module}/data/aws_key_pair_kubernetes.additionalcidr.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")
}
resource "aws_launch_template" "master-us-test-1a-masters-additionalcidr-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 64
volume_type = "gp2"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-additionalcidr-example-com.id
}
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-additionalcidr-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "master-us-test-1a.masters.additionalcidr.example.com-"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.masters-additionalcidr-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "master-us-test-1a.masters.additionalcidr.example.com"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "master-us-test-1a.masters.additionalcidr.example.com"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
}
user_data = file("${path.module}/data/aws_launch_template_master-us-test-1a.masters.additionalcidr.example.com_user_data")
}
resource "aws_launch_template" "master-us-test-1b-masters-additionalcidr-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 64
volume_type = "gp2"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-additionalcidr-example-com.id
}
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-additionalcidr-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "master-us-test-1b.masters.additionalcidr.example.com-"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.masters-additionalcidr-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "master-us-test-1b.masters.additionalcidr.example.com"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1b"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "master-us-test-1b.masters.additionalcidr.example.com"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1b"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
}
user_data = file("${path.module}/data/aws_launch_template_master-us-test-1b.masters.additionalcidr.example.com_user_data")
}
resource "aws_launch_template" "master-us-test-1c-masters-additionalcidr-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 64
volume_type = "gp2"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-additionalcidr-example-com.id
}
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-additionalcidr-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "master-us-test-1c.masters.additionalcidr.example.com-"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.masters-additionalcidr-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "master-us-test-1c.masters.additionalcidr.example.com"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1c"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "master-us-test-1c.masters.additionalcidr.example.com"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1c"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
}
user_data = file("${path.module}/data/aws_launch_template_master-us-test-1c.masters.additionalcidr.example.com_user_data")
}
resource "aws_launch_template" "nodes-additionalcidr-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 128
volume_type = "gp2"
}
}
iam_instance_profile {
name = aws_iam_instance_profile.nodes-additionalcidr-example-com.id
}
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = aws_key_pair.kubernetes-additionalcidr-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "nodes.additionalcidr.example.com-"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.nodes-additionalcidr-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "nodes.additionalcidr.example.com"
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "nodes.additionalcidr.example.com"
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
}
user_data = file("${path.module}/data/aws_launch_template_nodes.additionalcidr.example.com_user_data")
}
resource "aws_route_table_association" "us-test-1a-additionalcidr-example-com" {
route_table_id = aws_route_table.additionalcidr-example-com.id
subnet_id = aws_subnet.us-test-1a-additionalcidr-example-com.id
}
resource "aws_route_table_association" "us-test-1b-additionalcidr-example-com" {
route_table_id = aws_route_table.additionalcidr-example-com.id
subnet_id = aws_subnet.us-test-1b-additionalcidr-example-com.id
}
resource "aws_route_table_association" "us-test-1c-additionalcidr-example-com" {
route_table_id = aws_route_table.additionalcidr-example-com.id
subnet_id = aws_subnet.us-test-1c-additionalcidr-example-com.id
}
resource "aws_route_table" "additionalcidr-example-com" {
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "additionalcidr.example.com"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
"kubernetes.io/kops/role" = "public"
}
vpc_id = aws_vpc.additionalcidr-example-com.id
}
resource "aws_route" "route-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.additionalcidr-example-com.id
route_table_id = aws_route_table.additionalcidr-example-com.id
}
resource "aws_security_group_rule" "all-master-to-master" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-additionalcidr-example-com.id
source_security_group_id = aws_security_group.masters-additionalcidr-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "all-master-to-node" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-additionalcidr-example-com.id
source_security_group_id = aws_security_group.masters-additionalcidr-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "all-node-to-node" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-additionalcidr-example-com.id
source_security_group_id = aws_security_group.nodes-additionalcidr-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "https-external-to-master-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-additionalcidr-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "master-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-additionalcidr-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-additionalcidr-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
from_port = 1
protocol = "tcp"
security_group_id = aws_security_group.masters-additionalcidr-example-com.id
source_security_group_id = aws_security_group.nodes-additionalcidr-example-com.id
to_port = 2379
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
from_port = 2382
protocol = "tcp"
security_group_id = aws_security_group.masters-additionalcidr-example-com.id
source_security_group_id = aws_security_group.nodes-additionalcidr-example-com.id
to_port = 4000
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-tcp-4003-65535" {
from_port = 4003
protocol = "tcp"
security_group_id = aws_security_group.masters-additionalcidr-example-com.id
source_security_group_id = aws_security_group.nodes-additionalcidr-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-udp-1-65535" {
from_port = 1
protocol = "udp"
security_group_id = aws_security_group.masters-additionalcidr-example-com.id
source_security_group_id = aws_security_group.nodes-additionalcidr-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-master-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-additionalcidr-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-node-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-additionalcidr-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group" "masters-additionalcidr-example-com" {
description = "Security group for masters"
name = "masters.additionalcidr.example.com"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "masters.additionalcidr.example.com"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
vpc_id = aws_vpc.additionalcidr-example-com.id
}
resource "aws_security_group" "nodes-additionalcidr-example-com" {
description = "Security group for nodes"
name = "nodes.additionalcidr.example.com"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "nodes.additionalcidr.example.com"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
vpc_id = aws_vpc.additionalcidr-example-com.id
}
resource "aws_subnet" "us-test-1a-additionalcidr-example-com" {
availability_zone = "us-test-1a"
cidr_block = "10.0.1.0/24"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "us-test-1a.additionalcidr.example.com"
"SubnetType" = "Public"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
}
vpc_id = aws_vpc.additionalcidr-example-com.id
}
resource "aws_subnet" "us-test-1b-additionalcidr-example-com" {
availability_zone = "us-test-1b"
cidr_block = "10.1.1.0/24"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "us-test-1b.additionalcidr.example.com"
"SubnetType" = "Public"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
}
vpc_id = aws_vpc.additionalcidr-example-com.id
}
resource "aws_subnet" "us-test-1c-additionalcidr-example-com" {
availability_zone = "us-test-1c"
cidr_block = "10.1.2.0/24"
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "us-test-1c.additionalcidr.example.com"
"SubnetType" = "Public"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
}
vpc_id = aws_vpc.additionalcidr-example-com.id
}
resource "aws_vpc_dhcp_options_association" "additionalcidr-example-com" {
dhcp_options_id = aws_vpc_dhcp_options.additionalcidr-example-com.id
vpc_id = aws_vpc.additionalcidr-example-com.id
}
resource "aws_vpc_dhcp_options" "additionalcidr-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "additionalcidr.example.com"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
}
resource "aws_vpc_ipv4_cidr_block_association" "cidr-10-1-0-0--16" {
cidr_block = "10.1.0.0/16"
vpc_id = aws_vpc.additionalcidr-example-com.id
}
resource "aws_vpc" "additionalcidr-example-com" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"KubernetesCluster" = "additionalcidr.example.com"
"Name" = "additionalcidr.example.com"
"kubernetes.io/cluster/additionalcidr.example.com" = "owned"
}
}
terraform {
required_version = ">= 0.12.0"
}
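
Once a plan like this is applied, the aws_vpc_ipv4_cidr_block_association should surface as a second CIDR on the VPC. One way to spot-check, assuming Terraform 0.15+ (for output -raw) and the AWS CLI:

terraform output vpc_cidr_block   # primary CIDR: 10.0.0.0/16
aws ec2 describe-vpcs --vpc-ids "$(terraform output -raw vpc_id)" \
  --query 'Vpcs[0].CidrBlockAssociationSet[].CidrBlock'
# expect both 10.0.0.0/16 and 10.1.0.0/16 in the association set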

View File

@ -1,577 +0,0 @@
Resources.AWSEC2LaunchTemplatemasterustest1amastersadditionaluserdataexamplecom.Properties.LaunchTemplateData.UserData: |
Content-Type: multipart/mixed; boundary="MIMEBOUNDARY"
MIME-Version: 1.0
--MIMEBOUNDARY
Content-Disposition: attachment; filename="nodeup.sh"
Content-Transfer-Encoding: 7bit
Content-Type: text/x-shellscript
Mime-Version: 1.0
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# If the downloaded release fails its integrity check, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: additionaluserdata.example.com
configureCloudRoutes: true
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: additionaluserdata.example.com
ConfigBase: memfs://clusters.example.com/additionaluserdata.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/additionaluserdata.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/additionaluserdata.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/additionaluserdata.example.com/manifests/etcd/events.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
--MIMEBOUNDARY
Content-Disposition: attachment; filename="myscript.sh"
Content-Transfer-Encoding: 7bit
Content-Type: text/x-shellscript
Mime-Version: 1.0
#!/bin/sh
echo "master: The time is now $(date -R)!" | tee /root/output.txt
--MIMEBOUNDARY--
Resources.AWSEC2LaunchTemplatenodesadditionaluserdataexamplecom.Properties.LaunchTemplateData.UserData: |
Content-Type: multipart/mixed; boundary="MIMEBOUNDARY"
MIME-Version: 1.0
--MIMEBOUNDARY
Content-Disposition: attachment; filename="nodeup.sh"
Content-Transfer-Encoding: 7bit
Content-Type: text/x-shellscript
Mime-Version: 1.0
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# If the downloaded release fails its integrity check, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: additionaluserdata.example.com
ConfigBase: memfs://clusters.example.com/additionaluserdata.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/additionaluserdata.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
--MIMEBOUNDARY
Content-Disposition: attachment; filename="myscript.sh"
Content-Transfer-Encoding: 7bit
Content-Type: text/x-shellscript
Mime-Version: 1.0
#!/bin/sh
echo "nodes: The time is now $(date -R)!" | tee /root/output.txt
--MIMEBOUNDARY--
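
To see the multipart payload a launched instance actually received, the user data can be read back through the EC2 API and scanned for the part boundaries; the instance id below is a placeholder:

aws ec2 describe-instance-attribute --instance-id i-0123456789abcdef0 \
  --attribute userData --query 'UserData.Value' --output text \
  | base64 -d | grep -n -- '--MIMEBOUNDARY'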

View File

@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -1,99 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: additionaluserdata.example.com
spec:
additionalPolicies:
master: |
[
{
"Action": [ "s3:GetObject" ],
"Resource": [ "arn:aws:s3:::somebucket/someobject" ],
"Effect": "Allow"
}
]
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/additionaluserdata.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
kubelet:
anonymousAuth: false
kubernetesVersion: v1.14.0
masterInternalName: api.internal.additionaluserdata.example.com
masterPublicName: api.additionaluserdata.example.com
networkCIDR: 172.20.0.0/16
networking:
kubenet: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: additionaluserdata.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
additionalUserData:
- name: myscript.sh
type: text/x-shellscript
content: |
#!/bin/sh
echo "nodes: The time is now $(date -R)!" | tee /root/output.txt
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: additionaluserdata.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
additionalUserData:
- name: myscript.sh
type: text/x-shellscript
content: |
#!/bin/sh
echo "master: The time is now $(date -R)!" | tee /root/output.txt

View File

@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}
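
Both documents above are the stock EC2 trust policy that lets instances assume the role. Creating a role from one by hand looks like this, assuming the JSON is saved locally as ec2-assume-role.json:

aws iam create-role --role-name nodes.additionaluserdata.example.com \
  --assume-role-policy-document file://ec2-assume-role.json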

View File

@ -1,102 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
"ec2:DescribeLaunchTemplateVersions"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"elasticloadbalancing:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"iam:ListServerCertificates",
"iam:GetServerCertificate"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Effect": "Allow",
"Action": [
"route53:GetChange"
],
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": [
"*"
]
}
]
}
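
A policy document like the one above can be dry-run against specific actions with the IAM policy simulator before it is attached anywhere; a quick spot check, with the JSON saved locally as masters-policy.json:

aws iam simulate-custom-policy --policy-input-list file://masters-policy.json \
  --action-names ec2:DescribeInstances route53:GetChange \
  --query 'EvaluationResults[].[EvalActionName,EvalDecision]' --output table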

View File

@ -1,68 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"ec2:DescribeRegions"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Effect": "Allow",
"Action": [
"route53:GetChange"
],
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": [
"*"
]
}
]
}

View File

@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -1,315 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# If the downloaded release fails its integrity check, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
serviceNodePortRange: 28000-32767
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: crosszone.example.com
configureCloudRoutes: true
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: crosszone.example.com
ConfigBase: memfs://clusters.example.com/crosszone.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/crosszone.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/crosszone.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/crosszone.example.com/manifests/etcd/events.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="


@ -1,221 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: crosszone.example.com
ConfigBase: memfs://clusters.example.com/crosszone.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/crosszone.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="


@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==


@ -1,98 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: crosszone.example.com
spec:
api:
loadBalancer:
type: Public
additionalSecurityGroups:
- sg-exampleid3
- sg-exampleid4
crossZoneLoadBalancing: true
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
cloudLabels:
Owner: John Doe
foo/bar: fib+baz
configBase: memfs://clusters.example.com/crosszone.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
kubeAPIServer:
serviceNodePortRange: 28000-32767
kubelet:
anonymousAuth: false
kubernetesVersion: v1.14.0
masterInternalName: api.internal.crosszone.example.com
masterPublicName: api.crosszone.example.com
networkCIDR: 172.20.0.0/16
networking:
kubenet: {}
nodePortAccess:
- 1.2.3.4/32
- 10.20.30.0/24
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: crosszone.example.com
spec:
additionalSecurityGroups:
- sg-exampleid3
- sg-exampleid4
associatePublicIp: true
suspendProcesses:
- AZRebalance
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
detailedInstanceMonitoring: true
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: crosszone.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
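Specs like the cluster and two instance groups above are exactly what kops consumes outside the test harness; the equivalent manual flow is roughly (a sketch, assuming KOPS_STATE_STORE points at a writable state store and the spec is saved as crosszone.yaml):

kops create -f crosszone.yaml
kops create secret sshpublickey admin -i ~/.ssh/id_rsa.pub --name crosszone.example.com
kops update cluster crosszone.example.com --target=terraform --out=.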


@ -1,691 +0,0 @@
locals {
cluster_name = "crosszone.example.com"
master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-crosszone-example-com.id]
master_security_group_ids = [aws_security_group.masters-crosszone-example-com.id]
masters_role_arn = aws_iam_role.masters-crosszone-example-com.arn
masters_role_name = aws_iam_role.masters-crosszone-example-com.name
node_autoscaling_group_ids = [aws_autoscaling_group.nodes-crosszone-example-com.id]
node_security_group_ids = [aws_security_group.nodes-crosszone-example-com.id, "sg-exampleid3", "sg-exampleid4"]
node_subnet_ids = [aws_subnet.us-test-1a-crosszone-example-com.id]
nodes_role_arn = aws_iam_role.nodes-crosszone-example-com.arn
nodes_role_name = aws_iam_role.nodes-crosszone-example-com.name
region = "us-test-1"
route_table_public_id = aws_route_table.crosszone-example-com.id
subnet_us-test-1a_id = aws_subnet.us-test-1a-crosszone-example-com.id
vpc_cidr_block = aws_vpc.crosszone-example-com.cidr_block
vpc_id = aws_vpc.crosszone-example-com.id
}
output "cluster_name" {
value = "crosszone.example.com"
}
output "master_autoscaling_group_ids" {
value = [aws_autoscaling_group.master-us-test-1a-masters-crosszone-example-com.id]
}
output "master_security_group_ids" {
value = [aws_security_group.masters-crosszone-example-com.id]
}
output "masters_role_arn" {
value = aws_iam_role.masters-crosszone-example-com.arn
}
output "masters_role_name" {
value = aws_iam_role.masters-crosszone-example-com.name
}
output "node_autoscaling_group_ids" {
value = [aws_autoscaling_group.nodes-crosszone-example-com.id]
}
output "node_security_group_ids" {
value = [aws_security_group.nodes-crosszone-example-com.id, "sg-exampleid3", "sg-exampleid4"]
}
output "node_subnet_ids" {
value = [aws_subnet.us-test-1a-crosszone-example-com.id]
}
output "nodes_role_arn" {
value = aws_iam_role.nodes-crosszone-example-com.arn
}
output "nodes_role_name" {
value = aws_iam_role.nodes-crosszone-example-com.name
}
output "region" {
value = "us-test-1"
}
output "route_table_public_id" {
value = aws_route_table.crosszone-example-com.id
}
output "subnet_us-test-1a_id" {
value = aws_subnet.us-test-1a-crosszone-example-com.id
}
output "vpc_cidr_block" {
value = aws_vpc.crosszone-example-com.cidr_block
}
output "vpc_id" {
value = aws_vpc.crosszone-example-com.id
}
provider "aws" {
region = "us-test-1"
}
resource "aws_autoscaling_attachment" "master-us-test-1a-masters-crosszone-example-com" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-crosszone-example-com.id
elb = aws_elb.api-crosszone-example-com.id
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-crosszone-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.master-us-test-1a-masters-crosszone-example-com.id
version = aws_launch_template.master-us-test-1a-masters-crosszone-example-com.latest_version
}
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1a.masters.crosszone.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "crosszone.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1a.masters.crosszone.example.com"
}
tag {
key = "Owner"
propagate_at_launch = true
value = "John Doe"
}
tag {
key = "foo/bar"
propagate_at_launch = true
value = "fib+baz"
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "kubernetes.io/cluster/crosszone.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-crosszone-example-com.id]
}
resource "aws_autoscaling_group" "nodes-crosszone-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.nodes-crosszone-example-com.id
version = aws_launch_template.nodes-crosszone-example-com.latest_version
}
max_size = 2
metrics_granularity = "1Minute"
min_size = 2
name = "nodes.crosszone.example.com"
suspended_processes = ["AZRebalance"]
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "crosszone.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "nodes.crosszone.example.com"
}
tag {
key = "Owner"
propagate_at_launch = true
value = "John Doe"
}
tag {
key = "foo/bar"
propagate_at_launch = true
value = "fib+baz"
}
tag {
key = "k8s.io/role/node"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes"
}
tag {
key = "kubernetes.io/cluster/crosszone.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-crosszone-example-com.id]
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-crosszone-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "us-test-1a.etcd-events.crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"k8s.io/etcd/events" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
type = "gp2"
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-crosszone-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "us-test-1a.etcd-main.crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"k8s.io/etcd/main" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
type = "gp2"
}
resource "aws_elb" "api-crosszone-example-com" {
cross_zone_load_balancing = true
health_check {
healthy_threshold = 2
interval = 10
target = "SSL:443"
timeout = 5
unhealthy_threshold = 2
}
idle_timeout = 300
listener {
instance_port = 443
instance_protocol = "TCP"
lb_port = 443
lb_protocol = "TCP"
ssl_certificate_id = ""
}
name = "api-crosszone-example-com-qhvtkl"
security_groups = [aws_security_group.api-elb-crosszone-example-com.id, "sg-exampleid3", "sg-exampleid4"]
subnets = [aws_subnet.us-test-1a-crosszone-example-com.id]
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "api.crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
}
resource "aws_iam_instance_profile" "masters-crosszone-example-com" {
name = "masters.crosszone.example.com"
role = aws_iam_role.masters-crosszone-example-com.name
}
resource "aws_iam_instance_profile" "nodes-crosszone-example-com" {
name = "nodes.crosszone.example.com"
role = aws_iam_role.nodes-crosszone-example-com.name
}
resource "aws_iam_role_policy" "masters-crosszone-example-com" {
name = "masters.crosszone.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_masters.crosszone.example.com_policy")
role = aws_iam_role.masters-crosszone-example-com.name
}
resource "aws_iam_role_policy" "nodes-crosszone-example-com" {
name = "nodes.crosszone.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_nodes.crosszone.example.com_policy")
role = aws_iam_role.nodes-crosszone-example-com.name
}
resource "aws_iam_role" "masters-crosszone-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_masters.crosszone.example.com_policy")
name = "masters.crosszone.example.com"
}
resource "aws_iam_role" "nodes-crosszone-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.crosszone.example.com_policy")
name = "nodes.crosszone.example.com"
}
resource "aws_internet_gateway" "crosszone-example-com" {
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
vpc_id = aws_vpc.crosszone-example-com.id
}
resource "aws_key_pair" "kubernetes-crosszone-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.crosszone.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = file("${path.module}/data/aws_key_pair_kubernetes.crosszone.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")
}
resource "aws_launch_template" "master-us-test-1a-masters-crosszone-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 64
volume_type = "gp2"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-crosszone-example-com.id
}
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-crosszone-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "master-us-test-1a.masters.crosszone.example.com-"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.masters-crosszone-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "master-us-test-1a.masters.crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "master-us-test-1a.masters.crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
}
user_data = file("${path.module}/data/aws_launch_template_master-us-test-1a.masters.crosszone.example.com_user_data")
}
resource "aws_launch_template" "nodes-crosszone-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 128
volume_type = "gp2"
}
}
iam_instance_profile {
name = aws_iam_instance_profile.nodes-crosszone-example-com.id
}
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = aws_key_pair.kubernetes-crosszone-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "nodes.crosszone.example.com-"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.nodes-crosszone-example-com.id, "sg-exampleid3", "sg-exampleid4"]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "nodes.crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "nodes.crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
}
user_data = file("${path.module}/data/aws_launch_template_nodes.crosszone.example.com_user_data")
}
resource "aws_route53_record" "api-crosszone-example-com" {
alias {
evaluate_target_health = false
name = aws_elb.api-crosszone-example-com.dns_name
zone_id = aws_elb.api-crosszone-example-com.zone_id
}
name = "api.crosszone.example.com"
type = "A"
zone_id = "/hostedzone/Z1AFAKE1ZON3YO"
}
resource "aws_route_table_association" "us-test-1a-crosszone-example-com" {
route_table_id = aws_route_table.crosszone-example-com.id
subnet_id = aws_subnet.us-test-1a-crosszone-example-com.id
}
resource "aws_route_table" "crosszone-example-com" {
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
"kubernetes.io/kops/role" = "public"
}
vpc_id = aws_vpc.crosszone-example-com.id
}
resource "aws_route" "route-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.crosszone-example-com.id
route_table_id = aws_route_table.crosszone-example-com.id
}
resource "aws_security_group_rule" "all-master-to-master" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-crosszone-example-com.id
source_security_group_id = aws_security_group.masters-crosszone-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "all-master-to-node" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-crosszone-example-com.id
source_security_group_id = aws_security_group.masters-crosszone-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "all-node-to-node" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-crosszone-example-com.id
source_security_group_id = aws_security_group.nodes-crosszone-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "api-elb-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.api-elb-crosszone-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "https-api-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.api-elb-crosszone-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "https-elb-to-master" {
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-crosszone-example-com.id
source_security_group_id = aws_security_group.api-elb-crosszone-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "icmp-pmtu-api-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 3
protocol = "icmp"
security_group_id = aws_security_group.api-elb-crosszone-example-com.id
to_port = 4
type = "ingress"
}
resource "aws_security_group_rule" "master-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-crosszone-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-crosszone-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
from_port = 1
protocol = "tcp"
security_group_id = aws_security_group.masters-crosszone-example-com.id
source_security_group_id = aws_security_group.nodes-crosszone-example-com.id
to_port = 2379
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
from_port = 2382
protocol = "tcp"
security_group_id = aws_security_group.masters-crosszone-example-com.id
source_security_group_id = aws_security_group.nodes-crosszone-example-com.id
to_port = 4000
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-tcp-4003-65535" {
from_port = 4003
protocol = "tcp"
security_group_id = aws_security_group.masters-crosszone-example-com.id
source_security_group_id = aws_security_group.nodes-crosszone-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-udp-1-65535" {
from_port = 1
protocol = "udp"
security_group_id = aws_security_group.masters-crosszone-example-com.id
source_security_group_id = aws_security_group.nodes-crosszone-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "nodeport-tcp-external-to-node-1-2-3-4--32" {
cidr_blocks = ["1.2.3.4/32"]
from_port = 28000
protocol = "tcp"
security_group_id = aws_security_group.nodes-crosszone-example-com.id
to_port = 32767
type = "ingress"
}
resource "aws_security_group_rule" "nodeport-tcp-external-to-node-10-20-30-0--24" {
cidr_blocks = ["10.20.30.0/24"]
from_port = 28000
protocol = "tcp"
security_group_id = aws_security_group.nodes-crosszone-example-com.id
to_port = 32767
type = "ingress"
}
resource "aws_security_group_rule" "nodeport-udp-external-to-node-1-2-3-4--32" {
cidr_blocks = ["1.2.3.4/32"]
from_port = 28000
protocol = "udp"
security_group_id = aws_security_group.nodes-crosszone-example-com.id
to_port = 32767
type = "ingress"
}
resource "aws_security_group_rule" "nodeport-udp-external-to-node-10-20-30-0--24" {
cidr_blocks = ["10.20.30.0/24"]
from_port = 28000
protocol = "udp"
security_group_id = aws_security_group.nodes-crosszone-example-com.id
to_port = 32767
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-master-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-crosszone-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-node-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-crosszone-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group" "api-elb-crosszone-example-com" {
description = "Security group for api ELB"
name = "api-elb.crosszone.example.com"
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "api-elb.crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
vpc_id = aws_vpc.crosszone-example-com.id
}
resource "aws_security_group" "masters-crosszone-example-com" {
description = "Security group for masters"
name = "masters.crosszone.example.com"
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "masters.crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
vpc_id = aws_vpc.crosszone-example-com.id
}
resource "aws_security_group" "nodes-crosszone-example-com" {
description = "Security group for nodes"
name = "nodes.crosszone.example.com"
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "nodes.crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
vpc_id = aws_vpc.crosszone-example-com.id
}
resource "aws_subnet" "us-test-1a-crosszone-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "us-test-1a.crosszone.example.com"
"Owner" = "John Doe"
"SubnetType" = "Public"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
}
vpc_id = aws_vpc.crosszone-example-com.id
}
resource "aws_vpc_dhcp_options_association" "crosszone-example-com" {
dhcp_options_id = aws_vpc_dhcp_options.crosszone-example-com.id
vpc_id = aws_vpc.crosszone-example-com.id
}
resource "aws_vpc_dhcp_options" "crosszone-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
}
resource "aws_vpc" "crosszone-example-com" {
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"KubernetesCluster" = "crosszone.example.com"
"Name" = "crosszone.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/crosszone.example.com" = "owned"
}
}
terraform {
required_version = ">= 0.12.0"
}
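The module pins Terraform >= 0.12 because it uses first-class expression syntax (bare references rather than "${...}" interpolation) throughout. Applying a generated target like this follows the standard workflow (sketch; assumes AWS credentials for the target account are configured):

terraform init
terraform plan -out tf.plan
terraform apply tf.plan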


@ -241,7 +241,6 @@
},
"ImageId": "ami-12345678",
"InstanceType": "m3.medium",
"KeyName": "kubernetes.complex.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57",
"NetworkInterfaces": [
{
"AssociatePublicIpAddress": true,
@ -356,7 +355,6 @@
},
"ImageId": "ami-12345678",
"InstanceType": "t2.medium",
"KeyName": "kubernetes.complex.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57",
"NetworkInterfaces": [
{
"AssociatePublicIpAddress": true,
@ -567,7 +565,7 @@
"IpProtocol": "-1"
}
},
"AWSEC2SecurityGroupIngresshttpsapielb00000": {
"AWSEC2SecurityGroupIngresshttpsapielb111024": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupId": {
@ -576,7 +574,19 @@
"FromPort": 443,
"ToPort": 443,
"IpProtocol": "tcp",
"CidrIp": "0.0.0.0/0"
"CidrIp": "1.1.1.0/24"
}
},
"AWSEC2SecurityGroupIngresshttpsapielb20010850040": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupId": {
"Ref": "AWSEC2SecurityGroupapielbcomplexexamplecom"
},
"FromPort": 443,
"ToPort": 443,
"IpProtocol": "tcp",
"CidrIp": "2001:0:8500::/40"
}
},
"AWSEC2SecurityGroupIngresshttpselbtomaster": {
@ -593,7 +603,7 @@
"IpProtocol": "tcp"
}
},
"AWSEC2SecurityGroupIngressicmppmtuapielb00000": {
"AWSEC2SecurityGroupIngressicmppmtuapielb111024": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupId": {
@ -602,7 +612,19 @@
"FromPort": 3,
"ToPort": 4,
"IpProtocol": "icmp",
"CidrIp": "0.0.0.0/0"
"CidrIp": "1.1.1.0/24"
}
},
"AWSEC2SecurityGroupIngressicmppmtuapielb20010850040": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupId": {
"Ref": "AWSEC2SecurityGroupapielbcomplexexamplecom"
},
"FromPort": 3,
"ToPort": 4,
"IpProtocol": "icmp",
"CidrIp": "2001:0:8500::/40"
}
},
"AWSEC2SecurityGroupIngressnodeporttcpexternaltonode102030024": {
@ -709,7 +731,7 @@
"IpProtocol": "udp"
}
},
"AWSEC2SecurityGroupIngresssshexternaltomaster00000": {
"AWSEC2SecurityGroupIngresssshexternaltomaster111132": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupId": {
@ -718,10 +740,22 @@
"FromPort": 22,
"ToPort": 22,
"IpProtocol": "tcp",
"CidrIp": "0.0.0.0/0"
"CidrIp": "1.1.1.1/32"
}
},
"AWSEC2SecurityGroupIngresssshexternaltonode00000": {
"AWSEC2SecurityGroupIngresssshexternaltomaster2001085a348": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupId": {
"Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom"
},
"FromPort": 22,
"ToPort": 22,
"IpProtocol": "tcp",
"CidrIp": "2001:0:85a3::/48"
}
},
"AWSEC2SecurityGroupIngresssshexternaltonode111132": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupId": {
@ -730,7 +764,19 @@
"FromPort": 22,
"ToPort": 22,
"IpProtocol": "tcp",
"CidrIp": "0.0.0.0/0"
"CidrIp": "1.1.1.1/32"
}
},
"AWSEC2SecurityGroupIngresssshexternaltonode2001085a348": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupId": {
"Ref": "AWSEC2SecurityGroupnodescomplexexamplecom"
},
"FromPort": 22,
"ToPort": 22,
"IpProtocol": "tcp",
"CidrIp": "2001:0:85a3::/48"
}
},
"AWSEC2SecurityGroupapielbcomplexexamplecom": {
@ -877,6 +923,15 @@
]
}
},
"AWSEC2VPCCidrBlock1010016": {
"Type": "AWS::EC2::VPCCidrBlock",
"Properties": {
"VpcId": {
"Ref": "AWSEC2VPCcomplexexamplecom"
},
"CidrBlock": "10.1.0.0/16"
}
},
"AWSEC2VPCDHCPOptionsAssociationcomplexexamplecom": {
"Type": "AWS::EC2::VPCDHCPOptionsAssociation",
"Properties": {
@ -1041,7 +1096,7 @@
"ConnectionSettings": {
"IdleTimeout": 300
},
"CrossZone": false,
"CrossZone": true,
"Tags": [
{
"Key": "KubernetesCluster",


@ -1,4 +1,13 @@
Resources.AWSEC2LaunchTemplatemasterustest1amasterscomplexexamplecom.Properties.LaunchTemplateData.UserData: |
Content-Type: multipart/mixed; boundary="MIMEBOUNDARY"
MIME-Version: 1.0
--MIMEBOUNDARY
Content-Disposition: attachment; filename="nodeup.sh"
Content-Transfer-Encoding: 7bit
Content-Type: text/x-shellscript
Mime-Version: 1.0
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
@ -315,7 +324,27 @@ Resources.AWSEC2LaunchTemplatemasterustest1amasterscomplexexamplecom.Properties.
download-release
echo "== nodeup node config done =="
--MIMEBOUNDARY
Content-Disposition: attachment; filename="myscript.sh"
Content-Transfer-Encoding: 7bit
Content-Type: text/x-shellscript
Mime-Version: 1.0
#!/bin/sh
echo "nodes: The time is now $(date -R)!" | tee /root/output.txt
--MIMEBOUNDARY--
Resources.AWSEC2LaunchTemplatenodescomplexexamplecom.Properties.LaunchTemplateData.UserData: |
Content-Type: multipart/mixed; boundary="MIMEBOUNDARY"
MIME-Version: 1.0
--MIMEBOUNDARY
Content-Disposition: attachment; filename="nodeup.sh"
Content-Transfer-Encoding: 7bit
Content-Type: text/x-shellscript
Mime-Version: 1.0
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
@ -537,3 +566,14 @@ Resources.AWSEC2LaunchTemplatenodescomplexexamplecom.Properties.LaunchTemplateDa
download-release
echo "== nodeup node config done =="
--MIMEBOUNDARY
Content-Disposition: attachment; filename="myscript.sh"
Content-Transfer-Encoding: 7bit
Content-Type: text/x-shellscript
Mime-Version: 1.0
#!/bin/sh
echo "nodes: The time is now $(date -R)!" | tee /root/output.txt
--MIMEBOUNDARY--
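With the additional-user-data case folded in, both launch templates now carry multipart MIME user data: the nodeup bootstrap plus the extra myscript.sh attachment. A payload of the same shape can be assembled locally with cloud-init's helper (sketch; assumes cloud-init is installed and both scripts exist in the working directory):

cloud-init devel make-mime \
  -a nodeup.sh:x-shellscript \
  -a myscript.sh:x-shellscript > user-data.mime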


@ -1,3 +1,12 @@
Content-Type: multipart/mixed; boundary="MIMEBOUNDARY"
MIME-Version: 1.0
--MIMEBOUNDARY
Content-Disposition: attachment; filename="nodeup.sh"
Content-Transfer-Encoding: 7bit
Content-Type: text/x-shellscript
Mime-Version: 1.0
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
@ -314,3 +323,14 @@ __EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
--MIMEBOUNDARY
Content-Disposition: attachment; filename="myscript.sh"
Content-Transfer-Encoding: 7bit
Content-Type: text/x-shellscript
Mime-Version: 1.0
#!/bin/sh
echo "nodes: The time is now $(date -R)!" | tee /root/output.txt
--MIMEBOUNDARY--


@ -1,3 +1,12 @@
Content-Type: multipart/mixed; boundary="MIMEBOUNDARY"
MIME-Version: 1.0
--MIMEBOUNDARY
Content-Disposition: attachment; filename="nodeup.sh"
Content-Transfer-Encoding: 7bit
Content-Type: text/x-shellscript
Mime-Version: 1.0
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
@ -219,3 +228,14 @@ __EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
--MIMEBOUNDARY
Content-Disposition: attachment; filename="myscript.sh"
Content-Transfer-Encoding: 7bit
Content-Type: text/x-shellscript
Mime-Version: 1.0
#!/bin/sh
echo "nodes: The time is now $(date -R)!" | tee /root/output.txt
--MIMEBOUNDARY--


@ -10,8 +10,10 @@ spec:
additionalSecurityGroups:
- sg-exampleid3
- sg-exampleid4
crossZoneLoadBalancing: true
kubernetesApiAccess:
- 0.0.0.0/0
- 1.1.1.0/24
- 2001:0:8500::/40
channel: stable
cloudProvider: aws
cloudLabels:
@ -36,6 +38,8 @@ spec:
masterInternalName: api.internal.complex.example.com
masterPublicName: api.complex.example.com
networkCIDR: 172.20.0.0/16
additionalNetworkCIDRs:
- 10.1.0.0/16
networking:
kubenet: {}
nodePortAccess:
@ -43,7 +47,9 @@ spec:
- 10.20.30.0/24
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
- 1.1.1.1/32
- 2001:0:85a3::/48
sshKeyName: ""
topology:
masters: public
nodes: public
@ -83,6 +89,12 @@ spec:
deleteOnTermination: false
size: 20
type: gp2
additionalUserData:
- name: myscript.sh
type: text/x-shellscript
content: |
#!/bin/sh
echo "nodes: The time is now $(date -R)!" | tee /root/output.txt
---
@ -102,3 +114,9 @@ spec:
role: Master
subnets:
- us-test-1a
additionalUserData:
- name: myscript.sh
type: text/x-shellscript
content: |
#!/bin/sh
echo "nodes: The time is now $(date -R)!" | tee /root/output.txt


@ -10,8 +10,10 @@ spec:
additionalSecurityGroups:
- sg-exampleid3
- sg-exampleid4
crossZoneLoadBalancing: true
kubernetesApiAccess:
- 0.0.0.0/0
- 1.1.1.0/24
- 2001:0:8500::/40
channel: stable
cloudProvider: aws
cloudLabels:
@ -36,6 +38,8 @@ spec:
masterInternalName: api.internal.complex.example.com
masterPublicName: api.complex.example.com
networkCIDR: 172.20.0.0/16
additionalNetworkCIDRs:
- 10.1.0.0/16
networking:
kubenet: {}
nodePortAccess:
@ -43,7 +47,9 @@ spec:
- 10.20.30.0/24
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
- 1.1.1.1/32
- 2001:0:85a3::/48
sshKeyName: ""
topology:
masters: public
nodes: public
@ -83,6 +89,12 @@ spec:
deleteOnTermination: false
size: 20
type: gp2
additionalUserData:
- name: myscript.sh
type: text/x-shellscript
content: |
#!/bin/sh
echo "nodes: The time is now $(date -R)!" | tee /root/output.txt
---
@ -102,3 +114,9 @@ spec:
role: Master
subnets:
- us-test-1a
additionalUserData:
- name: myscript.sh
type: text/x-shellscript
content: |
#!/bin/sh
echo "nodes: The time is now $(date -R)!" | tee /root/output.txt


@ -215,7 +215,7 @@ resource "aws_ebs_volume" "us-test-1a-etcd-main-complex-example-com" {
}
resource "aws_elb" "api-complex-example-com" {
cross_zone_load_balancing = false
cross_zone_load_balancing = true
health_check {
healthy_threshold = 2
interval = 10
@ -286,11 +286,6 @@ resource "aws_internet_gateway" "complex-example-com" {
vpc_id = aws_vpc.complex-example-com.id
}
resource "aws_key_pair" "kubernetes-complex-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.complex.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = file("${path.module}/data/aws_key_pair_kubernetes.complex.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")
}
resource "aws_launch_template" "master-us-test-1a-masters-complex-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
@ -309,7 +304,6 @@ resource "aws_launch_template" "master-us-test-1a-masters-complex-example-com" {
}
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-complex-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
@ -368,7 +362,6 @@ resource "aws_launch_template" "nodes-complex-example-com" {
}
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = aws_key_pair.kubernetes-complex-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
@ -475,8 +468,17 @@ resource "aws_security_group_rule" "api-elb-egress" {
type = "egress"
}
resource "aws_security_group_rule" "https-api-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
resource "aws_security_group_rule" "https-api-elb-1-1-1-0--24" {
cidr_blocks = ["1.1.1.0/24"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.api-elb-complex-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "https-api-elb-2001_0_8500__--40" {
cidr_blocks = ["2001:0:8500::/40"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.api-elb-complex-example-com.id
@ -493,8 +495,17 @@ resource "aws_security_group_rule" "https-elb-to-master" {
type = "ingress"
}
resource "aws_security_group_rule" "icmp-pmtu-api-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
resource "aws_security_group_rule" "icmp-pmtu-api-elb-1-1-1-0--24" {
cidr_blocks = ["1.1.1.0/24"]
from_port = 3
protocol = "icmp"
security_group_id = aws_security_group.api-elb-complex-example-com.id
to_port = 4
type = "ingress"
}
resource "aws_security_group_rule" "icmp-pmtu-api-elb-2001_0_8500__--40" {
cidr_blocks = ["2001:0:8500::/40"]
from_port = 3
protocol = "icmp"
security_group_id = aws_security_group.api-elb-complex-example-com.id
@ -592,8 +603,8 @@ resource "aws_security_group_rule" "nodeport-udp-external-to-node-10-20-30-0--24
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-master-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
resource "aws_security_group_rule" "ssh-external-to-master-1-1-1-1--32" {
cidr_blocks = ["1.1.1.1/32"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-complex-example-com.id
@ -601,8 +612,26 @@ resource "aws_security_group_rule" "ssh-external-to-master-0-0-0-0--0" {
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-node-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
resource "aws_security_group_rule" "ssh-external-to-master-2001_0_85a3__--48" {
cidr_blocks = ["2001:0:85a3::/48"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-complex-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-node-1-1-1-1--32" {
cidr_blocks = ["1.1.1.1/32"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-complex-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-node-2001_0_85a3__--48" {
cidr_blocks = ["2001:0:85a3::/48"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-complex-example-com.id
@ -681,6 +710,11 @@ resource "aws_vpc_dhcp_options" "complex-example-com" {
}
}
resource "aws_vpc_ipv4_cidr_block_association" "cidr-10-1-0-0--16" {
cidr_block = "10.1.0.0/16"
vpc_id = aws_vpc.complex-example-com.id
}
resource "aws_vpc" "complex-example-com" {
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
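The Terraform diff likewise absorbs the additional-CIDR coverage: a 10.1.0.0/16 aws_vpc_ipv4_cidr_block_association is attached to the cluster VPC. After an apply, the association can be verified with (sketch; the VPC id is a placeholder):

aws ec2 describe-vpcs --vpc-ids vpc-0123456789abcdef0 \
  --query 'Vpcs[0].CidrBlockAssociationSet[].CidrBlock'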


@ -1,541 +0,0 @@
Resources.AWSEC2LaunchTemplatemasterustest1amastersnosshkeyexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 17.03.2
encryptionConfig: null
etcdClusters:
events:
image: k8s.gcr.io/etcd:2.2.1
version: 2.2.1
main:
image: k8s.gcr.io/etcd:2.2.1
version: 2.2.1
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- Initializers
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdQuorumRead: false
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.11.10
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd2
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: nosshkey.example.com
configureCloudRoutes: true
image: k8s.gcr.io/kube-controller-manager:v1.11.10
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.11.10
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.11.10
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
allowPrivileged: true
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
allowPrivileged: true
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- a1e5d2a7da4cabc29af0dda630564511a9b437d8@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/amd64/kubelet
- c133f55152c76c33d9b41894dcd311064904503e@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- e895202b1ff6beaee15232f0c3118f2dc0cd4862@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/arm64/kubelet
- b7b2bda2aff7a430a04c0e2b7a0ffe853ffb5288@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: nosshkey.example.com
ConfigBase: memfs://clusters.example.com/nosshkey.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/nosshkey.example.com/addons/bootstrap-channel.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
Resources.AWSEC2LaunchTemplatenodesnosshkeyexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
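# Example (hypothetical invocation, for illustration only -- the URLs and hash
# below are not part of this config): fetch a binary from two mirrors and
# verify it against a known sha256 digest before trusting it:
#   download-or-bust kubectl "0123...abcd" \
#     https://mirror-a.example.com/kubectl https://mirror-b.example.com/kubectl
# Passing an empty hash ("") skips validation; try-download-release relies on
# that when it bootstraps nodeup.sha256 itself.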
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
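# Example (hypothetical): `validate-hash ./nodeup 9604ef18...` succeeds only if
# `sha256sum ./nodeup` produces that digest; on a mismatch it prints a warning
# and returns 1, which download-or-bust treats as "delete the file and retry".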
function split-commas() {
echo $1 | tr "," "\n"
}
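# Example: `split-commas "a,b,c"` prints a, b and c on separate lines; this is
# how the comma-separated NODEUP_URL mirror list becomes the nodeup_urls array.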
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# If the downloaded release fails its integrity check, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
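# For instance, on an x86_64 host `uname -m` matches the first case pattern and
# nodeup is fetched from NODEUP_URL_AMD64. In this config the ARM64 variables
# are empty, so an aarch64 host would loop retrying an empty mirror list.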
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 17.03.2
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.11.10
logLevel: 2
kubelet:
allowPrivileged: true
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
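# Each entry below is <hash>@<url>; the 40-hex digests are SHA1 and the 64-hex
# digests are SHA256 (the consumer can distinguish them by length).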
amd64:
- a1e5d2a7da4cabc29af0dda630564511a9b437d8@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/amd64/kubelet
- c133f55152c76c33d9b41894dcd311064904503e@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- e895202b1ff6beaee15232f0c3118f2dc0cd4862@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/arm64/kubelet
- b7b2bda2aff7a430a04c0e2b7a0ffe853ffb5288@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: nosshkey.example.com
ConfigBase: memfs://clusters.example.com/nosshkey.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/nosshkey.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -1,80 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: nosshkey.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/nosshkey.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
kubelet:
anonymousAuth: false
kubernetesVersion: v1.11.10
masterInternalName: api.internal.nosshkey.example.com
masterPublicName: api.nosshkey.example.com
networkCIDR: 172.20.0.0/16
networking:
kubenet: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
sshKeyName: ""
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: nosshkey.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: nosshkey.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a

View File

@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -1,102 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
"ec2:DescribeLaunchTemplateVersions"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"elasticloadbalancing:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"iam:ListServerCertificates",
"iam:GetServerCertificate"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Effect": "Allow",
"Action": [
"route53:GetChange"
],
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": [
"*"
]
}
]
}

View File

@ -1,68 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"ec2:DescribeRegions"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Effect": "Allow",
"Action": [
"route53:GetChange"
],
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": [
"*"
]
}
]
}

View File

@ -1,318 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# If the downloaded release fails its integrity check, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 17.03.2
encryptionConfig: null
etcdClusters:
events:
image: k8s.gcr.io/etcd:2.2.1
version: 2.2.1
main:
image: k8s.gcr.io/etcd:2.2.1
version: 2.2.1
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- Initializers
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdQuorumRead: false
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.11.10
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
serviceNodePortRange: 28000-32767
storageBackend: etcd2
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: nosshkey.example.com
configureCloudRoutes: true
image: k8s.gcr.io/kube-controller-manager:v1.11.10
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.11.10
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.11.10
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
allowPrivileged: true
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
allowPrivileged: true
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- a1e5d2a7da4cabc29af0dda630564511a9b437d8@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/amd64/kubelet
- c133f55152c76c33d9b41894dcd311064904503e@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- e895202b1ff6beaee15232f0c3118f2dc0cd4862@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/arm64/kubelet
- b7b2bda2aff7a430a04c0e2b7a0ffe853ffb5288@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: nosshkey.example.com
ConfigBase: memfs://clusters.example.com/nosshkey.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/nosshkey.example.com/addons/bootstrap-channel.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -1,222 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# If the downloaded release fails its integrity check, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 17.03.2
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.11.10
logLevel: 2
kubelet:
allowPrivileged: true
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- a1e5d2a7da4cabc29af0dda630564511a9b437d8@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/amd64/kubelet
- c133f55152c76c33d9b41894dcd311064904503e@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- e895202b1ff6beaee15232f0c3118f2dc0cd4862@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/arm64/kubelet
- b7b2bda2aff7a430a04c0e2b7a0ffe853ffb5288@https://storage.googleapis.com/kubernetes-release/release/v1.11.10/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: nosshkey.example.com
ConfigBase: memfs://clusters.example.com/nosshkey.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/nosshkey.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -1,98 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: nosshkey.example.com
spec:
api:
loadBalancer:
type: Public
additionalSecurityGroups:
- sg-exampleid3
- sg-exampleid4
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
cloudLabels:
Owner: John Doe
foo/bar: fib+baz
configBase: memfs://clusters.example.com/nosshkey.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
kubelet:
anonymousAuth: false
kubeAPIServer:
serviceNodePortRange: 28000-32767
kubernetesVersion: v1.11.10
masterInternalName: api.internal.nosshkey.example.com
masterPublicName: api.nosshkey.example.com
networkCIDR: 172.20.0.0/16
networking:
kubenet: {}
nodePortAccess:
- 1.2.3.4/32
- 10.20.30.0/24
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
sshKeyName: ""
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: nosshkey.example.com
spec:
additionalSecurityGroups:
- sg-exampleid3
- sg-exampleid4
associatePublicIp: true
suspendProcesses:
- AZRebalance
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
detailedInstanceMonitoring: true
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: nosshkey.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a

View File

@ -1,684 +0,0 @@
locals {
cluster_name = "nosshkey.example.com"
master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-nosshkey-example-com.id]
master_security_group_ids = [aws_security_group.masters-nosshkey-example-com.id]
masters_role_arn = aws_iam_role.masters-nosshkey-example-com.arn
masters_role_name = aws_iam_role.masters-nosshkey-example-com.name
node_autoscaling_group_ids = [aws_autoscaling_group.nodes-nosshkey-example-com.id]
node_security_group_ids = [aws_security_group.nodes-nosshkey-example-com.id, "sg-exampleid3", "sg-exampleid4"]
node_subnet_ids = [aws_subnet.us-test-1a-nosshkey-example-com.id]
nodes_role_arn = aws_iam_role.nodes-nosshkey-example-com.arn
nodes_role_name = aws_iam_role.nodes-nosshkey-example-com.name
region = "us-test-1"
route_table_public_id = aws_route_table.nosshkey-example-com.id
subnet_us-test-1a_id = aws_subnet.us-test-1a-nosshkey-example-com.id
vpc_cidr_block = aws_vpc.nosshkey-example-com.cidr_block
vpc_id = aws_vpc.nosshkey-example-com.id
}
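# The output blocks below mirror the locals above one-for-one, so the same
# values are also available via `terraform output` after an apply and, when
# this config is consumed as a module, via module.<name>.<output>.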
output "cluster_name" {
value = "nosshkey.example.com"
}
output "master_autoscaling_group_ids" {
value = [aws_autoscaling_group.master-us-test-1a-masters-nosshkey-example-com.id]
}
output "master_security_group_ids" {
value = [aws_security_group.masters-nosshkey-example-com.id]
}
output "masters_role_arn" {
value = aws_iam_role.masters-nosshkey-example-com.arn
}
output "masters_role_name" {
value = aws_iam_role.masters-nosshkey-example-com.name
}
output "node_autoscaling_group_ids" {
value = [aws_autoscaling_group.nodes-nosshkey-example-com.id]
}
output "node_security_group_ids" {
value = [aws_security_group.nodes-nosshkey-example-com.id, "sg-exampleid3", "sg-exampleid4"]
}
output "node_subnet_ids" {
value = [aws_subnet.us-test-1a-nosshkey-example-com.id]
}
output "nodes_role_arn" {
value = aws_iam_role.nodes-nosshkey-example-com.arn
}
output "nodes_role_name" {
value = aws_iam_role.nodes-nosshkey-example-com.name
}
output "region" {
value = "us-test-1"
}
output "route_table_public_id" {
value = aws_route_table.nosshkey-example-com.id
}
output "subnet_us-test-1a_id" {
value = aws_subnet.us-test-1a-nosshkey-example-com.id
}
output "vpc_cidr_block" {
value = aws_vpc.nosshkey-example-com.cidr_block
}
output "vpc_id" {
value = aws_vpc.nosshkey-example-com.id
}
provider "aws" {
region = "us-test-1"
}
resource "aws_autoscaling_attachment" "master-us-test-1a-masters-nosshkey-example-com" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-nosshkey-example-com.id
elb = aws_elb.api-nosshkey-example-com.id
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-nosshkey-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.master-us-test-1a-masters-nosshkey-example-com.id
version = aws_launch_template.master-us-test-1a-masters-nosshkey-example-com.latest_version
}
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1a.masters.nosshkey.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "nosshkey.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1a.masters.nosshkey.example.com"
}
tag {
key = "Owner"
propagate_at_launch = true
value = "John Doe"
}
tag {
key = "foo/bar"
propagate_at_launch = true
value = "fib+baz"
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "kubernetes.io/cluster/nosshkey.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-nosshkey-example-com.id]
}
resource "aws_autoscaling_group" "nodes-nosshkey-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.nodes-nosshkey-example-com.id
version = aws_launch_template.nodes-nosshkey-example-com.latest_version
}
max_size = 2
metrics_granularity = "1Minute"
min_size = 2
name = "nodes.nosshkey.example.com"
suspended_processes = ["AZRebalance"]
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "nosshkey.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "nodes.nosshkey.example.com"
}
tag {
key = "Owner"
propagate_at_launch = true
value = "John Doe"
}
tag {
key = "foo/bar"
propagate_at_launch = true
value = "fib+baz"
}
tag {
key = "k8s.io/role/node"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes"
}
tag {
key = "kubernetes.io/cluster/nosshkey.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-nosshkey-example-com.id]
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-nosshkey-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "us-test-1a.etcd-events.nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"k8s.io/etcd/events" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
type = "gp2"
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-nosshkey-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "us-test-1a.etcd-main.nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"k8s.io/etcd/main" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
type = "gp2"
}
resource "aws_elb" "api-nosshkey-example-com" {
cross_zone_load_balancing = false
health_check {
healthy_threshold = 2
interval = 10
target = "SSL:443"
timeout = 5
unhealthy_threshold = 2
}
idle_timeout = 300
listener {
instance_port = 443
instance_protocol = "TCP"
lb_port = 443
lb_protocol = "TCP"
ssl_certificate_id = ""
}
name = "api-nosshkey-example-com-bdulnp"
security_groups = [aws_security_group.api-elb-nosshkey-example-com.id, "sg-exampleid3", "sg-exampleid4"]
subnets = [aws_subnet.us-test-1a-nosshkey-example-com.id]
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "api.nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
}
resource "aws_iam_instance_profile" "masters-nosshkey-example-com" {
name = "masters.nosshkey.example.com"
role = aws_iam_role.masters-nosshkey-example-com.name
}
resource "aws_iam_instance_profile" "nodes-nosshkey-example-com" {
name = "nodes.nosshkey.example.com"
role = aws_iam_role.nodes-nosshkey-example-com.name
}
resource "aws_iam_role_policy" "masters-nosshkey-example-com" {
name = "masters.nosshkey.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_masters.nosshkey.example.com_policy")
role = aws_iam_role.masters-nosshkey-example-com.name
}
resource "aws_iam_role_policy" "nodes-nosshkey-example-com" {
name = "nodes.nosshkey.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_nodes.nosshkey.example.com_policy")
role = aws_iam_role.nodes-nosshkey-example-com.name
}
resource "aws_iam_role" "masters-nosshkey-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_masters.nosshkey.example.com_policy")
name = "masters.nosshkey.example.com"
}
resource "aws_iam_role" "nodes-nosshkey-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.nosshkey.example.com_policy")
name = "nodes.nosshkey.example.com"
}
resource "aws_internet_gateway" "nosshkey-example-com" {
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
vpc_id = aws_vpc.nosshkey-example-com.id
}
resource "aws_launch_template" "master-us-test-1a-masters-nosshkey-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 64
volume_type = "gp2"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-nosshkey-example-com.id
}
image_id = "ami-12345678"
instance_type = "m3.medium"
lifecycle {
create_before_destroy = true
}
name_prefix = "master-us-test-1a.masters.nosshkey.example.com-"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.masters-nosshkey-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "master-us-test-1a.masters.nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "master-us-test-1a.masters.nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
}
user_data = file("${path.module}/data/aws_launch_template_master-us-test-1a.masters.nosshkey.example.com_user_data")
}
resource "aws_launch_template" "nodes-nosshkey-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 128
volume_type = "gp2"
}
}
iam_instance_profile {
name = aws_iam_instance_profile.nodes-nosshkey-example-com.id
}
image_id = "ami-12345678"
instance_type = "t2.medium"
lifecycle {
create_before_destroy = true
}
name_prefix = "nodes.nosshkey.example.com-"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.nodes-nosshkey-example-com.id, "sg-exampleid3", "sg-exampleid4"]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "nodes.nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "nodes.nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
}
user_data = file("${path.module}/data/aws_launch_template_nodes.nosshkey.example.com_user_data")
}
resource "aws_route53_record" "api-nosshkey-example-com" {
alias {
evaluate_target_health = false
name = aws_elb.api-nosshkey-example-com.dns_name
zone_id = aws_elb.api-nosshkey-example-com.zone_id
}
name = "api.nosshkey.example.com"
type = "A"
zone_id = "/hostedzone/Z1AFAKE1ZON3YO"
}
resource "aws_route_table_association" "us-test-1a-nosshkey-example-com" {
route_table_id = aws_route_table.nosshkey-example-com.id
subnet_id = aws_subnet.us-test-1a-nosshkey-example-com.id
}
resource "aws_route_table" "nosshkey-example-com" {
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
"kubernetes.io/kops/role" = "public"
}
vpc_id = aws_vpc.nosshkey-example-com.id
}
resource "aws_route" "route-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.nosshkey-example-com.id
route_table_id = aws_route_table.nosshkey-example-com.id
}
resource "aws_security_group_rule" "all-master-to-master" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-nosshkey-example-com.id
source_security_group_id = aws_security_group.masters-nosshkey-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "all-master-to-node" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-nosshkey-example-com.id
source_security_group_id = aws_security_group.masters-nosshkey-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "all-node-to-node" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-nosshkey-example-com.id
source_security_group_id = aws_security_group.nodes-nosshkey-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "api-elb-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.api-elb-nosshkey-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "https-api-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.api-elb-nosshkey-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "https-elb-to-master" {
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-nosshkey-example-com.id
source_security_group_id = aws_security_group.api-elb-nosshkey-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "icmp-pmtu-api-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 3
protocol = "icmp"
security_group_id = aws_security_group.api-elb-nosshkey-example-com.id
to_port = 4
type = "ingress"
}
resource "aws_security_group_rule" "master-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-nosshkey-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-nosshkey-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
from_port = 1
protocol = "tcp"
security_group_id = aws_security_group.masters-nosshkey-example-com.id
source_security_group_id = aws_security_group.nodes-nosshkey-example-com.id
to_port = 2379
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
from_port = 2382
protocol = "tcp"
security_group_id = aws_security_group.masters-nosshkey-example-com.id
source_security_group_id = aws_security_group.nodes-nosshkey-example-com.id
to_port = 4000
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-tcp-4003-65535" {
from_port = 4003
protocol = "tcp"
security_group_id = aws_security_group.masters-nosshkey-example-com.id
source_security_group_id = aws_security_group.nodes-nosshkey-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-udp-1-65535" {
from_port = 1
protocol = "udp"
security_group_id = aws_security_group.masters-nosshkey-example-com.id
source_security_group_id = aws_security_group.nodes-nosshkey-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "nodeport-tcp-external-to-node-1-2-3-4--32" {
cidr_blocks = ["1.2.3.4/32"]
from_port = 28000
protocol = "tcp"
security_group_id = aws_security_group.nodes-nosshkey-example-com.id
to_port = 32767
type = "ingress"
}
resource "aws_security_group_rule" "nodeport-tcp-external-to-node-10-20-30-0--24" {
cidr_blocks = ["10.20.30.0/24"]
from_port = 28000
protocol = "tcp"
security_group_id = aws_security_group.nodes-nosshkey-example-com.id
to_port = 32767
type = "ingress"
}
resource "aws_security_group_rule" "nodeport-udp-external-to-node-1-2-3-4--32" {
cidr_blocks = ["1.2.3.4/32"]
from_port = 28000
protocol = "udp"
security_group_id = aws_security_group.nodes-nosshkey-example-com.id
to_port = 32767
type = "ingress"
}
resource "aws_security_group_rule" "nodeport-udp-external-to-node-10-20-30-0--24" {
cidr_blocks = ["10.20.30.0/24"]
from_port = 28000
protocol = "udp"
security_group_id = aws_security_group.nodes-nosshkey-example-com.id
to_port = 32767
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-master-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-nosshkey-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-node-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-nosshkey-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group" "api-elb-nosshkey-example-com" {
description = "Security group for api ELB"
name = "api-elb.nosshkey.example.com"
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "api-elb.nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
vpc_id = aws_vpc.nosshkey-example-com.id
}
resource "aws_security_group" "masters-nosshkey-example-com" {
description = "Security group for masters"
name = "masters.nosshkey.example.com"
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "masters.nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
vpc_id = aws_vpc.nosshkey-example-com.id
}
resource "aws_security_group" "nodes-nosshkey-example-com" {
description = "Security group for nodes"
name = "nodes.nosshkey.example.com"
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "nodes.nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
vpc_id = aws_vpc.nosshkey-example-com.id
}
resource "aws_subnet" "us-test-1a-nosshkey-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "us-test-1a.nosshkey.example.com"
"Owner" = "John Doe"
"SubnetType" = "Public"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
}
vpc_id = aws_vpc.nosshkey-example-com.id
}
resource "aws_vpc_dhcp_options_association" "nosshkey-example-com" {
dhcp_options_id = aws_vpc_dhcp_options.nosshkey-example-com.id
vpc_id = aws_vpc.nosshkey-example-com.id
}
resource "aws_vpc_dhcp_options" "nosshkey-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
}
resource "aws_vpc" "nosshkey-example-com" {
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"KubernetesCluster" = "nosshkey.example.com"
"Name" = "nosshkey.example.com"
"Owner" = "John Doe"
"foo/bar" = "fib+baz"
"kubernetes.io/cluster/nosshkey.example.com" = "owned"
}
}
terraform {
required_version = ">= 0.12.0"
}

View File

@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -1,102 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
"ec2:DescribeLaunchTemplateVersions"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"elasticloadbalancing:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"iam:ListServerCertificates",
"iam:GetServerCertificate"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Effect": "Allow",
"Action": [
"route53:GetChange"
],
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": [
"*"
]
}
]
}

View File

@ -1,68 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"ec2:DescribeRegions"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Effect": "Allow",
"Action": [
"route53:GetChange"
],
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": [
"*"
]
}
]
}

View File

@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -1,314 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# If the downloaded release fails its integrity check, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: restrictaccess.example.com
configureCloudRoutes: true
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: restrictaccess.example.com
ConfigBase: memfs://clusters.example.com/restrictaccess.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/restrictaccess.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/restrictaccess.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/restrictaccess.example.com/manifests/etcd/events.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -1,221 +0,0 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH_AMD64=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
NODEUP_URL_ARM64=
NODEUP_HASH_ARM64=
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
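# Example (hypothetical invocation): fetch a CNI tarball, verified against its
# SHA-256, trying two mirrors in order until one succeeds:
#   download-or-bust cni.tgz <sha256> \
#     https://mirror-a.example.com/cni.tgz https://mirror-b.example.com/cni.tgz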
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
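# For example, split-commas "a,b,c" prints a, b and c on separate lines; this is
# how the comma-separated NODEUP_URL mirror list becomes the nodeup_urls array
# in try-download-release below.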
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# If checking the integrity of the release fails, retry the download.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
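# On the x86 instance types in this cluster, uname -m reports x86_64, so the
# amd64 URL and hash are selected; the arm64 variables are empty in this build
# and would only apply on aarch64 hosts.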
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
ClusterName: restrictaccess.example.com
ConfigBase: memfs://clusters.example.com/restrictaccess.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/restrictaccess.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==
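The aws_key_pair resource in the Terraform below embeds this key's fingerprint (c4:a6:ed:...) in its name. For imported keys, AWS derives the fingerprint as the MD5 of the DER-encoded public key; one commonly cited recipe to reproduce it (an assumption about the tooling, not part of this change) is:

ssh-keygen -e -m PKCS8 -f id_rsa.pub | openssl pkey -pubin -outform DER | openssl md5 -c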

View File

@ -1,81 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: restrictaccess.example.com
spec:
kubernetesApiAccess:
- 1.1.1.0/24
- 2001:0:8500::/40
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/restrictaccess.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
kubelet:
anonymousAuth: false
kubernetesVersion: v1.14.0
masterInternalName: api.internal.restrictaccess.example.com
masterPublicName: api.restrictaccess.example.com
networkCIDR: 172.20.0.0/16
networking:
kubenet: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 1.1.1.1/32
- 2001:0:85a3::/48
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: restrictaccess.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: restrictaccess.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
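Each CIDR in the kubernetesApiAccess and sshAccess lists above becomes exactly one security group ingress rule in the Terraform that follows, with the CIDR sanitized into the rule name. A sketch of the apparent sanitization, inferred from the rule names below (the helper name is illustrative):

sanitize_cidr() {
  local c="$1"
  c="${c//\//--}"   # '/' -> '--'
  c="${c//:/_}"     # ':' -> '_'
  c="${c//./-}"     # '.' -> '-'
  echo "${c}"
}
sanitize_cidr "1.1.1.1/32"        # -> 1-1-1-1--32
sanitize_cidr "2001:0:85a3::/48"  # -> 2001_0_85a3__--48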

View File

@ -1,550 +0,0 @@
locals {
cluster_name = "restrictaccess.example.com"
master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-restrictaccess-example-com.id]
master_security_group_ids = [aws_security_group.masters-restrictaccess-example-com.id]
masters_role_arn = aws_iam_role.masters-restrictaccess-example-com.arn
masters_role_name = aws_iam_role.masters-restrictaccess-example-com.name
node_autoscaling_group_ids = [aws_autoscaling_group.nodes-restrictaccess-example-com.id]
node_security_group_ids = [aws_security_group.nodes-restrictaccess-example-com.id]
node_subnet_ids = [aws_subnet.us-test-1a-restrictaccess-example-com.id]
nodes_role_arn = aws_iam_role.nodes-restrictaccess-example-com.arn
nodes_role_name = aws_iam_role.nodes-restrictaccess-example-com.name
region = "us-test-1"
route_table_public_id = aws_route_table.restrictaccess-example-com.id
subnet_us-test-1a_id = aws_subnet.us-test-1a-restrictaccess-example-com.id
vpc_cidr_block = aws_vpc.restrictaccess-example-com.cidr_block
vpc_id = aws_vpc.restrictaccess-example-com.id
}
output "cluster_name" {
value = "restrictaccess.example.com"
}
output "master_autoscaling_group_ids" {
value = [aws_autoscaling_group.master-us-test-1a-masters-restrictaccess-example-com.id]
}
output "master_security_group_ids" {
value = [aws_security_group.masters-restrictaccess-example-com.id]
}
output "masters_role_arn" {
value = aws_iam_role.masters-restrictaccess-example-com.arn
}
output "masters_role_name" {
value = aws_iam_role.masters-restrictaccess-example-com.name
}
output "node_autoscaling_group_ids" {
value = [aws_autoscaling_group.nodes-restrictaccess-example-com.id]
}
output "node_security_group_ids" {
value = [aws_security_group.nodes-restrictaccess-example-com.id]
}
output "node_subnet_ids" {
value = [aws_subnet.us-test-1a-restrictaccess-example-com.id]
}
output "nodes_role_arn" {
value = aws_iam_role.nodes-restrictaccess-example-com.arn
}
output "nodes_role_name" {
value = aws_iam_role.nodes-restrictaccess-example-com.name
}
output "region" {
value = "us-test-1"
}
output "route_table_public_id" {
value = aws_route_table.restrictaccess-example-com.id
}
output "subnet_us-test-1a_id" {
value = aws_subnet.us-test-1a-restrictaccess-example-com.id
}
output "vpc_cidr_block" {
value = aws_vpc.restrictaccess-example-com.cidr_block
}
output "vpc_id" {
value = aws_vpc.restrictaccess-example-com.id
}
provider "aws" {
region = "us-test-1"
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-restrictaccess-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.master-us-test-1a-masters-restrictaccess-example-com.id
version = aws_launch_template.master-us-test-1a-masters-restrictaccess-example-com.latest_version
}
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1a.masters.restrictaccess.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "restrictaccess.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1a.masters.restrictaccess.example.com"
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "kubernetes.io/cluster/restrictaccess.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-restrictaccess-example-com.id]
}
resource "aws_autoscaling_group" "nodes-restrictaccess-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.nodes-restrictaccess-example-com.id
version = aws_launch_template.nodes-restrictaccess-example-com.latest_version
}
max_size = 2
metrics_granularity = "1Minute"
min_size = 2
name = "nodes.restrictaccess.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "restrictaccess.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "nodes.restrictaccess.example.com"
}
tag {
key = "k8s.io/role/node"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes"
}
tag {
key = "kubernetes.io/cluster/restrictaccess.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-restrictaccess-example-com.id]
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-restrictaccess-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "us-test-1a.etcd-events.restrictaccess.example.com"
"k8s.io/etcd/events" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
type = "gp2"
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-restrictaccess-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "us-test-1a.etcd-main.restrictaccess.example.com"
"k8s.io/etcd/main" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
type = "gp2"
}
resource "aws_iam_instance_profile" "masters-restrictaccess-example-com" {
name = "masters.restrictaccess.example.com"
role = aws_iam_role.masters-restrictaccess-example-com.name
}
resource "aws_iam_instance_profile" "nodes-restrictaccess-example-com" {
name = "nodes.restrictaccess.example.com"
role = aws_iam_role.nodes-restrictaccess-example-com.name
}
resource "aws_iam_role_policy" "masters-restrictaccess-example-com" {
name = "masters.restrictaccess.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_masters.restrictaccess.example.com_policy")
role = aws_iam_role.masters-restrictaccess-example-com.name
}
resource "aws_iam_role_policy" "nodes-restrictaccess-example-com" {
name = "nodes.restrictaccess.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_nodes.restrictaccess.example.com_policy")
role = aws_iam_role.nodes-restrictaccess-example-com.name
}
resource "aws_iam_role" "masters-restrictaccess-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_masters.restrictaccess.example.com_policy")
name = "masters.restrictaccess.example.com"
}
resource "aws_iam_role" "nodes-restrictaccess-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.restrictaccess.example.com_policy")
name = "nodes.restrictaccess.example.com"
}
resource "aws_internet_gateway" "restrictaccess-example-com" {
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "restrictaccess.example.com"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
vpc_id = aws_vpc.restrictaccess-example-com.id
}
resource "aws_key_pair" "kubernetes-restrictaccess-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.restrictaccess.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = file("${path.module}/data/aws_key_pair_kubernetes.restrictaccess.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")
}
resource "aws_launch_template" "master-us-test-1a-masters-restrictaccess-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 64
volume_type = "gp2"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-restrictaccess-example-com.id
}
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-restrictaccess-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "master-us-test-1a.masters.restrictaccess.example.com-"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.masters-restrictaccess-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "master-us-test-1a.masters.restrictaccess.example.com"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "master-us-test-1a.masters.restrictaccess.example.com"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
user_data = file("${path.module}/data/aws_launch_template_master-us-test-1a.masters.restrictaccess.example.com_user_data")
}
resource "aws_launch_template" "nodes-restrictaccess-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 128
volume_type = "gp2"
}
}
iam_instance_profile {
name = aws_iam_instance_profile.nodes-restrictaccess-example-com.id
}
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = aws_key_pair.kubernetes-restrictaccess-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "nodes.restrictaccess.example.com-"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.nodes-restrictaccess-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "nodes.restrictaccess.example.com"
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "nodes.restrictaccess.example.com"
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
user_data = file("${path.module}/data/aws_launch_template_nodes.restrictaccess.example.com_user_data")
}
resource "aws_route_table_association" "us-test-1a-restrictaccess-example-com" {
route_table_id = aws_route_table.restrictaccess-example-com.id
subnet_id = aws_subnet.us-test-1a-restrictaccess-example-com.id
}
resource "aws_route_table" "restrictaccess-example-com" {
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "restrictaccess.example.com"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
"kubernetes.io/kops/role" = "public"
}
vpc_id = aws_vpc.restrictaccess-example-com.id
}
resource "aws_route" "route-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.restrictaccess-example-com.id
route_table_id = aws_route_table.restrictaccess-example-com.id
}
resource "aws_security_group_rule" "all-master-to-master" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-restrictaccess-example-com.id
source_security_group_id = aws_security_group.masters-restrictaccess-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "all-master-to-node" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-restrictaccess-example-com.id
source_security_group_id = aws_security_group.masters-restrictaccess-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "all-node-to-node" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-restrictaccess-example-com.id
source_security_group_id = aws_security_group.nodes-restrictaccess-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "https-external-to-master-1-1-1-0--24" {
cidr_blocks = ["1.1.1.0/24"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-restrictaccess-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "https-external-to-master-2001_0_8500__--40" {
cidr_blocks = ["2001:0:8500::/40"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-restrictaccess-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "master-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-restrictaccess-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-restrictaccess-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
from_port = 1
protocol = "tcp"
security_group_id = aws_security_group.masters-restrictaccess-example-com.id
source_security_group_id = aws_security_group.nodes-restrictaccess-example-com.id
to_port = 2379
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
from_port = 2382
protocol = "tcp"
security_group_id = aws_security_group.masters-restrictaccess-example-com.id
source_security_group_id = aws_security_group.nodes-restrictaccess-example-com.id
to_port = 4000
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-tcp-4003-65535" {
from_port = 4003
protocol = "tcp"
security_group_id = aws_security_group.masters-restrictaccess-example-com.id
source_security_group_id = aws_security_group.nodes-restrictaccess-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-udp-1-65535" {
from_port = 1
protocol = "udp"
security_group_id = aws_security_group.masters-restrictaccess-example-com.id
source_security_group_id = aws_security_group.nodes-restrictaccess-example-com.id
to_port = 65535
type = "ingress"
}
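# The gaps in the node-to-master ranges above (2380-2381 and 4001-4002) are the
# etcd peer and client ports, which nodes are deliberately denied; this reading
# is inferred from the rule layout, not stated in the file.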
resource "aws_security_group_rule" "ssh-external-to-master-1-1-1-1--32" {
cidr_blocks = ["1.1.1.1/32"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-restrictaccess-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-master-2001_0_85a3__--48" {
cidr_blocks = ["2001:0:85a3::/48"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-restrictaccess-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-node-1-1-1-1--32" {
cidr_blocks = ["1.1.1.1/32"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-restrictaccess-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-node-2001_0_85a3__--48" {
cidr_blocks = ["2001:0:85a3::/48"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-restrictaccess-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group" "masters-restrictaccess-example-com" {
description = "Security group for masters"
name = "masters.restrictaccess.example.com"
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "masters.restrictaccess.example.com"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
vpc_id = aws_vpc.restrictaccess-example-com.id
}
resource "aws_security_group" "nodes-restrictaccess-example-com" {
description = "Security group for nodes"
name = "nodes.restrictaccess.example.com"
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "nodes.restrictaccess.example.com"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
vpc_id = aws_vpc.restrictaccess-example-com.id
}
resource "aws_subnet" "us-test-1a-restrictaccess-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "us-test-1a.restrictaccess.example.com"
"SubnetType" = "Public"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
}
vpc_id = aws_vpc.restrictaccess-example-com.id
}
resource "aws_vpc_dhcp_options_association" "restrictaccess-example-com" {
dhcp_options_id = aws_vpc_dhcp_options.restrictaccess-example-com.id
vpc_id = aws_vpc.restrictaccess-example-com.id
}
resource "aws_vpc_dhcp_options" "restrictaccess-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "restrictaccess.example.com"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
resource "aws_vpc" "restrictaccess-example-com" {
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"KubernetesCluster" = "restrictaccess.example.com"
"Name" = "restrictaccess.example.com"
"kubernetes.io/cluster/restrictaccess.example.com" = "owned"
}
}
terraform {
required_version = ">= 0.12.0"
}
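After terraform apply, the outputs declared above can feed downstream automation; for example, with the standard Terraform CLI (not part of the generated file):

terraform output vpc_id
terraform output -json node_subnet_ids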