Add some integration tests for cilium

Ole Markus With 2020-04-16 15:06:25 +02:00
parent a7f631e7c9
commit d174faf116
29 changed files with 7109 additions and 0 deletions

@@ -230,6 +230,16 @@ func TestPrivateCalico(t *testing.T) {
newIntegrationTest("privatecalico.example.com", "privatecalico").withPrivate().runTestCloudformation(t)
}
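// TestPrivateCilium runs the test on a configuration with private topology, Cilium networking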
func TestPrivateCilium(t *testing.T) {
newIntegrationTest("privatecilium.example.com", "privatecilium").withPrivate().runTestTerraformAWS(t)
newIntegrationTest("privatecilium.example.com", "privatecilium").withPrivate().runTestCloudformation(t)
}
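// TestPrivateCiliumAdvanced runs the test on a configuration with private topology, Cilium networking, and additional non-default Cilium settings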
func TestPrivateCiliumAdvanced(t *testing.T) {
newIntegrationTest("privateciliumadvanced.example.com", "privateciliumadvanced").withPrivate().runTestTerraformAWS(t)
newIntegrationTest("privateciliumadvanced.example.com", "privateciliumadvanced").withPrivate().runTestCloudformation(t)
}
// TestPrivateCanal runs the test on a configuration with private topology, canal networking
func TestPrivateCanal(t *testing.T) {
newIntegrationTest("privatecanal.example.com", "privatecanal").withPrivate().runTestTerraformAWS(t)

File diff suppressed because it is too large

@@ -0,0 +1,497 @@
Resources.AWSAutoScalingLaunchConfigurationbastionprivateciliumexamplecom.Properties.UserData: ""
Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersprivateciliumexamplecom.Properties.UserData: |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: warn
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: privatecilium.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 52e9d2de8a5f927307d9397308735658ee44ab8d@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
- 71b7bc444ba0a5f7cd7a36e91b594c1c3d13890e160d85e0dfde38c46a24e416@https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/utils.tar.gz,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-utils.tar.gz,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/utils.tar.gz
ClusterName: privatecilium.example.com
ConfigBase: memfs://clusters.example.com/privatecilium.example.com
InstanceGroupName: master-us-test-1a
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/privatecilium.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/privatecilium.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/privatecilium.example.com/manifests/etcd/events.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
Resources.AWSAutoScalingLaunchConfigurationnodesprivateciliumexamplecom.Properties.UserData: |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: warn
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 52e9d2de8a5f927307d9397308735658ee44ab8d@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
- 71b7bc444ba0a5f7cd7a36e91b594c1c3d13890e160d85e0dfde38c46a24e416@https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/utils.tar.gz,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-utils.tar.gz,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/utils.tar.gz
ClusterName: privatecilium.example.com
ConfigBase: memfs://clusters.example.com/privatecilium.example.com
InstanceGroupName: nodes
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/privatecilium.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

@@ -0,0 +1,14 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeRegions"
],
"Resource": [
"*"
]
}
]
}

@@ -0,0 +1,102 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
"ec2:DescribeLaunchTemplateVersions"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"elasticloadbalancing:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"iam:ListServerCertificates",
"iam:GetServerCertificate"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Effect": "Allow",
"Action": [
"route53:GetChange"
],
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": [
"*"
]
}
]
}

@@ -0,0 +1,68 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"ec2:DescribeRegions"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Effect": "Allow",
"Action": [
"route53:GetChange"
],
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": [
"*"
]
}
]
}

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

@@ -0,0 +1,293 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: warn
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: privatecilium.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 52e9d2de8a5f927307d9397308735658ee44ab8d@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
- 71b7bc444ba0a5f7cd7a36e91b594c1c3d13890e160d85e0dfde38c46a24e416@https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/utils.tar.gz,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-utils.tar.gz,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/utils.tar.gz
ClusterName: privatecilium.example.com
ConfigBase: memfs://clusters.example.com/privatecilium.example.com
InstanceGroupName: master-us-test-1a
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/privatecilium.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/privatecilium.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/privatecilium.example.com/manifests/etcd/events.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

@@ -0,0 +1,201 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: warn
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 52e9d2de8a5f927307d9397308735658ee44ab8d@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
- 71b7bc444ba0a5f7cd7a36e91b594c1c3d13890e160d85e0dfde38c46a24e416@https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/utils.tar.gz,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-utils.tar.gz,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/utils.tar.gz
ClusterName: privatecilium.example.com
ConfigBase: memfs://clusters.example.com/privatecilium.example.com
InstanceGroupName: nodes
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/privatecilium.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

@@ -0,0 +1,101 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-12T04:13:14Z"
name: privatecilium.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/privatecilium.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
kubelet:
anonymousAuth: false
kubernetesVersion: v1.14.0
masterInternalName: api.internal.privatecilium.example.com
masterPublicName: api.privatecilium.example.com
networkCIDR: 172.20.0.0/16
networking:
cilium: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: private
nodes: private
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Private
zone: us-test-1a
- cidr: 172.20.4.0/22
name: utility-us-test-1a
type: Utility
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-12T04:13:15Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: privatecilium.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-12T04:13:15Z"
name: nodes
labels:
kops.k8s.io/cluster: privatecilium.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-14T15:32:41Z"
name: bastion
labels:
kops.k8s.io/cluster: privatecilium.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.micro
maxSize: 1
minSize: 1
role: Bastion
subnets:
- utility-us-test-1a

@@ -0,0 +1,786 @@
locals {
bastion_autoscaling_group_ids = [aws_autoscaling_group.bastion-privatecilium-example-com.id]
bastion_security_group_ids = [aws_security_group.bastion-privatecilium-example-com.id]
bastions_role_arn = aws_iam_role.bastions-privatecilium-example-com.arn
bastions_role_name = aws_iam_role.bastions-privatecilium-example-com.name
cluster_name = "privatecilium.example.com"
master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-privatecilium-example-com.id]
master_security_group_ids = [aws_security_group.masters-privatecilium-example-com.id]
masters_role_arn = aws_iam_role.masters-privatecilium-example-com.arn
masters_role_name = aws_iam_role.masters-privatecilium-example-com.name
node_autoscaling_group_ids = [aws_autoscaling_group.nodes-privatecilium-example-com.id]
node_security_group_ids = [aws_security_group.nodes-privatecilium-example-com.id]
node_subnet_ids = [aws_subnet.us-test-1a-privatecilium-example-com.id]
nodes_role_arn = aws_iam_role.nodes-privatecilium-example-com.arn
nodes_role_name = aws_iam_role.nodes-privatecilium-example-com.name
region = "us-test-1"
route_table_private-us-test-1a_id = aws_route_table.private-us-test-1a-privatecilium-example-com.id
route_table_public_id = aws_route_table.privatecilium-example-com.id
subnet_us-test-1a_id = aws_subnet.us-test-1a-privatecilium-example-com.id
subnet_utility-us-test-1a_id = aws_subnet.utility-us-test-1a-privatecilium-example-com.id
vpc_cidr_block = aws_vpc.privatecilium-example-com.cidr_block
vpc_id = aws_vpc.privatecilium-example-com.id
}
output "bastion_autoscaling_group_ids" {
value = [aws_autoscaling_group.bastion-privatecilium-example-com.id]
}
output "bastion_security_group_ids" {
value = [aws_security_group.bastion-privatecilium-example-com.id]
}
output "bastions_role_arn" {
value = aws_iam_role.bastions-privatecilium-example-com.arn
}
output "bastions_role_name" {
value = aws_iam_role.bastions-privatecilium-example-com.name
}
output "cluster_name" {
value = "privatecilium.example.com"
}
output "master_autoscaling_group_ids" {
value = [aws_autoscaling_group.master-us-test-1a-masters-privatecilium-example-com.id]
}
output "master_security_group_ids" {
value = [aws_security_group.masters-privatecilium-example-com.id]
}
output "masters_role_arn" {
value = aws_iam_role.masters-privatecilium-example-com.arn
}
output "masters_role_name" {
value = aws_iam_role.masters-privatecilium-example-com.name
}
output "node_autoscaling_group_ids" {
value = [aws_autoscaling_group.nodes-privatecilium-example-com.id]
}
output "node_security_group_ids" {
value = [aws_security_group.nodes-privatecilium-example-com.id]
}
output "node_subnet_ids" {
value = [aws_subnet.us-test-1a-privatecilium-example-com.id]
}
output "nodes_role_arn" {
value = aws_iam_role.nodes-privatecilium-example-com.arn
}
output "nodes_role_name" {
value = aws_iam_role.nodes-privatecilium-example-com.name
}
output "region" {
value = "us-test-1"
}
output "route_table_private-us-test-1a_id" {
value = aws_route_table.private-us-test-1a-privatecilium-example-com.id
}
output "route_table_public_id" {
value = aws_route_table.privatecilium-example-com.id
}
output "subnet_us-test-1a_id" {
value = aws_subnet.us-test-1a-privatecilium-example-com.id
}
output "subnet_utility-us-test-1a_id" {
value = aws_subnet.utility-us-test-1a-privatecilium-example-com.id
}
output "vpc_cidr_block" {
value = aws_vpc.privatecilium-example-com.cidr_block
}
output "vpc_id" {
value = aws_vpc.privatecilium-example-com.id
}
provider "aws" {
region = "us-test-1"
}
resource "aws_autoscaling_attachment" "bastion-privatecilium-example-com" {
autoscaling_group_name = aws_autoscaling_group.bastion-privatecilium-example-com.id
elb = aws_elb.bastion-privatecilium-example-com.id
}
resource "aws_autoscaling_attachment" "master-us-test-1a-masters-privatecilium-example-com" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-privatecilium-example-com.id
elb = aws_elb.api-privatecilium-example-com.id
}
resource "aws_autoscaling_group" "bastion-privatecilium-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_configuration = aws_launch_configuration.bastion-privatecilium-example-com.id
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "bastion.privatecilium.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "privatecilium.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "bastion.privatecilium.example.com"
}
tag {
key = "k8s.io/role/bastion"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "bastion"
}
tag {
key = "kubernetes.io/cluster/privatecilium.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.utility-us-test-1a-privatecilium-example-com.id]
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-privatecilium-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_configuration = aws_launch_configuration.master-us-test-1a-masters-privatecilium-example-com.id
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1a.masters.privatecilium.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "privatecilium.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1a.masters.privatecilium.example.com"
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "kubernetes.io/cluster/privatecilium.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-privatecilium-example-com.id]
}
resource "aws_autoscaling_group" "nodes-privatecilium-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_configuration = aws_launch_configuration.nodes-privatecilium-example-com.id
max_size = 2
metrics_granularity = "1Minute"
min_size = 2
name = "nodes.privatecilium.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "privatecilium.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "nodes.privatecilium.example.com"
}
tag {
key = "k8s.io/role/node"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes"
}
tag {
key = "kubernetes.io/cluster/privatecilium.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-privatecilium-example-com.id]
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-privatecilium-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "us-test-1a.etcd-events.privatecilium.example.com"
"k8s.io/etcd/events" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
type = "gp2"
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-privatecilium-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "us-test-1a.etcd-main.privatecilium.example.com"
"k8s.io/etcd/main" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
type = "gp2"
}
resource "aws_eip" "us-test-1a-privatecilium-example-com" {
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "us-test-1a.privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
vpc = true
}
resource "aws_elb" "api-privatecilium-example-com" {
cross_zone_load_balancing = false
health_check {
healthy_threshold = 2
interval = 10
target = "SSL:443"
timeout = 5
unhealthy_threshold = 2
}
idle_timeout = 300
listener {
instance_port = 443
instance_protocol = "TCP"
lb_port = 443
lb_protocol = "TCP"
ssl_certificate_id = ""
}
name = "api-privatecilium-example-fnt793"
security_groups = [aws_security_group.api-elb-privatecilium-example-com.id]
subnets = [aws_subnet.utility-us-test-1a-privatecilium-example-com.id]
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "api.privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
}
resource "aws_elb" "bastion-privatecilium-example-com" {
health_check {
healthy_threshold = 2
interval = 10
target = "TCP:22"
timeout = 5
unhealthy_threshold = 2
}
idle_timeout = 300
listener {
instance_port = 22
instance_protocol = "TCP"
lb_port = 22
lb_protocol = "TCP"
ssl_certificate_id = ""
}
name = "bastion-privatecilium-exa-l2ms01"
security_groups = [aws_security_group.bastion-elb-privatecilium-example-com.id]
subnets = [aws_subnet.utility-us-test-1a-privatecilium-example-com.id]
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "bastion.privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
}
resource "aws_iam_instance_profile" "bastions-privatecilium-example-com" {
name = "bastions.privatecilium.example.com"
role = aws_iam_role.bastions-privatecilium-example-com.name
}
resource "aws_iam_instance_profile" "masters-privatecilium-example-com" {
name = "masters.privatecilium.example.com"
role = aws_iam_role.masters-privatecilium-example-com.name
}
resource "aws_iam_instance_profile" "nodes-privatecilium-example-com" {
name = "nodes.privatecilium.example.com"
role = aws_iam_role.nodes-privatecilium-example-com.name
}
resource "aws_iam_role_policy" "bastions-privatecilium-example-com" {
name = "bastions.privatecilium.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_bastions.privatecilium.example.com_policy")
role = aws_iam_role.bastions-privatecilium-example-com.name
}
resource "aws_iam_role_policy" "masters-privatecilium-example-com" {
name = "masters.privatecilium.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_masters.privatecilium.example.com_policy")
role = aws_iam_role.masters-privatecilium-example-com.name
}
resource "aws_iam_role_policy" "nodes-privatecilium-example-com" {
name = "nodes.privatecilium.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_nodes.privatecilium.example.com_policy")
role = aws_iam_role.nodes-privatecilium-example-com.name
}
resource "aws_iam_role" "bastions-privatecilium-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_bastions.privatecilium.example.com_policy")
name = "bastions.privatecilium.example.com"
}
resource "aws_iam_role" "masters-privatecilium-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_masters.privatecilium.example.com_policy")
name = "masters.privatecilium.example.com"
}
resource "aws_iam_role" "nodes-privatecilium-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.privatecilium.example.com_policy")
name = "nodes.privatecilium.example.com"
}
resource "aws_internet_gateway" "privatecilium-example-com" {
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
vpc_id = aws_vpc.privatecilium-example-com.id
}
resource "aws_key_pair" "kubernetes-privatecilium-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.privatecilium.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = file("${path.module}/data/aws_key_pair_kubernetes.privatecilium.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")
}
resource "aws_launch_configuration" "bastion-privatecilium-example-com" {
associate_public_ip_address = true
enable_monitoring = false
iam_instance_profile = aws_iam_instance_profile.bastions-privatecilium-example-com.id
image_id = "ami-12345678"
instance_type = "t2.micro"
key_name = aws_key_pair.kubernetes-privatecilium-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "bastion.privatecilium.example.com-"
root_block_device {
delete_on_termination = true
volume_size = 32
volume_type = "gp2"
}
security_groups = [aws_security_group.bastion-privatecilium-example-com.id]
}
resource "aws_launch_configuration" "master-us-test-1a-masters-privatecilium-example-com" {
associate_public_ip_address = false
enable_monitoring = false
ephemeral_block_device {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile = aws_iam_instance_profile.masters-privatecilium-example-com.id
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-privatecilium-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "master-us-test-1a.masters.privatecilium.example.com-"
root_block_device {
delete_on_termination = true
volume_size = 64
volume_type = "gp2"
}
security_groups = [aws_security_group.masters-privatecilium-example-com.id]
user_data = file("${path.module}/data/aws_launch_configuration_master-us-test-1a.masters.privatecilium.example.com_user_data")
}
resource "aws_launch_configuration" "nodes-privatecilium-example-com" {
associate_public_ip_address = false
enable_monitoring = false
iam_instance_profile = aws_iam_instance_profile.nodes-privatecilium-example-com.id
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = aws_key_pair.kubernetes-privatecilium-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "nodes.privatecilium.example.com-"
root_block_device {
delete_on_termination = true
volume_size = 128
volume_type = "gp2"
}
security_groups = [aws_security_group.nodes-privatecilium-example-com.id]
user_data = file("${path.module}/data/aws_launch_configuration_nodes.privatecilium.example.com_user_data")
}
resource "aws_nat_gateway" "us-test-1a-privatecilium-example-com" {
allocation_id = aws_eip.us-test-1a-privatecilium-example-com.id
subnet_id = aws_subnet.utility-us-test-1a-privatecilium-example-com.id
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "us-test-1a.privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
}
resource "aws_route53_record" "api-privatecilium-example-com" {
alias {
evaluate_target_health = false
name = aws_elb.api-privatecilium-example-com.dns_name
zone_id = aws_elb.api-privatecilium-example-com.zone_id
}
name = "api.privatecilium.example.com"
type = "A"
zone_id = "/hostedzone/Z1AFAKE1ZON3YO"
}
resource "aws_route_table_association" "private-us-test-1a-privatecilium-example-com" {
route_table_id = aws_route_table.private-us-test-1a-privatecilium-example-com.id
subnet_id = aws_subnet.us-test-1a-privatecilium-example-com.id
}
resource "aws_route_table_association" "utility-us-test-1a-privatecilium-example-com" {
route_table_id = aws_route_table.privatecilium-example-com.id
subnet_id = aws_subnet.utility-us-test-1a-privatecilium-example-com.id
}
resource "aws_route_table" "private-us-test-1a-privatecilium-example-com" {
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "private-us-test-1a.privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
"kubernetes.io/kops/role" = "private-us-test-1a"
}
vpc_id = aws_vpc.privatecilium-example-com.id
}
resource "aws_route_table" "privatecilium-example-com" {
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
"kubernetes.io/kops/role" = "public"
}
vpc_id = aws_vpc.privatecilium-example-com.id
}
resource "aws_route" "route-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.privatecilium-example-com.id
route_table_id = aws_route_table.privatecilium-example-com.id
}
resource "aws_route" "route-private-us-test-1a-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.us-test-1a-privatecilium-example-com.id
route_table_id = aws_route_table.private-us-test-1a-privatecilium-example-com.id
}
resource "aws_security_group_rule" "all-master-to-master" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-privatecilium-example-com.id
source_security_group_id = aws_security_group.masters-privatecilium-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "all-master-to-node" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-privatecilium-example-com.id
source_security_group_id = aws_security_group.masters-privatecilium-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "all-node-to-node" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-privatecilium-example-com.id
source_security_group_id = aws_security_group.nodes-privatecilium-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "api-elb-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.api-elb-privatecilium-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "bastion-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.bastion-privatecilium-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "bastion-elb-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.bastion-elb-privatecilium-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "bastion-to-master-ssh" {
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-privatecilium-example-com.id
source_security_group_id = aws_security_group.bastion-privatecilium-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "bastion-to-node-ssh" {
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-privatecilium-example-com.id
source_security_group_id = aws_security_group.bastion-privatecilium-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "https-api-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.api-elb-privatecilium-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "https-elb-to-master" {
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-privatecilium-example-com.id
source_security_group_id = aws_security_group.api-elb-privatecilium-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "icmp-pmtu-api-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 3
protocol = "icmp"
security_group_id = aws_security_group.api-elb-privatecilium-example-com.id
to_port = 4
type = "ingress"
}
resource "aws_security_group_rule" "master-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-privatecilium-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-privatecilium-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
from_port = 1
protocol = "tcp"
security_group_id = aws_security_group.masters-privatecilium-example-com.id
source_security_group_id = aws_security_group.nodes-privatecilium-example-com.id
to_port = 2379
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
from_port = 2382
protocol = "tcp"
security_group_id = aws_security_group.masters-privatecilium-example-com.id
source_security_group_id = aws_security_group.nodes-privatecilium-example-com.id
to_port = 4000
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-tcp-4003-65535" {
from_port = 4003
protocol = "tcp"
security_group_id = aws_security_group.masters-privatecilium-example-com.id
source_security_group_id = aws_security_group.nodes-privatecilium-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-udp-1-65535" {
from_port = 1
protocol = "udp"
security_group_id = aws_security_group.masters-privatecilium-example-com.id
source_security_group_id = aws_security_group.nodes-privatecilium-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "ssh-elb-to-bastion" {
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.bastion-privatecilium-example-com.id
source_security_group_id = aws_security_group.bastion-elb-privatecilium-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-bastion-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.bastion-elb-privatecilium-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group" "api-elb-privatecilium-example-com" {
description = "Security group for api ELB"
name = "api-elb.privatecilium.example.com"
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "api-elb.privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
vpc_id = aws_vpc.privatecilium-example-com.id
}
resource "aws_security_group" "bastion-elb-privatecilium-example-com" {
description = "Security group for bastion ELB"
name = "bastion-elb.privatecilium.example.com"
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "bastion-elb.privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
vpc_id = aws_vpc.privatecilium-example-com.id
}
resource "aws_security_group" "bastion-privatecilium-example-com" {
description = "Security group for bastion"
name = "bastion.privatecilium.example.com"
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "bastion.privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
vpc_id = aws_vpc.privatecilium-example-com.id
}
resource "aws_security_group" "masters-privatecilium-example-com" {
description = "Security group for masters"
name = "masters.privatecilium.example.com"
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "masters.privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
vpc_id = aws_vpc.privatecilium-example-com.id
}
resource "aws_security_group" "nodes-privatecilium-example-com" {
description = "Security group for nodes"
name = "nodes.privatecilium.example.com"
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "nodes.privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
vpc_id = aws_vpc.privatecilium-example-com.id
}
resource "aws_subnet" "us-test-1a-privatecilium-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "us-test-1a.privatecilium.example.com"
"SubnetType" = "Private"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.privatecilium-example-com.id
}
resource "aws_subnet" "utility-us-test-1a-privatecilium-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.4.0/22"
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "utility-us-test-1a.privatecilium.example.com"
"SubnetType" = "Utility"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
}
vpc_id = aws_vpc.privatecilium-example-com.id
}
resource "aws_vpc_dhcp_options_association" "privatecilium-example-com" {
dhcp_options_id = aws_vpc_dhcp_options.privatecilium-example-com.id
vpc_id = aws_vpc.privatecilium-example-com.id
}
resource "aws_vpc_dhcp_options" "privatecilium-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
}
resource "aws_vpc" "privatecilium-example-com" {
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"KubernetesCluster" = "privatecilium.example.com"
"Name" = "privatecilium.example.com"
"kubernetes.io/cluster/privatecilium.example.com" = "owned"
}
}
terraform {
required_version = ">= 0.12.0"
}
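
As a usage sketch only (not part of the expected test output), Terraform like the file above is what kops renders when targeting Terraform rather than applying changes directly; the output directory below is an assumption:

# Sketch: render the cluster to Terraform and review it (out/terraform is a hypothetical path)
kops update cluster privatecilium.example.com --target=terraform --out=out/terraform
cd out/terraform
terraform init   # Terraform >= 0.12, matching the required_version constraint above
terraform plan   # inspect the planned resources before any apply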

File diff suppressed because it is too large

@ -0,0 +1,502 @@
Resources.AWSAutoScalingLaunchConfigurationbastionprivateciliumadvancedexamplecom.Properties.UserData: ""
Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersprivateciliumadvancedexamplecom.Properties.UserData: |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: warn
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
cilium:
version: 3.3.10
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: privateciliumadvanced.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
enabled: false
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 52e9d2de8a5f927307d9397308735658ee44ab8d@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
- 71b7bc444ba0a5f7cd7a36e91b594c1c3d13890e160d85e0dfde38c46a24e416@https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/utils.tar.gz,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-utils.tar.gz,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/utils.tar.gz
ClusterName: privateciliumadvanced.example.com
ConfigBase: memfs://clusters.example.com/privateciliumadvanced.example.com
InstanceGroupName: master-us-test-1a
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/privateciliumadvanced.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/privateciliumadvanced.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/privateciliumadvanced.example.com/manifests/etcd/events.yaml
- memfs://clusters.example.com/privateciliumadvanced.example.com/manifests/etcd/cilium.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
Resources.AWSAutoScalingLaunchConfigurationnodesprivateciliumadvancedexamplecom.Properties.UserData: |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: warn
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
enabled: false
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 52e9d2de8a5f927307d9397308735658ee44ab8d@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
- 71b7bc444ba0a5f7cd7a36e91b594c1c3d13890e160d85e0dfde38c46a24e416@https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/utils.tar.gz,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-utils.tar.gz,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/utils.tar.gz
ClusterName: privateciliumadvanced.example.com
ConfigBase: memfs://clusters.example.com/privateciliumadvanced.example.com
InstanceGroupName: nodes
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/privateciliumadvanced.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}
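
For orientation only (not part of the fixtures), a trust policy like the three identical documents above is what an EC2 instance role is created from; the role name and file path below are assumptions:

# Sketch: create an instance role from one of the trust-policy documents above
aws iam create-role \
  --role-name masters.privateciliumadvanced.example.com \
  --assume-role-policy-document file://aws_iam_role_masters.privateciliumadvanced.example.com_policy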

@ -0,0 +1,14 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeRegions"
],
"Resource": [
"*"
]
}
]
}

@ -0,0 +1,102 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
"ec2:DescribeLaunchTemplateVersions"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"elasticloadbalancing:*"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"iam:ListServerCertificates",
"iam:GetServerCertificate"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Effect": "Allow",
"Action": [
"route53:GetChange"
],
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": [
"*"
]
}
]
}
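
A companion sketch (again not part of the fixtures): attaching an inline policy document like the one above to its role is what the aws_iam_role_policy resources in the generated Terraform do; the role and policy names below are assumptions:

# Sketch: attach the inline policy document above to the masters role
aws iam put-role-policy \
  --role-name masters.privateciliumadvanced.example.com \
  --policy-name masters.privateciliumadvanced.example.com \
  --policy-document file://aws_iam_role_policy_masters.privateciliumadvanced.example.com_policy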

@ -0,0 +1,68 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeInstances",
"ec2:DescribeRegions"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Effect": "Allow",
"Action": [
"route53:GetChange"
],
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ecr:GetAuthorizationToken",
"ecr:BatchCheckLayerAvailability",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:DescribeRepositories",
"ecr:ListImages",
"ecr:BatchGetImage"
],
"Resource": [
"*"
]
}
]
}

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==
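
As a side note (not part of the fixture), a throwaway key pair in the same format as the public key above can be generated locally; the output file name and comment are assumptions:

# Sketch: generate a disposable RSA key pair for test clusters (file name is hypothetical)
ssh-keygen -t rsa -b 2048 -f kops_test_key -N '' -C kops-integration-test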

@ -0,0 +1,297 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: warn
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
cilium:
version: 3.3.10
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: privateciliumadvanced.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
enabled: false
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 52e9d2de8a5f927307d9397308735658ee44ab8d@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
- 71b7bc444ba0a5f7cd7a36e91b594c1c3d13890e160d85e0dfde38c46a24e416@https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/utils.tar.gz,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-utils.tar.gz,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/utils.tar.gz
ClusterName: privateciliumadvanced.example.com
ConfigBase: memfs://clusters.example.com/privateciliumadvanced.example.com
InstanceGroupName: master-us-test-1a
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/privateciliumadvanced.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/privateciliumadvanced.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/privateciliumadvanced.example.com/manifests/etcd/events.yaml
- memfs://clusters.example.com/privateciliumadvanced.example.com/manifests/etcd/cilium.yaml
protokubeImage:
hash: 42a9c4324fe26d63ce11f3dd7836371bc93fa06ca8f479807728f3746e27061b
name: protokube:1.15.0
sources:
- https://artifacts.k8s.io/binaries/kops/1.15.0/images/protokube.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.15.0/images-protokube.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.15.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

@ -0,0 +1,202 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL=https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-nodeup,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/nodeup
NODEUP_HASH=9604ef18267ad7b5cf4cebbf7ab64423cf5bb0342d169c608ac6376e6af26d81
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: warn
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
enabled: false
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 52e9d2de8a5f927307d9397308735658ee44ab8d@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
- 71b7bc444ba0a5f7cd7a36e91b594c1c3d13890e160d85e0dfde38c46a24e416@https://artifacts.k8s.io/binaries/kops/1.15.0/linux/amd64/utils.tar.gz,https://github.com/kubernetes/kops/releases/download/v1.15.0/linux-amd64-utils.tar.gz,https://kubeupv2.s3.amazonaws.com/kops/1.15.0/linux/amd64/utils.tar.gz
ClusterName: privateciliumadvanced.example.com
ConfigBase: memfs://clusters.example.com/privateciliumadvanced.example.com
InstanceGroupName: nodes
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/privateciliumadvanced.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

@ -0,0 +1,111 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-12T04:13:14Z"
name: privateciliumadvanced.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/privateciliumadvanced.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: cilium
kubelet:
anonymousAuth: false
kubeProxy:
enabled: false
kubernetesVersion: v1.14.0
masterInternalName: api.internal.privateciliumadvanced.example.com
masterPublicName: api.privateciliumadvanced.example.com
networkCIDR: 172.20.0.0/16
networking:
cilium:
enableNodePort: true
etcdManaged: true
disableMasquerade: true
ipam: eni
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: private
nodes: private
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Private
zone: us-test-1a
- cidr: 172.20.4.0/22
name: utility-us-test-1a
type: Utility
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-12T04:13:15Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: privateciliumadvanced.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-12T04:13:15Z"
name: nodes
labels:
kops.k8s.io/cluster: privateciliumadvanced.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-14T15:32:41Z"
name: bastion
labels:
kops.k8s.io/cluster: privateciliumadvanced.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.micro
maxSize: 1
minSize: 1
role: Bastion
subnets:
- utility-us-test-1a
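
To show how a spec like the one above is typically consumed (a sketch, not part of the fixture), it can be registered with kops together with an SSH public key; the state store and file names below are placeholders:

# Sketch: register the cluster and instance-group spec above with kops
export KOPS_STATE_STORE=s3://example-kops-state-store   # placeholder state store
kops create -f privateciliumadvanced.example.com.yaml
kops create secret --name privateciliumadvanced.example.com sshpublickey admin -i kops_test_key.pub   # public key file name is hypothetical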

@ -0,0 +1,800 @@
locals {
bastion_autoscaling_group_ids = [aws_autoscaling_group.bastion-privateciliumadvanced-example-com.id]
bastion_security_group_ids = [aws_security_group.bastion-privateciliumadvanced-example-com.id]
bastions_role_arn = aws_iam_role.bastions-privateciliumadvanced-example-com.arn
bastions_role_name = aws_iam_role.bastions-privateciliumadvanced-example-com.name
cluster_name = "privateciliumadvanced.example.com"
master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-privateciliumadvanced-example-com.id]
master_security_group_ids = [aws_security_group.masters-privateciliumadvanced-example-com.id]
masters_role_arn = aws_iam_role.masters-privateciliumadvanced-example-com.arn
masters_role_name = aws_iam_role.masters-privateciliumadvanced-example-com.name
node_autoscaling_group_ids = [aws_autoscaling_group.nodes-privateciliumadvanced-example-com.id]
node_security_group_ids = [aws_security_group.nodes-privateciliumadvanced-example-com.id]
node_subnet_ids = [aws_subnet.us-test-1a-privateciliumadvanced-example-com.id]
nodes_role_arn = aws_iam_role.nodes-privateciliumadvanced-example-com.arn
nodes_role_name = aws_iam_role.nodes-privateciliumadvanced-example-com.name
region = "us-test-1"
route_table_private-us-test-1a_id = aws_route_table.private-us-test-1a-privateciliumadvanced-example-com.id
route_table_public_id = aws_route_table.privateciliumadvanced-example-com.id
subnet_us-test-1a_id = aws_subnet.us-test-1a-privateciliumadvanced-example-com.id
subnet_utility-us-test-1a_id = aws_subnet.utility-us-test-1a-privateciliumadvanced-example-com.id
vpc_cidr_block = aws_vpc.privateciliumadvanced-example-com.cidr_block
vpc_id = aws_vpc.privateciliumadvanced-example-com.id
}
output "bastion_autoscaling_group_ids" {
value = [aws_autoscaling_group.bastion-privateciliumadvanced-example-com.id]
}
output "bastion_security_group_ids" {
value = [aws_security_group.bastion-privateciliumadvanced-example-com.id]
}
output "bastions_role_arn" {
value = aws_iam_role.bastions-privateciliumadvanced-example-com.arn
}
output "bastions_role_name" {
value = aws_iam_role.bastions-privateciliumadvanced-example-com.name
}
output "cluster_name" {
value = "privateciliumadvanced.example.com"
}
output "master_autoscaling_group_ids" {
value = [aws_autoscaling_group.master-us-test-1a-masters-privateciliumadvanced-example-com.id]
}
output "master_security_group_ids" {
value = [aws_security_group.masters-privateciliumadvanced-example-com.id]
}
output "masters_role_arn" {
value = aws_iam_role.masters-privateciliumadvanced-example-com.arn
}
output "masters_role_name" {
value = aws_iam_role.masters-privateciliumadvanced-example-com.name
}
output "node_autoscaling_group_ids" {
value = [aws_autoscaling_group.nodes-privateciliumadvanced-example-com.id]
}
output "node_security_group_ids" {
value = [aws_security_group.nodes-privateciliumadvanced-example-com.id]
}
output "node_subnet_ids" {
value = [aws_subnet.us-test-1a-privateciliumadvanced-example-com.id]
}
output "nodes_role_arn" {
value = aws_iam_role.nodes-privateciliumadvanced-example-com.arn
}
output "nodes_role_name" {
value = aws_iam_role.nodes-privateciliumadvanced-example-com.name
}
output "region" {
value = "us-test-1"
}
output "route_table_private-us-test-1a_id" {
value = aws_route_table.private-us-test-1a-privateciliumadvanced-example-com.id
}
output "route_table_public_id" {
value = aws_route_table.privateciliumadvanced-example-com.id
}
output "subnet_us-test-1a_id" {
value = aws_subnet.us-test-1a-privateciliumadvanced-example-com.id
}
output "subnet_utility-us-test-1a_id" {
value = aws_subnet.utility-us-test-1a-privateciliumadvanced-example-com.id
}
output "vpc_cidr_block" {
value = aws_vpc.privateciliumadvanced-example-com.cidr_block
}
output "vpc_id" {
value = aws_vpc.privateciliumadvanced-example-com.id
}
provider "aws" {
region = "us-test-1"
}
resource "aws_autoscaling_attachment" "bastion-privateciliumadvanced-example-com" {
autoscaling_group_name = aws_autoscaling_group.bastion-privateciliumadvanced-example-com.id
elb = aws_elb.bastion-privateciliumadvanced-example-com.id
}
resource "aws_autoscaling_attachment" "master-us-test-1a-masters-privateciliumadvanced-example-com" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-privateciliumadvanced-example-com.id
elb = aws_elb.api-privateciliumadvanced-example-com.id
}
resource "aws_autoscaling_group" "bastion-privateciliumadvanced-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_configuration = aws_launch_configuration.bastion-privateciliumadvanced-example-com.id
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "bastion.privateciliumadvanced.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "privateciliumadvanced.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "bastion.privateciliumadvanced.example.com"
}
tag {
key = "k8s.io/role/bastion"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "bastion"
}
tag {
key = "kubernetes.io/cluster/privateciliumadvanced.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.utility-us-test-1a-privateciliumadvanced-example-com.id]
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-privateciliumadvanced-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_configuration = aws_launch_configuration.master-us-test-1a-masters-privateciliumadvanced-example-com.id
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1a.masters.privateciliumadvanced.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "privateciliumadvanced.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1a.masters.privateciliumadvanced.example.com"
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "kubernetes.io/cluster/privateciliumadvanced.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-privateciliumadvanced-example-com.id]
}
resource "aws_autoscaling_group" "nodes-privateciliumadvanced-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_configuration = aws_launch_configuration.nodes-privateciliumadvanced-example-com.id
max_size = 2
metrics_granularity = "1Minute"
min_size = 2
name = "nodes.privateciliumadvanced.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "privateciliumadvanced.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "nodes.privateciliumadvanced.example.com"
}
tag {
key = "k8s.io/role/node"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes"
}
tag {
key = "kubernetes.io/cluster/privateciliumadvanced.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-privateciliumadvanced-example-com.id]
}
resource "aws_ebs_volume" "us-test-1a-etcd-cilium-privateciliumadvanced-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "us-test-1a.etcd-cilium.privateciliumadvanced.example.com"
"k8s.io/etcd/cilium" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
type = "gp2"
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-privateciliumadvanced-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "us-test-1a.etcd-events.privateciliumadvanced.example.com"
"k8s.io/etcd/events" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
type = "gp2"
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-privateciliumadvanced-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "us-test-1a.etcd-main.privateciliumadvanced.example.com"
"k8s.io/etcd/main" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
type = "gp2"
}
resource "aws_eip" "us-test-1a-privateciliumadvanced-example-com" {
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "us-test-1a.privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
vpc = true
}
resource "aws_elb" "api-privateciliumadvanced-example-com" {
cross_zone_load_balancing = false
health_check {
healthy_threshold = 2
interval = 10
target = "SSL:443"
timeout = 5
unhealthy_threshold = 2
}
idle_timeout = 300
listener {
instance_port = 443
instance_protocol = "TCP"
lb_port = 443
lb_protocol = "TCP"
ssl_certificate_id = ""
}
name = "api-privateciliumadvanced-0cffmm"
security_groups = [aws_security_group.api-elb-privateciliumadvanced-example-com.id]
subnets = [aws_subnet.utility-us-test-1a-privateciliumadvanced-example-com.id]
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "api.privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
}
resource "aws_elb" "bastion-privateciliumadvanced-example-com" {
health_check {
healthy_threshold = 2
interval = 10
target = "TCP:22"
timeout = 5
unhealthy_threshold = 2
}
idle_timeout = 300
listener {
instance_port = 22
instance_protocol = "TCP"
lb_port = 22
lb_protocol = "TCP"
ssl_certificate_id = ""
}
name = "bastion-privateciliumadva-0jni40"
security_groups = [aws_security_group.bastion-elb-privateciliumadvanced-example-com.id]
subnets = [aws_subnet.utility-us-test-1a-privateciliumadvanced-example-com.id]
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "bastion.privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
}
resource "aws_iam_instance_profile" "bastions-privateciliumadvanced-example-com" {
name = "bastions.privateciliumadvanced.example.com"
role = aws_iam_role.bastions-privateciliumadvanced-example-com.name
}
resource "aws_iam_instance_profile" "masters-privateciliumadvanced-example-com" {
name = "masters.privateciliumadvanced.example.com"
role = aws_iam_role.masters-privateciliumadvanced-example-com.name
}
resource "aws_iam_instance_profile" "nodes-privateciliumadvanced-example-com" {
name = "nodes.privateciliumadvanced.example.com"
role = aws_iam_role.nodes-privateciliumadvanced-example-com.name
}
resource "aws_iam_role_policy" "bastions-privateciliumadvanced-example-com" {
name = "bastions.privateciliumadvanced.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_bastions.privateciliumadvanced.example.com_policy")
role = aws_iam_role.bastions-privateciliumadvanced-example-com.name
}
resource "aws_iam_role_policy" "masters-privateciliumadvanced-example-com" {
name = "masters.privateciliumadvanced.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_masters.privateciliumadvanced.example.com_policy")
role = aws_iam_role.masters-privateciliumadvanced-example-com.name
}
resource "aws_iam_role_policy" "nodes-privateciliumadvanced-example-com" {
name = "nodes.privateciliumadvanced.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_nodes.privateciliumadvanced.example.com_policy")
role = aws_iam_role.nodes-privateciliumadvanced-example-com.name
}
resource "aws_iam_role" "bastions-privateciliumadvanced-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_bastions.privateciliumadvanced.example.com_policy")
name = "bastions.privateciliumadvanced.example.com"
}
resource "aws_iam_role" "masters-privateciliumadvanced-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_masters.privateciliumadvanced.example.com_policy")
name = "masters.privateciliumadvanced.example.com"
}
resource "aws_iam_role" "nodes-privateciliumadvanced-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.privateciliumadvanced.example.com_policy")
name = "nodes.privateciliumadvanced.example.com"
}
resource "aws_internet_gateway" "privateciliumadvanced-example-com" {
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
vpc_id = aws_vpc.privateciliumadvanced-example-com.id
}
resource "aws_key_pair" "kubernetes-privateciliumadvanced-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.privateciliumadvanced.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = file("${path.module}/data/aws_key_pair_kubernetes.privateciliumadvanced.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")
}
resource "aws_launch_configuration" "bastion-privateciliumadvanced-example-com" {
associate_public_ip_address = true
enable_monitoring = false
iam_instance_profile = aws_iam_instance_profile.bastions-privateciliumadvanced-example-com.id
image_id = "ami-12345678"
instance_type = "t2.micro"
key_name = aws_key_pair.kubernetes-privateciliumadvanced-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "bastion.privateciliumadvanced.example.com-"
root_block_device {
delete_on_termination = true
volume_size = 32
volume_type = "gp2"
}
security_groups = [aws_security_group.bastion-privateciliumadvanced-example-com.id]
}
resource "aws_launch_configuration" "master-us-test-1a-masters-privateciliumadvanced-example-com" {
associate_public_ip_address = false
enable_monitoring = false
ephemeral_block_device {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile = aws_iam_instance_profile.masters-privateciliumadvanced-example-com.id
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-privateciliumadvanced-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "master-us-test-1a.masters.privateciliumadvanced.example.com-"
root_block_device {
delete_on_termination = true
volume_size = 64
volume_type = "gp2"
}
security_groups = [aws_security_group.masters-privateciliumadvanced-example-com.id]
user_data = file("${path.module}/data/aws_launch_configuration_master-us-test-1a.masters.privateciliumadvanced.example.com_user_data")
}
resource "aws_launch_configuration" "nodes-privateciliumadvanced-example-com" {
associate_public_ip_address = false
enable_monitoring = false
iam_instance_profile = aws_iam_instance_profile.nodes-privateciliumadvanced-example-com.id
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = aws_key_pair.kubernetes-privateciliumadvanced-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name_prefix = "nodes.privateciliumadvanced.example.com-"
root_block_device {
delete_on_termination = true
volume_size = 128
volume_type = "gp2"
}
security_groups = [aws_security_group.nodes-privateciliumadvanced-example-com.id]
user_data = file("${path.module}/data/aws_launch_configuration_nodes.privateciliumadvanced.example.com_user_data")
}
resource "aws_nat_gateway" "us-test-1a-privateciliumadvanced-example-com" {
allocation_id = aws_eip.us-test-1a-privateciliumadvanced-example-com.id
subnet_id = aws_subnet.utility-us-test-1a-privateciliumadvanced-example-com.id
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "us-test-1a.privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
}
resource "aws_route53_record" "api-privateciliumadvanced-example-com" {
alias {
evaluate_target_health = false
name = aws_elb.api-privateciliumadvanced-example-com.dns_name
zone_id = aws_elb.api-privateciliumadvanced-example-com.zone_id
}
name = "api.privateciliumadvanced.example.com"
type = "A"
zone_id = "/hostedzone/Z1AFAKE1ZON3YO"
}
resource "aws_route_table_association" "private-us-test-1a-privateciliumadvanced-example-com" {
route_table_id = aws_route_table.private-us-test-1a-privateciliumadvanced-example-com.id
subnet_id = aws_subnet.us-test-1a-privateciliumadvanced-example-com.id
}
resource "aws_route_table_association" "utility-us-test-1a-privateciliumadvanced-example-com" {
route_table_id = aws_route_table.privateciliumadvanced-example-com.id
subnet_id = aws_subnet.utility-us-test-1a-privateciliumadvanced-example-com.id
}
resource "aws_route_table" "private-us-test-1a-privateciliumadvanced-example-com" {
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "private-us-test-1a.privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
"kubernetes.io/kops/role" = "private-us-test-1a"
}
vpc_id = aws_vpc.privateciliumadvanced-example-com.id
}
resource "aws_route_table" "privateciliumadvanced-example-com" {
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
"kubernetes.io/kops/role" = "public"
}
vpc_id = aws_vpc.privateciliumadvanced-example-com.id
}
resource "aws_route" "route-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.privateciliumadvanced-example-com.id
route_table_id = aws_route_table.privateciliumadvanced-example-com.id
}
resource "aws_route" "route-private-us-test-1a-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.us-test-1a-privateciliumadvanced-example-com.id
route_table_id = aws_route_table.private-us-test-1a-privateciliumadvanced-example-com.id
}
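# Security group rules: SSH reaches masters and nodes only via the bastion, the API
# ELB accepts 443 from anywhere, and node-to-master traffic is allowed on all ports
# except 2380-2382 and 4001-4002, which stay master-only (presumably the etcd ports,
# including the dedicated Cilium etcd used by this cluster).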
resource "aws_security_group_rule" "all-master-to-master" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-privateciliumadvanced-example-com.id
source_security_group_id = aws_security_group.masters-privateciliumadvanced-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "all-master-to-node" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-privateciliumadvanced-example-com.id
source_security_group_id = aws_security_group.masters-privateciliumadvanced-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "all-node-to-node" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-privateciliumadvanced-example-com.id
source_security_group_id = aws_security_group.nodes-privateciliumadvanced-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "api-elb-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.api-elb-privateciliumadvanced-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "bastion-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.bastion-privateciliumadvanced-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "bastion-elb-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.bastion-elb-privateciliumadvanced-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "bastion-to-master-ssh" {
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-privateciliumadvanced-example-com.id
source_security_group_id = aws_security_group.bastion-privateciliumadvanced-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "bastion-to-node-ssh" {
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-privateciliumadvanced-example-com.id
source_security_group_id = aws_security_group.bastion-privateciliumadvanced-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "https-api-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.api-elb-privateciliumadvanced-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "https-elb-to-master" {
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-privateciliumadvanced-example-com.id
source_security_group_id = aws_security_group.api-elb-privateciliumadvanced-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "icmp-pmtu-api-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 3
protocol = "icmp"
security_group_id = aws_security_group.api-elb-privateciliumadvanced-example-com.id
to_port = 4
type = "ingress"
}
resource "aws_security_group_rule" "master-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-privateciliumadvanced-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-privateciliumadvanced-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
from_port = 1
protocol = "tcp"
security_group_id = aws_security_group.masters-privateciliumadvanced-example-com.id
source_security_group_id = aws_security_group.nodes-privateciliumadvanced-example-com.id
to_port = 2379
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-tcp-2383-4000" {
from_port = 2383
protocol = "tcp"
security_group_id = aws_security_group.masters-privateciliumadvanced-example-com.id
source_security_group_id = aws_security_group.nodes-privateciliumadvanced-example-com.id
to_port = 4000
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-tcp-4003-65535" {
from_port = 4003
protocol = "tcp"
security_group_id = aws_security_group.masters-privateciliumadvanced-example-com.id
source_security_group_id = aws_security_group.nodes-privateciliumadvanced-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "node-to-master-udp-1-65535" {
from_port = 1
protocol = "udp"
security_group_id = aws_security_group.masters-privateciliumadvanced-example-com.id
source_security_group_id = aws_security_group.nodes-privateciliumadvanced-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "ssh-elb-to-bastion" {
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.bastion-privateciliumadvanced-example-com.id
source_security_group_id = aws_security_group.bastion-elb-privateciliumadvanced-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-bastion-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.bastion-elb-privateciliumadvanced-example-com.id
to_port = 22
type = "ingress"
}
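# One security group per role (API ELB, bastion ELB, bastion, masters, nodes),
# all scoped to the cluster VPC and tagged with the cluster name.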
resource "aws_security_group" "api-elb-privateciliumadvanced-example-com" {
description = "Security group for api ELB"
name = "api-elb.privateciliumadvanced.example.com"
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "api-elb.privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
vpc_id = aws_vpc.privateciliumadvanced-example-com.id
}
resource "aws_security_group" "bastion-elb-privateciliumadvanced-example-com" {
description = "Security group for bastion ELB"
name = "bastion-elb.privateciliumadvanced.example.com"
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "bastion-elb.privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
vpc_id = aws_vpc.privateciliumadvanced-example-com.id
}
resource "aws_security_group" "bastion-privateciliumadvanced-example-com" {
description = "Security group for bastion"
name = "bastion.privateciliumadvanced.example.com"
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "bastion.privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
vpc_id = aws_vpc.privateciliumadvanced-example-com.id
}
resource "aws_security_group" "masters-privateciliumadvanced-example-com" {
description = "Security group for masters"
name = "masters.privateciliumadvanced.example.com"
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "masters.privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
vpc_id = aws_vpc.privateciliumadvanced-example-com.id
}
resource "aws_security_group" "nodes-privateciliumadvanced-example-com" {
description = "Security group for nodes"
name = "nodes.privateciliumadvanced.example.com"
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "nodes.privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
vpc_id = aws_vpc.privateciliumadvanced-example-com.id
}
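# Subnets: a private /19 for masters and nodes (internal ELBs) and a public /22
# utility subnet for the bastion ELB and the NAT gateway (internet-facing ELBs).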
resource "aws_subnet" "us-test-1a-privateciliumadvanced-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "us-test-1a.privateciliumadvanced.example.com"
"SubnetType" = "Private"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.privateciliumadvanced-example-com.id
}
resource "aws_subnet" "utility-us-test-1a-privateciliumadvanced-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.4.0/22"
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "utility-us-test-1a.privateciliumadvanced.example.com"
"SubnetType" = "Utility"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
}
vpc_id = aws_vpc.privateciliumadvanced-example-com.id
}
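# VPC plumbing: a 172.20.0.0/16 VPC with DNS support and hostnames enabled, using
# Amazon-provided DNS via the associated DHCP options set.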
resource "aws_vpc_dhcp_options_association" "privateciliumadvanced-example-com" {
dhcp_options_id = aws_vpc_dhcp_options.privateciliumadvanced-example-com.id
vpc_id = aws_vpc.privateciliumadvanced-example-com.id
}
resource "aws_vpc_dhcp_options" "privateciliumadvanced-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
}
resource "aws_vpc" "privateciliumadvanced-example-com" {
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"KubernetesCluster" = "privateciliumadvanced.example.com"
"Name" = "privateciliumadvanced.example.com"
"kubernetes.io/cluster/privateciliumadvanced.example.com" = "owned"
}
}
terraform {
required_version = ">= 0.12.0"
}