Merge pull request #10374 from hakman/eip-egress

Add option to reuse existing Elastic IPs for NAT gateways
Kubernetes Prow Robot 2020-12-06 04:55:25 -08:00 committed by GitHub
commit e68a4648b9
22 changed files with 4034 additions and 4 deletions

View File

@@ -55,6 +55,10 @@ func (m *MockEC2) AllocateAddressWithId(request *ec2.AllocateAddressInput, id st
Domain: s("vpc"),
PublicIp: s(publicIP.String()),
}
if request.Address != nil {
address.PublicIp = request.Address
}
if m.Addresses == nil {
m.Addresses = make(map[string]*ec2.Address)
}
@@ -151,6 +155,7 @@ func (m *MockEC2) DescribeAddresses(request *ec2.DescribeAddressesInput) (*ec2.D
}
copy := *address
copy.Tags = m.getTags(ec2.ResourceTypeElasticIp, *address.AllocationId)
addresses = append(addresses, &copy)
}
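
For orientation, a minimal hypothetical test sketch (not part of this PR) of how the new `request.Address` handling above could be exercised. It assumes the mock lives at `k8s.io/kops/cloudmock/aws/mockec2`, is constructed as `&mockec2.MockEC2{}`, that `AllocateAddressWithId` returns `(*ec2.AllocateAddressOutput, error)`, and that the output echoes back the stored public IP:

```go
package mockec2_test

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"

	"k8s.io/kops/cloudmock/aws/mockec2"
)

// TestAllocateAddressWithRequestedIP (hypothetical) allocates an Elastic IP
// through the mock while supplying an explicit address; the new branch above
// stores that address instead of a randomly generated public IP.
func TestAllocateAddressWithRequestedIP(t *testing.T) {
	c := &mockec2.MockEC2{}

	req := &ec2.AllocateAddressInput{
		Domain:  aws.String("vpc"),
		Address: aws.String("203.0.113.10"), // pre-allocated IP the mock should keep
	}
	out, err := c.AllocateAddressWithId(req, "eipalloc-12345678")
	if err != nil {
		t.Fatalf("AllocateAddressWithId failed: %v", err)
	}
	if aws.StringValue(out.PublicIp) != "203.0.113.10" {
		t.Fatalf("expected requested public IP to be preserved, got %q", aws.StringValue(out.PublicIp))
	}
}
```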

View File

@@ -259,6 +259,12 @@ func TestPrivateSharedSubnet(t *testing.T) {
newIntegrationTest("private-shared-subnet.example.com", "private-shared-subnet").withPrivate().runTestTerraformAWS(t)
}
// TestPrivateSharedIP runs the test on a configuration with private topology & a shared (pre-allocated) Elastic IP
func TestPrivateSharedIP(t *testing.T) {
newIntegrationTest("private-shared-ip.example.com", "private-shared-ip").withPrivate().runTestTerraformAWS(t)
newIntegrationTest("private-shared-ip.example.com", "private-shared-ip").withPrivate().runTestCloudformation(t)
}
// TestPrivateDns1 runs the test on a configuration with private topology, private dns
func TestPrivateDns1(t *testing.T) {
newIntegrationTest("privatedns1.example.com", "privatedns1").withPrivate().runTestTerraformAWS(t)

View File

@@ -140,6 +140,15 @@ func TestLifecyclePrivateSharedSubnet(t *testing.T) {
})
}
// TestLifecyclePrivateSharedIP runs the test on a configuration with private topology and a shared Elastic IP
func TestLifecyclePrivateSharedIP(t *testing.T) {
runLifecycleTestAWS(&LifecycleTestOptions{
t: t,
SrcDir: "private-shared-ip",
Shared: []string{"eipalloc-12345678"},
})
}
func runLifecycleTest(h *testutils.IntegrationTestHarness, o *LifecycleTestOptions, cloud *awsup.MockAWSCloud) {
ctx := context.Background()

View File

@@ -240,6 +240,18 @@ spec:
zone: us-east-1a
```
If you don't want to use an existing NAT gateway but still want to use a pre-allocated Elastic IP, kOps 1.19.0 introduced the ability to specify an Elastic IP as egress; kOps will then create a NAT gateway that uses it.
```yaml
spec:
subnets:
- cidr: 10.20.64.0/21
name: us-east-1a
egress: eipalloc-0123456789abcdef0
type: Private
zone: us-east-1a
```
If you don't use NAT gateways or internet gateways, kOps 1.12.0 introduced the "External" flag for egress, which forces kOps to ignore egress for the subnet. This can be useful when egress for the subnet is managed by other tools, such as virtual private gateways. Please note that your cluster may need access to the internet upon creation, so egress must be available when the cluster is initialized. This flag is intended for use when egress is managed externally to kOps, typically with an existing cluster.
```yaml

View File

@@ -551,8 +551,16 @@ const (
SubnetTypeUtility SubnetType = "Utility"
)
// EgressExternal means that egress configuration is done externally (preconfigured)
const EgressExternal = "External"
const (
// EgressNatGateway means that egress configuration is using an existing NAT Gateway
EgressNatGateway = "nat"
// EgressElasticIP means that egress configuration is using a NAT Gateway with an existing Elastic IP
EgressElasticIP = "eipalloc"
// EgressNatInstance means that egress configuration is using an existing NAT Instance
EgressNatInstance = "i"
// EgressExternal means that egress configuration is done externally (preconfigured)
EgressExternal = "External"
)
// ClusterSubnetSpec defines a subnet
type ClusterSubnetSpec struct {

View File

@@ -343,8 +343,10 @@ func validateSubnet(subnet *kops.ClusterSubnetSpec, fieldPath *field.Path) field
}
if subnet.Egress != "" {
if !strings.HasPrefix(subnet.Egress, "nat-") && !strings.HasPrefix(subnet.Egress, "i-") && subnet.Egress != kops.EgressExternal {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("egress"), subnet.Egress, "egress must be of type NAT Gateway or NAT EC2 Instance or 'External'"))
egressType := strings.Split(subnet.Egress, "-")[0]
if egressType != kops.EgressNatGateway && egressType != kops.EgressElasticIP && egressType != kops.EgressNatInstance && egressType != kops.EgressExternal {
allErrs = append(allErrs, field.Invalid(fieldPath.Child("egress"), subnet.Egress,
"egress must be of type NAT Gateway, NAT Gateway with existing ElasticIP, NAT EC2 Instance or External"))
}
if subnet.Egress != kops.EgressExternal && subnet.Type != "Private" {
allErrs = append(allErrs, field.Forbidden(fieldPath.Child("egress"), "egress can only be specified for private subnets"))
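
To illustrate the classification the new validation performs, here is a small standalone sketch (an illustrative rewrite, not the kops validation code itself) that splits an egress value on `-` and checks the prefix against the constants added in this PR:

```go
package main

import (
	"fmt"
	"strings"
)

// Mirrors the egress constants introduced in this PR.
const (
	EgressNatGateway  = "nat"      // e.g. nat-0123456789abcdef0
	EgressElasticIP   = "eipalloc" // e.g. eipalloc-0123456789abcdef0
	EgressNatInstance = "i"        // e.g. i-0123456789abcdef0
	EgressExternal    = "External" // egress managed outside kOps
)

// egressKind returns the recognized egress type for a subnet egress value,
// or an error for values the validator would reject.
func egressKind(egress string) (string, error) {
	kind := strings.Split(egress, "-")[0]
	switch kind {
	case EgressNatGateway, EgressElasticIP, EgressNatInstance, EgressExternal:
		return kind, nil
	}
	return "", fmt.Errorf("egress must be of type NAT Gateway, NAT Gateway with existing ElasticIP, NAT EC2 Instance or External")
}

func main() {
	for _, e := range []string{"nat-0123456789abcdef0", "eipalloc-0123456789abcdef0", "i-0123456789abcdef0", "External", "vgw-123"} {
		kind, err := egressKind(e)
		fmt.Printf("%-30s kind=%-10q err=%v\n", e, kind, err)
	}
}
```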

View File

@@ -320,6 +320,28 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
c.AddTask(ngw)
} else if strings.HasPrefix(egress, "eipalloc-") {
eip := &awstasks.ElasticIP{
Name: s(zone + "." + b.ClusterName()),
ID: s(egress),
Lifecycle: b.Lifecycle,
AssociatedNatGatewayRouteTable: b.LinkToPrivateRouteTableInZone(zone),
Shared: fi.Bool(true),
Tags: b.CloudTags(zone+"."+b.ClusterName(), true),
}
c.AddTask(eip)
ngw = &awstasks.NatGateway{
Name: s(zone + "." + b.ClusterName()),
Lifecycle: b.Lifecycle,
Subnet: utilitySubnet,
ElasticIP: eip,
AssociatedRouteTable: b.LinkToPrivateRouteTableInZone(zone),
Tags: b.CloudTags(zone+"."+b.ClusterName(), false),
}
c.AddTask(ngw)
} else if strings.HasPrefix(egress, "i-") {
in = &awstasks.Instance{

File diff suppressed because it is too large

View File

@@ -0,0 +1,550 @@
Resources.AWSEC2LaunchTemplatebastionprivatesharedipexamplecom.Properties.LaunchTemplateData.UserData: ""
Resources.AWSEC2LaunchTemplatemasterustest1amastersprivatesharedipexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/linux/amd64/nodeup
NODEUP_HASH_AMD64=6980fda4fa37bbdc043738cf4ddac6388eb57f561895c69299c1b0ee263d465d
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/linux/arm64/nodeup
NODEUP_HASH_ARM64=dcc7f9f3c180ee76a511627e46da0ac69cdcb518cdf3be348e5ed046d491eb87
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: private-shared-ip.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
{}
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
- 346f9394393ee8db5f8bd1e229ee9d90e5b36931bdd754308b2ae68884dd6822@https://download.docker.com/linux/static/stable/x86_64/docker-18.06.3-ce.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
- defb2ccc95c0825833216c8b9e0e15baaa51bcedb3efc1f393f5352d184dead4@https://download.docker.com/linux/static/stable/aarch64/docker-18.06.3-ce.tgz
ClusterName: private-shared-ip.example.com
ConfigBase: memfs://clusters.example.com/private-shared-ip.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
KubeletConfig:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kubernetes.io/role: master
node-role.kubernetes.io/master: ""
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
channels:
- memfs://clusters.example.com/private-shared-ip.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/private-shared-ip.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/private-shared-ip.example.com/manifests/etcd/events.yaml
protokubeImage:
amd64:
hash: 7b3c7f6adbda11b1ec740bd6b969c84f249b7eee818af95f2d321963088245a8
name: protokube:1.19.0-alpha.3
sources:
- https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/images/protokube-amd64.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/images-protokube-amd64.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/images/protokube-amd64.tar.gz
arm64:
hash: 69270ca9c1c950be65af40337adfccec0a728930fa3224bb0d2e88f181f39ead
name: protokube:1.19.0-alpha.3
sources:
- https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/images/protokube-arm64.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/images-protokube-arm64.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/images/protokube-arm64.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
Resources.AWSEC2LaunchTemplatenodesprivatesharedipexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/linux/amd64/nodeup
NODEUP_HASH_AMD64=6980fda4fa37bbdc043738cf4ddac6388eb57f561895c69299c1b0ee263d465d
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/linux/arm64/nodeup
NODEUP_HASH_ARM64=dcc7f9f3c180ee76a511627e46da0ac69cdcb518cdf3be348e5ed046d491eb87
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
{}
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
- 346f9394393ee8db5f8bd1e229ee9d90e5b36931bdd754308b2ae68884dd6822@https://download.docker.com/linux/static/stable/x86_64/docker-18.06.3-ce.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
- defb2ccc95c0825833216c8b9e0e15baaa51bcedb3efc1f393f5352d184dead4@https://download.docker.com/linux/static/stable/aarch64/docker-18.06.3-ce.tgz
ClusterName: private-shared-ip.example.com
ConfigBase: memfs://clusters.example.com/private-shared-ip.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
KubeletConfig:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
channels:
- memfs://clusters.example.com/private-shared-ip.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -0,0 +1,14 @@
{
"Statement": [
{
"Action": [
"ec2:DescribeRegions"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
}

View File

@@ -0,0 +1,170 @@
{
"Statement": [
{
"Action": [
"ec2:DescribeAccountAttributes",
"ec2:DescribeInstances",
"ec2:DescribeInternetGateways",
"ec2:DescribeRegions",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVolumes"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:CreateVolume",
"ec2:DescribeVolumesModifications",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyVolume"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CreateRoute",
"ec2:DeleteRoute",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DetachVolume",
"ec2:RevokeSecurityGroupIngress"
],
"Condition": {
"StringEquals": {
"ec2:ResourceTag/KubernetesCluster": "private-shared-ip.example.com"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"ec2:DescribeLaunchTemplateVersions"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup"
],
"Condition": {
"StringEquals": {
"autoscaling:ResourceTag/KubernetesCluster": "private-shared-ip.example.com"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"elasticloadbalancing:AddTags",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"ec2:DescribeVpcs",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateTargetGroup",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:ModifyTargetGroup",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"iam:ListServerCertificates",
"iam:GetServerCertificate"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Effect": "Allow",
"Resource": [
"arn:aws:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
}

View File

@@ -0,0 +1,15 @@
{
"Statement": [
{
"Action": [
"ec2:DescribeInstances",
"ec2:DescribeRegions"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
}

View File

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@@ -0,0 +1,324 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/linux/amd64/nodeup
NODEUP_HASH_AMD64=6980fda4fa37bbdc043738cf4ddac6388eb57f561895c69299c1b0ee263d465d
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/linux/arm64/nodeup
NODEUP_HASH_ARM64=dcc7f9f3c180ee76a511627e46da0ac69cdcb518cdf3be348e5ed046d491eb87
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
encryptionConfig: null
etcdClusters:
events:
version: 3.3.10
main:
version: 3.3.10
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.14.0
insecureBindAddress: 127.0.0.1
insecurePort: 8080
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: private-shared-ip.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.14.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
{}
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
- 346f9394393ee8db5f8bd1e229ee9d90e5b36931bdd754308b2ae68884dd6822@https://download.docker.com/linux/static/stable/x86_64/docker-18.06.3-ce.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
- defb2ccc95c0825833216c8b9e0e15baaa51bcedb3efc1f393f5352d184dead4@https://download.docker.com/linux/static/stable/aarch64/docker-18.06.3-ce.tgz
ClusterName: private-shared-ip.example.com
ConfigBase: memfs://clusters.example.com/private-shared-ip.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
KubeletConfig:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kubernetes.io/role: master
node-role.kubernetes.io/master: ""
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
channels:
- memfs://clusters.example.com/private-shared-ip.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://clusters.example.com/private-shared-ip.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/private-shared-ip.example.com/manifests/etcd/events.yaml
protokubeImage:
amd64:
hash: 7b3c7f6adbda11b1ec740bd6b969c84f249b7eee818af95f2d321963088245a8
name: protokube:1.19.0-alpha.3
sources:
- https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/images/protokube-amd64.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/images-protokube-amd64.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/images/protokube-amd64.tar.gz
arm64:
hash: 69270ca9c1c950be65af40337adfccec0a728930fa3224bb0d2e88f181f39ead
name: protokube:1.19.0-alpha.3
sources:
- https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/images/protokube-arm64.tar.gz
- https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/images-protokube-arm64.tar.gz
- https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/images/protokube-arm64.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1,223 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/linux/amd64/nodeup
NODEUP_HASH_AMD64=6980fda4fa37bbdc043738cf4ddac6388eb57f561895c69299c1b0ee263d465d
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.19.0-alpha.3/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.19.0-alpha.3/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.19.0-alpha.3/linux/arm64/nodeup
NODEUP_HASH_ARM64=dcc7f9f3c180ee76a511627e46da0ac69cdcb518cdf3be348e5ed046d491eb87
export AWS_REGION=us-test-1
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
containerRuntime: docker
containerd:
skipInstall: true
docker:
ipMasq: false
ipTables: false
logDriver: json-file
logLevel: info
logOpt:
- max-size=10m
- max-file=5
storage: overlay2,overlay,aufs
version: 18.06.3
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
hostnameOverride: '@aws'
image: k8s.gcr.io/kube-proxy:v1.14.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
{}
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- c3b736fd0f003765c12d99f2c995a8369e6241f4@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubelet
- 7e3a3ea663153f900cbd52900a39c91fa9f334be@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl
- 3ca15c0a18ee830520cf3a95408be826cbd255a1535a38e0be9608b25ad8bf64@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-amd64-v0.7.5.tgz
- 346f9394393ee8db5f8bd1e229ee9d90e5b36931bdd754308b2ae68884dd6822@https://download.docker.com/linux/static/stable/x86_64/docker-18.06.3-ce.tgz
arm64:
- df38e04576026393055ccc77c0dce73612996561@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubelet
- 01c2b6b43d36b6bfafc80a3737391c19ebfb8ad5@https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/arm64/kubectl
- 7fec91af78e9548df306f0ec43bea527c8c10cc3a9682c33e971c8522a7fcded@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-plugins-arm64-v0.7.5.tgz
- defb2ccc95c0825833216c8b9e0e15baaa51bcedb3efc1f393f5352d184dead4@https://download.docker.com/linux/static/stable/aarch64/docker-18.06.3-ce.tgz
ClusterName: private-shared-ip.example.com
ConfigBase: memfs://clusters.example.com/private-shared-ip.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
KubeletConfig:
anonymousAuth: false
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
ExperimentalCriticalPodAnnotation: "true"
hostnameOverride: '@aws'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: k8s.gcr.io/pause:3.2
podManifestPath: /etc/kubernetes/manifests
channels:
- memfs://clusters.example.com/private-shared-ip.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@@ -0,0 +1,104 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-12T04:13:14Z"
name: private-shared-ip.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/private-shared-ip.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
iam: {}
kubelet:
anonymousAuth: false
kubernetesVersion: v1.14.0
masterInternalName: api.internal.private-shared-ip.example.com
masterPublicName: api.private-shared-ip.example.com
networkCIDR: 172.20.0.0/16
networkID: vpc-12345678
networking:
weave: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: private
nodes: private
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Private
egress: eipalloc-12345678
zone: us-test-1a
- cidr: 172.20.4.0/22
name: utility-us-test-1a
type: Utility
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-12T04:13:15Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: private-shared-ip.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.14-debian-stretch-amd64-hvm-ebs-2019-08-16
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-12T04:13:15Z"
name: nodes
labels:
kops.k8s.io/cluster: private-shared-ip.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.14-debian-stretch-amd64-hvm-ebs-2019-08-16
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-14T15:32:41Z"
name: bastion
labels:
kops.k8s.io/cluster: private-shared-ip.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.14-debian-stretch-amd64-hvm-ebs-2019-08-16
machineType: t2.micro
maxSize: 1
minSize: 1
role: Bastion
subnets:
- utility-us-test-1a

View File

@@ -0,0 +1,897 @@
locals {
bastion_autoscaling_group_ids = [aws_autoscaling_group.bastion-private-shared-ip-example-com.id]
bastion_security_group_ids = [aws_security_group.bastion-private-shared-ip-example-com.id]
bastions_role_arn = aws_iam_role.bastions-private-shared-ip-example-com.arn
bastions_role_name = aws_iam_role.bastions-private-shared-ip-example-com.name
cluster_name = "private-shared-ip.example.com"
master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-private-shared-ip-example-com.id]
master_security_group_ids = [aws_security_group.masters-private-shared-ip-example-com.id]
masters_role_arn = aws_iam_role.masters-private-shared-ip-example-com.arn
masters_role_name = aws_iam_role.masters-private-shared-ip-example-com.name
node_autoscaling_group_ids = [aws_autoscaling_group.nodes-private-shared-ip-example-com.id]
node_security_group_ids = [aws_security_group.nodes-private-shared-ip-example-com.id]
node_subnet_ids = [aws_subnet.us-test-1a-private-shared-ip-example-com.id]
nodes_role_arn = aws_iam_role.nodes-private-shared-ip-example-com.arn
nodes_role_name = aws_iam_role.nodes-private-shared-ip-example-com.name
region = "us-test-1"
route_table_private-us-test-1a_id = aws_route_table.private-us-test-1a-private-shared-ip-example-com.id
route_table_public_id = aws_route_table.private-shared-ip-example-com.id
subnet_us-test-1a_id = aws_subnet.us-test-1a-private-shared-ip-example-com.id
subnet_utility-us-test-1a_id = aws_subnet.utility-us-test-1a-private-shared-ip-example-com.id
vpc_id = "vpc-12345678"
}
output "bastion_autoscaling_group_ids" {
value = [aws_autoscaling_group.bastion-private-shared-ip-example-com.id]
}
output "bastion_security_group_ids" {
value = [aws_security_group.bastion-private-shared-ip-example-com.id]
}
output "bastions_role_arn" {
value = aws_iam_role.bastions-private-shared-ip-example-com.arn
}
output "bastions_role_name" {
value = aws_iam_role.bastions-private-shared-ip-example-com.name
}
output "cluster_name" {
value = "private-shared-ip.example.com"
}
output "master_autoscaling_group_ids" {
value = [aws_autoscaling_group.master-us-test-1a-masters-private-shared-ip-example-com.id]
}
output "master_security_group_ids" {
value = [aws_security_group.masters-private-shared-ip-example-com.id]
}
output "masters_role_arn" {
value = aws_iam_role.masters-private-shared-ip-example-com.arn
}
output "masters_role_name" {
value = aws_iam_role.masters-private-shared-ip-example-com.name
}
output "node_autoscaling_group_ids" {
value = [aws_autoscaling_group.nodes-private-shared-ip-example-com.id]
}
output "node_security_group_ids" {
value = [aws_security_group.nodes-private-shared-ip-example-com.id]
}
output "node_subnet_ids" {
value = [aws_subnet.us-test-1a-private-shared-ip-example-com.id]
}
output "nodes_role_arn" {
value = aws_iam_role.nodes-private-shared-ip-example-com.arn
}
output "nodes_role_name" {
value = aws_iam_role.nodes-private-shared-ip-example-com.name
}
output "region" {
value = "us-test-1"
}
output "route_table_private-us-test-1a_id" {
value = aws_route_table.private-us-test-1a-private-shared-ip-example-com.id
}
output "route_table_public_id" {
value = aws_route_table.private-shared-ip-example-com.id
}
output "subnet_us-test-1a_id" {
value = aws_subnet.us-test-1a-private-shared-ip-example-com.id
}
output "subnet_utility-us-test-1a_id" {
value = aws_subnet.utility-us-test-1a-private-shared-ip-example-com.id
}
output "vpc_id" {
value = "vpc-12345678"
}
provider "aws" {
region = "us-test-1"
}
resource "aws_autoscaling_group" "bastion-private-shared-ip-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.bastion-private-shared-ip-example-com.id
version = aws_launch_template.bastion-private-shared-ip-example-com.latest_version
}
load_balancers = [aws_elb.bastion-private-shared-ip-example-com.id]
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "bastion.private-shared-ip.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "private-shared-ip.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "bastion.private-shared-ip.example.com"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
value = "node"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/bastion"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "bastion"
}
tag {
key = "kubernetes.io/cluster/private-shared-ip.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.utility-us-test-1a-private-shared-ip-example-com.id]
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-private-shared-ip-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.master-us-test-1a-masters-private-shared-ip-example-com.id
version = aws_launch_template.master-us-test-1a-masters-private-shared-ip-example-com.latest_version
}
load_balancers = [aws_elb.api-private-shared-ip-example-com.id]
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1a.masters.private-shared-ip.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "private-shared-ip.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1a.masters.private-shared-ip.example.com"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
value = "master"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "kubernetes.io/cluster/private-shared-ip.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-private-shared-ip-example-com.id]
}
resource "aws_autoscaling_group" "nodes-private-shared-ip-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.nodes-private-shared-ip-example-com.id
version = aws_launch_template.nodes-private-shared-ip-example-com.latest_version
}
max_size = 2
metrics_granularity = "1Minute"
min_size = 2
name = "nodes.private-shared-ip.example.com"
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "private-shared-ip.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "nodes.private-shared-ip.example.com"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role"
propagate_at_launch = true
value = "node"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/node"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes"
}
tag {
key = "kubernetes.io/cluster/private-shared-ip.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-private-shared-ip-example-com.id]
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-private-shared-ip-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "us-test-1a.etcd-events.private-shared-ip.example.com"
"k8s.io/etcd/events" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
type = "gp2"
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-private-shared-ip-example-com" {
availability_zone = "us-test-1a"
encrypted = false
size = 20
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "us-test-1a.etcd-main.private-shared-ip.example.com"
"k8s.io/etcd/main" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
type = "gp2"
}
resource "aws_elb" "api-private-shared-ip-example-com" {
cross_zone_load_balancing = false
health_check {
healthy_threshold = 2
interval = 10
target = "SSL:443"
timeout = 5
unhealthy_threshold = 2
}
idle_timeout = 300
listener {
instance_port = 443
instance_protocol = "TCP"
lb_port = 443
lb_protocol = "TCP"
}
name = "api-private-shared-ip-exa-ohatqj"
security_groups = [aws_security_group.api-elb-private-shared-ip-example-com.id]
subnets = [aws_subnet.utility-us-test-1a-private-shared-ip-example-com.id]
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "api.private-shared-ip.example.com"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
}
resource "aws_elb" "bastion-private-shared-ip-example-com" {
health_check {
healthy_threshold = 2
interval = 10
target = "TCP:22"
timeout = 5
unhealthy_threshold = 2
}
idle_timeout = 300
listener {
instance_port = 22
instance_protocol = "TCP"
lb_port = 22
lb_protocol = "TCP"
}
name = "bastion-private-shared-ip-eepmph"
security_groups = [aws_security_group.bastion-elb-private-shared-ip-example-com.id]
subnets = [aws_subnet.utility-us-test-1a-private-shared-ip-example-com.id]
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "bastion.private-shared-ip.example.com"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
}
resource "aws_iam_instance_profile" "bastions-private-shared-ip-example-com" {
name = "bastions.private-shared-ip.example.com"
role = aws_iam_role.bastions-private-shared-ip-example-com.name
}
resource "aws_iam_instance_profile" "masters-private-shared-ip-example-com" {
name = "masters.private-shared-ip.example.com"
role = aws_iam_role.masters-private-shared-ip-example-com.name
}
resource "aws_iam_instance_profile" "nodes-private-shared-ip-example-com" {
name = "nodes.private-shared-ip.example.com"
role = aws_iam_role.nodes-private-shared-ip-example-com.name
}
resource "aws_iam_role_policy" "bastions-private-shared-ip-example-com" {
name = "bastions.private-shared-ip.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_bastions.private-shared-ip.example.com_policy")
role = aws_iam_role.bastions-private-shared-ip-example-com.name
}
resource "aws_iam_role_policy" "masters-private-shared-ip-example-com" {
name = "masters.private-shared-ip.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_masters.private-shared-ip.example.com_policy")
role = aws_iam_role.masters-private-shared-ip-example-com.name
}
resource "aws_iam_role_policy" "nodes-private-shared-ip-example-com" {
name = "nodes.private-shared-ip.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_nodes.private-shared-ip.example.com_policy")
role = aws_iam_role.nodes-private-shared-ip-example-com.name
}
resource "aws_iam_role" "bastions-private-shared-ip-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_bastions.private-shared-ip.example.com_policy")
name = "bastions.private-shared-ip.example.com"
}
resource "aws_iam_role" "masters-private-shared-ip-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_masters.private-shared-ip.example.com_policy")
name = "masters.private-shared-ip.example.com"
}
resource "aws_iam_role" "nodes-private-shared-ip-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.private-shared-ip.example.com_policy")
name = "nodes.private-shared-ip.example.com"
}
resource "aws_key_pair" "kubernetes-private-shared-ip-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.private-shared-ip.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = file("${path.module}/data/aws_key_pair_kubernetes.private-shared-ip.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "private-shared-ip.example.com"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
}
resource "aws_launch_template" "bastion-private-shared-ip-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 32
volume_type = "gp2"
}
}
iam_instance_profile {
name = aws_iam_instance_profile.bastions-private-shared-ip-example-com.id
}
image_id = "ami-11400000"
instance_type = "t2.micro"
key_name = aws_key_pair.kubernetes-private-shared-ip-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name = "bastion.private-shared-ip.example.com"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
security_groups = [aws_security_group.bastion-private-shared-ip-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "bastion.private-shared-ip.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/bastion" = "1"
"kops.k8s.io/instancegroup" = "bastion"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "bastion.private-shared-ip.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/bastion" = "1"
"kops.k8s.io/instancegroup" = "bastion"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
}
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "bastion.private-shared-ip.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/bastion" = "1"
"kops.k8s.io/instancegroup" = "bastion"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
}
resource "aws_launch_template" "master-us-test-1a-masters-private-shared-ip-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 64
volume_type = "gp2"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-private-shared-ip-example-com.id
}
image_id = "ami-11400000"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-private-shared-ip-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name = "master-us-test-1a.masters.private-shared-ip.example.com"
network_interfaces {
associate_public_ip_address = false
delete_on_termination = true
security_groups = [aws_security_group.masters-private-shared-ip-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "master-us-test-1a.masters.private-shared-ip.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = ""
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "master-us-test-1a.masters.private-shared-ip.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = ""
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
}
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "master-us-test-1a.masters.private-shared-ip.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = ""
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.private-shared-ip.example.com_user_data")
}
resource "aws_launch_template" "nodes-private-shared-ip-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
volume_size = 128
volume_type = "gp2"
}
}
iam_instance_profile {
name = aws_iam_instance_profile.nodes-private-shared-ip-example-com.id
}
image_id = "ami-11400000"
instance_type = "t2.medium"
key_name = aws_key_pair.kubernetes-private-shared-ip-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
name = "nodes.private-shared-ip.example.com"
network_interfaces {
associate_public_ip_address = false
delete_on_termination = true
security_groups = [aws_security_group.nodes-private-shared-ip-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "nodes.private-shared-ip.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "nodes.private-shared-ip.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
}
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "nodes.private-shared-ip.example.com"
"k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
user_data = filebase64("${path.module}/data/aws_launch_template_nodes.private-shared-ip.example.com_user_data")
}
resource "aws_nat_gateway" "us-test-1a-private-shared-ip-example-com" {
allocation_id = "eipalloc-12345678"
subnet_id = aws_subnet.utility-us-test-1a-private-shared-ip-example-com.id
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "us-test-1a.private-shared-ip.example.com"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
}
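The aws_nat_gateway above is the point of this new fixture: allocation_id is the pre-allocated eipalloc-12345678 supplied through the subnet's egress field, so no aws_eip resource appears anywhere in this fixture and Terraform never manages the address. Roughly, the behaviour hinges on a prefix check of the egress value; the following is a standalone sketch of that decision, not the actual kOps model code.

```go
// Standalone sketch of how a subnet's egress value is interpreted. The
// prefixes mirror the ones kOps recognises; the surrounding function is
// illustrative only, not the actual model code.
package main

import (
	"fmt"
	"strings"
)

func classifyEgress(egress string) string {
	switch {
	case egress == "":
		return "default: allocate a new Elastic IP and NAT gateway"
	case egress == "External":
		return "external: egress is managed outside of kOps"
	case strings.HasPrefix(egress, "nat-"):
		return "reuse the existing NAT gateway " + egress
	case strings.HasPrefix(egress, "eipalloc-"):
		return "create a NAT gateway backed by the existing Elastic IP " + egress
	default:
		return "other egress value (not covered in this sketch): " + egress
	}
}

func main() {
	fmt.Println(classifyEgress("eipalloc-12345678"))
	// create a NAT gateway backed by the existing Elastic IP eipalloc-12345678
}
```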
resource "aws_route53_record" "api-private-shared-ip-example-com" {
alias {
evaluate_target_health = false
name = aws_elb.api-private-shared-ip-example-com.dns_name
zone_id = aws_elb.api-private-shared-ip-example-com.zone_id
}
name = "api.private-shared-ip.example.com"
type = "A"
zone_id = "/hostedzone/Z1AFAKE1ZON3YO"
}
resource "aws_route_table_association" "private-us-test-1a-private-shared-ip-example-com" {
route_table_id = aws_route_table.private-us-test-1a-private-shared-ip-example-com.id
subnet_id = aws_subnet.us-test-1a-private-shared-ip-example-com.id
}
resource "aws_route_table_association" "utility-us-test-1a-private-shared-ip-example-com" {
route_table_id = aws_route_table.private-shared-ip-example-com.id
subnet_id = aws_subnet.utility-us-test-1a-private-shared-ip-example-com.id
}
resource "aws_route_table" "private-shared-ip-example-com" {
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "private-shared-ip.example.com"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
"kubernetes.io/kops/role" = "public"
}
vpc_id = "vpc-12345678"
}
resource "aws_route_table" "private-us-test-1a-private-shared-ip-example-com" {
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "private-us-test-1a.private-shared-ip.example.com"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
"kubernetes.io/kops/role" = "private-us-test-1a"
}
vpc_id = "vpc-12345678"
}
resource "aws_route" "route-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = "igw-1"
route_table_id = aws_route_table.private-shared-ip-example-com.id
}
resource "aws_route" "route-private-us-test-1a-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.us-test-1a-private-shared-ip-example-com.id
route_table_id = aws_route_table.private-us-test-1a-private-shared-ip-example-com.id
}
resource "aws_security_group_rule" "api-elb-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.api-elb-private-shared-ip-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "bastion-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.bastion-private-shared-ip-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "bastion-elb-egress" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.bastion-elb-private-shared-ip-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "bastion-to-master-ssh" {
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-private-shared-ip-example-com.id
source_security_group_id = aws_security_group.bastion-private-shared-ip-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "bastion-to-node-ssh" {
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-private-shared-ip-example-com.id
source_security_group_id = aws_security_group.bastion-private-shared-ip-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "https-api-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.api-elb-private-shared-ip-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "https-elb-to-master" {
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-private-shared-ip-example-com.id
source_security_group_id = aws_security_group.api-elb-private-shared-ip-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "icmp-pmtu-api-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 3
protocol = "icmp"
security_group_id = aws_security_group.api-elb-private-shared-ip-example-com.id
to_port = 4
type = "ingress"
}
resource "aws_security_group_rule" "masters-private-shared-ip-example-com-egress-all-0to0-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-private-shared-ip-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "masters-private-shared-ip-example-com-ingress-all-0to0-masters-private-shared-ip-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-private-shared-ip-example-com.id
source_security_group_id = aws_security_group.masters-private-shared-ip-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "masters-private-shared-ip-example-com-ingress-all-0to0-nodes-private-shared-ip-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-private-shared-ip-example-com.id
source_security_group_id = aws_security_group.masters-private-shared-ip-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "nodes-private-shared-ip-example-com-egress-all-0to0-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-private-shared-ip-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "nodes-private-shared-ip-example-com-ingress-all-0to0-nodes-private-shared-ip-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-private-shared-ip-example-com.id
source_security_group_id = aws_security_group.nodes-private-shared-ip-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "nodes-private-shared-ip-example-com-ingress-tcp-1to2379-masters-private-shared-ip-example-com" {
from_port = 1
protocol = "tcp"
security_group_id = aws_security_group.masters-private-shared-ip-example-com.id
source_security_group_id = aws_security_group.nodes-private-shared-ip-example-com.id
to_port = 2379
type = "ingress"
}
resource "aws_security_group_rule" "nodes-private-shared-ip-example-com-ingress-tcp-2382to4000-masters-private-shared-ip-example-com" {
from_port = 2382
protocol = "tcp"
security_group_id = aws_security_group.masters-private-shared-ip-example-com.id
source_security_group_id = aws_security_group.nodes-private-shared-ip-example-com.id
to_port = 4000
type = "ingress"
}
resource "aws_security_group_rule" "nodes-private-shared-ip-example-com-ingress-tcp-4003to65535-masters-private-shared-ip-example-com" {
from_port = 4003
protocol = "tcp"
security_group_id = aws_security_group.masters-private-shared-ip-example-com.id
source_security_group_id = aws_security_group.nodes-private-shared-ip-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "nodes-private-shared-ip-example-com-ingress-udp-1to65535-masters-private-shared-ip-example-com" {
from_port = 1
protocol = "udp"
security_group_id = aws_security_group.masters-private-shared-ip-example-com.id
source_security_group_id = aws_security_group.nodes-private-shared-ip-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "ssh-elb-to-bastion" {
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.bastion-private-shared-ip-example-com.id
source_security_group_id = aws_security_group.bastion-elb-private-shared-ip-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "ssh-external-to-bastion-elb-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.bastion-elb-private-shared-ip-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group" "api-elb-private-shared-ip-example-com" {
description = "Security group for api ELB"
name = "api-elb.private-shared-ip.example.com"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "api-elb.private-shared-ip.example.com"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
vpc_id = "vpc-12345678"
}
resource "aws_security_group" "bastion-elb-private-shared-ip-example-com" {
description = "Security group for bastion ELB"
name = "bastion-elb.private-shared-ip.example.com"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "bastion-elb.private-shared-ip.example.com"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
vpc_id = "vpc-12345678"
}
resource "aws_security_group" "bastion-private-shared-ip-example-com" {
description = "Security group for bastion"
name = "bastion.private-shared-ip.example.com"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "bastion.private-shared-ip.example.com"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
vpc_id = "vpc-12345678"
}
resource "aws_security_group" "masters-private-shared-ip-example-com" {
description = "Security group for masters"
name = "masters.private-shared-ip.example.com"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "masters.private-shared-ip.example.com"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
vpc_id = "vpc-12345678"
}
resource "aws_security_group" "nodes-private-shared-ip-example-com" {
description = "Security group for nodes"
name = "nodes.private-shared-ip.example.com"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "nodes.private-shared-ip.example.com"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
}
vpc_id = "vpc-12345678"
}
resource "aws_subnet" "us-test-1a-private-shared-ip-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "us-test-1a.private-shared-ip.example.com"
"SubnetType" = "Private"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = "vpc-12345678"
}
resource "aws_subnet" "utility-us-test-1a-private-shared-ip-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.4.0/22"
tags = {
"KubernetesCluster" = "private-shared-ip.example.com"
"Name" = "utility-us-test-1a.private-shared-ip.example.com"
"SubnetType" = "Utility"
"kubernetes.io/cluster/private-shared-ip.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
}
vpc_id = "vpc-12345678"
}
terraform {
required_version = ">= 0.12.26"
required_providers {
aws = {
"source" = "hashicorp/aws"
"version" = ">= 2.46.0"
}
}
}

View File

@ -38,6 +38,9 @@ type ElasticIP struct {
ID *string
PublicIP *string
// Shared is set if this is a shared IP
Shared *bool
// ElasticIPs don't support tags. We instead find it via a related resource.
// TagOnSubnet tags a subnet with the ElasticIP. Deprecated: doesn't round-trip with terraform.
@ -175,6 +178,7 @@ func (e *ElasticIP) find(cloud awsup.AWSCloud) (*ElasticIP, error) {
// Avoid spurious changes
actual.Lifecycle = e.Lifecycle
actual.Shared = e.Shared
return actual, nil
}
@ -271,6 +275,14 @@ type terraformElasticIP struct {
}
func (_ *ElasticIP) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *ElasticIP) error {
if fi.BoolValue(e.Shared) {
if e.ID == nil {
return fmt.Errorf("ID must be set, if ElasticIP is shared: %v", e)
}
klog.V(4).Infof("reusing existing ElasticIP with id %q", aws.StringValue(e.ID))
return nil
}
tf := &terraformElasticIP{
VPC: aws.Bool(true),
Tags: e.Tags,
@ -280,6 +292,13 @@ func (_ *ElasticIP) RenderTerraform(t *terraform.TerraformTarget, a, e, changes
}
func (e *ElasticIP) TerraformLink() *terraform.Literal {
if fi.BoolValue(e.Shared) {
if e.ID == nil {
klog.Fatalf("ID must be set, if ElasticIP is shared: %v", e)
}
return terraform.LiteralFromStringValue(*e.ID)
}
return terraform.LiteralProperty("aws_eip", *e.Name, "id")
}
@ -289,6 +308,14 @@ type cloudformationElasticIP struct {
}
func (_ *ElasticIP) RenderCloudformation(t *cloudformation.CloudformationTarget, a, e, changes *ElasticIP) error {
if fi.BoolValue(e.Shared) {
if e.ID == nil {
return fmt.Errorf("ID must be set, if ElasticIP is shared: %v", e)
}
klog.V(4).Infof("reusing existing ElasticIP with id %q", aws.StringValue(e.ID))
return nil
}
tf := &cloudformationElasticIP{
Domain: aws.String("vpc"),
Tags: buildCloudformationTags(e.Tags),
@ -303,5 +330,12 @@ func (_ *ElasticIP) RenderCloudformation(t *cloudformation.CloudformationTarget,
//}
func (e *ElasticIP) CloudformationAllocationID() *cloudformation.Literal {
if fi.BoolValue(e.Shared) {
if e.ID == nil {
klog.Fatalf("ID must be set, if ElasticIP is shared: %v", e)
}
return cloudformation.LiteralString(*e.ID)
}
return cloudformation.GetAtt("AWS::EC2::EIP", *e.Name, "AllocationId")
}
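Read together, the render and link changes above amount to one small contract: when Shared is true the task must already carry the existing allocation ID, nothing is rendered for the EIP in either the Terraform or the CloudFormation target, and the link helpers return the raw eipalloc-* value. That is exactly how the fixture earlier in this commit ends up with a literal allocation_id on its NAT gateway. Below is a minimal standalone sketch of that contract; the struct only mirrors the fields touched here and is not the real awstasks type.

```go
// Minimal sketch of the shared-ElasticIP contract enforced by the render
// and link functions in this diff: Shared implies ID, and a shared EIP is
// referenced by its literal allocation ID instead of a managed aws_eip.
package main

import (
	"errors"
	"fmt"
)

type elasticIP struct {
	Name   *string
	ID     *string // existing allocation ID, e.g. "eipalloc-12345678"
	Shared *bool   // true when kOps should reuse rather than create the EIP
}

// allocationRef returns what a NAT gateway would reference: the literal
// allocation ID for a shared EIP, or a reference to the managed aws_eip.
func allocationRef(e *elasticIP) (string, error) {
	if e.Shared != nil && *e.Shared {
		if e.ID == nil {
			return "", errors.New("ID must be set if ElasticIP is shared")
		}
		return *e.ID, nil
	}
	return fmt.Sprintf("aws_eip.%s.id", *e.Name), nil
}

func main() {
	name, id, shared := "us-test-1a", "eipalloc-12345678", true
	ref, err := allocationRef(&elasticIP{Name: &name, ID: &id, Shared: &shared})
	fmt.Println(ref, err) // eipalloc-12345678 <nil>
}
```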