diff --git a/cmd/kops/create_cluster.go b/cmd/kops/create_cluster.go index d633c9cf5c..be304821d1 100644 --- a/cmd/kops/create_cluster.go +++ b/cmd/kops/create_cluster.go @@ -964,6 +964,7 @@ func completeNetworking(options *CreateClusterOptions) func(cmd *cobra.Command, "cni", "calico", "cilium", + "cilium-eni", "cilium-etcd", } diff --git a/cmd/kops/create_cluster_integration_test.go b/cmd/kops/create_cluster_integration_test.go index 5e61e5f96e..af544e754c 100644 --- a/cmd/kops/create_cluster_integration_test.go +++ b/cmd/kops/create_cluster_integration_test.go @@ -64,6 +64,11 @@ func TestCreateClusterHetzner(t *testing.T) { runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal_hetzner", "v1alpha2") } +// TestCreateClusterCilium runs kops with the cilium networking flags +func TestCreateClusterCilium(t *testing.T) { + runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/cilium-eni", "v1alpha2") +} + // TestCreateClusterOverride tests the override flag func TestCreateClusterOverride(t *testing.T) { runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/overrides", "v1alpha2") diff --git a/cmd/kops/integration_test.go b/cmd/kops/integration_test.go index 4cc6038e84..692de47f9f 100644 --- a/cmd/kops/integration_test.go +++ b/cmd/kops/integration_test.go @@ -510,6 +510,13 @@ func TestPrivateCiliumAdvanced(t *testing.T) { runTestTerraformAWS(t) } +func TestPrivateCiliumENI(t *testing.T) { + newIntegrationTest("privatecilium.example.com", "privatecilium-eni"). + withPrivate(). + withAddons(ciliumAddon, dnsControllerAddon). + runTestTerraformAWS(t) +} + // TestPrivateCanal runs the test on a configuration with private topology, canal networking func TestPrivateCanal(t *testing.T) { newIntegrationTest("privatecanal.example.com", "privatecanal"). diff --git a/docs/networking/cilium.md b/docs/networking/cilium.md index 66acb96ff1..41f0c587b0 100644 --- a/docs/networking/cilium.md +++ b/docs/networking/cilium.md @@ -112,10 +112,10 @@ kops rolling-update cluster --yes {{ kops_feature_table(kops_added_default='1.18') }} -This feature is in beta state. - You can have Cilium provision AWS managed addresses and attach them directly to Pods much like AWS VPC. See [the Cilium docs for more information](https://docs.cilium.io/en/v1.6/concepts/ipam/eni/) +Enable this by setting `--networking=cilium-eni` (as of kOps 1.26) or by specifying the following in the cluster spec: + ```yaml networking: cilium: diff --git a/docs/releases/1.26-NOTES.md b/docs/releases/1.26-NOTES.md index 198d7f261c..75306b2d72 100644 --- a/docs/releases/1.26-NOTES.md +++ b/docs/releases/1.26-NOTES.md @@ -22,6 +22,8 @@ This is a document to gather the release notes prior to the release. * CapacityRebalance can be enabled/disabled on ASGs through a new `capacityRebalance` field in InstanceGroup specs. +* New clusters can more easily be configured to use Cilium in ENI mode by setting `--networking=cilium-eni`. 
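As an illustration of the flag described above (a minimal sketch; the cluster name, state store, and zone below are placeholders, not values taken from this change), a new cluster using Cilium's ENI IPAM mode could be created with something like:

```sh
# Hypothetical example: substitute your own cluster name, state store, and zones.
export KOPS_STATE_STORE=s3://example-kops-state-store

kops create cluster \
  --name=cilium-eni.example.com \
  --cloud=aws \
  --zones=us-east-1a \
  --networking=cilium-eni \
  --yes
```

This is the CLI equivalent of setting `ipam: eni` under `networking.cilium` in the cluster spec, as shown in the docs/networking/cilium.md change above.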
+ # Breaking changes ## Other breaking changes diff --git a/tests/integration/create_cluster/cilium-eni/expected-v1alpha2.yaml b/tests/integration/create_cluster/cilium-eni/expected-v1alpha2.yaml new file mode 100644 index 0000000000..a9ba64ffb2 --- /dev/null +++ b/tests/integration/create_cluster/cilium-eni/expected-v1alpha2.yaml @@ -0,0 +1,100 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + name: minimal.example.com +spec: + api: + dns: {} + authorization: + rbac: {} + channel: stable + cloudProvider: aws + configBase: memfs://tests/minimal.example.com + etcdClusters: + - cpuRequest: 200m + etcdMembers: + - encryptedVolume: true + instanceGroup: control-plane-us-test-1a + name: a + memoryRequest: 100Mi + name: main + - cpuRequest: 100m + etcdMembers: + - encryptedVolume: true + instanceGroup: control-plane-us-test-1a + name: a + memoryRequest: 100Mi + name: events + iam: + allowContainerRegistry: true + legacy: false + kubeProxy: + enabled: false + kubelet: + anonymousAuth: false + kubernetesApiAccess: + - 0.0.0.0/0 + - ::/0 + kubernetesVersion: v1.26.0 + masterPublicName: api.minimal.example.com + networkCIDR: 172.20.0.0/16 + networking: + cilium: + enableNodePort: true + nonMasqueradeCIDR: 100.64.0.0/10 + sshAccess: + - 0.0.0.0/0 + - ::/0 + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Public + zone: us-test-1a + topology: + dns: + type: Public + masters: public + nodes: public + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + labels: + kops.k8s.io/cluster: minimal.example.com + name: control-plane-us-test-1a +spec: + image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20221018 + instanceMetadata: + httpPutResponseHopLimit: 3 + httpTokens: required + machineType: m3.medium + maxSize: 1 + minSize: 1 + role: Master + subnets: + - us-test-1a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2017-01-01T00:00:00Z" + labels: + kops.k8s.io/cluster: minimal.example.com + name: nodes-us-test-1a +spec: + image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20221018 + instanceMetadata: + httpPutResponseHopLimit: 1 + httpTokens: required + machineType: t2.medium + maxSize: 1 + minSize: 1 + role: Node + subnets: + - us-test-1a diff --git a/tests/integration/create_cluster/cilium-eni/options.yaml b/tests/integration/create_cluster/cilium-eni/options.yaml new file mode 100644 index 0000000000..aa379db8fe --- /dev/null +++ b/tests/integration/create_cluster/cilium-eni/options.yaml @@ -0,0 +1,6 @@ +ClusterName: minimal.example.com +Zones: +- us-test-1a +CloudProvider: aws +Networking: cilium-eni +KubernetesVersion: v1.26.0 \ No newline at end of file diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_bastions.privatecilium.example.com_policy b/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_bastions.privatecilium.example.com_policy new file mode 100644 index 0000000000..66d5de1d5a --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_bastions.privatecilium.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_masters.privatecilium.example.com_policy 
b/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_masters.privatecilium.example.com_policy new file mode 100644 index 0000000000..66d5de1d5a --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_masters.privatecilium.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_nodes.privatecilium.example.com_policy b/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_nodes.privatecilium.example.com_policy new file mode 100644 index 0000000000..66d5de1d5a --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_nodes.privatecilium.example.com_policy @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { "Service": "ec2.amazonaws.com"}, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_policy_bastions.privatecilium.example.com_policy b/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_policy_bastions.privatecilium.example.com_policy new file mode 100644 index 0000000000..54912e12a5 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_policy_bastions.privatecilium.example.com_policy @@ -0,0 +1,10 @@ +{ + "Statement": [ + { + "Action": "ec2:DescribeRegions", + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_policy_masters.privatecilium.example.com_policy b/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_policy_masters.privatecilium.example.com_policy new file mode 100644 index 0000000000..9786e71f58 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_policy_masters.privatecilium.example.com_policy @@ -0,0 +1,289 @@ +{ + "Statement": [ + { + "Action": "ec2:AttachVolume", + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "privatecilium.example.com", + "aws:ResourceTag/k8s.io/role/master": "1" + } + }, + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": [ + "s3:Get*" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/privatecilium.example.com/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/privatecilium.example.com/backups/etcd/main/*" + }, + { + "Action": [ + "s3:GetObject", + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:PutObject" + ], + "Effect": "Allow", + "Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/privatecilium.example.com/backups/etcd/events/*" + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-write-bucket" + ] + }, + { + "Action": [ + 
"route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + "route53:GetHostedZone" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO" + ] + }, + { + "Action": [ + "route53:GetChange" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:route53:::change/*" + ] + }, + { + "Action": [ + "route53:ListHostedZones", + "route53:ListTagsForResource" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "privatecilium.example.com", + "ec2:CreateAction": [ + "CreateSecurityGroup" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "privatecilium.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:security-group/*" + ] + }, + { + "Action": "ec2:CreateTags", + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "privatecilium.example.com", + "ec2:CreateAction": [ + "CreateVolume", + "CreateSnapshot" + ] + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:volume/*", + "arn:aws-test:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "ec2:CreateTags", + "ec2:DeleteTags" + ], + "Condition": { + "Null": { + "aws:RequestTag/KubernetesCluster": "true" + }, + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "privatecilium.example.com" + } + }, + "Effect": "Allow", + "Resource": [ + "arn:aws-test:ec2:*:*:volume/*", + "arn:aws-test:ec2:*:*:snapshot/*" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeTags", + "ec2:AssignPrivateIpAddresses", + "ec2:AttachNetworkInterface", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateNetworkInterface", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:DeleteNetworkInterface", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DescribeAccountAttributes", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DescribeVolumesModifications", + "ec2:DescribeVpcPeeringConnections", + "ec2:DescribeVpcs", + "ec2:DetachNetworkInterface", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyNetworkInterfaceAttribute", + "ec2:ModifyVolume", + "ec2:UnassignPrivateIpAddresses", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:RegisterTargets", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:DescribeKey", + 
"kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:RevokeSecurityGroupIngress", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener" + ], + "Condition": { + "StringEquals": { + "aws:ResourceTag/KubernetesCluster": "privatecilium.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": [ + "ec2:CreateSecurityGroup", + "ec2:CreateSnapshot", + "ec2:CreateVolume", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/KubernetesCluster": "privatecilium.example.com" + } + }, + "Effect": "Allow", + "Resource": "*" + }, + { + "Action": "ec2:CreateSecurityGroup", + "Effect": "Allow", + "Resource": "arn:aws-test:ec2:*:*:vpc/*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_policy_nodes.privatecilium.example.com_policy b/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_policy_nodes.privatecilium.example.com_policy new file mode 100644 index 0000000000..153ab3c7f6 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_iam_role_policy_nodes.privatecilium.example.com_policy @@ -0,0 +1,30 @@ +{ + "Statement": [ + { + "Action": [ + "s3:GetBucketLocation", + "s3:GetEncryptionConfiguration", + "s3:ListBucket", + "s3:ListBucketVersions" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws-test:s3:::placeholder-read-bucket" + ] + }, + { + "Action": [ + "autoscaling:DescribeAutoScalingInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstances", + "ec2:DescribeRegions", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "kms:GenerateRandom" + ], + "Effect": "Allow", + "Resource": "*" + } + ], + "Version": "2012-10-17" +} diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_key_pair_kubernetes.privatecilium.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key b/tests/integration/update_cluster/privatecilium-eni/data/aws_key_pair_kubernetes.privatecilium.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key new file mode 100644 index 0000000000..81cb012783 --- /dev/null +++ 
b/tests/integration/update_cluster/privatecilium-eni/data/aws_key_pair_kubernetes.privatecilium.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_launch_template_master-us-test-1a.masters.privatecilium.example.com_user_data b/tests/integration/update_cluster/privatecilium-eni/data/aws_launch_template_master-us-test-1a.masters.privatecilium.example.com_user_data new file mode 100644 index 0000000000..cf6211b4e2 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_launch_template_master-us-test-1a.masters.privatecilium.example.com_user_data @@ -0,0 +1,253 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. 
==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.4.13 +docker: + skipInstall: true +encryptionConfig: null +etcdClusters: + events: + version: 3.4.13 + main: + version: 3.4.13 +kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.privatecilium.example.com + serviceAccountJWKSURI: https://api.internal.privatecilium.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 +kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: privatecilium.example.com + configureCloudRoutes: false + image: registry.k8s.io/kube-controller-manager:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 +kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: 
/ + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigBase: memfs://clusters.example.com/privatecilium.example.com +InstanceGroupName: master-us-test-1a +InstanceGroupRole: ControlPlane +NodeupConfigHash: bE/1eP3ebjiERpmAXHGaOxi6wsg/nS0FFda8mVTSjVk= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_launch_template_nodes.privatecilium.example.com_user_data b/tests/integration/update_cluster/privatecilium-eni/data/aws_launch_template_nodes.privatecilium.example.com_user_data new file mode 100644 index 0000000000..2bf76462f1 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_launch_template_nodes.privatecilium.example.com_user_data @@ -0,0 +1,193 @@ +#!/bin/bash +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64 +NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924 +NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64 +NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865 + +export AWS_REGION=us-test-1 + + + + +sysctl -w net.core.rmem_max=16777216 || true +sysctl -w net.core.wmem_max=16777216 || true +sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true +sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true + + +function ensure-install-dir() { + INSTALL_DIR="/opt/kops" + # On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec + if [[ -d /var/lib/toolbox ]]; then + INSTALL_DIR="/var/lib/toolbox/kops" + fi + mkdir -p ${INSTALL_DIR}/bin + mkdir -p ${INSTALL_DIR}/conf + cd ${INSTALL_DIR} +} + +# Retry a download until we get it. args: name, sha, urls +download-or-bust() { + local -r file="$1" + local -r hash="$2" + local -r urls=( $(split-commas "$3") ) + + if [[ -f "${file}" ]]; then + if ! 
validate-hash "${file}" "${hash}"; then + rm -f "${file}" + else + return 0 + fi + fi + + while true; do + for url in "${urls[@]}"; do + commands=( + "curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + "curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10" + "wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10" + ) + for cmd in "${commands[@]}"; do + echo "Attempting download with: ${cmd} {url}" + if ! (${cmd} "${url}"); then + echo "== Download failed with ${cmd} ==" + continue + fi + if ! validate-hash "${file}" "${hash}"; then + echo "== Hash validation of ${url} failed. Retrying. ==" + rm -f "${file}" + else + echo "== Downloaded ${url} (SHA256 = ${hash}) ==" + return 0 + fi + done + done + + echo "All downloads failed; sleeping before retrying" + sleep 60 + done +} + +validate-hash() { + local -r file="$1" + local -r expected="$2" + local actual + + actual=$(sha256sum ${file} | awk '{ print $1 }') || true + if [[ "${actual}" != "${expected}" ]]; then + echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} ==" + return 1 + fi +} + +function split-commas() { + echo $1 | tr "," "\n" +} + +function download-release() { + case "$(uname -m)" in + x86_64*|i?86_64*|amd64*) + NODEUP_URL="${NODEUP_URL_AMD64}" + NODEUP_HASH="${NODEUP_HASH_AMD64}" + ;; + aarch64*|arm64*) + NODEUP_URL="${NODEUP_URL_ARM64}" + NODEUP_HASH="${NODEUP_HASH_ARM64}" + ;; + *) + echo "Unsupported host arch: $(uname -m)" >&2 + exit 1 + ;; + esac + + cd ${INSTALL_DIR}/bin + download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}" + + chmod +x nodeup + + echo "Running nodeup" + # We can't run in the foreground because of https://github.com/docker/docker/issues/23793 + ( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 ) +} + +#################################################################################### + +/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured" + +echo "== nodeup node config starting ==" +ensure-install-dir + +cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC' +cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true +containerRuntime: containerd +containerd: + logLevel: info + version: 1.4.13 +docker: + skipInstall: true +kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 +kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + +__EOF_CLUSTER_SPEC + +cat > conf/kube_env.yaml << '__EOF_KUBE_ENV' +CloudProvider: aws +ConfigServer: + CACertificates: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + 
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- + server: https://kops-controller.internal.privatecilium.example.com:3988/ +InstanceGroupName: nodes +InstanceGroupRole: Node +NodeupConfigHash: sN54AEbLjJsWbgX3bYbJ1zKa2/ez6JnG1YWBh3knm7w= + +__EOF_KUBE_ENV + +download-release +echo "== nodeup node config done ==" diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_cluster-completed.spec_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_cluster-completed.spec_content new file mode 100644 index 0000000000..97d9fa875d --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_cluster-completed.spec_content @@ -0,0 +1,215 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-12T04:13:14Z" + name: privatecilium.example.com +spec: + api: + loadBalancer: + class: Classic + type: Public + authorization: + alwaysAllow: {} + channel: stable + cloudConfig: + awsEBSCSIDriver: + enabled: false + manageStorageClasses: true + cloudProvider: aws + clusterDNSDomain: cluster.local + configBase: memfs://clusters.example.com/privatecilium.example.com + configStore: memfs://clusters.example.com/privatecilium.example.com + containerRuntime: containerd + containerd: + logLevel: info + version: 1.4.13 + dnsZone: Z1AFAKE1ZON3YO + docker: + skipInstall: true + etcdClusters: + - backups: + backupStore: memfs://clusters.example.com/privatecilium.example.com/backups/etcd/main + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: main + version: 3.4.13 + - backups: + backupStore: memfs://clusters.example.com/privatecilium.example.com/backups/etcd/events + etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: events + version: 3.4.13 + externalDns: + provider: dns-controller + iam: + legacy: false + keyStore: memfs://clusters.example.com/privatecilium.example.com/pki + kubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + 
requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.privatecilium.example.com + serviceAccountJWKSURI: https://api.internal.privatecilium.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + kubeControllerManager: + allocateNodeCIDRs: true + attachDetachReconcileSyncPeriod: 1m0s + cloudProvider: aws + clusterCIDR: 100.96.0.0/11 + clusterName: privatecilium.example.com + configureCloudRoutes: false + image: registry.k8s.io/kube-controller-manager:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + useServiceAccountCredentials: true + kubeDNS: + cacheMaxConcurrent: 150 + cacheMaxSize: 1000 + cpuRequest: 100m + domain: cluster.local + memoryLimit: 170Mi + memoryRequest: 70Mi + nodeLocalDNS: + cpuRequest: 25m + enabled: false + image: registry.k8s.io/dns/k8s-dns-node-cache:1.22.8 + memoryRequest: 5Mi + provider: CoreDNS + serverIP: 100.64.0.10 + kubeProxy: + clusterCIDR: 100.96.0.0/11 + cpuRequest: 100m + image: registry.k8s.io/kube-proxy:v1.21.0 + logLevel: 2 + kubeScheduler: + image: registry.k8s.io/kube-scheduler:v1.21.0 + leaderElection: + leaderElect: true + logLevel: 2 + kubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + kubernetesApiAccess: + - 0.0.0.0/0 + kubernetesVersion: 1.21.0 + masterKubelet: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + masterPublicName: api.privatecilium.example.com + networkCIDR: 172.20.0.0/16 + networking: + cilium: + agentPrometheusPort: 9090 + bpfCTGlobalAnyMax: 262144 + bpfCTGlobalTCPMax: 524288 + bpfLBAlgorithm: random + bpfLBMaglevTableSize: "16381" + bpfLBMapMax: 65536 + bpfNATGlobalMax: 524288 + bpfNeighGlobalMax: 524288 + bpfPolicyMapMax: 16384 + clusterName: default + cpuRequest: 25m + disableCNPStatusUpdates: true + disableMasquerade: true + enableBPFMasquerade: false + enableEndpointHealthChecking: true + enableL7Proxy: true + enableRemoteNodeIdentity: true + hubble: + enabled: false + identityAllocationMode: crd + identityChangeGracePeriod: 5s + ipam: eni + memoryRequest: 128Mi + monitorAggregation: medium + sidecarIstioProxyImage: cilium/istio_proxy + toFqdnsDnsRejectResponseCode: refused + tunnel: disabled + version: v1.11.11 + nonMasqueradeCIDR: 100.64.0.0/10 + podCIDR: 100.96.0.0/11 + secretStore: memfs://clusters.example.com/privatecilium.example.com/secrets + serviceClusterIPRange: 100.64.0.0/13 + sshAccess: + - 0.0.0.0/0 + subnets: + - cidr: 
172.20.32.0/19 + name: us-test-1a + type: Private + zone: us-test-1a + - cidr: 172.20.4.0/22 + name: utility-us-test-1a + type: Utility + zone: us-test-1a + topology: + dns: + type: Public + masters: private + nodes: private diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_etcd-cluster-spec-events_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_etcd-cluster-spec-events_content new file mode 100644 index 0000000000..bb8ddb0e2e --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_etcd-cluster-spec-events_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.4.13" +} diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_etcd-cluster-spec-main_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_etcd-cluster-spec-main_content new file mode 100644 index 0000000000..bb8ddb0e2e --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_etcd-cluster-spec-main_content @@ -0,0 +1,4 @@ +{ + "memberCount": 1, + "etcdVersion": "3.4.13" +} diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_kops-version.txt_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_kops-version.txt_content new file mode 100644 index 0000000000..b7340298dc --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_kops-version.txt_content @@ -0,0 +1 @@ +1.21.0-alpha.1 diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content new file mode 100644 index 0000000000..28d38caa9d --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content @@ -0,0 +1,62 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-events + name: etcd-manager-events + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/privatecilium.example.com/backups/etcd/events + --client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true + --dns-suffix=.internal.privatecilium.example.com --grpc-port=3997 --peer-urls=https://__name__:2381 + --quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events + --volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/control-plane=1 + --volume-tag=kubernetes.io/cluster/privatecilium.example.com=owned > /tmp/pipe + 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: 
/etc/kubernetes/pki/etcd-manager-events + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd-events.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content new file mode 100644 index 0000000000..18985cfc8f --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content @@ -0,0 +1,62 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + labels: + k8s-app: etcd-manager-main + name: etcd-manager-main + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager + --backup-store=memfs://clusters.example.com/privatecilium.example.com/backups/etcd/main + --client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true + --dns-suffix=.internal.privatecilium.example.com --grpc-port=3996 --peer-urls=https://__name__:2380 + --quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main + --volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/control-plane=1 + --volume-tag=kubernetes.io/cluster/privatecilium.example.com=owned > /tmp/pipe + 2>&1 + image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831 + name: etcd-manager + resources: + requests: + cpu: 200m + memory: 100Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /rootfs + name: rootfs + - mountPath: /run + name: run + - mountPath: /etc/kubernetes/pki/etcd-manager + name: pki + - mountPath: /var/log/etcd.log + name: varlogetcd + hostNetwork: true + hostPID: true + priorityClassName: system-cluster-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: / + type: Directory + name: rootfs + - hostPath: + path: /run + type: DirectoryOrCreate + name: run + - hostPath: + path: /etc/kubernetes/pki/etcd-manager-main + type: DirectoryOrCreate + name: pki + - hostPath: + path: /var/log/etcd.log + type: FileOrCreate + name: varlogetcd +status: {} diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content new file mode 100644 index 0000000000..5cb249fea7 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null +spec: + containers: + - args: + - --ca-cert=/secrets/ca.crt + - --client-cert=/secrets/client.crt + - --client-key=/secrets/client.key + image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.26.0-alpha.1 + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /.kube-apiserver-healthcheck/healthz + port: 3990 + initialDelaySeconds: 5 + timeoutSeconds: 5 + name: healthcheck + resources: {} + securityContext: + runAsNonRoot: true + runAsUser: 10012 + volumeMounts: + - mountPath: /secrets + name: healthcheck-secrets + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/kube-apiserver-healthcheck/secrets + type: Directory + name: healthcheck-secrets +status: {} diff --git 
a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_nodeupconfig-master-us-test-1a_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_nodeupconfig-master-us-test-1a_content new file mode 100644 index 0000000000..c06fb1745b --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_nodeupconfig-master-us-test-1a_content @@ -0,0 +1,276 @@ +APIServerConfig: + KubeAPIServer: + allowPrivileged: true + anonymousAuth: false + apiAudiences: + - kubernetes.svc.default + apiServerCount: 1 + authorizationMode: AlwaysAllow + bindAddress: 0.0.0.0 + cloudProvider: aws + enableAdmissionPlugins: + - NamespaceLifecycle + - LimitRanger + - ServiceAccount + - DefaultStorageClass + - DefaultTolerationSeconds + - MutatingAdmissionWebhook + - ValidatingAdmissionWebhook + - NodeRestriction + - ResourceQuota + etcdServers: + - https://127.0.0.1:4001 + etcdServersOverrides: + - /events#https://127.0.0.1:4002 + image: registry.k8s.io/kube-apiserver:v1.21.0 + kubeletPreferredAddressTypes: + - InternalIP + - Hostname + - ExternalIP + logLevel: 2 + requestheaderAllowedNames: + - aggregator + requestheaderExtraHeaderPrefixes: + - X-Remote-Extra- + requestheaderGroupHeaders: + - X-Remote-Group + requestheaderUsernameHeaders: + - X-Remote-User + securePort: 443 + serviceAccountIssuer: https://api.internal.privatecilium.example.com + serviceAccountJWKSURI: https://api.internal.privatecilium.example.com/openid/v1/jwks + serviceClusterIPRange: 100.64.0.0/13 + storageBackend: etcd3 + ServiceAccountPublicKeys: | + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm + XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ== + -----END RSA PUBLIC KEY----- + -----BEGIN RSA PUBLIC KEY----- + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF + Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ== + -----END RSA PUBLIC KEY----- +Assets: + amd64: + - 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet + - 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 29ef1e8635795c2a49a20a56e778f45ff163c5400a5428ca33999ed53d44e3d8@https://github.com/containerd/containerd/releases/download/v1.4.13/cri-containerd-cni-1.4.13-linux-amd64.tar.gz + - f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64 + - 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64 + arm64: + - 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet + - a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl + - 
ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - debed306ed9a4e70dcbcb228a0b3898f9730099e324f34bb0e76abbaddf7a6a7@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.13.tgz + - 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64 + - 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64 +CAs: + apiserver-aggregator-ca: | + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX + DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX + WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk + CzMeMdr4 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe + BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX + DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It + Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM + x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB + o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU + ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN + QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW + HLtkTXH8 + -----END CERTIFICATE----- + etcd-clients-ca: | + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx + NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY + qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW + BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx + NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB + AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep + uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE + AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s + x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E + YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co= + -----END CERTIFICATE----- + etcd-manager-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN + MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H + 
g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6 + CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O + sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs + GS/VUw== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd + BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN + MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz + MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL + DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC + MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW + LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE + hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV + cPfVNg== + -----END CERTIFICATE----- + etcd-manager-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm + ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx + GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu + Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6 + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP + vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP + DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9 + t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd + -----END CERTIFICATE----- + etcd-peers-ca-events: | + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx + MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY + xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O + Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB + DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb + BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx + MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN + BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW + 03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO + BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh + cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI + J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb + -----END CERTIFICATE----- + etcd-peers-ca-main: | + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3 + 
MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA + aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf + OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt + uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4= + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ + BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3 + MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG + SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt + naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud + DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC + qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K + G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo= + -----END CERTIFICATE----- + kubernetes-ca: | + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw + ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1 + jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA + MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8 + tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w== + -----END CERTIFICATE----- + -----BEGIN CERTIFICATE----- + MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU + BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw + OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD + SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7 + WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG + MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn + MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA + 9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw== + -----END CERTIFICATE----- +ClusterName: privatecilium.example.com +FileAssets: +- content: | + apiVersion: kubescheduler.config.k8s.io/v1beta1 + clientConnection: + kubeconfig: /var/lib/kube-scheduler/kubeconfig + kind: KubeSchedulerConfiguration + path: /var/lib/kube-scheduler/config.yaml +Hooks: +- null +- null +KeypairIDs: + apiserver-aggregator-ca: "6980187172486667078076483355" + etcd-clients-ca: "6979622252718071085282986282" + etcd-manager-ca-events: "6982279354000777253151890266" + etcd-manager-ca-main: "6982279354000936168671127624" + etcd-peers-ca-events: "6982279353999767935825892873" + etcd-peers-ca-main: "6982279353998887468930183660" + kubernetes-ca: "6982820025135291416230495506" + service-account: "2" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kops.k8s.io/kops-controller-pki: "" + kubernetes.io/role: master + node-role.kubernetes.io/control-plane: "" + node-role.kubernetes.io/master: "" + node.kubernetes.io/exclude-from-external-load-balancers: "" + podInfraContainerImage: 
registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s + taints: + - node-role.kubernetes.io/master=:NoSchedule +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/privatecilium.example.com/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.4.13 +etcdManifests: +- memfs://clusters.example.com/privatecilium.example.com/manifests/etcd/main-master-us-test-1a.yaml +- memfs://clusters.example.com/privatecilium.example.com/manifests/etcd/events-master-us-test-1a.yaml +staticManifests: +- key: kube-apiserver-healthcheck + path: manifests/static/kube-apiserver-healthcheck.yaml diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_nodeupconfig-nodes_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_nodeupconfig-nodes_content new file mode 100644 index 0000000000..b6ffa14848 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_nodeupconfig-nodes_content @@ -0,0 +1,44 @@ +Assets: + amd64: + - 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet + - 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl + - 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz + - 29ef1e8635795c2a49a20a56e778f45ff163c5400a5428ca33999ed53d44e3d8@https://github.com/containerd/containerd/releases/download/v1.4.13/cri-containerd-cni-1.4.13-linux-amd64.tar.gz + arm64: + - 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet + - a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl + - ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz + - debed306ed9a4e70dcbcb228a0b3898f9730099e324f34bb0e76abbaddf7a6a7@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.13.tgz +CAs: {} +ClusterName: privatecilium.example.com +Hooks: +- null +- null +KeypairIDs: + kubernetes-ca: "6982820025135291416230495506" +KubeletConfig: + anonymousAuth: false + cgroupDriver: systemd + cgroupRoot: / + cloudProvider: aws + clusterDNS: 100.64.0.10 + clusterDomain: cluster.local + enableDebuggingHandlers: true + evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5% + kubeconfigPath: /var/lib/kubelet/kubeconfig + logLevel: 2 + networkPluginName: cni + nodeLabels: + kubernetes.io/role: node + node-role.kubernetes.io/node: "" + podInfraContainerImage: registry.k8s.io/pause:3.6 + podManifestPath: /etc/kubernetes/manifests + registerSchedulable: true + shutdownGracePeriod: 30s + shutdownGracePeriodCriticalPods: 10s +UpdatePolicy: automatic +channels: +- memfs://clusters.example.com/privatecilium.example.com/addons/bootstrap-channel.yaml +containerdConfig: + logLevel: info + version: 1.4.13 diff --git 
a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content new file mode 100644 index 0000000000..467197db0a --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content @@ -0,0 +1,56 @@ +kind: Addons +metadata: + creationTimestamp: null + name: bootstrap +spec: + addons: + - id: k8s-1.16 + manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml + manifestHash: 69dbf29695a6249f4cd08ef7d06f7e9e5b2ce23f097f85c382e8e9bb982c895d + name: kops-controller.addons.k8s.io + needsRollingUpdate: control-plane + selector: + k8s-addon: kops-controller.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: coredns.addons.k8s.io/k8s-1.12.yaml + manifestHash: cd1e8f47fe52b13fee5536b0d4b4429ef256829d87a51cbc189fa0f21ff3503b + name: coredns.addons.k8s.io + selector: + k8s-addon: coredns.addons.k8s.io + version: 9.99.0 + - id: k8s-1.9 + manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml + manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81 + name: kubelet-api.rbac.addons.k8s.io + selector: + k8s-addon: kubelet-api.rbac.addons.k8s.io + version: 9.99.0 + - manifest: limit-range.addons.k8s.io/v1.5.0.yaml + manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2 + name: limit-range.addons.k8s.io + selector: + k8s-addon: limit-range.addons.k8s.io + version: 9.99.0 + - id: k8s-1.12 + manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml + manifestHash: 6c8f01b2470d323965dfb22d410f322e0b429f7acc3831f41a763ec072dfc69b + name: dns-controller.addons.k8s.io + selector: + k8s-addon: dns-controller.addons.k8s.io + version: 9.99.0 + - id: v1.15.0 + manifest: storage-aws.addons.k8s.io/v1.15.0.yaml + manifestHash: 065ae832ddac8d0931e9992d6a76f43a33a36975a38003b34f4c5d86a7d42780 + name: storage-aws.addons.k8s.io + selector: + k8s-addon: storage-aws.addons.k8s.io + version: 9.99.0 + - id: k8s-1.16 + manifest: networking.cilium.io/k8s-1.16-v1.11.yaml + manifestHash: 8a5107386f0fea73a5d7e14cd94fc20219ac1672e6e35bb9aa529128b0d9bec9 + name: networking.cilium.io + needsRollingUpdate: all + selector: + role.kubernetes.io/networking: "1" + version: 9.99.0 diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000..fd5b8a7c05 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,383 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/cluster-service: "true" + name: coredns + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - 
services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system + +--- + +apiVersion: v1 +data: + Corefile: |- + .:53 { + errors + health { + lameduck 5s + } + ready + kubernetes cluster.local. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + } + prometheus :9153 + forward . /etc/resolv.conf { + max_concurrent 1000 + } + cache 30 + loop + reload + loadbalance + } +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + addonmanager.kubernetes.io/mode: EnsureExists + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: coredns + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kube-dns + strategy: + rollingUpdate: + maxSurge: 10% + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + k8s-app: kube-dns + kops.k8s.io/managed-by: kops + spec: + containers: + - args: + - -conf + - /etc/coredns/Corefile + image: registry.k8s.io/coredns/coredns:v1.9.3 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 5 + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + successThreshold: 1 + timeoutSeconds: 5 + name: coredns + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /etc/coredns + name: config-volume + readOnly: true + dnsPolicy: Default + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns + tolerations: + - key: CriticalAddonsOnly + operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + k8s-app: kube-dns + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: ScheduleAnyway + volumes: + - configMap: + name: coredns + name: config-volume + +--- + +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: 
coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: CoreDNS + name: kube-dns + namespace: kube-system + resourceVersion: "0" +spec: + clusterIP: 100.64.0.10 + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP + selector: + k8s-app: kube-dns + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: kube-dns + namespace: kube-system +spec: + maxUnavailable: 50% + selector: + matchLabels: + k8s-app: kube-dns + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - replicationcontrollers/scale + verbs: + - get + - update +- apiGroups: + - extensions + - apps + resources: + - deployments/scale + - replicasets/scale + verbs: + - get + - update +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + name: coredns-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: coredns-autoscaler +subjects: +- kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: coredns.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: coredns.addons.k8s.io + k8s-app: coredns-autoscaler + kubernetes.io/cluster-service: "true" + name: coredns-autoscaler + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + creationTimestamp: null + labels: + k8s-app: coredns-autoscaler + kops.k8s.io/managed-by: kops + spec: + containers: + - command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.5 + name: autoscaler + resources: + requests: + cpu: 20m + memory: 10Mi + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: coredns-autoscaler + tolerations: + - key: CriticalAddonsOnly + operator: Exists diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content 
b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content new file mode 100644 index 0000000000..2eab063fdb --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content @@ -0,0 +1,138 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + version: v1.26.0-alpha.1 + name: dns-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: dns-controller + strategy: + type: Recreate + template: + metadata: + creationTimestamp: null + labels: + k8s-addon: dns-controller.addons.k8s.io + k8s-app: dns-controller + kops.k8s.io/managed-by: kops + version: v1.26.0-alpha.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --watch-ingress=false + - --dns=aws-route53 + - --zone=*/Z1AFAKE1ZON3YO + - --internal-ipv4 + - --zone=*/* + - -v=2 + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/dns-controller:1.26.0-alpha.1 + name: dns-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: dns-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: dns-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - ingress + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: dns-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: dns-controller.addons.k8s.io + name: kops:dns-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops:dns-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:dns-controller diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content 
b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content new file mode 100644 index 0000000000..90d184f3f1 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content @@ -0,0 +1,225 @@ +apiVersion: v1 +data: + config.yaml: | + {"cloud":"aws","configBase":"memfs://clusters.example.com/privatecilium.example.com","secretStore":"memfs://clusters.example.com/privatecilium.example.com/secrets","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.privatecilium.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}} +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.26.0-alpha.1 + name: kops-controller + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: kops-controller + template: + metadata: + annotations: + dns.alpha.kubernetes.io/internal: kops-controller.internal.privatecilium.example.com + creationTimestamp: null + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + kops.k8s.io/managed-by: kops + version: v1.26.0-alpha.1 + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - key: kops.k8s.io/kops-controller-pki + operator: Exists + containers: + - args: + - --v=2 + - --conf=/etc/kubernetes/kops-controller/config/config.yaml + command: null + env: + - name: KUBERNETES_SERVICE_HOST + value: 127.0.0.1 + image: registry.k8s.io/kops/kops-controller:1.26.0-alpha.1 + name: kops-controller + resources: + requests: + cpu: 50m + memory: 50Mi + securityContext: + runAsNonRoot: true + runAsUser: 10011 + volumeMounts: + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + dnsPolicy: Default + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + serviceAccount: kops-controller + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + operator: Exists + - key: node.kubernetes.io/not-ready + operator: Exists + - key: node-role.kubernetes.io/master + operator: Exists + - key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - configMap: + name: kops-controller + name: kops-controller-config + - hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory + name: kops-controller-pki + updateStrategy: + type: OnDelete + +--- + +apiVersion: v1 +kind: 
ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - patch + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create +- apiGroups: + - "" + - coordination.k8s.io + resourceNames: + - kops-controller-leader + resources: + - configmaps + - leases + verbs: + - get + - list + - watch + - patch + - update + - delete +- apiGroups: + - "" + - coordination.k8s.io + resources: + - configmaps + - leases + verbs: + - create + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kops-controller.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kops-controller.addons.k8s.io + name: kops-controller + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kops-controller +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kops-controller diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content new file mode 100644 index 0000000000..36761e1c56 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: kubelet-api.rbac.addons.k8s.io + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-limit-range.addons.k8s.io_content 
b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-limit-range.addons.k8s.io_content new file mode 100644 index 0000000000..4dcdce48b9 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-limit-range.addons.k8s.io_content @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: LimitRange +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: limit-range.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: limit-range.addons.k8s.io + name: limits + namespace: default +spec: + limits: + - defaultRequest: + cpu: 100m + type: Container diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content new file mode 100644 index 0000000000..76047e2a33 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content @@ -0,0 +1,674 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: cilium + namespace: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: cilium-operator + namespace: kube-system + +--- + +apiVersion: v1 +data: + auto-create-cilium-node-resource: "true" + auto-direct-node-routes: "false" + blacklist-conflicting-routes: "false" + bpf-ct-global-any-max: "262144" + bpf-ct-global-tcp-max: "524288" + bpf-lb-algorithm: random + bpf-lb-maglev-table-size: "16381" + bpf-lb-map-max: "65536" + bpf-lb-sock-hostns-only: "false" + bpf-nat-global-max: "524288" + bpf-neigh-global-max: "524288" + bpf-policy-map-max: "16384" + cgroup-root: /run/cilium/cgroupv2 + cluster-name: default + debug: "false" + disable-cnp-status-updates: "true" + disable-endpoint-crd: "false" + enable-bpf-masquerade: "false" + enable-endpoint-health-checking: "true" + enable-endpoint-routes: "true" + enable-ipv4: "true" + enable-ipv6: "false" + enable-ipv6-masquerade: "false" + enable-l7-proxy: "true" + enable-node-port: "false" + enable-remote-node-identity: "true" + enable-service-topology: "false" + identity-allocation-mode: crd + identity-change-grace-period: 5s + install-iptables-rules: "true" + ipam: eni + kube-proxy-replacement: partial + masquerade: "false" + monitor-aggregation: medium + nodes-gc-interval: 5m0s + preallocate-bpf-maps: "false" + sidecar-istio-proxy-image: cilium/istio_proxy + tofqdns-dns-reject-response-code: refused + tofqdns-enable-poller: "false" + tunnel: disabled +kind: ConfigMap +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: cilium-config + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: cilium +rules: +- apiGroups: + - networking.k8s.io + resources: + - 
networkpolicies + verbs: + - get + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + - services + - pods + - endpoints + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - list + - watch + - update + - get +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumendpoints + - ciliumendpoints/status + - ciliumnodes + - ciliumnodes/status + - ciliumidentities + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumegressnatpolicies + verbs: + - '*' + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: cilium-operator +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list + - watch + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + - nodes/status + verbs: + - patch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services/status + verbs: + - update +- apiGroups: + - "" + resources: + - services + - endpoints + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - cilium.io + resources: + - ciliumnetworkpolicies + - ciliumnetworkpolicies/status + - ciliumnetworkpolicies/finalizers + - ciliumclusterwidenetworkpolicies + - ciliumclusterwidenetworkpolicies/status + - ciliumclusterwidenetworkpolicies/finalizers + - ciliumendpoints + - ciliumendpoints/status + - ciliumendpoints/finalizers + - ciliumnodes + - ciliumnodes/status + - ciliumnodes/finalizers + - ciliumidentities + - ciliumidentities/status + - ciliumidentities/finalizers + - ciliumlocalredirectpolicies + - ciliumlocalredirectpolicies/status + - ciliumlocalredirectpolicies/finalizers + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - get + - list + - update + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: cilium +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium +subjects: +- kind: ServiceAccount + name: cilium + namespace: kube-system + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + role.kubernetes.io/networking: "1" + name: cilium-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cilium-operator +subjects: +- kind: ServiceAccount + name: cilium-operator + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: 
DaemonSet +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + k8s-app: cilium + kubernetes.io/cluster-service: "true" + role.kubernetes.io/networking: "1" + name: cilium + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: cilium + kubernetes.io/cluster-service: "true" + template: + metadata: + creationTimestamp: null + labels: + k8s-app: cilium + kops.k8s.io/managed-by: kops + kubernetes.io/cluster-service: "true" + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - args: + - --config-dir=/tmp/cilium/config-map + command: + - cilium-agent + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_CLUSTERMESH_CONFIG + value: /var/lib/cilium/clustermesh/ + - name: CILIUM_CNI_CHAINING_MODE + valueFrom: + configMapKeyRef: + key: cni-chaining-mode + name: cilium-config + optional: true + - name: CILIUM_CUSTOM_CNI_CONF + valueFrom: + configMapKeyRef: + key: custom-cni-conf + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/cilium:v1.11.11 + imagePullPolicy: IfNotPresent + lifecycle: + postStart: + exec: + command: + - /cni-install.sh + - --cni-exclusive=true + preStop: + exec: + command: + - /cni-uninstall.sh + livenessProbe: + failureThreshold: 10 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + name: cilium-agent + readinessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 5 + resources: + requests: + cpu: 25m + memory: 128Mi + securityContext: + privileged: true + startupProbe: + failureThreshold: 105 + httpGet: + host: 127.0.0.1 + httpHeaders: + - name: brief + value: "true" + path: /healthz + port: 9879 + scheme: HTTP + periodSeconds: 2 + successThreshold: null + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /sys/fs/bpf + mountPropagation: Bidirectional + name: bpf-maps + - mountPath: /var/run/cilium + name: cilium-run + - mountPath: /host/opt/cni/bin + name: cni-path + - mountPath: /host/etc/cni/net.d + name: etc-cni-netd + - mountPath: /var/lib/cilium/clustermesh + name: clustermesh-secrets + readOnly: true + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + hostNetwork: true + initContainers: + - command: + - /init-container.sh + env: + - name: CILIUM_ALL_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-state + name: cilium-config + optional: true + - name: CILIUM_BPF_STATE + valueFrom: + configMapKeyRef: + key: clean-cilium-bpf-state + name: cilium-config + optional: true + image: quay.io/cilium/cilium:v1.11.11 + imagePullPolicy: IfNotPresent + name: clean-cilium-state + resources: + limits: + memory: 100Mi + requests: + cpu: 100m + 
memory: 100Mi + securityContext: + privileged: true + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /sys/fs/bpf + name: bpf-maps + - mountPath: /run/cilium/cgroupv2 + mountPropagation: HostToContainer + name: cilium-cgroup + - mountPath: /var/run/cilium + name: cilium-run + priorityClassName: system-node-critical + restartPolicy: Always + serviceAccount: cilium + serviceAccountName: cilium + terminationGracePeriodSeconds: 1 + tolerations: + - operator: Exists + volumes: + - hostPath: + path: /var/run/cilium + type: DirectoryOrCreate + name: cilium-run + - hostPath: + path: /sys/fs/bpf + type: DirectoryOrCreate + name: bpf-maps + - hostPath: + path: /opt/cni/bin + type: DirectoryOrCreate + name: cni-path + - hostPath: + path: /run/cilium/cgroupv2 + type: Directory + name: cilium-cgroup + - hostPath: + path: /etc/cni/net.d + type: DirectoryOrCreate + name: etc-cni-netd + - hostPath: + path: /lib/modules + name: lib-modules + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: xtables-lock + - name: clustermesh-secrets + secret: + defaultMode: 420 + optional: true + secretName: cilium-clustermesh + - configMap: + name: cilium-config + name: cilium-config-path + updateStrategy: + type: OnDelete + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + io.cilium/app: operator + name: cilium-operator + role.kubernetes.io/networking: "1" + name: cilium-operator + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + io.cilium/app: operator + kops.k8s.io/managed-by: kops + name: cilium-operator + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + containers: + - args: + - --config-dir=/tmp/cilium/config-map + - --debug=$(CILIUM_DEBUG) + - --eni-tags=KubernetesCluster=privatecilium.example.com + command: + - cilium-operator + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CILIUM_K8S_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: CILIUM_DEBUG + valueFrom: + configMapKeyRef: + key: debug + name: cilium-config + optional: true + - name: KUBERNETES_SERVICE_HOST + value: api.internal.privatecilium.example.com + - name: KUBERNETES_SERVICE_PORT + value: "443" + image: quay.io/cilium/operator:v1.11.11 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 9234 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 3 + name: cilium-operator + resources: + requests: + cpu: 25m + memory: 128Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /tmp/cilium/config-map + name: cilium-config-path + readOnly: true + hostNetwork: true + nodeSelector: null + priorityClassName: system-cluster-critical + restartPolicy: Always + serviceAccount: cilium-operator + serviceAccountName: cilium-operator + tolerations: + - operator: Exists + topologySpreadConstraints: + - labelSelector: + matchLabels: + io.cilium/app: 
operator + name: cilium-operator + maxSkew: 1 + topologyKey: topology.kubernetes.io/zone + whenUnsatisfiable: ScheduleAnyway + - labelSelector: + matchLabels: + io.cilium/app: operator + name: cilium-operator + maxSkew: 1 + topologyKey: kubernetes.io/hostname + whenUnsatisfiable: DoNotSchedule + volumes: + - configMap: + name: cilium-config + name: cilium-config-path + +--- + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: networking.cilium.io + app.kubernetes.io/managed-by: kops + io.cilium/app: operator + name: cilium-operator + role.kubernetes.io/networking: "1" + name: cilium-operator + namespace: kube-system +spec: + maxUnavailable: 1 + selector: + matchLabels: + io.cilium/app: operator + name: cilium-operator diff --git a/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content new file mode 100644 index 0000000000..21efd54326 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/data/aws_s3_object_privatecilium.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content @@ -0,0 +1,98 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: default +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "false" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: gp2 +parameters: + type: gp2 +provisioner: kubernetes.io/aws-ebs + +--- + +allowVolumeExpansion: true +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + annotations: + storageclass.kubernetes.io/is-default-class: "true" + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: kops-ssd-1-17 +parameters: + encrypted: "true" + type: gp2 +provisioner: kubernetes.io/aws-ebs +volumeBindingMode: WaitForFirstConsumer + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + addon.kops.k8s.io/name: storage-aws.addons.k8s.io + app.kubernetes.io/managed-by: kops + k8s-addon: storage-aws.addons.k8s.io + name: system:aws-cloud-provider +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:aws-cloud-provider +subjects: +- kind: ServiceAccount + name: aws-cloud-provider + namespace: kube-system diff --git a/tests/integration/update_cluster/privatecilium-eni/id_rsa.pub 
b/tests/integration/update_cluster/privatecilium-eni/id_rsa.pub new file mode 100755 index 0000000000..81cb012783 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ== diff --git a/tests/integration/update_cluster/privatecilium-eni/in-v1alpha2.yaml b/tests/integration/update_cluster/privatecilium-eni/in-v1alpha2.yaml new file mode 100644 index 0000000000..c7ceef2520 --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/in-v1alpha2.yaml @@ -0,0 +1,102 @@ +apiVersion: kops.k8s.io/v1alpha2 +kind: Cluster +metadata: + creationTimestamp: "2016-12-12T04:13:14Z" + name: privatecilium.example.com +spec: + kubernetesApiAccess: + - 0.0.0.0/0 + channel: stable + cloudProvider: aws + configBase: memfs://clusters.example.com/privatecilium.example.com + etcdClusters: + - etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: main + - etcdMembers: + - instanceGroup: master-us-test-1a + name: us-test-1a + name: events + iam: {} + kubelet: + anonymousAuth: false + kubernetesVersion: v1.21.0 + masterPublicName: api.privatecilium.example.com + networkCIDR: 172.20.0.0/16 + networking: + cilium: + ipam: "eni" + nonMasqueradeCIDR: 100.64.0.0/10 + sshAccess: + - 0.0.0.0/0 + topology: + masters: private + nodes: private + subnets: + - cidr: 172.20.32.0/19 + name: us-test-1a + type: Private + zone: us-test-1a + - cidr: 172.20.4.0/22 + name: utility-us-test-1a + type: Utility + zone: us-test-1a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-12T04:13:15Z" + name: master-us-test-1a + labels: + kops.k8s.io/cluster: privatecilium.example.com +spec: + associatePublicIp: true + image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404 + machineType: m3.medium + maxSize: 1 + minSize: 1 + role: Master + subnets: + - us-test-1a + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-12T04:13:15Z" + name: nodes + labels: + kops.k8s.io/cluster: privatecilium.example.com +spec: + associatePublicIp: true + image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404 + machineType: t2.medium + maxSize: 2 + minSize: 2 + role: Node + subnets: + - us-test-1a + + +--- + +apiVersion: kops.k8s.io/v1alpha2 +kind: InstanceGroup +metadata: + creationTimestamp: "2016-12-14T15:32:41Z" + name: bastion + labels: + kops.k8s.io/cluster: privatecilium.example.com +spec: + associatePublicIp: true + image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404 + machineType: t2.micro + maxSize: 1 + minSize: 1 + role: Bastion + subnets: + - utility-us-test-1a diff --git a/tests/integration/update_cluster/privatecilium-eni/kubernetes.tf b/tests/integration/update_cluster/privatecilium-eni/kubernetes.tf new file mode 100644 index 0000000000..fdc469ac4e --- /dev/null +++ b/tests/integration/update_cluster/privatecilium-eni/kubernetes.tf @@ -0,0 +1,1265 @@ +locals { + bastion_autoscaling_group_ids = [aws_autoscaling_group.bastion-privatecilium-example-com.id] + bastion_security_group_ids = [aws_security_group.bastion-privatecilium-example-com.id] + bastions_role_arn = aws_iam_role.bastions-privatecilium-example-com.arn + bastions_role_name = aws_iam_role.bastions-privatecilium-example-com.name + cluster_name = 
"privatecilium.example.com" + master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-privatecilium-example-com.id] + master_security_group_ids = [aws_security_group.masters-privatecilium-example-com.id] + masters_role_arn = aws_iam_role.masters-privatecilium-example-com.arn + masters_role_name = aws_iam_role.masters-privatecilium-example-com.name + node_autoscaling_group_ids = [aws_autoscaling_group.nodes-privatecilium-example-com.id] + node_security_group_ids = [aws_security_group.nodes-privatecilium-example-com.id] + node_subnet_ids = [aws_subnet.us-test-1a-privatecilium-example-com.id] + nodes_role_arn = aws_iam_role.nodes-privatecilium-example-com.arn + nodes_role_name = aws_iam_role.nodes-privatecilium-example-com.name + region = "us-test-1" + route_table_private-us-test-1a_id = aws_route_table.private-us-test-1a-privatecilium-example-com.id + route_table_public_id = aws_route_table.privatecilium-example-com.id + subnet_us-test-1a_id = aws_subnet.us-test-1a-privatecilium-example-com.id + subnet_utility-us-test-1a_id = aws_subnet.utility-us-test-1a-privatecilium-example-com.id + vpc_cidr_block = aws_vpc.privatecilium-example-com.cidr_block + vpc_id = aws_vpc.privatecilium-example-com.id +} + +output "bastion_autoscaling_group_ids" { + value = [aws_autoscaling_group.bastion-privatecilium-example-com.id] +} + +output "bastion_security_group_ids" { + value = [aws_security_group.bastion-privatecilium-example-com.id] +} + +output "bastions_role_arn" { + value = aws_iam_role.bastions-privatecilium-example-com.arn +} + +output "bastions_role_name" { + value = aws_iam_role.bastions-privatecilium-example-com.name +} + +output "cluster_name" { + value = "privatecilium.example.com" +} + +output "master_autoscaling_group_ids" { + value = [aws_autoscaling_group.master-us-test-1a-masters-privatecilium-example-com.id] +} + +output "master_security_group_ids" { + value = [aws_security_group.masters-privatecilium-example-com.id] +} + +output "masters_role_arn" { + value = aws_iam_role.masters-privatecilium-example-com.arn +} + +output "masters_role_name" { + value = aws_iam_role.masters-privatecilium-example-com.name +} + +output "node_autoscaling_group_ids" { + value = [aws_autoscaling_group.nodes-privatecilium-example-com.id] +} + +output "node_security_group_ids" { + value = [aws_security_group.nodes-privatecilium-example-com.id] +} + +output "node_subnet_ids" { + value = [aws_subnet.us-test-1a-privatecilium-example-com.id] +} + +output "nodes_role_arn" { + value = aws_iam_role.nodes-privatecilium-example-com.arn +} + +output "nodes_role_name" { + value = aws_iam_role.nodes-privatecilium-example-com.name +} + +output "region" { + value = "us-test-1" +} + +output "route_table_private-us-test-1a_id" { + value = aws_route_table.private-us-test-1a-privatecilium-example-com.id +} + +output "route_table_public_id" { + value = aws_route_table.privatecilium-example-com.id +} + +output "subnet_us-test-1a_id" { + value = aws_subnet.us-test-1a-privatecilium-example-com.id +} + +output "subnet_utility-us-test-1a_id" { + value = aws_subnet.utility-us-test-1a-privatecilium-example-com.id +} + +output "vpc_cidr_block" { + value = aws_vpc.privatecilium-example-com.cidr_block +} + +output "vpc_id" { + value = aws_vpc.privatecilium-example-com.id +} + +provider "aws" { + region = "us-test-1" +} + +provider "aws" { + alias = "files" + region = "us-test-1" +} + +resource "aws_autoscaling_group" "bastion-privatecilium-example-com" { + enabled_metrics = ["GroupDesiredCapacity", 
"GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.bastion-privatecilium-example-com.id + version = aws_launch_template.bastion-privatecilium-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "bastion.privatecilium.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "privatecilium.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "bastion.privatecilium.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/bastion" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "bastion" + } + tag { + key = "kubernetes.io/cluster/privatecilium.example.com" + propagate_at_launch = true + value = "owned" + } + target_group_arns = [aws_lb_target_group.bastion-privatecilium-exa-l2ms01.id] + vpc_zone_identifier = [aws_subnet.utility-us-test-1a-privatecilium-example-com.id] +} + +resource "aws_autoscaling_group" "master-us-test-1a-masters-privatecilium-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.master-us-test-1a-masters-privatecilium-example-com.id + version = aws_launch_template.master-us-test-1a-masters-privatecilium-example-com.latest_version + } + load_balancers = [aws_elb.api-privatecilium-example-com.id] + max_instance_lifetime = 0 + max_size = 1 + metrics_granularity = "1Minute" + min_size = 1 + name = "master-us-test-1a.masters.privatecilium.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "privatecilium.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "master-us-test-1a.masters.privatecilium.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "master" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/control-plane" + propagate_at_launch = true + value = "1" + } + tag { + key = "k8s.io/role/master" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "master-us-test-1a" + } + tag { + key = "kubernetes.io/cluster/privatecilium.example.com" + propagate_at_launch = true + value = "owned" + 
} + vpc_zone_identifier = [aws_subnet.us-test-1a-privatecilium-example-com.id] +} + +resource "aws_autoscaling_group" "nodes-privatecilium-example-com" { + enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"] + launch_template { + id = aws_launch_template.nodes-privatecilium-example-com.id + version = aws_launch_template.nodes-privatecilium-example-com.latest_version + } + max_instance_lifetime = 0 + max_size = 2 + metrics_granularity = "1Minute" + min_size = 2 + name = "nodes.privatecilium.example.com" + protect_from_scale_in = false + tag { + key = "KubernetesCluster" + propagate_at_launch = true + value = "privatecilium.example.com" + } + tag { + key = "Name" + propagate_at_launch = true + value = "nodes.privatecilium.example.com" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" + propagate_at_launch = true + value = "node" + } + tag { + key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" + propagate_at_launch = true + value = "" + } + tag { + key = "k8s.io/role/node" + propagate_at_launch = true + value = "1" + } + tag { + key = "kops.k8s.io/instancegroup" + propagate_at_launch = true + value = "nodes" + } + tag { + key = "kubernetes.io/cluster/privatecilium.example.com" + propagate_at_launch = true + value = "owned" + } + vpc_zone_identifier = [aws_subnet.us-test-1a-privatecilium-example-com.id] +} + +resource "aws_ebs_volume" "us-test-1a-etcd-events-privatecilium-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "us-test-1a.etcd-events.privatecilium.example.com" + "k8s.io/etcd/events" = "us-test-1a/us-test-1a" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_ebs_volume" "us-test-1a-etcd-main-privatecilium-example-com" { + availability_zone = "us-test-1a" + encrypted = false + iops = 3000 + size = 20 + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "us-test-1a.etcd-main.privatecilium.example.com" + "k8s.io/etcd/main" = "us-test-1a/us-test-1a" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + throughput = 125 + type = "gp3" +} + +resource "aws_eip" "us-test-1a-privatecilium-example-com" { + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "us-test-1a.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + vpc = true +} + +resource "aws_elb" "api-privatecilium-example-com" { + connection_draining = true + connection_draining_timeout = 300 + cross_zone_load_balancing = false + health_check { + healthy_threshold = 2 + interval = 10 + target = "SSL:443" + timeout = 5 + unhealthy_threshold = 2 + } + idle_timeout = 300 + listener { + instance_port = 443 + instance_protocol = "TCP" + lb_port = 443 + lb_protocol = "TCP" + } + name = "api-privatecilium-example-fnt793" + security_groups = [aws_security_group.api-elb-privatecilium-example-com.id] + subnets = [aws_subnet.utility-us-test-1a-privatecilium-example-com.id] + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "api.privatecilium.example.com" + 
"kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_iam_instance_profile" "bastions-privatecilium-example-com" { + name = "bastions.privatecilium.example.com" + role = aws_iam_role.bastions-privatecilium-example-com.name + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "bastions.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_iam_instance_profile" "masters-privatecilium-example-com" { + name = "masters.privatecilium.example.com" + role = aws_iam_role.masters-privatecilium-example-com.name + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "masters.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_iam_instance_profile" "nodes-privatecilium-example-com" { + name = "nodes.privatecilium.example.com" + role = aws_iam_role.nodes-privatecilium-example-com.name + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "nodes.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_iam_role" "bastions-privatecilium-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_bastions.privatecilium.example.com_policy") + name = "bastions.privatecilium.example.com" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "bastions.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_iam_role" "masters-privatecilium-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_masters.privatecilium.example.com_policy") + name = "masters.privatecilium.example.com" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "masters.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_iam_role" "nodes-privatecilium-example-com" { + assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.privatecilium.example.com_policy") + name = "nodes.privatecilium.example.com" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "nodes.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_iam_role_policy" "bastions-privatecilium-example-com" { + name = "bastions.privatecilium.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_bastions.privatecilium.example.com_policy") + role = aws_iam_role.bastions-privatecilium-example-com.name +} + +resource "aws_iam_role_policy" "masters-privatecilium-example-com" { + name = "masters.privatecilium.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_masters.privatecilium.example.com_policy") + role = aws_iam_role.masters-privatecilium-example-com.name +} + +resource "aws_iam_role_policy" "nodes-privatecilium-example-com" { + name = "nodes.privatecilium.example.com" + policy = file("${path.module}/data/aws_iam_role_policy_nodes.privatecilium.example.com_policy") + role = aws_iam_role.nodes-privatecilium-example-com.name +} + +resource "aws_internet_gateway" "privatecilium-example-com" { + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + vpc_id = aws_vpc.privatecilium-example-com.id +} + +resource "aws_key_pair" "kubernetes-privatecilium-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" { + 
key_name = "kubernetes.privatecilium.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57" + public_key = file("${path.module}/data/aws_key_pair_kubernetes.privatecilium.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key") + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_launch_template" "bastion-privatecilium-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 32 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.bastions-privatecilium-example-com.id + } + image_id = "ami-12345678" + instance_type = "t2.micro" + key_name = aws_key_pair.kubernetes-privatecilium-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "bastion.privatecilium.example.com" + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.bastion-privatecilium-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "bastion.privatecilium.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/bastion" = "1" + "kops.k8s.io/instancegroup" = "bastion" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "bastion.privatecilium.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/bastion" = "1" + "kops.k8s.io/instancegroup" = "bastion" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "bastion.privatecilium.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/bastion" = "1" + "kops.k8s.io/instancegroup" = "bastion" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_launch_template" "master-us-test-1a-masters-privatecilium-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 64 + volume_type = "gp3" + } + } + block_device_mappings { + device_name = "/dev/sdc" + virtual_name = "ephemeral0" + } + iam_instance_profile { + name = aws_iam_instance_profile.masters-privatecilium-example-com.id + } + image_id = "ami-12345678" + instance_type = "m3.medium" + key_name = aws_key_pair.kubernetes-privatecilium-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + http_put_response_hop_limit = 1 + 
http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "master-us-test-1a.masters.privatecilium.example.com" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.masters-privatecilium-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "master-us-test-1a.masters.privatecilium.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "master-us-test-1a.masters.privatecilium.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "master-us-test-1a.masters.privatecilium.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = "" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "master" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = "" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/master" = "" + "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = "" + "k8s.io/role/control-plane" = "1" + "k8s.io/role/master" = "1" + "kops.k8s.io/instancegroup" = "master-us-test-1a" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.privatecilium.example.com_user_data") +} + +resource "aws_launch_template" "nodes-privatecilium-example-com" { + block_device_mappings { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + encrypted = true + iops = 3000 + throughput = 125 + volume_size = 128 + volume_type = "gp3" + } + } + iam_instance_profile { + name = aws_iam_instance_profile.nodes-privatecilium-example-com.id + } + image_id = "ami-12345678" + instance_type = "t2.medium" + key_name = aws_key_pair.kubernetes-privatecilium-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id + lifecycle { + create_before_destroy = true + } + metadata_options { + http_endpoint = "enabled" + http_protocol_ipv6 = "disabled" + 
http_put_response_hop_limit = 1 + http_tokens = "optional" + } + monitoring { + enabled = false + } + name = "nodes.privatecilium.example.com" + network_interfaces { + associate_public_ip_address = false + delete_on_termination = true + ipv6_address_count = 0 + security_groups = [aws_security_group.nodes-privatecilium-example-com.id] + } + tag_specifications { + resource_type = "instance" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "nodes.privatecilium.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + } + tag_specifications { + resource_type = "volume" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "nodes.privatecilium.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + } + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "nodes.privatecilium.example.com" + "k8s.io/cluster-autoscaler/node-template/label/kubernetes.io/role" = "node" + "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = "" + "k8s.io/role/node" = "1" + "kops.k8s.io/instancegroup" = "nodes" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + user_data = filebase64("${path.module}/data/aws_launch_template_nodes.privatecilium.example.com_user_data") +} + +resource "aws_lb" "bastion-privatecilium-example-com" { + enable_cross_zone_load_balancing = false + internal = false + load_balancer_type = "network" + name = "bastion-privatecilium-exa-l2ms01" + subnet_mapping { + subnet_id = aws_subnet.utility-us-test-1a-privatecilium-example-com.id + } + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "bastion.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_lb_listener" "bastion-privatecilium-example-com-22" { + default_action { + target_group_arn = aws_lb_target_group.bastion-privatecilium-exa-l2ms01.id + type = "forward" + } + load_balancer_arn = aws_lb.bastion-privatecilium-example-com.id + port = 22 + protocol = "TCP" +} + +resource "aws_lb_target_group" "bastion-privatecilium-exa-l2ms01" { + health_check { + healthy_threshold = 2 + interval = 10 + protocol = "TCP" + unhealthy_threshold = 2 + } + name = "bastion-privatecilium-exa-l2ms01" + port = 22 + protocol = "TCP" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "bastion-privatecilium-exa-l2ms01" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + vpc_id = aws_vpc.privatecilium-example-com.id +} + +resource "aws_nat_gateway" "us-test-1a-privatecilium-example-com" { + allocation_id = aws_eip.us-test-1a-privatecilium-example-com.id + subnet_id = aws_subnet.utility-us-test-1a-privatecilium-example-com.id + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "us-test-1a.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_route" "route-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.privatecilium-example-com.id 
+ route_table_id = aws_route_table.privatecilium-example-com.id +} + +resource "aws_route" "route-__--0" { + destination_ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.privatecilium-example-com.id + route_table_id = aws_route_table.privatecilium-example-com.id +} + +resource "aws_route" "route-private-us-test-1a-0-0-0-0--0" { + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.us-test-1a-privatecilium-example-com.id + route_table_id = aws_route_table.private-us-test-1a-privatecilium-example-com.id +} + +resource "aws_route53_record" "api-privatecilium-example-com" { + alias { + evaluate_target_health = false + name = aws_elb.api-privatecilium-example-com.dns_name + zone_id = aws_elb.api-privatecilium-example-com.zone_id + } + name = "api.privatecilium.example.com" + type = "A" + zone_id = "/hostedzone/Z1AFAKE1ZON3YO" +} + +resource "aws_route_table" "private-us-test-1a-privatecilium-example-com" { + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "private-us-test-1a.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + "kubernetes.io/kops/role" = "private-us-test-1a" + } + vpc_id = aws_vpc.privatecilium-example-com.id +} + +resource "aws_route_table" "privatecilium-example-com" { + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + "kubernetes.io/kops/role" = "public" + } + vpc_id = aws_vpc.privatecilium-example-com.id +} + +resource "aws_route_table_association" "private-us-test-1a-privatecilium-example-com" { + route_table_id = aws_route_table.private-us-test-1a-privatecilium-example-com.id + subnet_id = aws_subnet.us-test-1a-privatecilium-example-com.id +} + +resource "aws_route_table_association" "utility-us-test-1a-privatecilium-example-com" { + route_table_id = aws_route_table.privatecilium-example-com.id + subnet_id = aws_subnet.utility-us-test-1a-privatecilium-example-com.id +} + +resource "aws_s3_object" "cluster-completed-spec" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content") + key = "clusters.example.com/privatecilium.example.com/cluster-completed.spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-events" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content") + key = "clusters.example.com/privatecilium.example.com/backups/etcd/events/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "etcd-cluster-spec-main" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content") + key = "clusters.example.com/privatecilium.example.com/backups/etcd/main/control/etcd-cluster-spec" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "kops-version-txt" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_kops-version.txt_content") + key = "clusters.example.com/privatecilium.example.com/kops-version.txt" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content") + key = 
"clusters.example.com/privatecilium.example.com/manifests/etcd/events-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content") + key = "clusters.example.com/privatecilium.example.com/manifests/etcd/main-master-us-test-1a.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content") + key = "clusters.example.com/privatecilium.example.com/manifests/static/kube-apiserver-healthcheck.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-master-us-test-1a" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test-1a_content") + key = "clusters.example.com/privatecilium.example.com/igconfig/control-plane/master-us-test-1a/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "nodeupconfig-nodes" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content") + key = "clusters.example.com/privatecilium.example.com/igconfig/node/nodes/nodeupconfig.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "privatecilium-example-com-addons-bootstrap" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-bootstrap_content") + key = "clusters.example.com/privatecilium.example.com/addons/bootstrap-channel.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "privatecilium-example-com-addons-coredns-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/privatecilium.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "privatecilium-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content") + key = "clusters.example.com/privatecilium.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "privatecilium-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content") + key = "clusters.example.com/privatecilium.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "privatecilium-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content") + key = 
"clusters.example.com/privatecilium.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "privatecilium-example-com-addons-limit-range-addons-k8s-io" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-limit-range.addons.k8s.io_content") + key = "clusters.example.com/privatecilium.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "privatecilium-example-com-addons-networking-cilium-io-k8s-1-16" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-networking.cilium.io-k8s-1.16_content") + key = "clusters.example.com/privatecilium.example.com/addons/networking.cilium.io/k8s-1.16-v1.11.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_s3_object" "privatecilium-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" { + bucket = "testingBucket" + content = file("${path.module}/data/aws_s3_object_privatecilium.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content") + key = "clusters.example.com/privatecilium.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml" + provider = aws.files + server_side_encryption = "AES256" +} + +resource "aws_security_group" "api-elb-privatecilium-example-com" { + description = "Security group for api ELB" + name = "api-elb.privatecilium.example.com" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "api-elb.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + vpc_id = aws_vpc.privatecilium-example-com.id +} + +resource "aws_security_group" "bastion-privatecilium-example-com" { + description = "Security group for bastion" + name = "bastion.privatecilium.example.com" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "bastion.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + vpc_id = aws_vpc.privatecilium-example-com.id +} + +resource "aws_security_group" "masters-privatecilium-example-com" { + description = "Security group for masters" + name = "masters.privatecilium.example.com" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "masters.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + vpc_id = aws_vpc.privatecilium-example-com.id +} + +resource "aws_security_group" "nodes-privatecilium-example-com" { + description = "Security group for nodes" + name = "nodes.privatecilium.example.com" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "nodes.privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } + vpc_id = aws_vpc.privatecilium-example-com.id +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-bastion-privatecilium-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.bastion-privatecilium-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-api-elb-privatecilium-example-com" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.api-elb-privatecilium-example-com.id + to_port = 443 + type = "ingress" +} + +resource 
"aws_security_group_rule" "from-172-20-4-0--22-ingress-tcp-22to22-bastion-privatecilium-example-com" { + cidr_blocks = ["172.20.4.0/22"] + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.bastion-privatecilium-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-api-elb-privatecilium-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.api-elb-privatecilium-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-api-elb-privatecilium-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.api-elb-privatecilium-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-bastion-privatecilium-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.bastion-privatecilium-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-bastion-privatecilium-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.bastion-privatecilium-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-bastion-privatecilium-example-com-ingress-tcp-22to22-masters-privatecilium-example-com" { + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.masters-privatecilium-example-com.id + source_security_group_id = aws_security_group.bastion-privatecilium-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-bastion-privatecilium-example-com-ingress-tcp-22to22-nodes-privatecilium-example-com" { + from_port = 22 + protocol = "tcp" + security_group_id = aws_security_group.nodes-privatecilium-example-com.id + source_security_group_id = aws_security_group.bastion-privatecilium-example-com.id + to_port = 22 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-privatecilium-example-com-egress-all-0to0-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-privatecilium-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-privatecilium-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.masters-privatecilium-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-masters-privatecilium-example-com-ingress-all-0to0-masters-privatecilium-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.masters-privatecilium-example-com.id + source_security_group_id = aws_security_group.masters-privatecilium-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-masters-privatecilium-example-com-ingress-all-0to0-nodes-privatecilium-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-privatecilium-example-com.id + source_security_group_id = aws_security_group.masters-privatecilium-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-privatecilium-example-com-egress-all-0to0-0-0-0-0--0" { + 
cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-privatecilium-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-privatecilium-example-com-egress-all-0to0-__--0" { + from_port = 0 + ipv6_cidr_blocks = ["::/0"] + protocol = "-1" + security_group_id = aws_security_group.nodes-privatecilium-example-com.id + to_port = 0 + type = "egress" +} + +resource "aws_security_group_rule" "from-nodes-privatecilium-example-com-ingress-all-0to0-nodes-privatecilium-example-com" { + from_port = 0 + protocol = "-1" + security_group_id = aws_security_group.nodes-privatecilium-example-com.id + source_security_group_id = aws_security_group.nodes-privatecilium-example-com.id + to_port = 0 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-privatecilium-example-com-ingress-tcp-1to2379-masters-privatecilium-example-com" { + from_port = 1 + protocol = "tcp" + security_group_id = aws_security_group.masters-privatecilium-example-com.id + source_security_group_id = aws_security_group.nodes-privatecilium-example-com.id + to_port = 2379 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-privatecilium-example-com-ingress-tcp-2382to4000-masters-privatecilium-example-com" { + from_port = 2382 + protocol = "tcp" + security_group_id = aws_security_group.masters-privatecilium-example-com.id + source_security_group_id = aws_security_group.nodes-privatecilium-example-com.id + to_port = 4000 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-privatecilium-example-com-ingress-tcp-4003to65535-masters-privatecilium-example-com" { + from_port = 4003 + protocol = "tcp" + security_group_id = aws_security_group.masters-privatecilium-example-com.id + source_security_group_id = aws_security_group.nodes-privatecilium-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "from-nodes-privatecilium-example-com-ingress-udp-1to65535-masters-privatecilium-example-com" { + from_port = 1 + protocol = "udp" + security_group_id = aws_security_group.masters-privatecilium-example-com.id + source_security_group_id = aws_security_group.nodes-privatecilium-example-com.id + to_port = 65535 + type = "ingress" +} + +resource "aws_security_group_rule" "https-elb-to-master" { + from_port = 443 + protocol = "tcp" + security_group_id = aws_security_group.masters-privatecilium-example-com.id + source_security_group_id = aws_security_group.api-elb-privatecilium-example-com.id + to_port = 443 + type = "ingress" +} + +resource "aws_security_group_rule" "icmp-pmtu-api-elb-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 3 + protocol = "icmp" + security_group_id = aws_security_group.api-elb-privatecilium-example-com.id + to_port = 4 + type = "ingress" +} + +resource "aws_security_group_rule" "icmp-pmtu-ssh-nlb-0-0-0-0--0" { + cidr_blocks = ["0.0.0.0/0"] + from_port = 3 + protocol = "icmp" + security_group_id = aws_security_group.bastion-privatecilium-example-com.id + to_port = 4 + type = "ingress" +} + +resource "aws_security_group_rule" "icmp-pmtu-ssh-nlb-172-20-4-0--22" { + cidr_blocks = ["172.20.4.0/22"] + from_port = 3 + protocol = "icmp" + security_group_id = aws_security_group.bastion-privatecilium-example-com.id + to_port = 4 + type = "ingress" +} + +resource "aws_subnet" "us-test-1a-privatecilium-example-com" { + availability_zone = "us-test-1a" + cidr_block = "172.20.32.0/19" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + 
"Name" = "us-test-1a.privatecilium.example.com" + "SubnetType" = "Private" + "kops.k8s.io/instance-group/master-us-test-1a" = "true" + "kops.k8s.io/instance-group/nodes" = "true" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + "kubernetes.io/role/internal-elb" = "1" + } + vpc_id = aws_vpc.privatecilium-example-com.id +} + +resource "aws_subnet" "utility-us-test-1a-privatecilium-example-com" { + availability_zone = "us-test-1a" + cidr_block = "172.20.4.0/22" + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "utility-us-test-1a.privatecilium.example.com" + "SubnetType" = "Utility" + "kops.k8s.io/instance-group/bastion" = "true" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + "kubernetes.io/role/elb" = "1" + } + vpc_id = aws_vpc.privatecilium-example-com.id +} + +resource "aws_vpc" "privatecilium-example-com" { + assign_generated_ipv6_cidr_block = true + cidr_block = "172.20.0.0/16" + enable_dns_hostnames = true + enable_dns_support = true + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options" "privatecilium-example-com" { + domain_name = "us-test-1.compute.internal" + domain_name_servers = ["AmazonProvidedDNS"] + tags = { + "KubernetesCluster" = "privatecilium.example.com" + "Name" = "privatecilium.example.com" + "kubernetes.io/cluster/privatecilium.example.com" = "owned" + } +} + +resource "aws_vpc_dhcp_options_association" "privatecilium-example-com" { + dhcp_options_id = aws_vpc_dhcp_options.privatecilium-example-com.id + vpc_id = aws_vpc.privatecilium-example-com.id +} + +terraform { + required_version = ">= 0.15.0" + required_providers { + aws = { + "configuration_aliases" = [aws.files] + "source" = "hashicorp/aws" + "version" = ">= 4.0.0" + } + } +} diff --git a/upup/pkg/fi/cloudup/new_cluster.go b/upup/pkg/fi/cloudup/new_cluster.go index e07e2990cb..6d36ec4be9 100644 --- a/upup/pkg/fi/cloudup/new_cluster.go +++ b/upup/pkg/fi/cloudup/new_cluster.go @@ -1141,6 +1141,8 @@ func setupNetworking(opt *NewClusterOptions, cluster *api.Cluster) error { case "cilium-etcd": addCiliumNetwork(cluster) cluster.Spec.Networking.Cilium.EtcdManaged = true + case "cilium-eni": + addCiliumNetwork(cluster) case "gce": cluster.Spec.Networking.GCE = &api.GCENetworkingSpec{} default: