chore: Remove support for Kubernetes 1.28

Signed-off-by: Ciprian Hacman <ciprian@hakman.dev>
Author: Ciprian Hacman
Date: 2025-08-30 15:19:59 +03:00
Parent: adfdc7797e
Commit: d3b5bfceff
126 changed files with 19 additions and 21878 deletions

View File

@@ -46,8 +46,6 @@ var MagicTimestamp = metav1.Time{Time: time.Date(2017, 1, 1, 0, 0, 0, 0, time.UT
// TestCreateClusterMinimal runs kops create cluster minimal.example.com --zones us-test-1a
func TestCreateClusterMinimal(t *testing.T) {
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.27", "v1alpha2")
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.28", "v1alpha2")
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.29", "v1alpha2")
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.30", "v1alpha2")
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.31", "v1alpha2")

View File

@@ -246,28 +246,6 @@ func TestMinimalAWS(t *testing.T) {
runTestTerraformAWS(t)
}
// TestMinimal runs the test on a minimum configuration
func TestMinimal_v1_27(t *testing.T) {
newIntegrationTest("minimal.example.com", "minimal-1.27").
withAddons(
awsEBSCSIAddon,
dnsControllerAddon,
awsCCMAddon,
).
runTestTerraformAWS(t)
}
// TestMinimal runs the test on a minimum configuration
func TestMinimal_v1_28(t *testing.T) {
newIntegrationTest("minimal.example.com", "minimal-1.28").
withAddons(
awsEBSCSIAddon,
dnsControllerAddon,
awsCCMAddon,
).
runTestTerraformAWS(t)
}
// TestMinimal runs the test on a minimum configuration
func TestMinimal_v1_29(t *testing.T) {
newIntegrationTest("minimal.example.com", "minimal-1.29").
@@ -722,15 +700,6 @@ func TestPrivateCiliumENI(t *testing.T) {
runTestTerraformAWS(t)
}
// TestPrivateCanal runs the test on a configuration with private topology, canal networking
func TestPrivateCanal(t *testing.T) {
newIntegrationTest("privatecanal.example.com", "privatecanal").
withPrivate().
withDefaultAddons30().
withAddons(canalAddon).
runTestTerraformAWS(t)
}
const kopeioNetworkingAddon = "networking.kope.io-k8s-1.12"
// TestPrivateKopeio runs the test on a configuration with private topology, kopeio networking

View File

@@ -48,7 +48,7 @@ func TestTaintsApplied(t *testing.T) {
expectTaints []string
}{
{
version: "1.28.0",
version: "1.29.0",
taints: []string{"foo", "bar", "baz"},
expectTaints: []string{"foo", "bar", "baz", "node-role.kubernetes.io/control-plane=:NoSchedule"},
},

View File

@@ -73,7 +73,7 @@ func UseExternalKubeletCredentialProvider(k8sVersion *KubernetesVersion, cloudPr
case kops.CloudProviderGCE:
return k8sVersion.IsGTE("1.29")
case kops.CloudProviderAWS:
return k8sVersion.IsGTE("1.27")
return true
default:
return false
}
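Net effect: with 1.28 support removed, every remaining AWS version already satisfied the old IsGTE("1.27") gate, so the branch collapses to a constant. A sketch of the resulting helper, assuming the parameter name and type that the truncated hunk header cuts off:

func UseExternalKubeletCredentialProvider(k8sVersion *KubernetesVersion, cloudProvider kops.CloudProviderID) bool {
    switch cloudProvider {
    case kops.CloudProviderGCE:
        // GCE still gates on the Kubernetes version.
        return k8sVersion.IsGTE("1.29")
    case kops.CloudProviderAWS:
        // Was k8sVersion.IsGTE("1.27"); always true for supported versions now.
        return true
    default:
        return false
    }
}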

View File

@@ -1127,11 +1127,7 @@ func validateNetworking(cluster *kops.Cluster, v *kops.NetworkingSpec, fldPath *
}
if v.Canal != nil {
if cluster.IsKubernetesGTE("1.28") {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("canal"), "Canal is not supported for Kubernetes >= 1.28"))
} else {
allErrs = append(allErrs, validateNetworkingCanal(cluster, v.Canal, fldPath.Child("canal"))...)
}
allErrs = append(allErrs, field.Forbidden(fldPath.Child("canal"), "Canal is not supported for Kubernetes >= 1.28"))
}
if v.KubeRouter != nil {
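Net effect: the Canal branch loses its version gate. The pre-1.28 fallback to validateNetworkingCanal is gone, so any spec that still sets canal is rejected outright. A sketch of the surviving branch:

if v.Canal != nil {
    // No IsKubernetesGTE("1.28") check remains: all supported versions are >= 1.28.
    allErrs = append(allErrs, field.Forbidden(fldPath.Child("canal"), "Canal is not supported for Kubernetes >= 1.28"))
}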

View File

@@ -379,7 +379,7 @@ func TestValidateKubeControllermanager(t *testing.T) {
if g.Cluster == nil {
g.Cluster = &kops.Cluster{
Spec: kops.ClusterSpec{
KubernetesVersion: "1.28.0",
KubernetesVersion: "1.29.0",
},
}
}
@@ -436,7 +436,7 @@ func Test_Validate_Networking_Flannel(t *testing.T) {
for _, g := range grid {
cluster := &kops.Cluster{
Spec: kops.ClusterSpec{
KubernetesVersion: "1.27.0",
KubernetesVersion: "1.29.0",
Networking: kops.NetworkingSpec{
NetworkCIDR: "10.0.0.0/8",
NonMasqueradeCIDR: "100.64.0.0/10",
@@ -502,7 +502,7 @@ func Test_Validate_Networking_Kindnet(t *testing.T) {
for _, g := range grid {
cluster := &kops.Cluster{
Spec: kops.ClusterSpec{
KubernetesVersion: "1.27.0",
KubernetesVersion: "1.29.0",
Networking: kops.NetworkingSpec{
NetworkCIDR: "10.0.0.0/8",
NonMasqueradeCIDR: "100.64.0.0/10",
@@ -596,7 +596,7 @@ func Test_Validate_Networking_OverlappingCIDR(t *testing.T) {
t.Run(g.Name, func(t *testing.T) {
cluster := &kops.Cluster{
Spec: kops.ClusterSpec{
KubernetesVersion: "1.27.0",
KubernetesVersion: "1.29.0",
},
}
cluster.Spec.Networking = g.Networking

View File

@@ -307,8 +307,6 @@ func (b *AutoscalingGroupModelBuilder) buildLaunchTemplateTask(c *fi.CloudupMode
if ig.Spec.InstanceMetadata != nil && ig.Spec.InstanceMetadata.HTTPTokens != nil {
lt.HTTPTokens = fi.PtrTo(ec2types.LaunchTemplateHttpTokensState(fi.ValueOf(ig.Spec.InstanceMetadata.HTTPTokens)))
} else if b.IsKubernetesLT("1.27") {
lt.HTTPTokens = fi.PtrTo(ec2types.LaunchTemplateHttpTokensStateOptional)
}
switch rootVolumeType {
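Net effect: only an explicit spec.instanceMetadata.httpTokens is applied here; the pre-1.27 fallback to IMDSv1 ("optional") tokens is removed, leaving unset fields to whatever default the builder applies elsewhere (presumably IMDSv2 "required"; that default is outside this hunk). A sketch of the remaining logic:

if ig.Spec.InstanceMetadata != nil && ig.Spec.InstanceMetadata.HTTPTokens != nil {
    lt.HTTPTokens = fi.PtrTo(ec2types.LaunchTemplateHttpTokensState(fi.ValueOf(ig.Spec.InstanceMetadata.HTTPTokens)))
}
// No version-gated LaunchTemplateHttpTokensStateOptional fallback anymore.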

View File

@@ -194,10 +194,6 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(cluster *kops.Cluster) error
if _, found := c.FeatureGates["InTreePluginAWSUnregister"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.31") {
c.FeatureGates["InTreePluginAWSUnregister"] = "true"
}
if _, found := c.FeatureGates["CSIMigrationAWS"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.27") {
c.FeatureGates["CSIMigrationAWS"] = "true"
}
}
return nil
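The same CSIMigrationAWS removal repeats in the kube-controller-manager, kubelet, and kube-scheduler builders below. Upstream, CSIMigrationAWS went GA and was locked on before the oldest release kops still supports, so defaulting it had become a no-op; only the InTreePluginAWSUnregister default survives, and only below 1.31. A sketch of the remaining pattern, using this builder's variable names:

if _, found := c.FeatureGates["InTreePluginAWSUnregister"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.31") {
    c.FeatureGates["InTreePluginAWSUnregister"] = "true"
}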

View File

@@ -42,11 +42,6 @@ func (b *ContainerdOptionsBuilder) BuildOptions(o *kops.Cluster) error {
// Set version based on Kubernetes version
if fi.ValueOf(containerd.Version) == "" {
switch {
case b.IsKubernetesLT("1.27.2"):
containerd.Version = fi.PtrTo("1.6.20")
containerd.Runc = &kops.Runc{
Version: fi.PtrTo("1.1.5"),
}
case b.IsKubernetesLT("1.32"):
containerd.Version = fi.PtrTo("1.7.28")
containerd.Runc = &kops.Runc{
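With the pre-1.27.2 branch (containerd 1.6.20 / runc 1.1.5) removed, the lowest surviving case covers everything below 1.32. The hunk truncates the Runc literal; pairing containerd 1.7.28 with runc 1.3.0 matches the deleted minimal-1.28 expected output further down. A sketch:

if fi.ValueOf(containerd.Version) == "" {
    switch {
    case b.IsKubernetesLT("1.32"):
        containerd.Version = fi.PtrTo("1.7.28")
        containerd.Runc = &kops.Runc{Version: fi.PtrTo("1.3.0")}
        // Branches for 1.32 and newer fall outside this hunk.
    }
}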

View File

@@ -161,10 +161,6 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o *kops.Cluster) erro
if _, found := kcm.FeatureGates["InTreePluginAWSUnregister"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.31") {
kcm.FeatureGates["InTreePluginAWSUnregister"] = "true"
}
if _, found := kcm.FeatureGates["CSIMigrationAWS"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.27") {
kcm.FeatureGates["CSIMigrationAWS"] = "true"
}
}
return nil

View File

@@ -183,10 +183,6 @@ func (b *KubeletOptionsBuilder) configureKubelet(cluster *kops.Cluster, kubelet
if _, found := kubelet.FeatureGates["InTreePluginAWSUnregister"]; !found && kubernetesVersion.IsLT("1.31") {
kubelet.FeatureGates["InTreePluginAWSUnregister"] = "true"
}
if _, found := kubelet.FeatureGates["CSIMigrationAWS"]; !found && kubernetesVersion.IsLT("1.27") {
kubelet.FeatureGates["CSIMigrationAWS"] = "true"
}
}
// Set systemd as the default cgroup driver for kubelet

View File

@@ -66,10 +66,6 @@ func (b *KubeSchedulerOptionsBuilder) BuildOptions(o *kops.Cluster) error {
if _, found := config.FeatureGates["InTreePluginAWSUnregister"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.31") {
config.FeatureGates["InTreePluginAWSUnregister"] = "true"
}
if _, found := config.FeatureGates["CSIMigrationAWS"]; !found && b.ControlPlaneKubernetesVersion().IsLT("1.27") {
config.FeatureGates["CSIMigrationAWS"] = "true"
}
}
return nil
}

View File

@@ -95,12 +95,7 @@ func FindCNIAssets(ig model.InstanceGroup, assetBuilder *assets.AssetBuilder, ar
cniAssetURL = defaultCNIAssetAmd64K8s_30
case ig.KubernetesVersion().IsGTE("1.29"):
cniAssetURL = defaultCNIAssetAmd64K8s_29
case ig.KubernetesVersion().IsGTE("1.27"):
cniAssetURL = defaultCNIAssetAmd64K8s_27
default:
cniAssetURL = defaultCNIAssetAmd64K8s_22
}
klog.V(2).Infof("Adding default ARM64 CNI plugin binaries asset: %s", cniAssetURL)
case architectures.ArchitectureArm64:
switch {
case ig.KubernetesVersion().IsGTE("1.32"):
@@ -111,16 +106,17 @@ func FindCNIAssets(ig model.InstanceGroup, assetBuilder *assets.AssetBuilder, ar
cniAssetURL = defaultCNIAssetArm64K8s_30
case ig.KubernetesVersion().IsGTE("1.29"):
cniAssetURL = defaultCNIAssetArm64K8s_29
case ig.KubernetesVersion().IsGTE("1.27"):
cniAssetURL = defaultCNIAssetArm64K8s_27
default:
cniAssetURL = defaultCNIAssetArm64K8s_22
}
klog.V(2).Infof("Adding default AMD64 CNI plugin binaries asset: %s", cniAssetURL)
default:
return nil, fmt.Errorf("unknown arch for CNI plugin binaries asset: %s", arch)
}
if cniAssetURL == "" {
return nil, fmt.Errorf("unknown CNI plugin binaries asset: %s", arch)
} else {
klog.V(2).Infof("Adding CNI plugin binaries asset: %s", cniAssetURL)
}
u, err := url.Parse(cniAssetURL)
if err != nil {
return nil, fmt.Errorf("unable to parse CNI plugin binaries asset URL %q: %v", cniAssetURL, err)
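Where the old code fell back to a 1.27 bundle or the generic 1.22 one, the rewritten selection leaves cniAssetURL empty for anything below 1.29, and the new check below the switch fails fast instead. A sketch of the resulting flow for amd64 (the 1.32 constant name is cut off by the hunk and assumed here):

switch {
case ig.KubernetesVersion().IsGTE("1.32"):
    cniAssetURL = defaultCNIAssetAmd64K8s_32 // name assumed; truncated in the hunk
case ig.KubernetesVersion().IsGTE("1.30"):
    cniAssetURL = defaultCNIAssetAmd64K8s_30
case ig.KubernetesVersion().IsGTE("1.29"):
    cniAssetURL = defaultCNIAssetAmd64K8s_29
}
if cniAssetURL == "" {
    return nil, fmt.Errorf("unknown CNI plugin binaries asset: %s", arch)
} else {
    klog.V(2).Infof("Adding CNI plugin binaries asset: %s", cniAssetURL)
}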

View File

@@ -59,12 +59,12 @@ func Test_FindCNIAssetFromEnvironmentVariable(t *testing.T) {
}
}
func Test_FindCNIAssetFromDefaults122(t *testing.T) {
desiredCNIVersionURL := "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz"
desiredCNIVersionHash := "sha256:962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7"
func Test_FindCNIAssetFromDefaults134(t *testing.T) {
desiredCNIVersionURL := "https://storage.googleapis.com/k8s-artifacts-cni/release/v1.6.1/cni-plugins-linux-amd64-v1.6.1.tgz"
desiredCNIVersionHash := "sha256:2503ce29ac445715ebe146073f45468153f9e28f45fa173cb060cfd9e735f563"
cluster := &api.Cluster{}
cluster.Spec.KubernetesVersion = "v1.22.0"
cluster.Spec.KubernetesVersion = "v1.34.0"
ig := &api.InstanceGroup{}

View File

@@ -33,12 +33,6 @@ func (t *Tester) setSkipRegexFlag() error {
return nil
}
kopsVersion, err := t.getKopsVersion()
if err != nil {
return err
}
isPre28 := kopsVersion < "1.28"
cluster, err := t.getKopsCluster()
if err != nil {
return err
@@ -54,13 +48,6 @@ func (t *Tester) setSkipRegexFlag() error {
skipRegex += "|blackbox.*should.not.be.able.to.pull.image.from.invalid.registry"
skipRegex += "|blackbox.*should.be.able.to.pull.from.private.registry.with.secret"
if !isPre28 {
// K8s 1.28 promoted ProxyTerminatingEndpoints to GA, but it has limited CNI support
// https://github.com/kubernetes/kubernetes/pull/117718
// https://github.com/cilium/cilium/issues/27358
skipRegex += "|fallback.to.local.terminating.endpoints.when.there.are.no.ready.endpoints.with.externalTrafficPolicy.Local"
}
networking := cluster.Spec.LegacyNetworking
switch {
case networking.Kubenet != nil, networking.Canal != nil, networking.Cilium != nil:
@@ -92,21 +79,6 @@ func (t *Tester) setSkipRegexFlag() error {
skipRegex += "|Services.should.implement.NodePort.and.HealthCheckNodePort.correctly.when.ExternalTrafficPolicy.changes"
}
if isPre28 {
// These may be fixed in Cilium 1.13 but skipping for now
skipRegex += "|Service.with.multiple.ports.specified.in.multiple.EndpointSlices"
// https://github.com/cilium/cilium/issues/18241
skipRegex += "|Services.should.create.endpoints.for.unready.pods"
skipRegex += "|Services.should.be.able.to.connect.to.terminating.and.unready.endpoints.if.PublishNotReadyAddresses.is.true"
}
if k8sVersion.Minor < 27 {
// Partially implemented in Cilium 1.13 but kops doesn't enable it
// Ref: https://github.com/cilium/cilium/pull/20033
// K8s 1.27+ added [Serial] to the test case, which is skipped by default
// Ref: https://github.com/kubernetes/kubernetes/pull/113335
skipRegex += "|should.create.a.Pod.with.SCTP.HostPort"
}
if k8sVersion.Minor < 35 {
// < 35 so we revisit this in future
// This test checks for kube-proxy on port 10249 (`127.0.0.1:10249/proxyMode`)

View File

@@ -1,94 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
name: minimal.example.com
spec:
api:
loadBalancer:
class: Network
type: Public
authorization:
rbac: {}
channel: stable
cloudProvider: aws
configBase: memfs://tests/minimal.example.com
etcdClusters:
- cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: control-plane-us-test-1a
name: a
manager:
backupRetentionDays: 90
memoryRequest: 100Mi
name: main
- cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: control-plane-us-test-1a
name: a
manager:
backupRetentionDays: 90
memoryRequest: 100Mi
name: events
iam:
allowContainerRegistry: true
legacy: false
kubelet:
anonymousAuth: false
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: v1.27.0
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 172.20.0.0/16
name: us-test-1a
type: Public
zone: us-test-1a
topology:
dns:
type: None
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: control-plane-us-test-1a
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-20250617
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: nodes-us-test-1a
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-20250617
machineType: t2.medium
maxSize: 1
minSize: 1
role: Node
subnets:
- us-test-1a

View File

@@ -1,6 +0,0 @@
ClusterName: minimal.example.com
Zones:
- us-test-1a
CloudProvider: aws
Networking: cni
KubernetesVersion: v1.27.0

View File

@@ -1,94 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
name: minimal.example.com
spec:
api:
loadBalancer:
class: Network
type: Public
authorization:
rbac: {}
channel: stable
cloudProvider: aws
configBase: memfs://tests/minimal.example.com
etcdClusters:
- cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: control-plane-us-test-1a
name: a
manager:
backupRetentionDays: 90
memoryRequest: 100Mi
name: main
- cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: control-plane-us-test-1a
name: a
manager:
backupRetentionDays: 90
memoryRequest: 100Mi
name: events
iam:
allowContainerRegistry: true
legacy: false
kubelet:
anonymousAuth: false
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: v1.28.0
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 172.20.0.0/16
name: us-test-1a
type: Public
zone: us-test-1a
topology:
dns:
type: None
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: control-plane-us-test-1a
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-20250617
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: nodes-us-test-1a
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-amd64-server-20250617
machineType: t2.medium
maxSize: 1
minSize: 1
role: Node
subnets:
- us-test-1a

View File

@@ -1,6 +0,0 @@
ClusterName: minimal.example.com
Zones:
- us-test-1a
CloudProvider: aws
Networking: cni
KubernetesVersion: v1.28.0

View File

@@ -1 +0,0 @@
{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]}

View File

@@ -1 +0,0 @@
{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}}

View File

@@ -1 +0,0 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]}

View File

@@ -1 +0,0 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]}

View File

@@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@@ -1,285 +0,0 @@
{
"Statement": [
{
"Action": "ec2:AttachVolume",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com",
"aws:ResourceTag/k8s.io/role/master": "1"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-read-bucket/tests/minimal.example.com/*"
},
{
"Action": [
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:GetObject",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-write-bucket/tests/minimal.example.com/backups/etcd/main/*"
},
{
"Action": [
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:GetObject",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-write-bucket/tests/minimal.example.com/backups/etcd/events/*"
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-write-bucket"
]
},
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:GetHostedZone",
"route53:ListResourceRecordSets"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "minimal.example.com",
"ec2:CreateAction": [
"CreateVolume",
"CreateSnapshot"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:snapshot/*",
"arn:aws-test:ec2:*:*:volume/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:snapshot/*",
"arn:aws-test:ec2:*:*:volume/*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "minimal.example.com",
"ec2:CreateAction": [
"CreateSecurityGroup"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:security-group/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:security-group/*"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeScalingActivities",
"autoscaling:DescribeTags",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeImages",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeLaunchTemplateVersions",
"ec2:DescribeRegions",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVolumes",
"ec2:DescribeVolumesModifications",
"ec2:DescribeVpcs",
"ec2:GetInstanceTypesFromInstanceRequirements",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:DescribeRepositories",
"ecr:GetAuthorizationToken",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:ListImages",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroupAttributes",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"iam:CreateServiceLinkedRole",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:CreateGrant",
"kms:Decrypt",
"kms:DescribeKey",
"kms:Encrypt",
"kms:GenerateDataKey*",
"kms:GenerateRandom",
"kms:ReEncrypt*",
"sqs:DeleteMessage",
"sqs:ReceiveMessage"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"autoscaling:CompleteLifecycleAction",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyVolume",
"ec2:RevokeSecurityGroupIngress",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:ModifyTargetGroup",
"elasticloadbalancing:ModifyTargetGroupAttributes",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
],
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:CreateSecurityGroup",
"ec2:CreateSnapshot",
"ec2:CreateVolume",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateTargetGroup"
],
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "ec2:CreateSecurityGroup",
"Effect": "Allow",
"Resource": "arn:aws-test:ec2:*:*:vpc/*"
}
],
"Version": "2012-10-17"
}

View File

@@ -1,37 +0,0 @@
{
"Statement": [
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingInstances",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:DescribeRepositories",
"ecr:GetAuthorizationToken",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:ListImages",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}

View File

@@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@@ -1,134 +0,0 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
echo "== Downloading $1 with hash $2 from $3 =="
local -r file="$1"
local -r hash="$2"
local -a urls
IFS=, read -r -a urls <<< "$3"
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O ${file} --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "== Downloading ${url} using ${cmd} =="
if ! (${cmd} "${url}"); then
echo "== Failed to download ${url} using ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Failed to validate hash for ${url} =="
rm -f "${file}"
else
echo "== Downloaded ${url} with hash ${hash} =="
return 0
fi
done
done
echo "== All downloads failed; sleeping before retrying =="
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== File ${file} is corrupted; hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "== Running nodeup =="
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "== Failed to initialize the machine ID; ensure machine-id configured =="
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ClusterName: minimal.example.com
ConfigBase: memfs://tests/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: ControlPlane
NodeupConfigHash: Sa/hgUyUuopO4NABDhnhbu5FTQOM6uffmdWade1vbVw=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -1,157 +0,0 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
echo "== Downloading $1 with hash $2 from $3 =="
local -r file="$1"
local -r hash="$2"
local -a urls
IFS=, read -r -a urls <<< "$3"
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O ${file} --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "== Downloading ${url} using ${cmd} =="
if ! (${cmd} "${url}"); then
echo "== Failed to download ${url} using ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Failed to validate hash for ${url} =="
rm -f "${file}"
else
echo "== Downloaded ${url} with hash ${hash} =="
return 0
fi
done
done
echo "== All downloads failed; sleeping before retrying =="
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== File ${file} is corrupted; hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "== Running nodeup =="
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "== Failed to initialize the machine ID; ensure machine-id configured =="
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ClusterName: minimal.example.com
ConfigServer:
CACertificates: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
servers:
- https://kops-controller.internal.minimal.example.com:3988/
InstanceGroupName: nodes
InstanceGroupRole: Node
NodeupConfigHash: 4saNjAnGATsLWDIyb+PJfD0iv7Uryq6SEaY0x/JwRq8=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@@ -1,225 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
name: minimal.example.com
spec:
api:
dns: {}
authorization:
rbac: {}
channel: stable
cloudConfig:
awsEBSCSIDriver:
version: v1.47.0
manageStorageClasses: true
cloudControllerManager:
allocateNodeCIDRs: true
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: registry.k8s.io/provider-aws/cloud-controller-manager:v1.27.9
leaderElection:
leaderElect: true
cloudProvider: aws
clusterDNSDomain: cluster.local
configBase: memfs://tests/minimal.example.com
containerd:
logLevel: info
runc:
version: 1.3.0
version: 1.7.28
dnsZone: Z1AFAKE1ZON3YO
etcdClusters:
- backups:
backupStore: memfs://tests/minimal.example.com/backups/etcd/main
cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-us-test-1a
name: a
manager:
backupRetentionDays: 90
memoryRequest: 100Mi
name: main
version: 3.5.21
- backups:
backupStore: memfs://tests/minimal.example.com/backups/etcd/events
cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-us-test-1a
name: a
manager:
backupRetentionDays: 90
memoryRequest: 100Mi
name: events
version: 3.5.21
externalDns:
provider: dns-controller
iam:
allowContainerRegistry: true
legacy: false
keyStore: memfs://tests/minimal.example.com/pki
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: external
enableAdmissionPlugins:
- DefaultStorageClass
- DefaultTolerationSeconds
- LimitRanger
- MutatingAdmissionWebhook
- NamespaceLifecycle
- NodeRestriction
- ResourceQuota
- RuntimeClass
- ServiceAccount
- ValidatingAdmissionPolicy
- ValidatingAdmissionWebhook
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.27.2
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: external
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
featureGates:
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.27.2
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeDNS:
cacheMaxConcurrent: 150
cacheMaxSize: 1000
cpuRequest: 100m
domain: cluster.local
memoryLimit: 170Mi
memoryRequest: 70Mi
nodeLocalDNS:
cpuRequest: 25m
enabled: false
image: registry.k8s.io/dns/k8s-dns-node-cache:1.26.0
memoryRequest: 5Mi
provider: CoreDNS
serverIP: 100.64.0.10
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.27.2
logLevel: 2
kubeScheduler:
featureGates:
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.27.2
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
podInfraContainerImage: registry.k8s.io/pause:3.9
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: 1.27.2
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
podInfraContainerImage: registry.k8s.io/pause:3.9
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nodeTerminationHandler:
cpuRequest: 50m
deleteSQSMsgIfNodeNotFound: false
enableRebalanceDraining: false
enableRebalanceMonitoring: false
enableScheduledEventDraining: true
enableSpotInterruptionDraining: true
enabled: true
excludeFromLoadBalancers: true
managedASGTag: aws-node-termination-handler/managed
memoryRequest: 64Mi
podTerminationGracePeriod: -1
prometheusEnable: false
taintNode: false
version: v1.22.0
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
secretStore: memfs://tests/minimal.example.com/secrets
serviceClusterIPRange: 100.64.0.0/13
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
topology:
dns:
type: Public

View File

@@ -1,4 +0,0 @@
{
"memberCount": 1,
"etcdVersion": "3.5.21"
}

View File

@@ -1,4 +0,0 @@
{
"memberCount": 1,
"etcdVersion": "3.5.21"
}

View File

@@ -1,138 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-events
name: etcd-manager-events
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /ko-app/etcd-manager
--backup-store=memfs://tests/minimal.example.com/backups/etcd/events --client-urls=https://__name__:4002
--cluster-name=etcd-events --containerized=true --dns-suffix=.internal.minimal.example.com
--grpc-port=3997 --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995
--v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
--volume-tag=k8s.io/role/control-plane=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
env:
- name: ETCD_MANAGER_DAILY_BACKUPS_RETENTION
value: 90d
image: registry.k8s.io/etcd-manager/etcd-manager-slim:v3.0.20250803
name: etcd-manager
resources:
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /opt
name: opt
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
initContainers:
- args:
- --target-dir=/opt/kops-utils/
- --src=/ko-app/kops-utils-cp
command:
- /ko-app/kops-utils-cp
image: registry.k8s.io/kops/kops-utils-cp:1.34.0-alpha.1
name: kops-utils-cp
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --target-dir=/opt/etcd-v3.4.13
- --src=/usr/local/bin/etcd
- --src=/usr/local/bin/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/etcd:v3.4.13
name: init-etcd-3-4-13
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --target-dir=/opt/etcd-v3.5.21
- --src=/usr/local/bin/etcd
- --src=/usr/local/bin/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/etcd:v3.5.21
name: init-etcd-3-5-21
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --symlink
- --target-dir=/opt/etcd-v3.4.3
- --src=/opt/etcd-v3.4.13/etcd
- --src=/opt/etcd-v3.4.13/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/kops/kops-utils-cp:1.34.0-alpha.1
name: init-etcd-symlinks-3-4-13
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --symlink
- --target-dir=/opt/etcd-v3.5.0
- --target-dir=/opt/etcd-v3.5.1
- --target-dir=/opt/etcd-v3.5.13
- --target-dir=/opt/etcd-v3.5.17
- --target-dir=/opt/etcd-v3.5.3
- --target-dir=/opt/etcd-v3.5.4
- --target-dir=/opt/etcd-v3.5.6
- --target-dir=/opt/etcd-v3.5.7
- --target-dir=/opt/etcd-v3.5.9
- --src=/opt/etcd-v3.5.21/etcd
- --src=/opt/etcd-v3.5.21/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/kops/kops-utils-cp:1.34.0-alpha.1
name: init-etcd-symlinks-3-5-21
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-events
type: DirectoryOrCreate
name: pki
- emptyDir: {}
name: opt
- hostPath:
path: /var/log/etcd-events.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@@ -1,138 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-main
name: etcd-manager-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /ko-app/etcd-manager
--backup-store=memfs://tests/minimal.example.com/backups/etcd/main --client-urls=https://__name__:4001
--cluster-name=etcd --containerized=true --dns-suffix=.internal.minimal.example.com
--grpc-port=3996 --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994
--v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
--volume-tag=k8s.io/role/control-plane=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
env:
- name: ETCD_MANAGER_DAILY_BACKUPS_RETENTION
value: 90d
image: registry.k8s.io/etcd-manager/etcd-manager-slim:v3.0.20250803
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /opt
name: opt
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
initContainers:
- args:
- --target-dir=/opt/kops-utils/
- --src=/ko-app/kops-utils-cp
command:
- /ko-app/kops-utils-cp
image: registry.k8s.io/kops/kops-utils-cp:1.34.0-alpha.1
name: kops-utils-cp
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --target-dir=/opt/etcd-v3.4.13
- --src=/usr/local/bin/etcd
- --src=/usr/local/bin/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/etcd:v3.4.13
name: init-etcd-3-4-13
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --target-dir=/opt/etcd-v3.5.21
- --src=/usr/local/bin/etcd
- --src=/usr/local/bin/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/etcd:v3.5.21
name: init-etcd-3-5-21
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --symlink
- --target-dir=/opt/etcd-v3.4.3
- --src=/opt/etcd-v3.4.13/etcd
- --src=/opt/etcd-v3.4.13/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/kops/kops-utils-cp:1.34.0-alpha.1
name: init-etcd-symlinks-3-4-13
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --symlink
- --target-dir=/opt/etcd-v3.5.0
- --target-dir=/opt/etcd-v3.5.1
- --target-dir=/opt/etcd-v3.5.13
- --target-dir=/opt/etcd-v3.5.17
- --target-dir=/opt/etcd-v3.5.3
- --target-dir=/opt/etcd-v3.5.4
- --target-dir=/opt/etcd-v3.5.6
- --target-dir=/opt/etcd-v3.5.7
- --target-dir=/opt/etcd-v3.5.9
- --src=/opt/etcd-v3.5.21/etcd
- --src=/opt/etcd-v3.5.21/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/kops/kops-utils-cp:1.34.0-alpha.1
name: init-etcd-symlinks-3-5-21
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-main
type: DirectoryOrCreate
name: pki
- emptyDir: {}
name: opt
- hostPath:
path: /var/log/etcd.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@@ -1,33 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
spec:
containers:
- args:
- --ca-cert=/secrets/ca.crt
- --client-cert=/secrets/client.crt
- --client-key=/secrets/client.key
image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.34.0-alpha.1
livenessProbe:
httpGet:
host: 127.0.0.1
path: /.kube-apiserver-healthcheck/healthz
port: 3990
initialDelaySeconds: 5
timeoutSeconds: 5
name: healthcheck
resources: {}
securityContext:
runAsNonRoot: true
runAsUser: 10012
volumeMounts:
- mountPath: /secrets
name: healthcheck-secrets
readOnly: true
volumes:
- hostPath:
path: /etc/kubernetes/kube-apiserver-healthcheck/secrets
type: Directory
name: healthcheck-secrets
status: {}

View File

@@ -1,237 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: aws-cloud-controller.addons.k8s.io
k8s-app: aws-cloud-controller-manager
name: aws-cloud-controller-manager
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: aws-cloud-controller-manager
template:
metadata:
creationTimestamp: null
labels:
k8s-app: aws-cloud-controller-manager
kops.k8s.io/managed-by: kops
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- args:
- --allocate-node-cidrs=true
- --cluster-cidr=100.96.0.0/11
- --cluster-name=minimal.example.com
- --configure-cloud-routes=false
- --leader-elect=true
- --v=2
- --cloud-provider=aws
- --use-service-account-credentials=true
- --cloud-config=/etc/kubernetes/cloud.config
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: registry.k8s.io/provider-aws/cloud-controller-manager:v1.27.9
imagePullPolicy: IfNotPresent
name: aws-cloud-controller-manager
resources:
requests:
cpu: 200m
volumeMounts:
- mountPath: /etc/kubernetes/cloud.config
name: cloudconfig
readOnly: true
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccountName: aws-cloud-controller-manager
tolerations:
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
- effect: NoSchedule
key: node.kubernetes.io/not-ready
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
- effect: NoSchedule
key: node-role.kubernetes.io/master
volumes:
- hostPath:
path: /etc/kubernetes/cloud.config
type: ""
name: cloudconfig
updateStrategy:
type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: aws-cloud-controller.addons.k8s.io
name: aws-cloud-controller-manager
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: aws-cloud-controller.addons.k8s.io
name: cloud-controller-manager:apiserver-authentication-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: aws-cloud-controller-manager
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: aws-cloud-controller.addons.k8s.io
name: system:cloud-controller-manager
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- '*'
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- ""
resources:
- services
verbs:
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services/status
verbs:
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- get
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- update
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resourceNames:
- node-controller
- service-controller
- route-controller
resources:
- serviceaccounts/token
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: aws-cloud-controller.addons.k8s.io
name: system:cloud-controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:cloud-controller-manager
subjects:
- apiGroup: ""
kind: ServiceAccount
name: aws-cloud-controller-manager
namespace: kube-system

View File

@@ -1,113 +0,0 @@
kind: Addons
metadata:
creationTimestamp: null
name: bootstrap
spec:
addons:
- id: k8s-1.16
manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml
manifestHash: 44cac7d5e9087cebd7acf1ef581425bbceb93a95b4b2d89d0cd3082a51085f71
name: kops-controller.addons.k8s.io
needsRollingUpdate: control-plane
selector:
k8s-addon: kops-controller.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: coredns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 776ca39fa0034ba09a4335cf3ee1bfa9c136407aaed07223555934e6907edd91
name: coredns.addons.k8s.io
selector:
k8s-addon: coredns.addons.k8s.io
version: 9.99.0
- id: k8s-1.9
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81
name: kubelet-api.rbac.addons.k8s.io
selector:
k8s-addon: kubelet-api.rbac.addons.k8s.io
version: 9.99.0
- manifest: limit-range.addons.k8s.io/v1.5.0.yaml
manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2
name: limit-range.addons.k8s.io
selector:
k8s-addon: limit-range.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 4547fd9281fdef75bb50e82a90136a721fe7bd01a42d58dbe837a422cf54466d
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 9.99.0
- id: k8s-1.11
manifest: node-termination-handler.aws/k8s-1.11.yaml
manifestHash: 1d0968eea99ca0d78400867a76af8b1dfe93ef2ff9640f0d755b21b2db7fec41
name: node-termination-handler.aws
prune:
kinds:
- kind: ConfigMap
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- kind: Service
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- kind: ServiceAccount
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: admissionregistration.k8s.io
kind: MutatingWebhookConfiguration
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: admissionregistration.k8s.io
kind: ValidatingWebhookConfiguration
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: apps
kind: DaemonSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: apps
kind: Deployment
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: StatefulSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: policy
kind: PodDisruptionBudget
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: rbac.authorization.k8s.io
kind: ClusterRole
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: rbac.authorization.k8s.io
kind: ClusterRoleBinding
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: rbac.authorization.k8s.io
kind: Role
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: rbac.authorization.k8s.io
kind: RoleBinding
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
selector:
k8s-addon: node-termination-handler.aws
version: 9.99.0
- id: v1.15.0
manifest: storage-aws.addons.k8s.io/v1.15.0.yaml
manifestHash: 4e2cda50cd5048133aad1b5e28becb60f4629d3f9e09c514a2757c27998b4200
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.18
manifest: aws-cloud-controller.addons.k8s.io/k8s-1.18.yaml
manifestHash: fdcbb173585218f08cf29f1fe3ca94cdc47b8b85a0f722db8f16eb25dccc7e97
name: aws-cloud-controller.addons.k8s.io
selector:
k8s-addon: aws-cloud-controller.addons.k8s.io
version: 9.99.0
- id: k8s-1.17
manifest: aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml
manifestHash: 93c7269843ed2f8acef3f95774cf1f1d9851d88d157e0b0da04336741694393f
name: aws-ebs-csi-driver.addons.k8s.io
selector:
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
version: 9.99.0

View File

@@ -1,383 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/cluster-service: "true"
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
data:
Corefile: |-
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local. in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: coredns
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: registry.k8s.io/coredns/coredns:v1.11.4
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- configMap:
name: coredns
name: config-volume
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: kube-dns
namespace: kube-system
resourceVersion: "0"
spec:
clusterIP: 100.64.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
selector:
k8s-app: kube-dns
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: kube-dns
namespace: kube-system
spec:
maxUnavailable: 50%
selector:
matchLabels:
k8s-app: kube-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- replicationcontrollers/scale
verbs:
- get
- update
- apiGroups:
- extensions
- apps
resources:
- deployments/scale
- replicasets/scale
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: coredns-autoscaler
kubernetes.io/cluster-service: "true"
name: coredns-autoscaler
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
creationTimestamp: null
labels:
k8s-app: coredns-autoscaler
kops.k8s.io/managed-by: kops
spec:
containers:
- command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
image: registry.k8s.io/cpa/cluster-proportional-autoscaler:v1.9.0
name: autoscaler
resources:
requests:
cpu: 20m
memory: 10Mi
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns-autoscaler
tolerations:
- key: CriticalAddonsOnly
operator: Exists
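
The coredns-autoscaler Deployment above sizes CoreDNS via cluster-proportional-autoscaler's linear mode, with --default-params of coresPerReplica 256, nodesPerReplica 16, and preventSinglePointFailure. A minimal Go sketch of that ladder, assuming the documented formula replicas = max(ceil(cores/coresPerReplica), ceil(nodes/nodesPerReplica)) with a floor of two replicas once the cluster has more than one node (a sketch of the behavior, not the vendored autoscaler code):

package main

import (
	"fmt"
	"math"
)

// linearReplicas sketches the "linear" scaling mode configured above:
// the larger of the core-based and node-based replica counts, with a
// two-replica floor when preventSinglePointFailure is set and the
// cluster has more than one node.
func linearReplicas(cores, nodes int, coresPerReplica, nodesPerReplica float64, preventSinglePointFailure bool) int {
	byCores := int(math.Ceil(float64(cores) / coresPerReplica))
	byNodes := int(math.Ceil(float64(nodes) / nodesPerReplica))
	replicas := byCores
	if byNodes > replicas {
		replicas = byNodes
	}
	if preventSinglePointFailure && nodes > 1 && replicas < 2 {
		replicas = 2
	}
	if replicas < 1 {
		replicas = 1
	}
	return replicas
}

func main() {
	// 20 nodes with 4 cores each: max(ceil(80/256), ceil(20/16)) = 2.
	fmt.Println(linearReplicas(80, 20, 256, 16, true))
}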


@ -1,138 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.34.0-alpha.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
strategy:
type: Recreate
template:
metadata:
creationTimestamp: null
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
kops.k8s.io/managed-by: kops
version: v1.34.0-alpha.1
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- args:
- --watch-ingress=false
- --dns=aws-route53
- --zone=*/Z1AFAKE1ZON3YO
- --internal-ipv4
- --zone=*/*
- -v=2
command: null
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: registry.k8s.io/kops/dns-controller:1.34.0-alpha.1
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
dnsPolicy: Default
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccount: dns-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller


@ -1,227 +0,0 @@
apiVersion: v1
data:
config.yaml: |
{"clusterName":"minimal.example.com","cloud":"aws","configBase":"memfs://tests/minimal.example.com","secretStore":"memfs://tests/minimal.example.com/secrets","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.minimal.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.34.0-alpha.1
name: kops-controller
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kops-controller
template:
metadata:
annotations:
dns.alpha.kubernetes.io/internal: kops-controller.internal.minimal.example.com
creationTimestamp: null
labels:
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
kops.k8s.io/managed-by: kops
version: v1.34.0-alpha.1
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: kops.k8s.io/kops-controller-pki
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- key: kops.k8s.io/kops-controller-pki
operator: Exists
containers:
- args:
- --v=2
- --conf=/etc/kubernetes/kops-controller/config/config.yaml
command: null
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KOPS_RUN_TOO_NEW_VERSION
value: "1"
image: registry.k8s.io/kops/kops-controller:1.34.0-alpha.1
name: kops-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
runAsUser: 10011
volumeMounts:
- mountPath: /etc/kubernetes/kops-controller/config/
name: kops-controller-config
- mountPath: /etc/kubernetes/kops-controller/pki/
name: kops-controller-pki
dnsPolicy: Default
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccount: kops-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
volumes:
- configMap:
name: kops-controller
name: kops-controller-config
- hostPath:
path: /etc/kubernetes/kops-controller/
type: Directory
name: kops-controller-pki
updateStrategy:
type: OnDelete
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
- create
- apiGroups:
- ""
- coordination.k8s.io
resourceNames:
- kops-controller-leader
resources:
- configmaps
- leases
verbs:
- get
- list
- watch
- patch
- update
- delete
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller
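
The config.yaml embedded in the kops-controller ConfigMap above is a single JSON document. A pared-down Go sketch of its shape, modeling only the keys visible in the fixture rather than the full upstream kops-controller config struct:

package main

import (
	"encoding/json"
	"fmt"
)

// awsProvider and friends mirror the JSON keys shown in the ConfigMap
// above; the real kops-controller config has more fields.
type awsProvider struct {
	NodesRoles []string `json:"nodesRoles"`
	Region     string   `json:"Region"`
}

type serverConfig struct {
	Listen                string                 `json:"Listen"`
	Provider              map[string]awsProvider `json:"provider"`
	ServerKeyPath         string                 `json:"serverKeyPath"`
	ServerCertificatePath string                 `json:"serverCertificatePath"`
	CABasePath            string                 `json:"caBasePath"`
	SigningCAs            []string               `json:"signingCAs"`
	CertNames             []string               `json:"certNames"`
}

type controllerConfig struct {
	ClusterName string       `json:"clusterName"`
	Cloud       string       `json:"cloud"`
	ConfigBase  string       `json:"configBase"`
	SecretStore string       `json:"secretStore"`
	Server      serverConfig `json:"server"`
}

func main() {
	raw := `{"clusterName":"minimal.example.com","cloud":"aws","server":{"Listen":":3988","signingCAs":["kubernetes-ca"]}}`
	var cfg controllerConfig
	if err := json.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.ClusterName, cfg.Server.Listen, cfg.Server.SigningCAs)
}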


@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kubelet-api.rbac.addons.k8s.io
name: kops:system:kubelet-api-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet-api


@ -1,15 +0,0 @@
apiVersion: v1
kind: LimitRange
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: limit-range.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: limit-range.addons.k8s.io
name: limits
namespace: default
spec:
limits:
- defaultRequest:
cpu: 100m
type: Container


@ -1,285 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
app.kubernetes.io/part-of: aws-node-termination-handler
app.kubernetes.io/version: v1.22.0
k8s-addon: node-termination-handler.aws
k8s-app: aws-node-termination-handler
name: aws-node-termination-handler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
app.kubernetes.io/part-of: aws-node-termination-handler
app.kubernetes.io/version: v1.22.0
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- get
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
- create
- apiGroups:
- extensions
resources:
- daemonsets
verbs:
- get
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- get
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
app.kubernetes.io/part-of: aws-node-termination-handler
app.kubernetes.io/version: v1.22.0
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: aws-node-termination-handler
subjects:
- kind: ServiceAccount
name: aws-node-termination-handler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
app.kubernetes.io/part-of: aws-node-termination-handler
app.kubernetes.io/version: v1.22.0
k8s-addon: node-termination-handler.aws
k8s-app: aws-node-termination-handler
name: aws-node-termination-handler
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kubernetes.io/os: linux
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
k8s-app: aws-node-termination-handler
kops.k8s.io/managed-by: kops
kops.k8s.io/nth-mode: sqs
kubernetes.io/os: linux
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_PROBES_SERVER
value: "true"
- name: PROBES_SERVER_PORT
value: "8080"
- name: PROBES_SERVER_ENDPOINT
value: /healthz
- name: LOG_LEVEL
value: info
- name: JSON_LOGGING
value: "true"
- name: LOG_FORMAT_VERSION
value: "2"
- name: ENABLE_PROMETHEUS_SERVER
value: "false"
- name: PROMETHEUS_SERVER_PORT
value: "9092"
- name: CHECK_TAG_BEFORE_DRAINING
value: "true"
- name: MANAGED_TAG
value: aws-node-termination-handler/managed
- name: USE_PROVIDER_ID
value: "true"
- name: DRY_RUN
value: "false"
- name: CORDON_ONLY
value: "false"
- name: TAINT_NODE
value: "false"
- name: EXCLUDE_FROM_LOAD_BALANCERS
value: "true"
- name: DELETE_LOCAL_DATA
value: "true"
- name: IGNORE_DAEMON_SETS
value: "true"
- name: POD_TERMINATION_GRACE_PERIOD
value: "-1"
- name: NODE_TERMINATION_GRACE_PERIOD
value: "120"
- name: EMIT_KUBERNETES_EVENTS
value: "true"
- name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS
value: "-1"
- name: ENABLE_SQS_TERMINATION_DRAINING
value: "true"
- name: QUEUE_URL
value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth
- name: DELETE_SQS_MSG_IF_NODE_NOT_FOUND
value: "false"
- name: WORKERS
value: "10"
image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.22.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
name: aws-node-termination-handler
ports:
- containerPort: 8080
name: liveness-probe
protocol: TCP
- containerPort: 9092
name: metrics
protocol: TCP
resources:
requests:
cpu: 50m
memory: 64Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 1000
serviceAccountName: aws-node-termination-handler
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
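
The aws-node-termination-handler Deployment above declares two topologySpreadConstraints with maxSkew 1: best-effort across zones (ScheduleAnyway) and hard across hostnames (DoNotSchedule). A small Go sketch of the skew check behind them, assuming the standard definition of skew as a domain's matching-pod count minus the global minimum:

package main

import "fmt"

// skewAfterPlacement returns the topology skew that placing one more
// matching pod in the given domain would produce: that domain's new
// count minus the minimum count across all domains.
func skewAfterPlacement(counts map[string]int, domain string) int {
	counts[domain]++
	defer func() { counts[domain]-- }()
	min := -1
	for _, c := range counts {
		if min == -1 || c < min {
			min = c
		}
	}
	return counts[domain] - min
}

func main() {
	perHost := map[string]int{"node-a": 1, "node-b": 0}
	// With whenUnsatisfiable: DoNotSchedule and maxSkew 1, node-a
	// (skew 2) is rejected and the pod must land on node-b (skew 0).
	fmt.Println(skewAfterPlacement(perHost, "node-a") <= 1) // false
	fmt.Println(skewAfterPlacement(perHost, "node-b") <= 1) // true
}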


@ -1,118 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: default
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: gp2
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-ssd-1-17
parameters:
encrypted: "true"
type: gp2
provisioner: kubernetes.io/aws-ebs
volumeBindingMode: WaitForFirstConsumer
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-csi-1-21
parameters:
encrypted: "true"
type: gp3
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:aws-cloud-provider
subjects:
- kind: ServiceAccount
name: aws-cloud-provider
namespace: kube-system
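
Of the StorageClasses above, only kops-csi-1-21 carries storageclass.kubernetes.io/is-default-class: "true", making it the cluster default. A Go sketch of how a default class would be selected by that annotation (illustrative only, not the in-tree admission plugin):

package main

import "fmt"

// defaultStorageClass returns the class flagged as cluster default via
// the storageclass.kubernetes.io/is-default-class annotation, the way
// kops-csi-1-21 is flagged in the manifests above.
func defaultStorageClass(classes map[string]map[string]string) (string, bool) {
	for name, annotations := range classes {
		if annotations["storageclass.kubernetes.io/is-default-class"] == "true" {
			return name, true
		}
	}
	return "", false
}

func main() {
	classes := map[string]map[string]string{
		"gp2":           {"storageclass.kubernetes.io/is-default-class": "false"},
		"kops-ssd-1-17": {"storageclass.kubernetes.io/is-default-class": "false"},
		"kops-csi-1-21": {"storageclass.kubernetes.io/is-default-class": "true"},
	}
	name, ok := defaultStorageClass(classes)
	fmt.Println(name, ok) // kops-csi-1-21 true
}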


@ -1,332 +0,0 @@
APIServerConfig:
API:
dns: {}
publicName: api.minimal.example.com
ClusterDNSDomain: cluster.local
KubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: external
enableAdmissionPlugins:
- DefaultStorageClass
- DefaultTolerationSeconds
- LimitRanger
- MutatingAdmissionWebhook
- NamespaceLifecycle
- NodeRestriction
- ResourceQuota
- RuntimeClass
- ServiceAccount
- ValidatingAdmissionPolicy
- ValidatingAdmissionWebhook
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.27.2
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm
XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ==
-----END RSA PUBLIC KEY-----
-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF
Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ==
-----END RSA PUBLIC KEY-----
Assets:
amd64:
- a0d12afcab3b2836de4a427558d067bebdff040e9b306b0512c93d9d2a066579@https://dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubelet,https://cdn.dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubelet
- 4f38ee903f35b300d3b005a9c6bfb9a46a57f92e89ae602ef9c129b91dc6c5a5@https://dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubectl,https://cdn.dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubectl
- 7644623e4ec9ad443ab352a8a5800a5180ee28741288be805286ba72bb8e7164@https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.31.7/linux/amd64/ecr-credential-provider-linux-amd64
- f3a841324845ca6bf0d4091b4fc7f97e18a623172158b72fc3fdcdb9d42d2d37@https://storage.googleapis.com/k8s-artifacts-cni/release/v1.2.0/cni-plugins-linux-amd64-v1.2.0.tgz,https://github.com/containernetworking/plugins/releases/download/v1.2.0/cni-plugins-linux-amd64-v1.2.0.tgz
- 7a8c262deb63becc877e82d23749e4f99f4a17e8e660f9b8c257ca87a5c056b6@https://github.com/containerd/containerd/releases/download/v1.7.28/containerd-1.7.28-linux-amd64.tar.gz
- 028986516ab5646370edce981df2d8e8a8d12188deaf837142a02097000ae2f2@https://github.com/opencontainers/runc/releases/download/v1.3.0/runc.amd64
- f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64
- 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64
arm64:
- 810cd9a611e9f084e57c9ee466e33c324b2228d4249ff38c2588a0cc3224f10d@https://dl.k8s.io/release/v1.27.2/bin/linux/arm64/kubelet,https://cdn.dl.k8s.io/release/v1.27.2/bin/linux/arm64/kubelet
- 1b0966692e398efe71fe59f913eaec44ffd4468cc1acd00bf91c29fa8ff8f578@https://dl.k8s.io/release/v1.27.2/bin/linux/arm64/kubectl,https://cdn.dl.k8s.io/release/v1.27.2/bin/linux/arm64/kubectl
- 1980e3a038cb16da48a137743b31fb81de6c0b59fa06c206c2bc20ce0a52f849@https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.31.7/linux/arm64/ecr-credential-provider-linux-arm64
- 525e2b62ba92a1b6f3dc9612449a84aa61652e680f7ebf4eff579795fe464b57@https://storage.googleapis.com/k8s-artifacts-cni/release/v1.2.0/cni-plugins-linux-arm64-v1.2.0.tgz,https://github.com/containernetworking/plugins/releases/download/v1.2.0/cni-plugins-linux-arm64-v1.2.0.tgz
- 97457594ff8549cb82d664306593cafd3d2c781c706f9fffed885a46d8919bec@https://github.com/containerd/containerd/releases/download/v1.7.28/containerd-1.7.28-linux-arm64.tar.gz
- 85c5e4e4f72e442c8c17bac07527cd4f961ee48e4f2b71797f7533c94f4a52b9@https://github.com/opencontainers/runc/releases/download/v1.3.0/runc.arm64
- 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64
- 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64
CAs:
apiserver-aggregator-ca: |
-----BEGIN CERTIFICATE-----
MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX
DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX
WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk
CzMeMdr4
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX
DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN
QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW
HLtkTXH8
-----END CERTIFICATE-----
etcd-clients-ca: |
-----BEGIN CERTIFICATE-----
MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx
NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep
uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s
x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY
qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx
NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep
uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s
x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E
YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co=
-----END CERTIFICATE-----
etcd-manager-ca-events: |
-----BEGIN CERTIFICATE-----
MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN
MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H
g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6
CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O
sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs
GS/VUw==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN
MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL
DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW
LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE
hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV
cPfVNg==
-----END CERTIFICATE-----
etcd-manager-ca-main: |
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx
MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm
ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx
GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu
Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP
vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP
DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9
t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd
-----END CERTIFICATE-----
etcd-peers-ca-events: |
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx
MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY
xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O
Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB
DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW
03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh
cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI
J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb
-----END CERTIFICATE-----
etcd-peers-ca-main: |
-----BEGIN CERTIFICATE-----
MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3
MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG
SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA
aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf
OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt
uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3
MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG
SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt
naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC
qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K
G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo=
-----END CERTIFICATE-----
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
ClusterName: minimal.example.com
ControlPlaneConfig:
KubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: external
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
featureGates:
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.27.2
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
KubeScheduler:
featureGates:
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.27.2
leaderElection:
leaderElect: true
logLevel: 2
DNSZone: Z1AFAKE1ZON3YO
EtcdClusterNames:
- main
- events
FileAssets:
- content: |
apiVersion: kubescheduler.config.k8s.io/v1
clientConnection:
kubeconfig: /var/lib/kube-scheduler/kubeconfig
kind: KubeSchedulerConfiguration
path: /var/lib/kube-scheduler/config.yaml
Hooks:
- null
- null
InstallCNIAssets: true
KeypairIDs:
apiserver-aggregator-ca: "6980187172486667078076483355"
etcd-clients-ca: "6979622252718071085282986282"
etcd-manager-ca-events: "6982279354000777253151890266"
etcd-manager-ca-main: "6982279354000936168671127624"
etcd-peers-ca-events: "6982279353999767935825892873"
etcd-peers-ca-main: "6982279353998887468930183660"
kubernetes-ca: "6982820025135291416230495506"
service-account: "2"
KubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.27.2
logLevel: 2
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
nodeLabels:
kops.k8s.io/instancegroup: master-us-test-1a
kops.k8s.io/kops-controller-pki: ""
node-role.kubernetes.io/control-plane: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
podInfraContainerImage: registry.k8s.io/pause:3.9
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
taints:
- node-role.kubernetes.io/control-plane=:NoSchedule
KubernetesVersion: 1.27.2
Networking:
nonMasqueradeCIDR: 100.64.0.0/10
serviceClusterIPRange: 100.64.0.0/13
UpdatePolicy: automatic
channels:
- memfs://tests/minimal.example.com/addons/bootstrap-channel.yaml
configStore:
keypairs: memfs://tests/minimal.example.com/pki
secrets: memfs://tests/minimal.example.com/secrets
containerdConfig:
logLevel: info
runc:
version: 1.3.0
version: 1.7.28
etcdManifests:
- memfs://tests/minimal.example.com/manifests/etcd/main-master-us-test-1a.yaml
- memfs://tests/minimal.example.com/manifests/etcd/events-master-us-test-1a.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml
usesLegacyGossip: false
usesNoneDNS: false


@ -1,62 +0,0 @@
Assets:
amd64:
- a0d12afcab3b2836de4a427558d067bebdff040e9b306b0512c93d9d2a066579@https://dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubelet,https://cdn.dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubelet
- 4f38ee903f35b300d3b005a9c6bfb9a46a57f92e89ae602ef9c129b91dc6c5a5@https://dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubectl,https://cdn.dl.k8s.io/release/v1.27.2/bin/linux/amd64/kubectl
- 7644623e4ec9ad443ab352a8a5800a5180ee28741288be805286ba72bb8e7164@https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.31.7/linux/amd64/ecr-credential-provider-linux-amd64
- f3a841324845ca6bf0d4091b4fc7f97e18a623172158b72fc3fdcdb9d42d2d37@https://storage.googleapis.com/k8s-artifacts-cni/release/v1.2.0/cni-plugins-linux-amd64-v1.2.0.tgz,https://github.com/containernetworking/plugins/releases/download/v1.2.0/cni-plugins-linux-amd64-v1.2.0.tgz
- 7a8c262deb63becc877e82d23749e4f99f4a17e8e660f9b8c257ca87a5c056b6@https://github.com/containerd/containerd/releases/download/v1.7.28/containerd-1.7.28-linux-amd64.tar.gz
- 028986516ab5646370edce981df2d8e8a8d12188deaf837142a02097000ae2f2@https://github.com/opencontainers/runc/releases/download/v1.3.0/runc.amd64
arm64:
- 810cd9a611e9f084e57c9ee466e33c324b2228d4249ff38c2588a0cc3224f10d@https://dl.k8s.io/release/v1.27.2/bin/linux/arm64/kubelet,https://cdn.dl.k8s.io/release/v1.27.2/bin/linux/arm64/kubelet
- 1b0966692e398efe71fe59f913eaec44ffd4468cc1acd00bf91c29fa8ff8f578@https://dl.k8s.io/release/v1.27.2/bin/linux/arm64/kubectl,https://cdn.dl.k8s.io/release/v1.27.2/bin/linux/arm64/kubectl
- 1980e3a038cb16da48a137743b31fb81de6c0b59fa06c206c2bc20ce0a52f849@https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.31.7/linux/arm64/ecr-credential-provider-linux-arm64
- 525e2b62ba92a1b6f3dc9612449a84aa61652e680f7ebf4eff579795fe464b57@https://storage.googleapis.com/k8s-artifacts-cni/release/v1.2.0/cni-plugins-linux-arm64-v1.2.0.tgz,https://github.com/containernetworking/plugins/releases/download/v1.2.0/cni-plugins-linux-arm64-v1.2.0.tgz
- 97457594ff8549cb82d664306593cafd3d2c781c706f9fffed885a46d8919bec@https://github.com/containerd/containerd/releases/download/v1.7.28/containerd-1.7.28-linux-arm64.tar.gz
- 85c5e4e4f72e442c8c17bac07527cd4f961ee48e4f2b71797f7533c94f4a52b9@https://github.com/opencontainers/runc/releases/download/v1.3.0/runc.arm64
CAs: {}
ClusterName: minimal.example.com
Hooks:
- null
- null
InstallCNIAssets: true
KeypairIDs:
kubernetes-ca: "6982820025135291416230495506"
KubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.27.2
logLevel: 2
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
nodeLabels:
kops.k8s.io/instancegroup: nodes-us-test-1a
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.9
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
KubernetesVersion: 1.27.2
Networking:
nonMasqueradeCIDR: 100.64.0.0/10
serviceClusterIPRange: 100.64.0.0/13
UpdatePolicy: automatic
containerdConfig:
logLevel: info
runc:
version: 1.3.0
version: 1.7.28
usesLegacyGossip: false
usesNoneDNS: false
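
Each Assets entry in the two nodeup configs above packs a sha256 digest and one or more download URLs into a single string. A Go sketch parsing that <sha256>@<url>[,<mirror>...] convention, which is an assumption drawn from the fixture format rather than the kops asset loader itself:

package main

import (
	"fmt"
	"strings"
)

// parseAsset splits one Assets entry into its digest and the list of
// candidate download URLs (primary first, mirrors after).
func parseAsset(entry string) (sha string, urls []string, err error) {
	sha, rest, ok := strings.Cut(entry, "@")
	if !ok {
		return "", nil, fmt.Errorf("missing sha256 prefix in %q", entry)
	}
	return sha, strings.Split(rest, ","), nil
}

func main() {
	sha, urls, err := parseAsset("028986516ab5646370edce981df2d8e8a8d12188deaf837142a02097000ae2f2@https://github.com/opencontainers/runc/releases/download/v1.3.0/runc.amd64")
	if err != nil {
		panic(err)
	}
	fmt.Println(sha[:12], urls)
}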


@ -1,16 +0,0 @@
{
"Statement": [
{
"Action": "sqs:SendMessage",
"Effect": "Allow",
"Principal": {
"Service": [
"events.amazonaws.com",
"sqs.amazonaws.com"
]
},
"Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth"
}
],
"Version": "2012-10-17"
}


@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==


@ -1,99 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
name: minimal.example.com
spec:
api:
dns: {}
authorization:
rbac: {}
channel: stable
cloudProvider: aws
configBase: memfs://tests/minimal.example.com
etcdClusters:
- cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-us-test-1a
name: a
memoryRequest: 100Mi
name: main
- cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-us-test-1a
name: a
memoryRequest: 100Mi
name: events
iam:
allowContainerRegistry: true
legacy: false
kubelet:
anonymousAuth: false
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: v1.27.2
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
topology:
dns:
type: Public
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: master-us-test-1a
spec:
image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404
instanceMetadata:
httpPutResponseHopLimit: 3
httpTokens: required
machineType: m3.medium
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: master-us-test-1a
role: Master
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: nodes
spec:
image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404
instanceMetadata:
httpPutResponseHopLimit: 1
httpTokens: required
machineType: t2.medium
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: nodes-us-test-1a
role: Node
subnets:
- us-test-1a


@ -1,986 +0,0 @@
locals {
cluster_name = "minimal.example.com"
master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id]
master_security_group_ids = [aws_security_group.masters-minimal-example-com.id]
masters_role_arn = aws_iam_role.masters-minimal-example-com.arn
masters_role_name = aws_iam_role.masters-minimal-example-com.name
node_autoscaling_group_ids = [aws_autoscaling_group.nodes-minimal-example-com.id]
node_security_group_ids = [aws_security_group.nodes-minimal-example-com.id]
node_subnet_ids = [aws_subnet.us-test-1a-minimal-example-com.id]
nodes_role_arn = aws_iam_role.nodes-minimal-example-com.arn
nodes_role_name = aws_iam_role.nodes-minimal-example-com.name
region = "us-test-1"
route_table_public_id = aws_route_table.minimal-example-com.id
subnet_us-test-1a_id = aws_subnet.us-test-1a-minimal-example-com.id
vpc_cidr_block = aws_vpc.minimal-example-com.cidr_block
vpc_id = aws_vpc.minimal-example-com.id
vpc_ipv6_cidr_block = aws_vpc.minimal-example-com.ipv6_cidr_block
vpc_ipv6_cidr_length = local.vpc_ipv6_cidr_block == "" ? null : tonumber(regex(".*/(\\d+)", local.vpc_ipv6_cidr_block)[0])
}
output "cluster_name" {
value = "minimal.example.com"
}
output "master_autoscaling_group_ids" {
value = [aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id]
}
output "master_security_group_ids" {
value = [aws_security_group.masters-minimal-example-com.id]
}
output "masters_role_arn" {
value = aws_iam_role.masters-minimal-example-com.arn
}
output "masters_role_name" {
value = aws_iam_role.masters-minimal-example-com.name
}
output "node_autoscaling_group_ids" {
value = [aws_autoscaling_group.nodes-minimal-example-com.id]
}
output "node_security_group_ids" {
value = [aws_security_group.nodes-minimal-example-com.id]
}
output "node_subnet_ids" {
value = [aws_subnet.us-test-1a-minimal-example-com.id]
}
output "nodes_role_arn" {
value = aws_iam_role.nodes-minimal-example-com.arn
}
output "nodes_role_name" {
value = aws_iam_role.nodes-minimal-example-com.name
}
output "region" {
value = "us-test-1"
}
output "route_table_public_id" {
value = aws_route_table.minimal-example-com.id
}
output "subnet_us-test-1a_id" {
value = aws_subnet.us-test-1a-minimal-example-com.id
}
output "vpc_cidr_block" {
value = aws_vpc.minimal-example-com.cidr_block
}
output "vpc_id" {
value = aws_vpc.minimal-example-com.id
}
output "vpc_ipv6_cidr_block" {
value = aws_vpc.minimal-example-com.ipv6_cidr_block
}
output "vpc_ipv6_cidr_length" {
value = local.vpc_ipv6_cidr_block == "" ? null : tonumber(regex(".*/(\\d+)", local.vpc_ipv6_cidr_block)[0])
}
provider "aws" {
region = "us-test-1"
}
provider "aws" {
alias = "files"
region = "us-test-1"
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.master-us-test-1a-masters-minimal-example-com.id
version = aws_launch_template.master-us-test-1a-masters-minimal-example-com.latest_version
}
max_instance_lifetime = 0
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1a.masters.minimal.example.com"
protect_from_scale_in = false
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "minimal.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1a.masters.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/control-plane"
propagate_at_launch = true
value = "1"
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "kubernetes.io/cluster/minimal.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_group" "nodes-minimal-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.nodes-minimal-example-com.id
version = aws_launch_template.nodes-minimal-example-com.latest_version
}
max_instance_lifetime = 0
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "nodes.minimal.example.com"
protect_from_scale_in = false
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "minimal.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "nodes.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes-us-test-1a"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/node"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes"
}
tag {
key = "kubernetes.io/cluster/minimal.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "master-us-test-1a-NTHLifecycleHook"
}
resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "nodes-NTHLifecycleHook"
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern")
name = "minimal.example.com-ASGLifecycle"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-ASGLifecycle"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern")
name = "minimal.example.com-InstanceScheduledChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceScheduledChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern")
name = "minimal.example.com-InstanceStateChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceStateChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern")
name = "minimal.example.com-SpotInterruption"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-SpotInterruption"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id
}
resource "aws_ebs_volume" "a-etcd-events-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = true
iops = 3000
size = 20
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "a.etcd-events.minimal.example.com"
"k8s.io/etcd/events" = "a/a"
"k8s.io/role/control-plane" = "1"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
throughput = 125
type = "gp3"
}
resource "aws_ebs_volume" "a-etcd-main-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = true
iops = 3000
size = 20
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "a.etcd-main.minimal.example.com"
"k8s.io/etcd/main" = "a/a"
"k8s.io/role/control-plane" = "1"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
throughput = 125
type = "gp3"
}
resource "aws_iam_instance_profile" "masters-minimal-example-com" {
name = "masters.minimal.example.com"
role = aws_iam_role.masters-minimal-example-com.name
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "masters.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_instance_profile" "nodes-minimal-example-com" {
name = "nodes.minimal.example.com"
role = aws_iam_role.nodes-minimal-example-com.name
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_role" "masters-minimal-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_masters.minimal.example.com_policy")
name = "masters.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "masters.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_role" "nodes-minimal-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.minimal.example.com_policy")
name = "nodes.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_role_policy" "masters-minimal-example-com" {
name = "masters.minimal.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_masters.minimal.example.com_policy")
role = aws_iam_role.masters-minimal-example-com.name
}
resource "aws_iam_role_policy" "nodes-minimal-example-com" {
name = "nodes.minimal.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_nodes.minimal.example.com_policy")
role = aws_iam_role.nodes-minimal-example-com.name
}
resource "aws_internet_gateway" "minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_key_pair" "kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = file("${path.module}/data/aws_key_pair_kubernetes.minimal.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
encrypted = true
iops = 3000
throughput = 125
volume_size = 64
volume_type = "gp3"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-minimal-example-com.id
}
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_protocol_ipv6 = "disabled"
http_put_response_hop_limit = 3
http_tokens = "required"
}
monitoring {
enabled = false
}
name = "master-us-test-1a.masters.minimal.example.com"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
ipv6_address_count = 0
security_groups = [aws_security_group.masters-minimal-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-us-test-1a"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/control-plane" = "1"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-us-test-1a"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/control-plane" = "1"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-us-test-1a"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/control-plane" = "1"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.minimal.example.com_user_data")
}
resource "aws_launch_template" "nodes-minimal-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
encrypted = true
iops = 3000
throughput = 125
volume_size = 128
volume_type = "gp3"
}
}
iam_instance_profile {
name = aws_iam_instance_profile.nodes-minimal-example-com.id
}
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = aws_key_pair.kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_protocol_ipv6 = "disabled"
http_put_response_hop_limit = 1
http_tokens = "required"
}
monitoring {
enabled = false
}
name = "nodes.minimal.example.com"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
ipv6_address_count = 0
security_groups = [aws_security_group.nodes-minimal-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "nodes-us-test-1a"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "nodes-us-test-1a"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "nodes-us-test-1a"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
user_data = filebase64("${path.module}/data/aws_launch_template_nodes.minimal.example.com_user_data")
}
resource "aws_route" "route-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.minimal-example-com.id
route_table_id = aws_route_table.minimal-example-com.id
}
resource "aws_route" "route-__--0" {
destination_ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.minimal-example-com.id
route_table_id = aws_route_table.minimal-example-com.id
}
resource "aws_route_table" "minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
"kubernetes.io/kops/role" = "public"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_route_table_association" "us-test-1a-minimal-example-com" {
route_table_id = aws_route_table.minimal-example-com.id
subnet_id = aws_subnet.us-test-1a-minimal-example-com.id
}
resource "aws_s3_object" "cluster-completed-spec" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content")
key = "tests/minimal.example.com/cluster-completed.spec"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "etcd-cluster-spec-events" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content")
key = "tests/minimal.example.com/backups/etcd/events/control/etcd-cluster-spec"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "etcd-cluster-spec-main" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content")
key = "tests/minimal.example.com/backups/etcd/main/control/etcd-cluster-spec"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "kops-version-txt" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_kops-version.txt_content")
key = "tests/minimal.example.com/kops-version.txt"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test-1a" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content")
key = "tests/minimal.example.com/manifests/etcd/events-master-us-test-1a.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test-1a" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content")
key = "tests/minimal.example.com/manifests/etcd/main-master-us-test-1a.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content")
key = "tests/minimal.example.com/manifests/static/kube-apiserver-healthcheck.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-aws-cloud-controller-addons-k8s-io-k8s-1-18" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-aws-cloud-controller.addons.k8s.io-k8s-1.18_content")
key = "tests/minimal.example.com/addons/aws-cloud-controller.addons.k8s.io/k8s-1.18.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-aws-ebs-csi-driver-addons-k8s-io-k8s-1-17" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content")
key = "tests/minimal.example.com/addons/aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-bootstrap" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-bootstrap_content")
key = "tests/minimal.example.com/addons/bootstrap-channel.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-coredns-addons-k8s-io-k8s-1-12" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content")
key = "tests/minimal.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content")
key = "tests/minimal.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content")
key = "tests/minimal.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content")
key = "tests/minimal.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-limit-range-addons-k8s-io" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-limit-range.addons.k8s.io_content")
key = "tests/minimal.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-node-termination-handler-aws-k8s-1-11" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content")
key = "tests/minimal.example.com/addons/node-termination-handler.aws/k8s-1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content")
key = "tests/minimal.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "nodeupconfig-master-us-test-1a" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test-1a_content")
key = "tests/minimal.example.com/igconfig/control-plane/master-us-test-1a/nodeupconfig.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "nodeupconfig-nodes" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content")
key = "tests/minimal.example.com/igconfig/node/nodes/nodeupconfig.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_security_group" "masters-minimal-example-com" {
description = "Security group for masters"
name = "masters.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "masters.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_security_group" "nodes-minimal-example-com" {
description = "Security group for nodes"
name = "nodes.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-minimal-example-com" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-minimal-example-com" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-minimal-example-com" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "from-__--0-ingress-tcp-22to22-masters-minimal-example-com" {
from_port = 22
ipv6_cidr_blocks = ["::/0"]
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-__--0-ingress-tcp-22to22-nodes-minimal-example-com" {
from_port = 22
ipv6_cidr_blocks = ["::/0"]
protocol = "tcp"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-__--0-ingress-tcp-443to443-masters-minimal-example-com" {
from_port = 443
ipv6_cidr_blocks = ["::/0"]
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-egress-all-0to0-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-egress-all-0to0-__--0" {
from_port = 0
ipv6_cidr_blocks = ["::/0"]
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-ingress-all-0to0-masters-minimal-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-ingress-all-0to0-nodes-minimal-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
source_security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-egress-all-0to0-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-egress-all-0to0-__--0" {
from_port = 0
ipv6_cidr_blocks = ["::/0"]
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-all-0to0-nodes-minimal-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-1to2379-masters-minimal-example-com" {
from_port = 1
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 2379
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-2382to4000-masters-minimal-example-com" {
from_port = 2382
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 4000
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-4003to65535-masters-minimal-example-com" {
from_port = 4003
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1to65535-masters-minimal-example-com" {
from_port = 1
protocol = "udp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_sqs_queue" "minimal-example-com-nth" {
message_retention_seconds = 300
name = "minimal-example-com-nth"
policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal-example-com-nth"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_subnet" "us-test-1a-minimal-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "us-test-1a.minimal.example.com"
"SubnetType" = "Public"
"kubernetes.io/cluster/minimal.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_vpc" "minimal-example-com" {
assign_generated_ipv6_cidr_block = true
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_vpc_dhcp_options" "minimal-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_vpc_dhcp_options_association" "minimal-example-com" {
dhcp_options_id = aws_vpc_dhcp_options.minimal-example-com.id
vpc_id = aws_vpc.minimal-example-com.id
}
terraform {
required_version = ">= 0.15.0"
required_providers {
aws = {
"configuration_aliases" = [aws.files]
"source" = "hashicorp/aws"
"version" = ">= 5.0.0"
}
}
}

View File

@ -1 +0,0 @@
{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]}

View File

@ -1 +0,0 @@
{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}}

View File

@ -1 +0,0 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]}

View File

@ -1 +0,0 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]}

View File

@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -1,285 +0,0 @@
{
"Statement": [
{
"Action": "ec2:AttachVolume",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com",
"aws:ResourceTag/k8s.io/role/master": "1"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-read-bucket/tests/minimal.example.com/*"
},
{
"Action": [
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:GetObject",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-write-bucket/tests/minimal.example.com/backups/etcd/main/*"
},
{
"Action": [
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:GetObject",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-write-bucket/tests/minimal.example.com/backups/etcd/events/*"
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-write-bucket"
]
},
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:GetHostedZone",
"route53:ListResourceRecordSets"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "minimal.example.com",
"ec2:CreateAction": [
"CreateVolume",
"CreateSnapshot"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:snapshot/*",
"arn:aws-test:ec2:*:*:volume/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:snapshot/*",
"arn:aws-test:ec2:*:*:volume/*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "minimal.example.com",
"ec2:CreateAction": [
"CreateSecurityGroup"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:security-group/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:security-group/*"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeScalingActivities",
"autoscaling:DescribeTags",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeImages",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeLaunchTemplateVersions",
"ec2:DescribeRegions",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVolumes",
"ec2:DescribeVolumesModifications",
"ec2:DescribeVpcs",
"ec2:GetInstanceTypesFromInstanceRequirements",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:DescribeRepositories",
"ecr:GetAuthorizationToken",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:ListImages",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroupAttributes",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"iam:CreateServiceLinkedRole",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:CreateGrant",
"kms:Decrypt",
"kms:DescribeKey",
"kms:Encrypt",
"kms:GenerateDataKey*",
"kms:GenerateRandom",
"kms:ReEncrypt*",
"sqs:DeleteMessage",
"sqs:ReceiveMessage"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"autoscaling:CompleteLifecycleAction",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyVolume",
"ec2:RevokeSecurityGroupIngress",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:ModifyTargetGroup",
"elasticloadbalancing:ModifyTargetGroupAttributes",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
],
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:CreateSecurityGroup",
"ec2:CreateSnapshot",
"ec2:CreateVolume",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateTargetGroup"
],
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "ec2:CreateSecurityGroup",
"Effect": "Allow",
"Resource": "arn:aws-test:ec2:*:*:vpc/*"
}
],
"Version": "2012-10-17"
}

View File

@ -1,37 +0,0 @@
{
"Statement": [
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingInstances",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"ecr:BatchCheckLayerAvailability",
"ecr:BatchGetImage",
"ecr:DescribeRepositories",
"ecr:GetAuthorizationToken",
"ecr:GetDownloadUrlForLayer",
"ecr:GetRepositoryPolicy",
"ecr:ListImages",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}

View File

@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -1,134 +0,0 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
echo "== Downloading $1 with hash $2 from $3 =="
local -r file="$1"
local -r hash="$2"
local -a urls
IFS=, read -r -a urls <<< "$3"
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O ${file} --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "== Downloading ${url} using ${cmd} =="
if ! (${cmd} "${url}"); then
echo "== Failed to download ${url} using ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Failed to validate hash for ${url} =="
rm -f "${file}"
else
echo "== Downloaded ${url} with hash ${hash} =="
return 0
fi
done
done
echo "== All downloads failed; sleeping before retrying =="
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== File ${file} is corrupted; hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "== Running nodeup =="
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "== Failed to initialize the machine ID; ensure machine-id configured =="
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ClusterName: minimal.example.com
ConfigBase: memfs://tests/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: ControlPlane
NodeupConfigHash: Sj8EDlrNAMivsWEzj3cc6cphoH6xBh7oL6QVgT2Iu/k=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -1,157 +0,0 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
echo "== Downloading $1 with hash $2 from $3 =="
local -r file="$1"
local -r hash="$2"
local -a urls
IFS=, read -r -a urls <<< "$3"
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O ${file} --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "== Downloading ${url} using ${cmd} =="
if ! (${cmd} "${url}"); then
echo "== Failed to download ${url} using ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Failed to validate hash for ${url} =="
rm -f "${file}"
else
echo "== Downloaded ${url} with hash ${hash} =="
return 0
fi
done
done
echo "== All downloads failed; sleeping before retrying =="
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== File ${file} is corrupted; hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "== Running nodeup =="
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "== Failed to initialize the machine ID; ensure machine-id configured =="
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ClusterName: minimal.example.com
ConfigServer:
CACertificates: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
servers:
- https://kops-controller.internal.minimal.example.com:3988/
InstanceGroupName: nodes
InstanceGroupRole: Node
NodeupConfigHash: 7mYrPiHlaO+JW9sKYSuvfF1BaoG/sIa0vSR7ZHk5uX0=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -1,225 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
name: minimal.example.com
spec:
api:
dns: {}
authorization:
rbac: {}
channel: stable
cloudConfig:
awsEBSCSIDriver:
version: v1.47.0
manageStorageClasses: true
cloudControllerManager:
allocateNodeCIDRs: true
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: registry.k8s.io/provider-aws/cloud-controller-manager:v1.28.9
leaderElection:
leaderElect: true
cloudProvider: aws
clusterDNSDomain: cluster.local
configBase: memfs://tests/minimal.example.com
containerd:
logLevel: info
runc:
version: 1.3.0
version: 1.7.28
dnsZone: Z1AFAKE1ZON3YO
etcdClusters:
- backups:
backupStore: memfs://tests/minimal.example.com/backups/etcd/main
cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-us-test-1a
name: a
manager:
backupRetentionDays: 90
memoryRequest: 100Mi
name: main
version: 3.5.21
- backups:
backupStore: memfs://tests/minimal.example.com/backups/etcd/events
cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-us-test-1a
name: a
manager:
backupRetentionDays: 90
memoryRequest: 100Mi
name: events
version: 3.5.21
externalDns:
provider: dns-controller
iam:
allowContainerRegistry: true
legacy: false
keyStore: memfs://tests/minimal.example.com/pki
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: external
enableAdmissionPlugins:
- DefaultStorageClass
- DefaultTolerationSeconds
- LimitRanger
- MutatingAdmissionWebhook
- NamespaceLifecycle
- NodeRestriction
- ResourceQuota
- RuntimeClass
- ServiceAccount
- ValidatingAdmissionPolicy
- ValidatingAdmissionWebhook
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.28.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: external
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
featureGates:
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.28.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeDNS:
cacheMaxConcurrent: 150
cacheMaxSize: 1000
cpuRequest: 100m
domain: cluster.local
memoryLimit: 170Mi
memoryRequest: 70Mi
nodeLocalDNS:
cpuRequest: 25m
enabled: false
image: registry.k8s.io/dns/k8s-dns-node-cache:1.26.0
memoryRequest: 5Mi
provider: CoreDNS
serverIP: 100.64.0.10
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.28.0
logLevel: 2
kubeScheduler:
featureGates:
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.28.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
podInfraContainerImage: registry.k8s.io/pause:3.9
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: 1.28.0
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
podInfraContainerImage: registry.k8s.io/pause:3.9
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nodeTerminationHandler:
cpuRequest: 50m
deleteSQSMsgIfNodeNotFound: false
enableRebalanceDraining: false
enableRebalanceMonitoring: false
enableScheduledEventDraining: true
enableSpotInterruptionDraining: true
enabled: true
excludeFromLoadBalancers: true
managedASGTag: aws-node-termination-handler/managed
memoryRequest: 64Mi
podTerminationGracePeriod: -1
prometheusEnable: false
taintNode: false
version: v1.22.0
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
secretStore: memfs://tests/minimal.example.com/secrets
serviceClusterIPRange: 100.64.0.0/13
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
topology:
dns:
type: Public

View File

@ -1,4 +0,0 @@
{
"memberCount": 1,
"etcdVersion": "3.5.21"
}

View File

@ -1,4 +0,0 @@
{
"memberCount": 1,
"etcdVersion": "3.5.21"
}

View File

@ -1,138 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-events
name: etcd-manager-events
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /ko-app/etcd-manager
--backup-store=memfs://tests/minimal.example.com/backups/etcd/events --client-urls=https://__name__:4002
--cluster-name=etcd-events --containerized=true --dns-suffix=.internal.minimal.example.com
--grpc-port=3997 --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995
--v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
--volume-tag=k8s.io/role/control-plane=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
env:
- name: ETCD_MANAGER_DAILY_BACKUPS_RETENTION
value: 90d
image: registry.k8s.io/etcd-manager/etcd-manager-slim:v3.0.20250803
name: etcd-manager
resources:
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /opt
name: opt
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
initContainers:
- args:
- --target-dir=/opt/kops-utils/
- --src=/ko-app/kops-utils-cp
command:
- /ko-app/kops-utils-cp
image: registry.k8s.io/kops/kops-utils-cp:1.34.0-alpha.1
name: kops-utils-cp
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --target-dir=/opt/etcd-v3.4.13
- --src=/usr/local/bin/etcd
- --src=/usr/local/bin/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/etcd:v3.4.13
name: init-etcd-3-4-13
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --target-dir=/opt/etcd-v3.5.21
- --src=/usr/local/bin/etcd
- --src=/usr/local/bin/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/etcd:v3.5.21
name: init-etcd-3-5-21
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --symlink
- --target-dir=/opt/etcd-v3.4.3
- --src=/opt/etcd-v3.4.13/etcd
- --src=/opt/etcd-v3.4.13/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/kops/kops-utils-cp:1.34.0-alpha.1
name: init-etcd-symlinks-3-4-13
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --symlink
- --target-dir=/opt/etcd-v3.5.0
- --target-dir=/opt/etcd-v3.5.1
- --target-dir=/opt/etcd-v3.5.13
- --target-dir=/opt/etcd-v3.5.17
- --target-dir=/opt/etcd-v3.5.3
- --target-dir=/opt/etcd-v3.5.4
- --target-dir=/opt/etcd-v3.5.6
- --target-dir=/opt/etcd-v3.5.7
- --target-dir=/opt/etcd-v3.5.9
- --src=/opt/etcd-v3.5.21/etcd
- --src=/opt/etcd-v3.5.21/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/kops/kops-utils-cp:1.34.0-alpha.1
name: init-etcd-symlinks-3-5-21
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-events
type: DirectoryOrCreate
name: pki
- emptyDir: {}
name: opt
- hostPath:
path: /var/log/etcd-events.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@ -1,138 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-main
name: etcd-manager-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /ko-app/etcd-manager
--backup-store=memfs://tests/minimal.example.com/backups/etcd/main --client-urls=https://__name__:4001
--cluster-name=etcd --containerized=true --dns-suffix=.internal.minimal.example.com
--grpc-port=3996 --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994
--v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
--volume-tag=k8s.io/role/control-plane=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
env:
- name: ETCD_MANAGER_DAILY_BACKUPS_RETENTION
value: 90d
image: registry.k8s.io/etcd-manager/etcd-manager-slim:v3.0.20250803
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /opt
name: opt
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
initContainers:
- args:
- --target-dir=/opt/kops-utils/
- --src=/ko-app/kops-utils-cp
command:
- /ko-app/kops-utils-cp
image: registry.k8s.io/kops/kops-utils-cp:1.34.0-alpha.1
name: kops-utils-cp
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --target-dir=/opt/etcd-v3.4.13
- --src=/usr/local/bin/etcd
- --src=/usr/local/bin/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/etcd:v3.4.13
name: init-etcd-3-4-13
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --target-dir=/opt/etcd-v3.5.21
- --src=/usr/local/bin/etcd
- --src=/usr/local/bin/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/etcd:v3.5.21
name: init-etcd-3-5-21
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --symlink
- --target-dir=/opt/etcd-v3.4.3
- --src=/opt/etcd-v3.4.13/etcd
- --src=/opt/etcd-v3.4.13/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/kops/kops-utils-cp:1.34.0-alpha.1
name: init-etcd-symlinks-3-4-13
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
- args:
- --symlink
- --target-dir=/opt/etcd-v3.5.0
- --target-dir=/opt/etcd-v3.5.1
- --target-dir=/opt/etcd-v3.5.13
- --target-dir=/opt/etcd-v3.5.17
- --target-dir=/opt/etcd-v3.5.3
- --target-dir=/opt/etcd-v3.5.4
- --target-dir=/opt/etcd-v3.5.6
- --target-dir=/opt/etcd-v3.5.7
- --target-dir=/opt/etcd-v3.5.9
- --src=/opt/etcd-v3.5.21/etcd
- --src=/opt/etcd-v3.5.21/etcdctl
command:
- /opt/kops-utils/kops-utils-cp
image: registry.k8s.io/kops/kops-utils-cp:1.34.0-alpha.1
name: init-etcd-symlinks-3-5-21
resources: {}
volumeMounts:
- mountPath: /opt
name: opt
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-main
type: DirectoryOrCreate
name: pki
- emptyDir: {}
name: opt
- hostPath:
path: /var/log/etcd.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@ -1,33 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
spec:
containers:
- args:
- --ca-cert=/secrets/ca.crt
- --client-cert=/secrets/client.crt
- --client-key=/secrets/client.key
image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.34.0-alpha.1
livenessProbe:
httpGet:
host: 127.0.0.1
path: /.kube-apiserver-healthcheck/healthz
port: 3990
initialDelaySeconds: 5
timeoutSeconds: 5
name: healthcheck
resources: {}
securityContext:
runAsNonRoot: true
runAsUser: 10012
volumeMounts:
- mountPath: /secrets
name: healthcheck-secrets
readOnly: true
volumes:
- hostPath:
path: /etc/kubernetes/kube-apiserver-healthcheck/secrets
type: Directory
name: healthcheck-secrets
status: {}

View File

@ -1,237 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: aws-cloud-controller.addons.k8s.io
k8s-app: aws-cloud-controller-manager
name: aws-cloud-controller-manager
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: aws-cloud-controller-manager
template:
metadata:
creationTimestamp: null
labels:
k8s-app: aws-cloud-controller-manager
kops.k8s.io/managed-by: kops
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- args:
- --allocate-node-cidrs=true
- --cluster-cidr=100.96.0.0/11
- --cluster-name=minimal.example.com
- --configure-cloud-routes=false
- --leader-elect=true
- --v=2
- --cloud-provider=aws
- --use-service-account-credentials=true
- --cloud-config=/etc/kubernetes/cloud.config
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: registry.k8s.io/provider-aws/cloud-controller-manager:v1.28.9
imagePullPolicy: IfNotPresent
name: aws-cloud-controller-manager
resources:
requests:
cpu: 200m
volumeMounts:
- mountPath: /etc/kubernetes/cloud.config
name: cloudconfig
readOnly: true
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccountName: aws-cloud-controller-manager
tolerations:
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
- effect: NoSchedule
key: node.kubernetes.io/not-ready
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
- effect: NoSchedule
key: node-role.kubernetes.io/master
volumes:
- hostPath:
path: /etc/kubernetes/cloud.config
type: ""
name: cloudconfig
updateStrategy:
type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: aws-cloud-controller.addons.k8s.io
name: aws-cloud-controller-manager
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: aws-cloud-controller.addons.k8s.io
name: cloud-controller-manager:apiserver-authentication-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: aws-cloud-controller-manager
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: aws-cloud-controller.addons.k8s.io
name: system:cloud-controller-manager
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- '*'
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- ""
resources:
- services
verbs:
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services/status
verbs:
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- serviceaccounts
verbs:
- create
- get
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- update
- watch
- apiGroups:
- ""
resources:
- endpoints
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resourceNames:
- node-controller
- service-controller
- route-controller
resources:
- serviceaccounts/token
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: aws-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: aws-cloud-controller.addons.k8s.io
name: system:cloud-controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:cloud-controller-manager
subjects:
- apiGroup: ""
kind: ServiceAccount
name: aws-cloud-controller-manager
namespace: kube-system

View File

@ -1,113 +0,0 @@
kind: Addons
metadata:
creationTimestamp: null
name: bootstrap
spec:
addons:
- id: k8s-1.16
manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml
manifestHash: 44cac7d5e9087cebd7acf1ef581425bbceb93a95b4b2d89d0cd3082a51085f71
name: kops-controller.addons.k8s.io
needsRollingUpdate: control-plane
selector:
k8s-addon: kops-controller.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: coredns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 776ca39fa0034ba09a4335cf3ee1bfa9c136407aaed07223555934e6907edd91
name: coredns.addons.k8s.io
selector:
k8s-addon: coredns.addons.k8s.io
version: 9.99.0
- id: k8s-1.9
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81
name: kubelet-api.rbac.addons.k8s.io
selector:
k8s-addon: kubelet-api.rbac.addons.k8s.io
version: 9.99.0
- manifest: limit-range.addons.k8s.io/v1.5.0.yaml
manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2
name: limit-range.addons.k8s.io
selector:
k8s-addon: limit-range.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 4547fd9281fdef75bb50e82a90136a721fe7bd01a42d58dbe837a422cf54466d
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 9.99.0
- id: k8s-1.11
manifest: node-termination-handler.aws/k8s-1.11.yaml
manifestHash: 1d0968eea99ca0d78400867a76af8b1dfe93ef2ff9640f0d755b21b2db7fec41
name: node-termination-handler.aws
prune:
kinds:
- kind: ConfigMap
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- kind: Service
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- kind: ServiceAccount
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: admissionregistration.k8s.io
kind: MutatingWebhookConfiguration
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: admissionregistration.k8s.io
kind: ValidatingWebhookConfiguration
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: apps
kind: DaemonSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: apps
kind: Deployment
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: StatefulSet
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: policy
kind: PodDisruptionBudget
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: rbac.authorization.k8s.io
kind: ClusterRole
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: rbac.authorization.k8s.io
kind: ClusterRoleBinding
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: rbac.authorization.k8s.io
kind: Role
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
- group: rbac.authorization.k8s.io
kind: RoleBinding
labelSelector: addon.kops.k8s.io/name=node-termination-handler.aws,app.kubernetes.io/managed-by=kops
selector:
k8s-addon: node-termination-handler.aws
version: 9.99.0
- id: v1.15.0
manifest: storage-aws.addons.k8s.io/v1.15.0.yaml
manifestHash: 4e2cda50cd5048133aad1b5e28becb60f4629d3f9e09c514a2757c27998b4200
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.18
manifest: aws-cloud-controller.addons.k8s.io/k8s-1.18.yaml
manifestHash: 0aeebf155056b98bdbf8be473e8b798eed3ca86cb94b806a12a55638b444a930
name: aws-cloud-controller.addons.k8s.io
selector:
k8s-addon: aws-cloud-controller.addons.k8s.io
version: 9.99.0
- id: k8s-1.17
manifest: aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml
manifestHash: 93c7269843ed2f8acef3f95774cf1f1d9851d88d157e0b0da04336741694393f
name: aws-ebs-csi-driver.addons.k8s.io
selector:
k8s-addon: aws-ebs-csi-driver.addons.k8s.io
version: 9.99.0


@ -1,383 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/cluster-service: "true"
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
data:
Corefile: |-
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local. in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: coredns
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: registry.k8s.io/coredns/coredns:v1.11.4
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- configMap:
name: coredns
name: config-volume
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: kube-dns
namespace: kube-system
resourceVersion: "0"
spec:
clusterIP: 100.64.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
selector:
k8s-app: kube-dns
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: kube-dns
namespace: kube-system
spec:
maxUnavailable: 50%
selector:
matchLabels:
k8s-app: kube-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- replicationcontrollers/scale
verbs:
- get
- update
- apiGroups:
- extensions
- apps
resources:
- deployments/scale
- replicasets/scale
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: coredns-autoscaler
kubernetes.io/cluster-service: "true"
name: coredns-autoscaler
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
creationTimestamp: null
labels:
k8s-app: coredns-autoscaler
kops.k8s.io/managed-by: kops
spec:
containers:
- command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
image: registry.k8s.io/cpa/cluster-proportional-autoscaler:v1.9.0
name: autoscaler
resources:
requests:
cpu: 20m
memory: 10Mi
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns-autoscaler
tolerations:
- key: CriticalAddonsOnly
operator: Exists


@ -1,138 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.34.0-alpha.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
strategy:
type: Recreate
template:
metadata:
creationTimestamp: null
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
kops.k8s.io/managed-by: kops
version: v1.34.0-alpha.1
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- args:
- --watch-ingress=false
- --dns=aws-route53
- --zone=*/Z1AFAKE1ZON3YO
- --internal-ipv4
- --zone=*/*
- -v=2
command: null
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: registry.k8s.io/kops/dns-controller:1.34.0-alpha.1
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
dnsPolicy: Default
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccount: dns-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller


@ -1,227 +0,0 @@
apiVersion: v1
data:
config.yaml: |
{"clusterName":"minimal.example.com","cloud":"aws","configBase":"memfs://tests/minimal.example.com","secretStore":"memfs://tests/minimal.example.com/secrets","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.minimal.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.34.0-alpha.1
name: kops-controller
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kops-controller
template:
metadata:
annotations:
dns.alpha.kubernetes.io/internal: kops-controller.internal.minimal.example.com
creationTimestamp: null
labels:
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
kops.k8s.io/managed-by: kops
version: v1.34.0-alpha.1
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: kops.k8s.io/kops-controller-pki
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- key: kops.k8s.io/kops-controller-pki
operator: Exists
containers:
- args:
- --v=2
- --conf=/etc/kubernetes/kops-controller/config/config.yaml
command: null
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: KOPS_RUN_TOO_NEW_VERSION
value: "1"
image: registry.k8s.io/kops/kops-controller:1.34.0-alpha.1
name: kops-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
runAsUser: 10011
volumeMounts:
- mountPath: /etc/kubernetes/kops-controller/config/
name: kops-controller-config
- mountPath: /etc/kubernetes/kops-controller/pki/
name: kops-controller-pki
dnsPolicy: Default
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccount: kops-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
volumes:
- configMap:
name: kops-controller
name: kops-controller-config
- hostPath:
path: /etc/kubernetes/kops-controller/
type: Directory
name: kops-controller-pki
updateStrategy:
type: OnDelete
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
- create
- apiGroups:
- ""
- coordination.k8s.io
resourceNames:
- kops-controller-leader
resources:
- configmaps
- leases
verbs:
- get
- list
- watch
- patch
- update
- delete
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller


@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kubelet-api.rbac.addons.k8s.io
name: kops:system:kubelet-api-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet-api


@ -1,15 +0,0 @@
apiVersion: v1
kind: LimitRange
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: limit-range.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: limit-range.addons.k8s.io
name: limits
namespace: default
spec:
limits:
- defaultRequest:
cpu: 100m
type: Container


@ -1,285 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
app.kubernetes.io/part-of: aws-node-termination-handler
app.kubernetes.io/version: v1.22.0
k8s-addon: node-termination-handler.aws
k8s-app: aws-node-termination-handler
name: aws-node-termination-handler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
app.kubernetes.io/part-of: aws-node-termination-handler
app.kubernetes.io/version: v1.22.0
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- update
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- get
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
- create
- apiGroups:
- extensions
resources:
- daemonsets
verbs:
- get
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- get
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
app.kubernetes.io/part-of: aws-node-termination-handler
app.kubernetes.io/version: v1.22.0
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: aws-node-termination-handler
subjects:
- kind: ServiceAccount
name: aws-node-termination-handler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
app.kubernetes.io/part-of: aws-node-termination-handler
app.kubernetes.io/version: v1.22.0
k8s-addon: node-termination-handler.aws
k8s-app: aws-node-termination-handler
name: aws-node-termination-handler
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kubernetes.io/os: linux
template:
metadata:
creationTimestamp: null
labels:
app.kubernetes.io/component: deployment
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
k8s-app: aws-node-termination-handler
kops.k8s.io/managed-by: kops
kops.k8s.io/nth-mode: sqs
kubernetes.io/os: linux
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: ENABLE_PROBES_SERVER
value: "true"
- name: PROBES_SERVER_PORT
value: "8080"
- name: PROBES_SERVER_ENDPOINT
value: /healthz
- name: LOG_LEVEL
value: info
- name: JSON_LOGGING
value: "true"
- name: LOG_FORMAT_VERSION
value: "2"
- name: ENABLE_PROMETHEUS_SERVER
value: "false"
- name: PROMETHEUS_SERVER_PORT
value: "9092"
- name: CHECK_TAG_BEFORE_DRAINING
value: "true"
- name: MANAGED_TAG
value: aws-node-termination-handler/managed
- name: USE_PROVIDER_ID
value: "true"
- name: DRY_RUN
value: "false"
- name: CORDON_ONLY
value: "false"
- name: TAINT_NODE
value: "false"
- name: EXCLUDE_FROM_LOAD_BALANCERS
value: "true"
- name: DELETE_LOCAL_DATA
value: "true"
- name: IGNORE_DAEMON_SETS
value: "true"
- name: POD_TERMINATION_GRACE_PERIOD
value: "-1"
- name: NODE_TERMINATION_GRACE_PERIOD
value: "120"
- name: EMIT_KUBERNETES_EVENTS
value: "true"
- name: COMPLETE_LIFECYCLE_ACTION_DELAY_SECONDS
value: "-1"
- name: ENABLE_SQS_TERMINATION_DRAINING
value: "true"
- name: QUEUE_URL
value: https://sqs.us-test-1.amazonaws.com/123456789012/minimal-example-com-nth
- name: DELETE_SQS_MSG_IF_NODE_NOT_FOUND
value: "false"
- name: WORKERS
value: "10"
image: public.ecr.aws/aws-ec2/aws-node-termination-handler:v1.22.0
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
name: aws-node-termination-handler
ports:
- containerPort: 8080
name: liveness-probe
protocol: TCP
- containerPort: 9092
name: metrics
protocol: TCP
resources:
requests:
cpu: 50m
memory: 64Mi
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 1000
serviceAccountName: aws-node-termination-handler
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: node-termination-handler.aws
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: aws-node-termination-handler
k8s-addon: node-termination-handler.aws
name: aws-node-termination-handler
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
app.kubernetes.io/instance: aws-node-termination-handler
app.kubernetes.io/name: aws-node-termination-handler
kops.k8s.io/nth-mode: sqs


@ -1,118 +0,0 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: default
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: gp2
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-ssd-1-17
parameters:
encrypted: "true"
type: gp2
provisioner: kubernetes.io/aws-ebs
volumeBindingMode: WaitForFirstConsumer
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-csi-1-21
parameters:
encrypted: "true"
type: gp3
provisioner: ebs.csi.aws.com
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:aws-cloud-provider
subjects:
- kind: ServiceAccount
name: aws-cloud-provider
namespace: kube-system


@ -1,332 +0,0 @@
APIServerConfig:
API:
dns: {}
publicName: api.minimal.example.com
ClusterDNSDomain: cluster.local
KubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: external
enableAdmissionPlugins:
- DefaultStorageClass
- DefaultTolerationSeconds
- LimitRanger
- MutatingAdmissionWebhook
- NamespaceLifecycle
- NodeRestriction
- ResourceQuota
- RuntimeClass
- ServiceAccount
- ValidatingAdmissionPolicy
- ValidatingAdmissionWebhook
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
featureGates:
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-apiserver:v1.28.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm
XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ==
-----END RSA PUBLIC KEY-----
-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF
Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ==
-----END RSA PUBLIC KEY-----
Assets:
amd64:
- bfb6b977100963f2879a33e5fbaa59a5276ba829a957a6819c936e9c1465f981@https://dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubelet,https://cdn.dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubelet
- 4717660fd1466ec72d59000bb1d9f5cdc91fac31d491043ca62b34398e0799ce@https://dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubectl,https://cdn.dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubectl
- 7644623e4ec9ad443ab352a8a5800a5180ee28741288be805286ba72bb8e7164@https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.31.7/linux/amd64/ecr-credential-provider-linux-amd64
- f3a841324845ca6bf0d4091b4fc7f97e18a623172158b72fc3fdcdb9d42d2d37@https://storage.googleapis.com/k8s-artifacts-cni/release/v1.2.0/cni-plugins-linux-amd64-v1.2.0.tgz,https://github.com/containernetworking/plugins/releases/download/v1.2.0/cni-plugins-linux-amd64-v1.2.0.tgz
- 7a8c262deb63becc877e82d23749e4f99f4a17e8e660f9b8c257ca87a5c056b6@https://github.com/containerd/containerd/releases/download/v1.7.28/containerd-1.7.28-linux-amd64.tar.gz
- 028986516ab5646370edce981df2d8e8a8d12188deaf837142a02097000ae2f2@https://github.com/opencontainers/runc/releases/download/v1.3.0/runc.amd64
- f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64
- 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64
arm64:
- 05dd12e35783cab4960e885ec0e7d0e461989b94297e7bea9018ccbd15c4dce9@https://dl.k8s.io/release/v1.28.0/bin/linux/arm64/kubelet,https://cdn.dl.k8s.io/release/v1.28.0/bin/linux/arm64/kubelet
- f5484bd9cac66b183c653abed30226b561f537d15346c605cc81d98095f1717c@https://dl.k8s.io/release/v1.28.0/bin/linux/arm64/kubectl,https://cdn.dl.k8s.io/release/v1.28.0/bin/linux/arm64/kubectl
- 1980e3a038cb16da48a137743b31fb81de6c0b59fa06c206c2bc20ce0a52f849@https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.31.7/linux/arm64/ecr-credential-provider-linux-arm64
- 525e2b62ba92a1b6f3dc9612449a84aa61652e680f7ebf4eff579795fe464b57@https://storage.googleapis.com/k8s-artifacts-cni/release/v1.2.0/cni-plugins-linux-arm64-v1.2.0.tgz,https://github.com/containernetworking/plugins/releases/download/v1.2.0/cni-plugins-linux-arm64-v1.2.0.tgz
- 97457594ff8549cb82d664306593cafd3d2c781c706f9fffed885a46d8919bec@https://github.com/containerd/containerd/releases/download/v1.7.28/containerd-1.7.28-linux-arm64.tar.gz
- 85c5e4e4f72e442c8c17bac07527cd4f961ee48e4f2b71797f7533c94f4a52b9@https://github.com/opencontainers/runc/releases/download/v1.3.0/runc.arm64
- 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64
- 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64
CAs:
apiserver-aggregator-ca: |
-----BEGIN CERTIFICATE-----
MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX
DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX
WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk
CzMeMdr4
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX
DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN
QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW
HLtkTXH8
-----END CERTIFICATE-----
etcd-clients-ca: |
-----BEGIN CERTIFICATE-----
MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx
NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep
uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s
x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY
qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx
NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep
uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s
x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E
YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co=
-----END CERTIFICATE-----
etcd-manager-ca-events: |
-----BEGIN CERTIFICATE-----
MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN
MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H
g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6
CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O
sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs
GS/VUw==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN
MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL
DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW
LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE
hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV
cPfVNg==
-----END CERTIFICATE-----
etcd-manager-ca-main: |
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx
MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm
ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx
GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu
Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP
vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP
DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9
t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd
-----END CERTIFICATE-----
etcd-peers-ca-events: |
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx
MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY
xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O
Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB
DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW
03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh
cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI
J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb
-----END CERTIFICATE-----
etcd-peers-ca-main: |
-----BEGIN CERTIFICATE-----
MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3
MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG
SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA
aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf
OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt
uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3
MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG
SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt
naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC
qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K
G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo=
-----END CERTIFICATE-----
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
ClusterName: minimal.example.com
ControlPlaneConfig:
KubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: external
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
featureGates:
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-controller-manager:v1.28.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
KubeScheduler:
featureGates:
InTreePluginAWSUnregister: "true"
image: registry.k8s.io/kube-scheduler:v1.28.0
leaderElection:
leaderElect: true
logLevel: 2
DNSZone: Z1AFAKE1ZON3YO
EtcdClusterNames:
- main
- events
FileAssets:
- content: |
apiVersion: kubescheduler.config.k8s.io/v1
clientConnection:
kubeconfig: /var/lib/kube-scheduler/kubeconfig
kind: KubeSchedulerConfiguration
path: /var/lib/kube-scheduler/config.yaml
Hooks:
- null
- null
InstallCNIAssets: true
KeypairIDs:
apiserver-aggregator-ca: "6980187172486667078076483355"
etcd-clients-ca: "6979622252718071085282986282"
etcd-manager-ca-events: "6982279354000777253151890266"
etcd-manager-ca-main: "6982279354000936168671127624"
etcd-peers-ca-events: "6982279353999767935825892873"
etcd-peers-ca-main: "6982279353998887468930183660"
kubernetes-ca: "6982820025135291416230495506"
service-account: "2"
KubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.28.0
logLevel: 2
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
nodeLabels:
kops.k8s.io/instancegroup: master-us-test-1a
kops.k8s.io/kops-controller-pki: ""
node-role.kubernetes.io/control-plane: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
podInfraContainerImage: registry.k8s.io/pause:3.9
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
taints:
- node-role.kubernetes.io/control-plane=:NoSchedule
KubernetesVersion: 1.28.0
Networking:
nonMasqueradeCIDR: 100.64.0.0/10
serviceClusterIPRange: 100.64.0.0/13
UpdatePolicy: automatic
channels:
- memfs://tests/minimal.example.com/addons/bootstrap-channel.yaml
configStore:
keypairs: memfs://tests/minimal.example.com/pki
secrets: memfs://tests/minimal.example.com/secrets
containerdConfig:
logLevel: info
runc:
version: 1.3.0
version: 1.7.28
etcdManifests:
- memfs://tests/minimal.example.com/manifests/etcd/main-master-us-test-1a.yaml
- memfs://tests/minimal.example.com/manifests/etcd/events-master-us-test-1a.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml
usesLegacyGossip: false
usesNoneDNS: false


@ -1,62 +0,0 @@
Assets:
amd64:
- bfb6b977100963f2879a33e5fbaa59a5276ba829a957a6819c936e9c1465f981@https://dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubelet,https://cdn.dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubelet
- 4717660fd1466ec72d59000bb1d9f5cdc91fac31d491043ca62b34398e0799ce@https://dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubectl,https://cdn.dl.k8s.io/release/v1.28.0/bin/linux/amd64/kubectl
- 7644623e4ec9ad443ab352a8a5800a5180ee28741288be805286ba72bb8e7164@https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.31.7/linux/amd64/ecr-credential-provider-linux-amd64
- f3a841324845ca6bf0d4091b4fc7f97e18a623172158b72fc3fdcdb9d42d2d37@https://storage.googleapis.com/k8s-artifacts-cni/release/v1.2.0/cni-plugins-linux-amd64-v1.2.0.tgz,https://github.com/containernetworking/plugins/releases/download/v1.2.0/cni-plugins-linux-amd64-v1.2.0.tgz
- 7a8c262deb63becc877e82d23749e4f99f4a17e8e660f9b8c257ca87a5c056b6@https://github.com/containerd/containerd/releases/download/v1.7.28/containerd-1.7.28-linux-amd64.tar.gz
- 028986516ab5646370edce981df2d8e8a8d12188deaf837142a02097000ae2f2@https://github.com/opencontainers/runc/releases/download/v1.3.0/runc.amd64
arm64:
- 05dd12e35783cab4960e885ec0e7d0e461989b94297e7bea9018ccbd15c4dce9@https://dl.k8s.io/release/v1.28.0/bin/linux/arm64/kubelet,https://cdn.dl.k8s.io/release/v1.28.0/bin/linux/arm64/kubelet
- f5484bd9cac66b183c653abed30226b561f537d15346c605cc81d98095f1717c@https://dl.k8s.io/release/v1.28.0/bin/linux/arm64/kubectl,https://cdn.dl.k8s.io/release/v1.28.0/bin/linux/arm64/kubectl
- 1980e3a038cb16da48a137743b31fb81de6c0b59fa06c206c2bc20ce0a52f849@https://artifacts.k8s.io/binaries/cloud-provider-aws/v1.31.7/linux/arm64/ecr-credential-provider-linux-arm64
- 525e2b62ba92a1b6f3dc9612449a84aa61652e680f7ebf4eff579795fe464b57@https://storage.googleapis.com/k8s-artifacts-cni/release/v1.2.0/cni-plugins-linux-arm64-v1.2.0.tgz,https://github.com/containernetworking/plugins/releases/download/v1.2.0/cni-plugins-linux-arm64-v1.2.0.tgz
- 97457594ff8549cb82d664306593cafd3d2c781c706f9fffed885a46d8919bec@https://github.com/containerd/containerd/releases/download/v1.7.28/containerd-1.7.28-linux-arm64.tar.gz
- 85c5e4e4f72e442c8c17bac07527cd4f961ee48e4f2b71797f7533c94f4a52b9@https://github.com/opencontainers/runc/releases/download/v1.3.0/runc.arm64
CAs: {}
ClusterName: minimal.example.com
Hooks:
- null
- null
InstallCNIAssets: true
KeypairIDs:
kubernetes-ca: "6982820025135291416230495506"
KubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.28.0
logLevel: 2
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
featureGates:
InTreePluginAWSUnregister: "true"
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
nodeLabels:
kops.k8s.io/instancegroup: nodes-us-test-1a
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.9
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
KubernetesVersion: 1.28.0
Networking:
nonMasqueradeCIDR: 100.64.0.0/10
serviceClusterIPRange: 100.64.0.0/13
UpdatePolicy: automatic
containerdConfig:
logLevel: info
runc:
version: 1.3.0
version: 1.7.28
usesLegacyGossip: false
usesNoneDNS: false


@ -1,16 +0,0 @@
{
"Statement": [
{
"Action": "sqs:SendMessage",
"Effect": "Allow",
"Principal": {
"Service": [
"events.amazonaws.com",
"sqs.amazonaws.com"
]
},
"Resource": "arn:aws-test:sqs:us-test-1:123456789012:minimal-example-com-nth"
}
],
"Version": "2012-10-17"
}


@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==


@ -1,99 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
name: minimal.example.com
spec:
api:
dns: {}
authorization:
rbac: {}
channel: stable
cloudProvider: aws
configBase: memfs://tests/minimal.example.com
etcdClusters:
- cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-us-test-1a
name: a
memoryRequest: 100Mi
name: main
- cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: master-us-test-1a
name: a
memoryRequest: 100Mi
name: events
iam:
allowContainerRegistry: true
legacy: false
kubelet:
anonymousAuth: false
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: v1.28.0
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
topology:
dns:
type: Public
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: master-us-test-1a
spec:
image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404
instanceMetadata:
httpPutResponseHopLimit: 3
httpTokens: required
machineType: m3.medium
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: master-us-test-1a
role: Master
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: nodes
spec:
image: ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20220404
instanceMetadata:
httpPutResponseHopLimit: 1
httpTokens: required
machineType: t2.medium
maxSize: 1
minSize: 1
nodeLabels:
kops.k8s.io/instancegroup: nodes-us-test-1a
role: Node
subnets:
- us-test-1a


@ -1,986 +0,0 @@
locals {
cluster_name = "minimal.example.com"
master_autoscaling_group_ids = [aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id]
master_security_group_ids = [aws_security_group.masters-minimal-example-com.id]
masters_role_arn = aws_iam_role.masters-minimal-example-com.arn
masters_role_name = aws_iam_role.masters-minimal-example-com.name
node_autoscaling_group_ids = [aws_autoscaling_group.nodes-minimal-example-com.id]
node_security_group_ids = [aws_security_group.nodes-minimal-example-com.id]
node_subnet_ids = [aws_subnet.us-test-1a-minimal-example-com.id]
nodes_role_arn = aws_iam_role.nodes-minimal-example-com.arn
nodes_role_name = aws_iam_role.nodes-minimal-example-com.name
region = "us-test-1"
route_table_public_id = aws_route_table.minimal-example-com.id
subnet_us-test-1a_id = aws_subnet.us-test-1a-minimal-example-com.id
vpc_cidr_block = aws_vpc.minimal-example-com.cidr_block
vpc_id = aws_vpc.minimal-example-com.id
vpc_ipv6_cidr_block = aws_vpc.minimal-example-com.ipv6_cidr_block
vpc_ipv6_cidr_length = local.vpc_ipv6_cidr_block == "" ? null : tonumber(regex(".*/(\\d+)", local.vpc_ipv6_cidr_block)[0])
}
output "cluster_name" {
value = "minimal.example.com"
}
output "master_autoscaling_group_ids" {
value = [aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id]
}
output "master_security_group_ids" {
value = [aws_security_group.masters-minimal-example-com.id]
}
output "masters_role_arn" {
value = aws_iam_role.masters-minimal-example-com.arn
}
output "masters_role_name" {
value = aws_iam_role.masters-minimal-example-com.name
}
output "node_autoscaling_group_ids" {
value = [aws_autoscaling_group.nodes-minimal-example-com.id]
}
output "node_security_group_ids" {
value = [aws_security_group.nodes-minimal-example-com.id]
}
output "node_subnet_ids" {
value = [aws_subnet.us-test-1a-minimal-example-com.id]
}
output "nodes_role_arn" {
value = aws_iam_role.nodes-minimal-example-com.arn
}
output "nodes_role_name" {
value = aws_iam_role.nodes-minimal-example-com.name
}
output "region" {
value = "us-test-1"
}
output "route_table_public_id" {
value = aws_route_table.minimal-example-com.id
}
output "subnet_us-test-1a_id" {
value = aws_subnet.us-test-1a-minimal-example-com.id
}
output "vpc_cidr_block" {
value = aws_vpc.minimal-example-com.cidr_block
}
output "vpc_id" {
value = aws_vpc.minimal-example-com.id
}
output "vpc_ipv6_cidr_block" {
value = aws_vpc.minimal-example-com.ipv6_cidr_block
}
output "vpc_ipv6_cidr_length" {
value = local.vpc_ipv6_cidr_block == "" ? null : tonumber(regex(".*/(\\d+)", local.vpc_ipv6_cidr_block)[0])
}
provider "aws" {
region = "us-test-1"
}
provider "aws" {
alias = "files"
region = "us-test-1"
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-minimal-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.master-us-test-1a-masters-minimal-example-com.id
version = aws_launch_template.master-us-test-1a-masters-minimal-example-com.latest_version
}
max_instance_lifetime = 0
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "master-us-test-1a.masters.minimal.example.com"
protect_from_scale_in = false
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "minimal.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "master-us-test-1a.masters.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/control-plane"
propagate_at_launch = true
value = "1"
}
tag {
key = "k8s.io/role/master"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "master-us-test-1a"
}
tag {
key = "kubernetes.io/cluster/minimal.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_group" "nodes-minimal-example-com" {
enabled_metrics = ["GroupDesiredCapacity", "GroupInServiceInstances", "GroupMaxSize", "GroupMinSize", "GroupPendingInstances", "GroupStandbyInstances", "GroupTerminatingInstances", "GroupTotalInstances"]
launch_template {
id = aws_launch_template.nodes-minimal-example-com.id
version = aws_launch_template.nodes-minimal-example-com.latest_version
}
max_instance_lifetime = 0
max_size = 1
metrics_granularity = "1Minute"
min_size = 1
name = "nodes.minimal.example.com"
protect_from_scale_in = false
tag {
key = "KubernetesCluster"
propagate_at_launch = true
value = "minimal.example.com"
}
tag {
key = "Name"
propagate_at_launch = true
value = "nodes.minimal.example.com"
}
tag {
key = "aws-node-termination-handler/managed"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes-us-test-1a"
}
tag {
key = "k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node"
propagate_at_launch = true
value = ""
}
tag {
key = "k8s.io/role/node"
propagate_at_launch = true
value = "1"
}
tag {
key = "kops.k8s.io/instancegroup"
propagate_at_launch = true
value = "nodes"
}
tag {
key = "kubernetes.io/cluster/minimal.example.com"
propagate_at_launch = true
value = "owned"
}
vpc_zone_identifier = [aws_subnet.us-test-1a-minimal-example-com.id]
}
resource "aws_autoscaling_lifecycle_hook" "master-us-test-1a-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.master-us-test-1a-masters-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "master-us-test-1a-NTHLifecycleHook"
}
resource "aws_autoscaling_lifecycle_hook" "nodes-NTHLifecycleHook" {
autoscaling_group_name = aws_autoscaling_group.nodes-minimal-example-com.id
default_result = "CONTINUE"
heartbeat_timeout = 300
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
name = "nodes-NTHLifecycleHook"
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-ASGLifecycle" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-ASGLifecycle_event_pattern")
name = "minimal.example.com-ASGLifecycle"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-ASGLifecycle"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceScheduledChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceScheduledChange_event_pattern")
name = "minimal.example.com-InstanceScheduledChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceScheduledChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-InstanceStateChange" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-InstanceStateChange_event_pattern")
name = "minimal.example.com-InstanceStateChange"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-InstanceStateChange"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_rule" "minimal-example-com-SpotInterruption" {
event_pattern = file("${path.module}/data/aws_cloudwatch_event_rule_minimal.example.com-SpotInterruption_event_pattern")
name = "minimal.example.com-SpotInterruption"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com-SpotInterruption"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_cloudwatch_event_target" "minimal-example-com-ASGLifecycle-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-ASGLifecycle.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceScheduledChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceScheduledChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-InstanceStateChange-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-InstanceStateChange.id
}
resource "aws_cloudwatch_event_target" "minimal-example-com-SpotInterruption-Target" {
arn = aws_sqs_queue.minimal-example-com-nth.arn
rule = aws_cloudwatch_event_rule.minimal-example-com-SpotInterruption.id
}
resource "aws_ebs_volume" "a-etcd-events-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = true
iops = 3000
size = 20
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "a.etcd-events.minimal.example.com"
"k8s.io/etcd/events" = "a/a"
"k8s.io/role/control-plane" = "1"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
throughput = 125
type = "gp3"
}
resource "aws_ebs_volume" "a-etcd-main-minimal-example-com" {
availability_zone = "us-test-1a"
encrypted = true
iops = 3000
size = 20
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "a.etcd-main.minimal.example.com"
"k8s.io/etcd/main" = "a/a"
"k8s.io/role/control-plane" = "1"
"k8s.io/role/master" = "1"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
throughput = 125
type = "gp3"
}
resource "aws_iam_instance_profile" "masters-minimal-example-com" {
name = "masters.minimal.example.com"
role = aws_iam_role.masters-minimal-example-com.name
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "masters.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_instance_profile" "nodes-minimal-example-com" {
name = "nodes.minimal.example.com"
role = aws_iam_role.nodes-minimal-example-com.name
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_role" "masters-minimal-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_masters.minimal.example.com_policy")
name = "masters.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "masters.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_role" "nodes-minimal-example-com" {
assume_role_policy = file("${path.module}/data/aws_iam_role_nodes.minimal.example.com_policy")
name = "nodes.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_iam_role_policy" "masters-minimal-example-com" {
name = "masters.minimal.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_masters.minimal.example.com_policy")
role = aws_iam_role.masters-minimal-example-com.name
}
resource "aws_iam_role_policy" "nodes-minimal-example-com" {
name = "nodes.minimal.example.com"
policy = file("${path.module}/data/aws_iam_role_policy_nodes.minimal.example.com_policy")
role = aws_iam_role.nodes-minimal-example-com.name
}
resource "aws_internet_gateway" "minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_key_pair" "kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.minimal.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = file("${path.module}/data/aws_key_pair_kubernetes.minimal.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_launch_template" "master-us-test-1a-masters-minimal-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
encrypted = true
iops = 3000
throughput = 125
volume_size = 64
volume_type = "gp3"
}
}
block_device_mappings {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
iam_instance_profile {
name = aws_iam_instance_profile.masters-minimal-example-com.id
}
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = aws_key_pair.kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_protocol_ipv6 = "disabled"
http_put_response_hop_limit = 3
http_tokens = "required"
}
monitoring {
enabled = false
}
name = "master-us-test-1a.masters.minimal.example.com"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
ipv6_address_count = 0
security_groups = [aws_security_group.masters-minimal-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-us-test-1a"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/control-plane" = "1"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-us-test-1a"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/control-plane" = "1"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "master-us-test-1a.masters.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "master-us-test-1a"
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/kops-controller-pki" = ""
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/control-plane" = ""
"k8s.io/cluster-autoscaler/node-template/label/node.kubernetes.io/exclude-from-external-load-balancers" = ""
"k8s.io/role/control-plane" = "1"
"k8s.io/role/master" = "1"
"kops.k8s.io/instancegroup" = "master-us-test-1a"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
user_data = filebase64("${path.module}/data/aws_launch_template_master-us-test-1a.masters.minimal.example.com_user_data")
}
resource "aws_launch_template" "nodes-minimal-example-com" {
block_device_mappings {
device_name = "/dev/xvda"
ebs {
delete_on_termination = true
encrypted = true
iops = 3000
throughput = 125
volume_size = 128
volume_type = "gp3"
}
}
iam_instance_profile {
name = aws_iam_instance_profile.nodes-minimal-example-com.id
}
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = aws_key_pair.kubernetes-minimal-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id
lifecycle {
create_before_destroy = true
}
metadata_options {
http_endpoint = "enabled"
http_protocol_ipv6 = "disabled"
http_put_response_hop_limit = 1
http_tokens = "required"
}
monitoring {
enabled = false
}
name = "nodes.minimal.example.com"
network_interfaces {
associate_public_ip_address = true
delete_on_termination = true
ipv6_address_count = 0
security_groups = [aws_security_group.nodes-minimal-example-com.id]
}
tag_specifications {
resource_type = "instance"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "nodes-us-test-1a"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tag_specifications {
resource_type = "volume"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "nodes-us-test-1a"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"aws-node-termination-handler/managed" = ""
"k8s.io/cluster-autoscaler/node-template/label/kops.k8s.io/instancegroup" = "nodes-us-test-1a"
"k8s.io/cluster-autoscaler/node-template/label/node-role.kubernetes.io/node" = ""
"k8s.io/role/node" = "1"
"kops.k8s.io/instancegroup" = "nodes"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
user_data = filebase64("${path.module}/data/aws_launch_template_nodes.minimal.example.com_user_data")
}
resource "aws_route" "route-0-0-0-0--0" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.minimal-example-com.id
route_table_id = aws_route_table.minimal-example-com.id
}
resource "aws_route" "route-__--0" {
destination_ipv6_cidr_block = "::/0"
gateway_id = aws_internet_gateway.minimal-example-com.id
route_table_id = aws_route_table.minimal-example-com.id
}
resource "aws_route_table" "minimal-example-com" {
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
"kubernetes.io/kops/role" = "public"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_route_table_association" "us-test-1a-minimal-example-com" {
route_table_id = aws_route_table.minimal-example-com.id
subnet_id = aws_subnet.us-test-1a-minimal-example-com.id
}
resource "aws_s3_object" "cluster-completed-spec" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content")
key = "tests/minimal.example.com/cluster-completed.spec"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "etcd-cluster-spec-events" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content")
key = "tests/minimal.example.com/backups/etcd/events/control/etcd-cluster-spec"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "etcd-cluster-spec-main" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content")
key = "tests/minimal.example.com/backups/etcd/main/control/etcd-cluster-spec"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "kops-version-txt" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_kops-version.txt_content")
key = "tests/minimal.example.com/kops-version.txt"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test-1a" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test-1a_content")
key = "tests/minimal.example.com/manifests/etcd/events-master-us-test-1a.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test-1a" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test-1a_content")
key = "tests/minimal.example.com/manifests/etcd/main-master-us-test-1a.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content")
key = "tests/minimal.example.com/manifests/static/kube-apiserver-healthcheck.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-aws-cloud-controller-addons-k8s-io-k8s-1-18" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-aws-cloud-controller.addons.k8s.io-k8s-1.18_content")
key = "tests/minimal.example.com/addons/aws-cloud-controller.addons.k8s.io/k8s-1.18.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-aws-ebs-csi-driver-addons-k8s-io-k8s-1-17" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-aws-ebs-csi-driver.addons.k8s.io-k8s-1.17_content")
key = "tests/minimal.example.com/addons/aws-ebs-csi-driver.addons.k8s.io/k8s-1.17.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-bootstrap" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-bootstrap_content")
key = "tests/minimal.example.com/addons/bootstrap-channel.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-coredns-addons-k8s-io-k8s-1-12" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content")
key = "tests/minimal.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content")
key = "tests/minimal.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content")
key = "tests/minimal.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content")
key = "tests/minimal.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-limit-range-addons-k8s-io" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-limit-range.addons.k8s.io_content")
key = "tests/minimal.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-node-termination-handler-aws-k8s-1-11" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-node-termination-handler.aws-k8s-1.11_content")
key = "tests/minimal.example.com/addons/node-termination-handler.aws/k8s-1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-storage-aws-addons-k8s-io-v1-15-0" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-storage-aws.addons.k8s.io-v1.15.0_content")
key = "tests/minimal.example.com/addons/storage-aws.addons.k8s.io/v1.15.0.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "nodeupconfig-master-us-test-1a" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test-1a_content")
key = "tests/minimal.example.com/igconfig/control-plane/master-us-test-1a/nodeupconfig.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "nodeupconfig-nodes" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content")
key = "tests/minimal.example.com/igconfig/node/nodes/nodeupconfig.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_security_group" "masters-minimal-example-com" {
description = "Security group for masters"
name = "masters.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "masters.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_security_group" "nodes-minimal-example-com" {
description = "Security group for nodes"
name = "nodes.minimal.example.com"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "nodes.minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-masters-minimal-example-com" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-22to22-nodes-minimal-example-com" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 22
protocol = "tcp"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-0-0-0-0--0-ingress-tcp-443to443-masters-minimal-example-com" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 443
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "from-__--0-ingress-tcp-22to22-masters-minimal-example-com" {
from_port = 22
ipv6_cidr_blocks = ["::/0"]
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-__--0-ingress-tcp-22to22-nodes-minimal-example-com" {
from_port = 22
ipv6_cidr_blocks = ["::/0"]
protocol = "tcp"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 22
type = "ingress"
}
resource "aws_security_group_rule" "from-__--0-ingress-tcp-443to443-masters-minimal-example-com" {
from_port = 443
ipv6_cidr_blocks = ["::/0"]
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-egress-all-0to0-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-egress-all-0to0-__--0" {
from_port = 0
ipv6_cidr_blocks = ["::/0"]
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-ingress-all-0to0-masters-minimal-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-masters-minimal-example-com-ingress-all-0to0-nodes-minimal-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
source_security_group_id = aws_security_group.masters-minimal-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-egress-all-0to0-0-0-0-0--0" {
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-egress-all-0to0-__--0" {
from_port = 0
ipv6_cidr_blocks = ["::/0"]
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-all-0to0-nodes-minimal-example-com" {
from_port = 0
protocol = "-1"
security_group_id = aws_security_group.nodes-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 0
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-1to2379-masters-minimal-example-com" {
from_port = 1
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 2379
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-2382to4000-masters-minimal-example-com" {
from_port = 2382
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 4000
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-tcp-4003to65535-masters-minimal-example-com" {
from_port = 4003
protocol = "tcp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "from-nodes-minimal-example-com-ingress-udp-1to65535-masters-minimal-example-com" {
from_port = 1
protocol = "udp"
security_group_id = aws_security_group.masters-minimal-example-com.id
source_security_group_id = aws_security_group.nodes-minimal-example-com.id
to_port = 65535
type = "ingress"
}
resource "aws_sqs_queue" "minimal-example-com-nth" {
message_retention_seconds = 300
name = "minimal-example-com-nth"
policy = file("${path.module}/data/aws_sqs_queue_minimal-example-com-nth_policy")
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal-example-com-nth"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_subnet" "us-test-1a-minimal-example-com" {
availability_zone = "us-test-1a"
cidr_block = "172.20.32.0/19"
enable_resource_name_dns_a_record_on_launch = true
private_dns_hostname_type_on_launch = "resource-name"
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "us-test-1a.minimal.example.com"
"SubnetType" = "Public"
"kubernetes.io/cluster/minimal.example.com" = "owned"
"kubernetes.io/role/elb" = "1"
"kubernetes.io/role/internal-elb" = "1"
}
vpc_id = aws_vpc.minimal-example-com.id
}
resource "aws_vpc" "minimal-example-com" {
assign_generated_ipv6_cidr_block = true
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_vpc_dhcp_options" "minimal-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
"KubernetesCluster" = "minimal.example.com"
"Name" = "minimal.example.com"
"kubernetes.io/cluster/minimal.example.com" = "owned"
}
}
resource "aws_vpc_dhcp_options_association" "minimal-example-com" {
dhcp_options_id = aws_vpc_dhcp_options.minimal-example-com.id
vpc_id = aws_vpc.minimal-example-com.id
}
terraform {
required_version = ">= 0.15.0"
required_providers {
aws = {
"configuration_aliases" = [aws.files]
"source" = "hashicorp/aws"
"version" = ">= 5.0.0"
}
}
}

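Because the terraform block above declares configuration_aliases = [aws.files], a consuming root module must pass the aliased provider explicitly. A minimal sketch, assuming the generated module has been saved to a local directory (the module path and region are assumptions, not part of this output):

cat > main.tf <<'EOF'
provider "aws" {
  region = "us-test-1"
}

provider "aws" {
  alias  = "files"
  region = "us-test-1"
}

module "cluster" {
  source = "./minimal.example.com" # illustrative path to the generated module
  providers = {
    aws       = aws
    aws.files = aws.files
  }
}
EOF
terraform init && terraform plan
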
View File

@ -1 +0,0 @@
{"source":["aws.autoscaling"],"detail-type":["EC2 Instance-terminate Lifecycle Action"]}

View File

@ -1 +0,0 @@
{"source": ["aws.health"],"detail-type": ["AWS Health Event"],"detail": {"service": ["EC2"],"eventTypeCategory": ["scheduledChange"]}}

View File

@ -1 +0,0 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Instance State-change Notification"]}

View File

@ -1 +0,0 @@
{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]}

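The four JSON documents above are EventBridge event patterns. A pattern can be checked offline against a sample event with the AWS CLI; the event below is a hand-written Spot interruption warning (the id, account number, and instance id are made up) and should yield {"Result": true} against the SpotInterruption pattern:

aws events test-event-pattern \
  --event-pattern '{"source": ["aws.ec2"],"detail-type": ["EC2 Spot Instance Interruption Warning"]}' \
  --event '{"id":"11111111-2222-3333-4444-555555555555","detail-type":"EC2 Spot Instance Interruption Warning","source":"aws.ec2","account":"123456789012","time":"2025-01-01T00:00:00Z","region":"us-test-1","resources":["arn:aws:ec2:us-test-1:123456789012:instance/i-0123456789abcdef0"],"detail":{"instance-id":"i-0123456789abcdef0","instance-action":"terminate"}}'
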
View File

@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -1,10 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -1,10 +0,0 @@
{
"Statement": [
{
"Action": "ec2:DescribeRegions",
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}

View File

@ -1,278 +0,0 @@
{
"Statement": [
{
"Action": "ec2:AttachVolume",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "privatecanal.example.com",
"aws:ResourceTag/k8s.io/role/master": "1"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/privatecanal.example.com/*"
},
{
"Action": [
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:GetObject",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/privatecanal.example.com/backups/etcd/main/*"
},
{
"Action": [
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:GetObject",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/privatecanal.example.com/backups/etcd/events/*"
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-write-bucket"
]
},
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:GetHostedZone",
"route53:ListResourceRecordSets"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "privatecanal.example.com",
"ec2:CreateAction": [
"CreateVolume",
"CreateSnapshot"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:snapshot/*",
"arn:aws-test:ec2:*:*:volume/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "privatecanal.example.com"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:snapshot/*",
"arn:aws-test:ec2:*:*:volume/*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "privatecanal.example.com",
"ec2:CreateAction": [
"CreateSecurityGroup"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:security-group/*"
]
},
{
"Action": [
"ec2:CreateTags",
"ec2:DeleteTags"
],
"Condition": {
"Null": {
"aws:RequestTag/KubernetesCluster": "true"
},
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "privatecanal.example.com"
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:security-group/*"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeScalingActivities",
"autoscaling:DescribeTags",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeImages",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeLaunchTemplateVersions",
"ec2:DescribeRegions",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeTags",
"ec2:DescribeVolumes",
"ec2:DescribeVolumesModifications",
"ec2:DescribeVpcs",
"ec2:GetInstanceTypesFromInstanceRequirements",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroupAttributes",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"iam:CreateServiceLinkedRole",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:CreateGrant",
"kms:Decrypt",
"kms:DescribeKey",
"kms:Encrypt",
"kms:GenerateDataKey*",
"kms:GenerateRandom",
"kms:ReEncrypt*",
"sqs:DeleteMessage",
"sqs:ReceiveMessage"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"autoscaling:CompleteLifecycleAction",
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyVolume",
"ec2:RevokeSecurityGroupIngress",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:ModifyTargetGroup",
"elasticloadbalancing:ModifyTargetGroupAttributes",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
],
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "privatecanal.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:CreateSecurityGroup",
"ec2:CreateSnapshot",
"ec2:CreateVolume",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateTargetGroup"
],
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "privatecanal.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": "ec2:CreateSecurityGroup",
"Effect": "Allow",
"Resource": "arn:aws-test:ec2:*:*:vpc/*"
}
],
"Version": "2012-10-17"
}

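The control-plane policy above can be exercised without creating any IAM entities via the policy simulator. Note the fixture uses the fake arn:aws-test partition, so resource-scoped statements would need their ARNs rewritten to arn:aws before a real API call; the two actions chosen below are allowed on Resource "*" and should come back as allowed. A sketch, assuming the document is saved as masters_policy.json:

aws iam simulate-custom-policy \
  --policy-input-list file://masters_policy.json \
  --action-names ec2:DescribeRegions sqs:ReceiveMessage
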
View File

@ -1,30 +0,0 @@
{
"Statement": [
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingInstances",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}

View File

@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -1,134 +0,0 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
echo "== Downloading $1 with hash $2 from $3 =="
local -r file="$1"
local -r hash="$2"
local -a urls
IFS=, read -r -a urls <<< "$3"
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O ${file} --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "== Downloading ${url} using ${cmd} =="
if ! (${cmd} "${url}"); then
echo "== Failed to download ${url} using ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Failed to validate hash for ${url} =="
rm -f "${file}"
else
echo "== Downloaded ${url} with hash ${hash} =="
return 0
fi
done
done
echo "== All downloads failed; sleeping before retrying =="
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== File ${file} is corrupted; hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "== Running nodeup =="
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "== Failed to initialize the machine ID; ensure machine-id configured =="
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ClusterName: privatecanal.example.com
ConfigBase: memfs://clusters.example.com/privatecanal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: ControlPlane
NodeupConfigHash: KaerzmSXpT2iOE0EbQmw1+1nNAbgbo372K9d0m6j4tY=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -1,157 +0,0 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
echo "== Downloading $1 with hash $2 from $3 =="
local -r file="$1"
local -r hash="$2"
local -a urls
IFS=, read -r -a urls <<< "$3"
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O ${file} --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo ${file} --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O ${file} --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "== Downloading ${url} using ${cmd} =="
if ! (${cmd} "${url}"); then
echo "== Failed to download ${url} using ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Failed to validate hash for ${url} =="
rm -f "${file}"
else
echo "== Downloaded ${url} with hash ${hash} =="
return 0
fi
done
done
echo "== All downloads failed; sleeping before retrying =="
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum "${file}" | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== File ${file} is corrupted; hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "== Running nodeup =="
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "== Failed to initialize the machine ID; ensure machine-id configured =="
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ClusterName: privatecanal.example.com
ConfigServer:
CACertificates: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
servers:
- https://kops-controller.internal.privatecanal.example.com:3988/
InstanceGroupName: nodes
InstanceGroupRole: Node
NodeupConfigHash: utvltpPR5u6Y3FqNvbkihYM6yb3YEI4AgT263bwK7Xk=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

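Both user-data scripts share the same download-or-bust/validate-hash retry loop. The hash check itself can be reproduced standalone; a minimal sketch, assuming a previously downloaded ./nodeup binary and reusing the amd64 hash pinned at the top of the script:

expected=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
actual=$(sha256sum ./nodeup | awk '{ print $1 }')
if [[ "${actual}" == "${expected}" ]]; then
  echo "hash OK"
else
  echo "hash mismatch: got ${actual}"
fi
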
Some files were not shown because too many files have changed in this diff.