Merge pull request #14712 from hakman/gce_integration_tests

gce: Add integration tests for clusters with many addons
Commit f1c1a50391 by Kubernetes Prow Robot, 2022-12-03 12:09:54 -08:00, committed by GitHub.
80 changed files with 16771 additions and 14 deletions
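For reference, the new GCE tests can be run in isolation with a targeted go test invocation. This is a sketch rather than part of the PR: the package path (./cmd/kops/) is inferred from the ../../tests/integration/... fixture paths referenced in the hunks below, so adjust it if the test files live elsewhere.

    # Run only the GCE integration tests touched by this PR (package path assumed)
    go test -v -run 'TestCreateClusterMinimalGCE|TestCreateClusterHAGCE|TestManyAddonsGCE' ./cmd/kops/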

@ -95,6 +95,11 @@ func TestCreateClusterHA(t *testing.T) {
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/ha_encrypt", "v1alpha2")
}
// TestCreateClusterMinimalGCE runs kops create cluster minimal.example.com --cloud gce --zones us-test1-a
func TestCreateClusterMinimalGCE(t *testing.T) {
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.26-gce", "v1alpha2")
}
// TestCreateClusterHAGCE runs kops create cluster ha-gce.example.com --cloud gce --zones us-test1-a,us-test1-b,us-test1-c --master-zones us-test1-a,us-test1-b,us-test1-c
func TestCreateClusterHAGCE(t *testing.T) {
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/ha_gce", "v1alpha2")

@ -198,13 +198,20 @@ const (
awsAuthenticatorAddon = "authentication.aws-k8s-1.12"
awsCCMAddon = "aws-cloud-controller.addons.k8s.io-k8s-1.18"
awsEBSCSIAddon = "aws-ebs-csi-driver.addons.k8s.io-k8s-1.17"
calicoAddon = "networking.projectcalico.org-k8s-1.25"
canalAddon = "networking.projectcalico.org.canal-k8s-1.25"
certManagerAddon = "certmanager.io-k8s-1.16"
ciliumAddon = "networking.cilium.io-k8s-1.16"
dnsControllerAddon = "dns-controller.addons.k8s.io-k8s-1.12"
flannelAddon = "networking.flannel-k8s-1.25"
leaderElectionAddon = "leader-migration.rbac.addons.k8s.io-k8s-1.23"
gcpCCMAddon = "gcp-cloud-controller.addons.k8s.io-k8s-1.23"
gcpPDCSIAddon = "gcp-pd-csi-driver.addons.k8s.io-k8s-1.23"
calicoAddon = "networking.projectcalico.org-k8s-1.25"
canalAddon = "networking.projectcalico.org.canal-k8s-1.25"
ciliumAddon = "networking.cilium.io-k8s-1.16"
flannelAddon = "networking.flannel-k8s-1.25"
certManagerAddon = "certmanager.io-k8s-1.16"
clusterAutoscalerAddon = "cluster-autoscaler.addons.k8s.io-k8s-1.15"
dnsControllerAddon = "dns-controller.addons.k8s.io-k8s-1.12"
leaderElectionAddon = "leader-migration.rbac.addons.k8s.io-k8s-1.23"
metricsServerAddon = "metrics-server.addons.k8s.io-k8s-1.11"
)
// TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
@ -620,6 +627,7 @@ func TestManyAddons(t *testing.T) {
"networking.amazon-vpc-routed-eni-k8s-1.16",
"node-termination-handler.aws-k8s-1.11",
"snapshot-controller.addons.k8s.io-k8s-1.20",
metricsServerAddon,
dnsControllerAddon).
runTestTerraformAWS(t)
}
@ -642,6 +650,7 @@ func TestManyAddonsCCMIRSA(t *testing.T) {
"node-termination-handler.aws-k8s-1.11",
"snapshot-controller.addons.k8s.io-k8s-1.20",
"aws-cloud-controller.addons.k8s.io-k8s-1.18",
metricsServerAddon,
dnsControllerAddon,
).
runTestTerraformAWS(t)
@ -666,6 +675,7 @@ func TestManyAddonsCCMIRSA23(t *testing.T) {
"snapshot-controller.addons.k8s.io-k8s-1.20",
"aws-cloud-controller.addons.k8s.io-k8s-1.18",
leaderElectionAddon,
metricsServerAddon,
dnsControllerAddon,
).
runTestTerraformAWS(t)
@ -690,6 +700,7 @@ func TestManyAddonsCCMIRSA24(t *testing.T) {
"snapshot-controller.addons.k8s.io-k8s-1.20",
"aws-cloud-controller.addons.k8s.io-k8s-1.18",
leaderElectionAddon,
metricsServerAddon,
dnsControllerAddon,
).
runTestTerraformAWS(t)
@ -714,6 +725,7 @@ func TestManyAddonsCCMIRSA25(t *testing.T) {
"snapshot-controller.addons.k8s.io-k8s-1.20",
"aws-cloud-controller.addons.k8s.io-k8s-1.18",
leaderElectionAddon,
metricsServerAddon,
dnsControllerAddon,
).
runTestTerraformAWS(t)
@ -738,11 +750,26 @@ func TestManyAddonsCCMIRSA26(t *testing.T) {
"node-termination-handler.aws-k8s-1.11",
"snapshot-controller.addons.k8s.io-k8s-1.20",
"aws-cloud-controller.addons.k8s.io-k8s-1.18",
metricsServerAddon,
dnsControllerAddon,
).
runTestTerraformAWS(t)
}
func TestManyAddonsGCE(t *testing.T) {
t.Setenv("KOPS_RUN_TOO_NEW_VERSION", "1")
newIntegrationTest("minimal.example.com", "many-addons-gce").
withAddons(
certManagerAddon,
clusterAutoscalerAddon,
dnsControllerAddon,
gcpCCMAddon,
gcpPDCSIAddon,
metricsServerAddon,
).
runTestTerraformGCE(t)
}
func TestCCM(t *testing.T) {
newIntegrationTest("minimal.example.com", "many-addons-ccm").
withAddons(
@ -755,6 +782,7 @@ func TestCCM(t *testing.T) {
"snapshot-controller.addons.k8s.io-k8s-1.20",
"aws-cloud-controller.addons.k8s.io-k8s-1.18",
dnsControllerAddon,
metricsServerAddon,
).
runTestTerraformAWS(t)
}
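Because the expected addon set now includes metricsServerAddon (and, for the GCE scenario, the GCP CCM and PD CSI addons), the golden files in the rest of this diff are regenerated output rather than hand-edited YAML. A sketch of the usual refresh step, assuming the repository's expected-output script is hack/update-expected.sh:

    # Regenerate expected addon manifests and Terraform for the integration tests (script path assumed)
    ./hack/update-expected.sh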

@ -0,0 +1,94 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
name: private.example.com
spec:
api:
dns: {}
authorization:
rbac: {}
channel: stable
cloudConfig: {}
cloudProvider: gce
configBase: memfs://tests/private.example.com
etcdClusters:
- cpuRequest: 200m
etcdMembers:
- instanceGroup: control-plane-us-test1-a
name: a
memoryRequest: 100Mi
name: main
- cpuRequest: 100m
etcdMembers:
- instanceGroup: control-plane-us-test1-a
name: a
memoryRequest: 100Mi
name: events
iam:
allowContainerRegistry: true
legacy: false
kubelet:
anonymousAuth: false
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: v1.26.0
masterPublicName: api.private.example.com
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
project: testproject
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 10.0.16.0/20
name: us-test1
region: us-test1
type: Public
topology:
dns:
type: Public
masters: public
nodes: public
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: private.example.com
name: control-plane-us-test1-a
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20221018
machineType: e2-medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test1
zones:
- us-test1-a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: private.example.com
name: nodes-us-test1-a
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20221018
machineType: n1-standard-2
maxSize: 1
minSize: 1
role: Node
subnets:
- us-test1
zones:
- us-test1-a

@ -0,0 +1,7 @@
ClusterName: private.example.com
Zones:
- us-test1-a
CloudProvider: gce
Networking: cni
KubernetesVersion: v1.26.0
Project: testproject
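The create-cluster options fixture above corresponds roughly to the following invocation; the flag names are an assumed translation of the YAML fields, not something stated in the PR:

    kops create cluster private.example.com --cloud gce --zones us-test1-a --networking cni --kubernetes-version v1.26.0 --project testproject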

@ -159,6 +159,7 @@ kubeAPIServer:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@ -260,7 +261,7 @@ CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: ControlPlane
NodeupConfigHash: 3t8PVuvgv3pIe8HSpXwbiGSecwtkmxuCpdcsCv8GcKU=
NodeupConfigHash: HkujD3DrMNe8BVULP3uODaLAVwjqbwq26texrG5QzP0=
__EOF_KUBE_ENV

@ -92,6 +92,7 @@ spec:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@ -203,6 +204,8 @@ spec:
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
masterPublicName: api.minimal.example.com
metricsServer:
enabled: true
networkCIDR: 172.20.0.0/16
networking:
amazonvpc: {}

@ -46,6 +46,14 @@ spec:
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io
version: 9.99.0
- id: k8s-1.11
manifest: metrics-server.addons.k8s.io/k8s-1.11.yaml
manifestHash: dcc45685fd1de2514d806f6e96f36bfc6fb18af68a8de6a9e5def5af833b1f43
name: metrics-server.addons.k8s.io
needsPKI: true
selector:
k8s-app: metrics-server
version: 9.99.0
- id: k8s-1.16
manifest: certmanager.io/k8s-1.16.yaml
manifestHash: 79bc70f8f9b7a91e97830ecaa8968a51e0c5b78318444cb5a44935e8f9f73aa1

@ -0,0 +1,267 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/metrics
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
k8s-app: metrics-server
template:
metadata:
creationTimestamp: null
labels:
k8s-app: metrics-server
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- --secure-port=4443
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-preferred-address-types=Hostname
- --cert-dir=/tmp
- --kubelet-insecure-tls
image: registry.k8s.io/metrics-server/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 50m
memory: 128Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: metrics-server
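The manifest above registers the v1beta1.metrics.k8s.io APIService, which is what exposes the resource-metrics API to clients. As a usage note (not part of the fixture), once the deployment is healthy the metrics can be queried with kubectl:

    kubectl top nodes
    kubectl top pods -A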

@ -18,6 +18,7 @@ APIServerConfig:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:

@ -10,6 +10,8 @@ spec:
enabled: true
clusterAutoscaler:
enabled: true
metricsServer:
enabled: true
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable

@ -860,6 +860,14 @@ resource "aws_s3_object" "minimal-example-com-addons-limit-range-addons-k8s-io"
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-metrics-server-addons-k8s-io-k8s-1-11" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-metrics-server.addons.k8s.io-k8s-1.11_content")
key = "clusters.example.com/minimal.example.com/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-networking-amazon-vpc-routed-eni-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-networking.amazon-vpc-routed-eni-k8s-1.16_content")

@ -161,6 +161,7 @@ kubeAPIServer:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@ -264,7 +265,7 @@ CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: ControlPlane
NodeupConfigHash: 3sXGYG1eUIzbjDWp82UlwEwkuJOHNsbc3dfXYgve/oc=
NodeupConfigHash: vebRlz4lo6HLjGk/9j8DQH7dQdUlNauVT0P5Gj3pyV4=
__EOF_KUBE_ENV

@ -94,6 +94,7 @@ spec:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@ -207,6 +208,8 @@ spec:
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
masterPublicName: api.minimal.example.com
metricsServer:
enabled: true
networkCIDR: 172.20.0.0/16
networking:
amazonvpc: {}

@ -53,6 +53,14 @@ spec:
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io
version: 9.99.0
- id: k8s-1.11
manifest: metrics-server.addons.k8s.io/k8s-1.11.yaml
manifestHash: 2c50298a62f772220623daac250303f2757a9393c63a441acd89f077f1b4c2f7
name: metrics-server.addons.k8s.io
needsPKI: true
selector:
k8s-app: metrics-server
version: 9.99.0
- id: k8s-1.16
manifest: certmanager.io/k8s-1.16.yaml
manifestHash: 79bc70f8f9b7a91e97830ecaa8968a51e0c5b78318444cb5a44935e8f9f73aa1

@ -0,0 +1,267 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/metrics
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
k8s-app: metrics-server
template:
metadata:
creationTimestamp: null
labels:
k8s-app: metrics-server
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- --secure-port=4443
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-preferred-address-types=Hostname
- --cert-dir=/tmp
- --kubelet-insecure-tls
image: registry.k8s.io/metrics-server/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 50m
memory: 128Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: metrics-server

@ -18,6 +18,7 @@ APIServerConfig:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:

@ -10,6 +10,8 @@ spec:
enabled: true
clusterAutoscaler:
enabled: true
metricsServer:
enabled: true
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable

@ -868,6 +868,14 @@ resource "aws_s3_object" "minimal-example-com-addons-limit-range-addons-k8s-io"
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-metrics-server-addons-k8s-io-k8s-1-11" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-metrics-server.addons.k8s.io-k8s-1.11_content")
key = "clusters.example.com/minimal.example.com/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-networking-amazon-vpc-routed-eni-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-networking.amazon-vpc-routed-eni-k8s-1.16_content")

@ -161,6 +161,7 @@ kubeAPIServer:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@ -262,7 +263,7 @@ CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: ControlPlane
NodeupConfigHash: mQfIup7T1f/jWcTUQ/LDQg1A7VIF6WYBi/x7Pmdn6GI=
NodeupConfigHash: vDmpdOVGU9ZTL7ZIoCoTpQBmyV+/IjIQqfnfEKbBBdY=
__EOF_KUBE_ENV

@ -95,6 +95,7 @@ spec:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@ -206,6 +207,8 @@ spec:
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
masterPublicName: api.minimal.example.com
metricsServer:
enabled: true
networkCIDR: 172.20.0.0/16
networking:
amazonvpc: {}

@ -53,6 +53,14 @@ spec:
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io
version: 9.99.0
- id: k8s-1.11
manifest: metrics-server.addons.k8s.io/k8s-1.11.yaml
manifestHash: 2c50298a62f772220623daac250303f2757a9393c63a441acd89f077f1b4c2f7
name: metrics-server.addons.k8s.io
needsPKI: true
selector:
k8s-app: metrics-server
version: 9.99.0
- id: k8s-1.16
manifest: certmanager.io/k8s-1.16.yaml
manifestHash: 79bc70f8f9b7a91e97830ecaa8968a51e0c5b78318444cb5a44935e8f9f73aa1

@ -0,0 +1,267 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/metrics
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
k8s-app: metrics-server
template:
metadata:
creationTimestamp: null
labels:
k8s-app: metrics-server
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- --secure-port=4443
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-preferred-address-types=Hostname
- --cert-dir=/tmp
- --kubelet-insecure-tls
image: registry.k8s.io/metrics-server/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 50m
memory: 128Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: metrics-server

@ -18,6 +18,7 @@ APIServerConfig:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:

@ -10,6 +10,8 @@ spec:
enabled: true
clusterAutoscaler:
enabled: true
metricsServer:
enabled: true
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable

@ -844,6 +844,14 @@ resource "aws_s3_object" "minimal-example-com-addons-limit-range-addons-k8s-io"
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-metrics-server-addons-k8s-io-k8s-1-11" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-metrics-server.addons.k8s.io-k8s-1.11_content")
key = "clusters.example.com/minimal.example.com/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-networking-amazon-vpc-routed-eni-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-networking.amazon-vpc-routed-eni-k8s-1.16_content")

@ -161,6 +161,7 @@ kubeAPIServer:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@ -262,7 +263,7 @@ CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: ControlPlane
NodeupConfigHash: zyPNtSqKD3+5G1/pihAX8z7SE9tkPpbVbQ3A5AW6mYk=
NodeupConfigHash: 7cU4BxPnAf56/RrB7ld88nolR3tfWzQHFbCjQVMJq8U=
__EOF_KUBE_ENV

@ -94,6 +94,7 @@ spec:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@ -205,6 +206,8 @@ spec:
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
masterPublicName: api.minimal.example.com
metricsServer:
enabled: true
networkCIDR: 172.20.0.0/16
networking:
amazonvpc: {}

@ -53,6 +53,14 @@ spec:
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io
version: 9.99.0
- id: k8s-1.11
manifest: metrics-server.addons.k8s.io/k8s-1.11.yaml
manifestHash: 2c50298a62f772220623daac250303f2757a9393c63a441acd89f077f1b4c2f7
name: metrics-server.addons.k8s.io
needsPKI: true
selector:
k8s-app: metrics-server
version: 9.99.0
- id: k8s-1.16
manifest: certmanager.io/k8s-1.16.yaml
manifestHash: 79bc70f8f9b7a91e97830ecaa8968a51e0c5b78318444cb5a44935e8f9f73aa1

@ -0,0 +1,267 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/metrics
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
k8s-app: metrics-server
template:
metadata:
creationTimestamp: null
labels:
k8s-app: metrics-server
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- --secure-port=4443
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-preferred-address-types=Hostname
- --cert-dir=/tmp
- --kubelet-insecure-tls
image: registry.k8s.io/metrics-server/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 50m
memory: 128Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: metrics-server

@ -18,6 +18,7 @@ APIServerConfig:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:

@ -10,6 +10,8 @@ spec:
enabled: true
clusterAutoscaler:
enabled: true
metricsServer:
enabled: true
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable

@ -844,6 +844,14 @@ resource "aws_s3_object" "minimal-example-com-addons-limit-range-addons-k8s-io"
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-metrics-server-addons-k8s-io-k8s-1-11" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-metrics-server.addons.k8s.io-k8s-1.11_content")
key = "clusters.example.com/minimal.example.com/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-networking-amazon-vpc-routed-eni-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-networking.amazon-vpc-routed-eni-k8s-1.16_content")

@ -161,6 +161,7 @@ kubeAPIServer:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@ -262,7 +263,7 @@ CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: ControlPlane
NodeupConfigHash: bTpwLpw5JvocD5V7OCaU6Za1mD6J1dUs/6y7ZOdIKNA=
NodeupConfigHash: B6r4yQceRNy3YAkeqah+zBOUHbHQu6+4r8Rortvh+YE=
__EOF_KUBE_ENV

@ -94,6 +94,7 @@ spec:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@ -205,6 +206,8 @@ spec:
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
masterPublicName: api.minimal.example.com
metricsServer:
enabled: true
networkCIDR: 172.20.0.0/16
networking:
amazonvpc: {}

@ -46,6 +46,14 @@ spec:
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io
version: 9.99.0
- id: k8s-1.11
manifest: metrics-server.addons.k8s.io/k8s-1.11.yaml
manifestHash: 2c50298a62f772220623daac250303f2757a9393c63a441acd89f077f1b4c2f7
name: metrics-server.addons.k8s.io
needsPKI: true
selector:
k8s-app: metrics-server
version: 9.99.0
- id: k8s-1.16
manifest: certmanager.io/k8s-1.16.yaml
manifestHash: 79bc70f8f9b7a91e97830ecaa8968a51e0c5b78318444cb5a44935e8f9f73aa1

@ -0,0 +1,267 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/metrics
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
k8s-app: metrics-server
template:
metadata:
creationTimestamp: null
labels:
k8s-app: metrics-server
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- --secure-port=4443
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-preferred-address-types=Hostname
- --cert-dir=/tmp
- --kubelet-insecure-tls
image: registry.k8s.io/metrics-server/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 50m
memory: 128Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: metrics-server

@ -18,6 +18,7 @@ APIServerConfig:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:

@ -10,6 +10,8 @@ spec:
enabled: true
clusterAutoscaler:
enabled: true
metricsServer:
enabled: true
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable

@ -836,6 +836,14 @@ resource "aws_s3_object" "minimal-example-com-addons-limit-range-addons-k8s-io"
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-metrics-server-addons-k8s-io-k8s-1-11" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-metrics-server.addons.k8s.io-k8s-1.11_content")
key = "clusters.example.com/minimal.example.com/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-networking-amazon-vpc-routed-eni-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-networking.amazon-vpc-routed-eni-k8s-1.16_content")

@ -159,6 +159,7 @@ kubeAPIServer:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@ -260,7 +261,7 @@ CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: ControlPlane
NodeupConfigHash: kaA+6dZlwrU4C16vLXMCDClSSZWfRHkvSuxPSk1fNQ0=
NodeupConfigHash: dq6Cd+AjJ6Bsl113E2c/4UirbVV+Aq4cEv5g1UKzQEo=
__EOF_KUBE_ENV

@ -91,6 +91,7 @@ spec:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@ -202,6 +203,8 @@ spec:
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
masterPublicName: api.minimal.example.com
metricsServer:
enabled: true
networkCIDR: 172.20.0.0/16
networking:
amazonvpc: {}

@ -46,6 +46,14 @@ spec:
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io
version: 9.99.0
- id: k8s-1.11
manifest: metrics-server.addons.k8s.io/k8s-1.11.yaml
manifestHash: dcc45685fd1de2514d806f6e96f36bfc6fb18af68a8de6a9e5def5af833b1f43
name: metrics-server.addons.k8s.io
needsPKI: true
selector:
k8s-app: metrics-server
version: 9.99.0
- id: k8s-1.16
manifest: certmanager.io/k8s-1.16.yaml
manifestHash: 79bc70f8f9b7a91e97830ecaa8968a51e0c5b78318444cb5a44935e8f9f73aa1

@ -0,0 +1,267 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/metrics
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
k8s-app: metrics-server
template:
metadata:
creationTimestamp: null
labels:
k8s-app: metrics-server
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- --secure-port=4443
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-preferred-address-types=Hostname
- --cert-dir=/tmp
- --kubelet-insecure-tls
image: registry.k8s.io/metrics-server/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 50m
memory: 128Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: metrics-server

@ -18,6 +18,7 @@ APIServerConfig:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:

@ -10,6 +10,8 @@ spec:
enabled: true
clusterAutoscaler:
enabled: true
metricsServer:
enabled: true
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable

@ -653,6 +653,14 @@ resource "aws_s3_object" "minimal-example-com-addons-limit-range-addons-k8s-io"
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-metrics-server-addons-k8s-io-k8s-1-11" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-metrics-server.addons.k8s.io-k8s-1.11_content")
key = "clusters.example.com/minimal.example.com/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-networking-amazon-vpc-routed-eni-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-networking.amazon-vpc-routed-eni-k8s-1.16_content")

@ -0,0 +1,221 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: minimal.example.com
spec:
api:
dns: {}
authorization:
rbac: {}
certManager:
enabled: true
channel: stable
cloudConfig:
gcpPDCSIDriver:
enabled: true
manageStorageClasses: true
multizone: true
nodeTags: minimal-example-com-k8s-io-role-node
cloudControllerManager:
allocateNodeCIDRs: true
cidrAllocatorType: CloudAllocator
clusterCIDR: 100.96.0.0/11
clusterName: minimal-example-com
image: k8scloudprovidergcp/cloud-controller-manager:latest
leaderElection:
leaderElect: true
cloudProvider: gce
clusterAutoscaler:
awsUseStaticInstanceList: false
balanceSimilarNodeGroups: false
enabled: true
expander: random
image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.25.0
maxNodeProvisionTime: 15m0s
newPodScaleUpDelay: 0s
scaleDownDelayAfterAdd: 10m0s
scaleDownUnneededTime: 10m0s
scaleDownUnreadyTime: 20m0s
scaleDownUtilizationThreshold: "0.5"
skipNodesWithLocalStorage: true
skipNodesWithSystemPods: true
clusterDNSDomain: cluster.local
configBase: memfs://tests/minimal.example.com
configStore: memfs://tests/minimal.example.com
containerRuntime: containerd
containerd:
logLevel: info
runc:
version: 1.1.4
version: 1.6.10
dnsZone: "1"
docker:
skipInstall: true
etcdClusters:
- backups:
backupStore: memfs://tests/minimal.example.com/backups/etcd/main
cpuRequest: 200m
etcdMembers:
- instanceGroup: master-us-test1-a
name: a
memoryRequest: 100Mi
name: main
version: 3.5.4
- backups:
backupStore: memfs://tests/minimal.example.com/backups/etcd/events
cpuRequest: 100m
etcdMembers:
- instanceGroup: master-us-test1-a
name: a
memoryRequest: 100Mi
name: events
version: 3.5.4
externalDns:
provider: dns-controller
iam:
allowContainerRegistry: true
legacy: false
keyStore: memfs://tests/minimal.example.com/pki
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: external
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: registry.k8s.io/kube-apiserver:v1.26.0-alpha.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: external
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: registry.k8s.io/kube-controller-manager:v1.26.0-alpha.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeDNS:
cacheMaxConcurrent: 150
cacheMaxSize: 1000
cpuRequest: 100m
domain: cluster.local
memoryLimit: 170Mi
memoryRequest: 70Mi
nodeLocalDNS:
cpuRequest: 25m
enabled: false
image: registry.k8s.io/dns/k8s-dns-node-cache:1.22.8
memoryRequest: 5Mi
provider: CoreDNS
serverIP: 100.64.0.10
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.26.0-alpha.0
logLevel: 2
kubeScheduler:
image: registry.k8s.io/kube-scheduler:v1.26.0-alpha.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hairpinMode: promiscuous-bridge
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
podInfraContainerImage: registry.k8s.io/pause:3.6
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: 1.26.0-alpha.0
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hairpinMode: promiscuous-bridge
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
podInfraContainerImage: registry.k8s.io/pause:3.6
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterPublicName: api.minimal.example.com
metricsServer:
enabled: true
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
project: testproject
secretStore: memfs://tests/minimal.example.com/secrets
serviceClusterIPRange: 100.64.0.0/13
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 10.0.16.0/20
name: us-test1
region: us-test1
type: Public
topology:
dns:
type: Public
masters: public
nodes: public

View File

@@ -0,0 +1,4 @@
{
"memberCount": 1,
"etcdVersion": "3.5.4"
}

View File

@@ -0,0 +1,4 @@
{
"memberCount": 1,
"etcdVersion": "3.5.4"
}

View File

@@ -0,0 +1,61 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-events
name: etcd-manager-events
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://tests/minimal.example.com/backups/etcd/events --client-urls=https://__name__:4002
--cluster-name=etcd-events --containerized=true --dns-suffix=.internal.minimal.example.com
--grpc-port=3997 --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995
--v=6 --volume-name-tag=k8s-io-etcd-events --volume-provider=gce --volume-tag=k8s-io-cluster-name=minimal-example-com
--volume-tag=k8s-io-etcd-events --volume-tag=k8s-io-role-master=master > /tmp/pipe
2>&1
image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831
name: etcd-manager
resources:
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-events
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd-events.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@@ -0,0 +1,61 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-main
name: etcd-manager-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://tests/minimal.example.com/backups/etcd/main --client-urls=https://__name__:4001
--cluster-name=etcd --containerized=true --dns-suffix=.internal.minimal.example.com
--grpc-port=3996 --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994
--v=6 --volume-name-tag=k8s-io-etcd-main --volume-provider=gce --volume-tag=k8s-io-cluster-name=minimal-example-com
--volume-tag=k8s-io-etcd-main --volume-tag=k8s-io-role-master=master > /tmp/pipe
2>&1
image: registry.k8s.io/etcdadm/etcd-manager:v3.0.20220831
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-main
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@@ -0,0 +1,33 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
spec:
containers:
- args:
- --ca-cert=/secrets/ca.crt
- --client-cert=/secrets/client.crt
- --client-key=/secrets/client.key
image: registry.k8s.io/kops/kube-apiserver-healthcheck:1.26.0-alpha.2
livenessProbe:
httpGet:
host: 127.0.0.1
path: /.kube-apiserver-healthcheck/healthz
port: 3990
initialDelaySeconds: 5
timeoutSeconds: 5
name: healthcheck
resources: {}
securityContext:
runAsNonRoot: true
runAsUser: 10012
volumeMounts:
- mountPath: /secrets
name: healthcheck-secrets
readOnly: true
volumes:
- hostPath:
path: /etc/kubernetes/kube-apiserver-healthcheck/secrets
type: Directory
name: healthcheck-secrets
status: {}

View File

@@ -0,0 +1,130 @@
kind: Addons
metadata:
creationTimestamp: null
name: bootstrap
spec:
addons:
- id: k8s-1.16
manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml
manifestHash: 466d7793ef5737f2d4ea995722497ba2593472435466de890097fe7df801eddf
name: kops-controller.addons.k8s.io
needsRollingUpdate: control-plane
selector:
k8s-addon: kops-controller.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: coredns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 6a1db11adb764a3138401cf615c57780df760e7688d4d0d94bd434d6a6b9d370
name: coredns.addons.k8s.io
selector:
k8s-addon: coredns.addons.k8s.io
version: 9.99.0
- id: k8s-1.9
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81
name: kubelet-api.rbac.addons.k8s.io
selector:
k8s-addon: kubelet-api.rbac.addons.k8s.io
version: 9.99.0
- manifest: limit-range.addons.k8s.io/v1.5.0.yaml
manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2
name: limit-range.addons.k8s.io
selector:
k8s-addon: limit-range.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 249c512d18260bfa1c11e3ef2d4067afb9eb41808c12bdee96f4bc157e2b8298
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 9.99.0
- id: k8s-1.15
manifest: cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml
manifestHash: 36b2e5e73a105fa71f8732b7cd0a66f87bc40e8e36e53ae01e1cf5477b2aad4b
name: cluster-autoscaler.addons.k8s.io
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io
version: 9.99.0
- id: k8s-1.11
manifest: metrics-server.addons.k8s.io/k8s-1.11.yaml
manifestHash: 2c50298a62f772220623daac250303f2757a9393c63a441acd89f077f1b4c2f7
name: metrics-server.addons.k8s.io
needsPKI: true
selector:
k8s-app: metrics-server
version: 9.99.0
- id: k8s-1.16
manifest: certmanager.io/k8s-1.16.yaml
manifestHash: 79bc70f8f9b7a91e97830ecaa8968a51e0c5b78318444cb5a44935e8f9f73aa1
name: certmanager.io
selector: null
version: 9.99.0
- id: v1.7.0
manifest: storage-gce.addons.k8s.io/v1.7.0.yaml
manifestHash: 6c6d100b10243fc62e0195706aa862b42632faeac05a117d07a263a2c5a8e87c
name: storage-gce.addons.k8s.io
selector:
k8s-addon: storage-gce.addons.k8s.io
version: 9.99.0
- id: k8s-1.23
manifest: gcp-pd-csi-driver.addons.k8s.io/k8s-1.23.yaml
manifestHash: 2f4bcebce4d5105537ed53697d0543a83a66cf2a4828fdc36d62471c67343ca3
name: gcp-pd-csi-driver.addons.k8s.io
selector:
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
version: 9.99.0
- id: v0.1.12
manifest: metadata-proxy.addons.k8s.io/v0.1.12.yaml
manifestHash: 10cdb84ad4055ddadd11022796c90676417e60c1d34f6740d9e275051f2c5e7a
name: metadata-proxy.addons.k8s.io
selector:
k8s-addon: metadata-proxy.addons.k8s.io
version: 9.99.0
- id: k8s-1.23
manifest: gcp-cloud-controller.addons.k8s.io/k8s-1.23.yaml
manifestHash: 3c47fb301c92019c8692b49ccbde4ee79060320907647d8b28a0dfbad07f1f12
name: gcp-cloud-controller.addons.k8s.io
prune:
kinds:
- kind: ConfigMap
labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops
- kind: Service
labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops
- kind: ServiceAccount
labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: DaemonSet
labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: apps
kind: Deployment
labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops
- group: apps
kind: StatefulSet
labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops
- group: policy
kind: PodDisruptionBudget
labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops
- group: rbac.authorization.k8s.io
kind: ClusterRole
labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops
- group: rbac.authorization.k8s.io
kind: ClusterRoleBinding
labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops
- group: rbac.authorization.k8s.io
kind: Role
labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
- group: rbac.authorization.k8s.io
kind: RoleBinding
labelSelector: addon.kops.k8s.io/name=gcp-cloud-controller.addons.k8s.io,app.kubernetes.io/managed-by=kops
namespaces:
- kube-system
selector:
k8s-addon: gcp-cloud-controller.addons.k8s.io
version: 9.99.0

View File

@@ -0,0 +1,385 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: cluster-autoscaler.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cluster-autoscaler
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
name: cluster-autoscaler
namespace: kube-system
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: cluster-autoscaler
---
apiVersion: v1
automountServiceAccountToken: true
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: cluster-autoscaler.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cluster-autoscaler
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
name: cluster-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: cluster-autoscaler.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cluster-autoscaler
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
name: cluster-autoscaler
rules:
- apiGroups:
- ""
resources:
- events
- endpoints
verbs:
- create
- patch
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
- create
- apiGroups:
- ""
resources:
- pods/status
verbs:
- update
- apiGroups:
- ""
resourceNames:
- cluster-autoscaler
resources:
- endpoints
verbs:
- get
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- watch
- list
- get
- update
- apiGroups:
- ""
resources:
- namespaces
- pods
- services
- replicationcontrollers
- persistentvolumeclaims
- persistentvolumes
verbs:
- watch
- list
- get
- apiGroups:
- batch
resources:
- jobs
- cronjobs
verbs:
- watch
- list
- get
- apiGroups:
- batch
- extensions
resources:
- jobs
verbs:
- get
- list
- patch
- watch
- apiGroups:
- extensions
resources:
- replicasets
- daemonsets
verbs:
- watch
- list
- get
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- watch
- list
- apiGroups:
- apps
resources:
- daemonsets
- replicasets
- statefulsets
verbs:
- watch
- list
- get
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
- csinodes
- csidrivers
- csistoragecapacities
verbs:
- watch
- list
- get
- apiGroups:
- ""
resources:
- configmaps
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- apiGroups:
- coordination.k8s.io
resourceNames:
- cluster-autoscaler
resources:
- leases
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: cluster-autoscaler.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cluster-autoscaler
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
name: cluster-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-autoscaler
subjects:
- kind: ServiceAccount
name: cluster-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: cluster-autoscaler.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cluster-autoscaler
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
name: cluster-autoscaler
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resourceNames:
- cluster-autoscaler-status
resources:
- configmaps
verbs:
- delete
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: cluster-autoscaler.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cluster-autoscaler
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
name: cluster-autoscaler
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: cluster-autoscaler
subjects:
- kind: ServiceAccount
name: cluster-autoscaler
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: cluster-autoscaler.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cluster-autoscaler
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
name: cluster-autoscaler
namespace: kube-system
spec:
ports:
- name: http
port: 8085
protocol: TCP
targetPort: 8085
selector:
app.kubernetes.io/name: cluster-autoscaler
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: cluster-autoscaler.addons.k8s.io
app.kubernetes.io/managed-by: kops
app.kubernetes.io/name: cluster-autoscaler
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
name: cluster-autoscaler
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app: cluster-autoscaler
template:
metadata:
annotations:
prometheus.io/port: "8085"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
app: cluster-autoscaler
app.kubernetes.io/name: cluster-autoscaler
k8s-addon: cluster-autoscaler.addons.k8s.io
k8s-app: cluster-autoscaler
kops.k8s.io/managed-by: kops
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- command:
- ./cluster-autoscaler
- --balance-similar-node-groups=false
- --cloud-provider=gce
- --expander=random
- --nodes=1:1:nodes
- --scale-down-utilization-threshold=0.5
- --skip-nodes-with-local-storage=true
- --skip-nodes-with-system-pods=true
- --scale-down-delay-after-add=10m0s
- --scale-down-unneeded-time=10m0s
- --scale-down-unready-time=20m0s
- --new-pod-scale-up-delay=0s
- --max-node-provision-time=15m0s
- --cordon-node-before-terminating=true
- --logtostderr=true
- --stderrthreshold=info
- --v=4
env:
- name: AWS_REGION
value: us-test1
image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.25.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /health-check
port: http
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: cluster-autoscaler
ports:
- containerPort: 8085
name: http
protocol: TCP
resources:
requests:
cpu: 100m
memory: 300Mi
dnsPolicy: ClusterFirst
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccountName: cluster-autoscaler
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
app: cluster-autoscaler
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
app: cluster-autoscaler
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule

View File

@@ -0,0 +1,383 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/cluster-service: "true"
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
data:
Corefile: |-
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local. in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: coredns
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
k8s-app: kube-dns
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: registry.k8s.io/coredns/coredns:v1.9.3
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: kube-dns
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: ScheduleAnyway
volumes:
- configMap:
name: coredns
name: config-volume
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: kube-dns
namespace: kube-system
resourceVersion: "0"
spec:
clusterIP: 100.64.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
selector:
k8s-app: kube-dns
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: kube-dns
namespace: kube-system
spec:
maxUnavailable: 50%
selector:
matchLabels:
k8s-app: kube-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- replicationcontrollers/scale
verbs:
- get
- update
- apiGroups:
- extensions
- apps
resources:
- deployments/scale
- replicasets/scale
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: coredns-autoscaler
kubernetes.io/cluster-service: "true"
name: coredns-autoscaler
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
creationTimestamp: null
labels:
k8s-app: coredns-autoscaler
kops.k8s.io/managed-by: kops
spec:
containers:
- command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
image: registry.k8s.io/cpa/cluster-proportional-autoscaler:1.8.5
name: autoscaler
resources:
requests:
cpu: 20m
memory: 10Mi
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns-autoscaler
tolerations:
- key: CriticalAddonsOnly
operator: Exists

View File

@@ -0,0 +1,138 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.26.0-alpha.2
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
strategy:
type: Recreate
template:
metadata:
creationTimestamp: null
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
kops.k8s.io/managed-by: kops
version: v1.26.0-alpha.2
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- args:
- --watch-ingress=false
- --dns=google-clouddns
- --zone=*/1
- --internal-ipv4
- --zone=*/*
- -v=2
command: null
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: registry.k8s.io/kops/dns-controller:1.26.0-alpha.2
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
dnsPolicy: Default
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccount: dns-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller

View File

@@ -0,0 +1,406 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
component: cloud-controller-manager
k8s-addon: gcp-cloud-controller.addons.k8s.io
name: cloud-controller-manager
namespace: kube-system
spec:
selector:
matchLabels:
component: cloud-controller-manager
template:
metadata:
creationTimestamp: null
labels:
component: cloud-controller-manager
kops.k8s.io/managed-by: kops
tier: control-plane
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
containers:
- args:
- --allocate-node-cidrs=true
- --cidr-allocator-type=CloudAllocator
- --cluster-cidr=100.96.0.0/11
- --cluster-name=minimal-example-com
- --leader-elect=true
- --v=2
- --cloud-provider=gce
- --use-service-account-credentials=true
- --cloud-config=/etc/kubernetes/cloud.config
command:
- /usr/local/bin/cloud-controller-manager
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: k8scloudprovidergcp/cloud-controller-manager:latest
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
host: 127.0.0.1
path: /healthz
port: 10258
scheme: HTTPS
initialDelaySeconds: 15
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 15
name: cloud-controller-manager
resources:
requests:
cpu: 200m
volumeMounts:
- mountPath: /etc/kubernetes/cloud.config
name: cloudconfig
readOnly: true
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccountName: cloud-controller-manager
tolerations:
- effect: NoSchedule
key: node.cloudprovider.kubernetes.io/uninitialized
value: "true"
- effect: NoSchedule
key: node.kubernetes.io/not-ready
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
volumes:
- hostPath:
path: /etc/kubernetes/cloud.config
type: ""
name: cloudconfig
updateStrategy:
type: RollingUpdate
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-cloud-controller.addons.k8s.io
name: cloud-controller-manager
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-cloud-controller.addons.k8s.io
name: cloud-controller-manager:apiserver-authentication-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cloud-controller-manager
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io
addonmanager.kubernetes.io/mode: Reconcile
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-cloud-controller.addons.k8s.io
name: system:cloud-controller-manager
rules:
- apiGroups:
- ""
- events.k8s.io
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- watch
- update
- apiGroups:
- coordination.k8s.io
resourceNames:
- cloud-controller-manager
resources:
- leases
verbs:
- get
- update
- apiGroups:
- ""
resources:
- endpoints
- serviceaccounts
verbs:
- create
- get
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- update
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- delete
- get
- update
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- '*'
resources:
- '*'
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts/token
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io
addonmanager.kubernetes.io/mode: Reconcile
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-cloud-controller.addons.k8s.io
name: system::leader-locking-cloud-controller-manager
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- watch
- apiGroups:
- ""
resourceNames:
- cloud-controller-manager
resources:
- configmaps
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io
addonmanager.kubernetes.io/mode: Reconcile
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-cloud-controller.addons.k8s.io
name: system:controller:cloud-node-controller
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- update
- delete
- patch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- get
- list
- update
- delete
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- list
- delete
- apiGroups:
- ""
resources:
- pods/status
verbs:
- list
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io
addonmanager.kubernetes.io/mode: Reconcile
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-cloud-controller.addons.k8s.io
name: system::leader-locking-cloud-controller-manager
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: system::leader-locking-cloud-controller-manager
subjects:
- kind: ServiceAccount
name: cloud-controller-manager
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io
addonmanager.kubernetes.io/mode: Reconcile
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-cloud-controller.addons.k8s.io
name: system:cloud-controller-manager
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:cloud-controller-manager
subjects:
- apiGroup: ""
kind: ServiceAccount
name: cloud-controller-manager
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io
addonmanager.kubernetes.io/mode: Reconcile
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-cloud-controller.addons.k8s.io
name: system:controller:cloud-node-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:controller:cloud-node-controller
subjects:
- kind: ServiceAccount
name: cloud-node-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io
addonmanager.kubernetes.io/mode: Reconcile
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-cloud-controller.addons.k8s.io
name: system:controller:pvl-controller
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
- apiGroups:
- ""
resources:
- persistentvolumeclaims
- persistentvolumes
verbs:
- list
- watch

View File

@@ -0,0 +1,855 @@
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
kubernetes.io/cluster-service: "true"
name: standard-csi
parameters:
type: pd-standard
provisioner: pd.csi.storage.gke.io
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: gce-pd-csi-driver
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-node-sa
namespace: gce-pd-csi-driver
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-controller-sa
namespace: gce-pd-csi-driver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-provisioner-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- create
- delete
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- update
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- storage.k8s.io
resources:
- csinodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshots
verbs:
- get
- list
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents
verbs:
- get
- list
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-controller-provisioner-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: csi-gce-pd-provisioner-role
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
namespace: gce-pd-csi-driver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-attacher-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- csinodes
verbs:
- get
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- storage.k8s.io
resources:
- volumeattachments/status
verbs:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-controller-attacher-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: csi-gce-pd-attacher-role
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
namespace: gce-pd-csi-driver
---
apiVersion: scheduling.k8s.io/v1
description: This priority class should be used for the GCE PD CSI driver controller
deployment only.
globalDefault: false
kind: PriorityClass
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-controller
value: 900000000
---
apiVersion: scheduling.k8s.io/v1
description: This priority class should be used for the GCE PD CSI driver node deployment
only.
globalDefault: false
kind: PriorityClass
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-node
value: 900001000
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-resizer-role
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims/status
verbs:
- update
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-resizer-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: csi-gce-pd-resizer-role
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
namespace: gce-pd-csi-driver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-controller-deploy
rules:
- apiGroups:
- policy
resourceNames:
- csi-gce-pd-controller-psp
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-controller-deploy
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: csi-gce-pd-controller-deploy
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
namespace: gce-pd-csi-driver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-node-deploy
rules:
- apiGroups:
- policy
resourceNames:
- csi-gce-pd-node-psp
resources:
- podsecuritypolicies
verbs:
- use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: csi-gce-pd-node-deploy
subjects:
- kind: ServiceAccount
name: csi-gce-pd-node-sa
namespace: gce-pd-csi-driver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: csi-gce-pd-node-deploy
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
namespace: gce-pd-csi-driver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-snapshotter-role
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotclasses
verbs:
- get
- list
- watch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents
verbs:
- create
- get
- list
- watch
- update
- delete
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-controller-snapshotter-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: csi-gce-pd-snapshotter-role
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
namespace: gce-pd-csi-driver
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
k8s-app: gcp-compute-persistent-disk-csi-driver
name: csi-gce-pd-leaderelection-role
namespace: gce-pd-csi-driver
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- watch
- list
- delete
- update
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
k8s-app: gcp-compute-persistent-disk-csi-driver
name: csi-gce-pd-controller-leaderelection-binding
namespace: gce-pd-csi-driver
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: csi-gce-pd-leaderelection-role
subjects:
- kind: ServiceAccount
name: csi-gce-pd-controller-sa
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-controller
namespace: gce-pd-csi-driver
spec:
replicas: 1
selector:
matchLabels:
app: gcp-compute-persistent-disk-csi-driver
template:
metadata:
creationTimestamp: null
labels:
app: gcp-compute-persistent-disk-csi-driver
kops.k8s.io/managed-by: kops
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: kubernetes.io/os
operator: In
values:
- linux
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- key: kubernetes.io/os
operator: In
values:
- linux
containers:
- args:
- --v=5
- --csi-address=/csi/csi.sock
- --feature-gates=Topology=true
- --http-endpoint=:22011
- --leader-election-namespace=$(PDCSI_NAMESPACE)
- --timeout=250s
- --extra-create-metadata
- --leader-election
- --default-fstype=ext4
env:
- name: PDCSI_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/sig-storage/csi-provisioner:v2.1.0
livenessProbe:
failureThreshold: 1
httpGet:
path: /healthz/leader-election
port: http-endpoint
initialDelaySeconds: 10
periodSeconds: 20
timeoutSeconds: 10
name: csi-provisioner
ports:
- containerPort: 22011
name: http-endpoint
protocol: TCP
volumeMounts:
- mountPath: /csi
name: socket-dir
- args:
- --v=5
- --csi-address=/csi/csi.sock
- --http-endpoint=:22012
- --leader-election
- --leader-election-namespace=$(PDCSI_NAMESPACE)
- --timeout=250s
env:
- name: PDCSI_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/sig-storage/csi-attacher:v3.1.0
livenessProbe:
failureThreshold: 1
httpGet:
path: /healthz/leader-election
port: http-endpoint
initialDelaySeconds: 10
periodSeconds: 20
timeoutSeconds: 10
name: csi-attacher
ports:
- containerPort: 22012
name: http-endpoint
protocol: TCP
volumeMounts:
- mountPath: /csi
name: socket-dir
- args:
- --v=5
- --csi-address=/csi/csi.sock
- --http-endpoint=:22013
- --leader-election
- --leader-election-namespace=$(PDCSI_NAMESPACE)
- --handle-volume-inuse-error=false
env:
- name: PDCSI_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/sig-storage/csi-resizer:v1.1.0
livenessProbe:
failureThreshold: 1
httpGet:
path: /healthz/leader-election
port: http-endpoint
initialDelaySeconds: 10
periodSeconds: 20
timeoutSeconds: 10
name: csi-resizer
ports:
- containerPort: 22013
name: http-endpoint
protocol: TCP
volumeMounts:
- mountPath: /csi
name: socket-dir
- args:
- --v=5
- --csi-address=/csi/csi.sock
- --metrics-address=:22014
- --leader-election
- --leader-election-namespace=$(PDCSI_NAMESPACE)
- --timeout=300s
env:
- name: PDCSI_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: registry.k8s.io/sig-storage/csi-snapshotter:v3.0.3
name: csi-snapshotter
volumeMounts:
- mountPath: /csi
name: socket-dir
- args:
- --v=5
- --endpoint=unix:/csi/csi.sock
- --extra-labels=k8s-io-cluster-name=minimal-example-com
image: registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.3.4
name: gce-pd-driver
volumeMounts:
- mountPath: /csi
name: socket-dir
hostNetwork: true
nodeSelector: null
priorityClassName: csi-gce-pd-controller
serviceAccountName: csi-gce-pd-controller-sa
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
volumes:
- emptyDir: {}
name: socket-dir
---
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: pd.csi.storage.gke.io
spec:
attachRequired: true
podInfoOnMount: false
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: gcp-pd-csi-driver.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: gcp-pd-csi-driver.addons.k8s.io
name: csi-gce-pd-node
namespace: gce-pd-csi-driver
spec:
selector:
matchLabels:
app: gcp-compute-persistent-disk-csi-driver
template:
metadata:
creationTimestamp: null
labels:
app: gcp-compute-persistent-disk-csi-driver
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- --v=5
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=/var/lib/kubelet/plugins/pd.csi.storage.gke.io/csi.sock
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.1.0
name: csi-driver-registrar
volumeMounts:
- mountPath: /csi
name: plugin-dir
- mountPath: /registration
name: registration-dir
- args:
- --v=5
- --endpoint=unix:/csi/csi.sock
- --run-controller-service=false
image: registry.k8s.io/cloud-provider-gcp/gcp-compute-persistent-disk-csi-driver:v1.3.4
name: gce-pd-driver
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/lib/kubelet
mountPropagation: Bidirectional
name: kubelet-dir
- mountPath: /csi
name: plugin-dir
- mountPath: /dev
name: device-dir
- mountPath: /etc/udev
name: udev-rules-etc
- mountPath: /lib/udev
name: udev-rules-lib
- mountPath: /run/udev
name: udev-socket
- mountPath: /sys
name: sys
hostNetwork: true
nodeSelector:
kubernetes.io/os: linux
priorityClassName: csi-gce-pd-node
serviceAccountName: csi-gce-pd-node-sa
tolerations:
- operator: Exists
volumes:
- hostPath:
path: /var/lib/kubelet/plugins_registry/
type: Directory
name: registration-dir
- hostPath:
path: /var/lib/kubelet
type: Directory
name: kubelet-dir
- hostPath:
path: /var/lib/kubelet/plugins/pd.csi.storage.gke.io/
type: DirectoryOrCreate
name: plugin-dir
- hostPath:
path: /dev
type: Directory
name: device-dir
- hostPath:
path: /etc/udev
type: Directory
name: udev-rules-etc
- hostPath:
path: /lib/udev
type: Directory
name: udev-rules-lib
- hostPath:
path: /run/udev
type: Directory
name: udev-socket
- hostPath:
path: /sys
type: Directory
name: sys

View File

@@ -0,0 +1,225 @@
apiVersion: v1
data:
config.yaml: |
{"cloud":"gce","configBase":"memfs://tests/minimal.example.com","secretStore":"memfs://tests/minimal.example.com/secrets","server":{"Listen":":3988","provider":{"gce":{"projectID":"testproject","region":"us-test1","clusterName":"minimal.example.com","MaxTimeSkew":300}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.26.0-alpha.2
name: kops-controller
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kops-controller
template:
metadata:
annotations:
dns.alpha.kubernetes.io/internal: kops-controller.internal.minimal.example.com
creationTimestamp: null
labels:
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
kops.k8s.io/managed-by: kops
version: v1.26.0-alpha.2
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- key: kops.k8s.io/kops-controller-pki
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- key: kops.k8s.io/kops-controller-pki
operator: Exists
containers:
- args:
- --v=2
- --conf=/etc/kubernetes/kops-controller/config/config.yaml
command: null
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: registry.k8s.io/kops/kops-controller:1.26.0-alpha.2
name: kops-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
runAsUser: 10011
volumeMounts:
- mountPath: /etc/kubernetes/kops-controller/config/
name: kops-controller-config
- mountPath: /etc/kubernetes/kops-controller/pki/
name: kops-controller-pki
dnsPolicy: Default
hostNetwork: true
nodeSelector: null
priorityClassName: system-cluster-critical
serviceAccount: kops-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
volumes:
- configMap:
name: kops-controller
name: kops-controller-config
- hostPath:
path: /etc/kubernetes/kops-controller/
type: Directory
name: kops-controller-pki
updateStrategy:
type: OnDelete
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
- create
- apiGroups:
- ""
- coordination.k8s.io
resourceNames:
- kops-controller-leader
resources:
- configmaps
- leases
verbs:
- get
- list
- watch
- patch
- update
- delete
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller

View File

@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kubelet-api.rbac.addons.k8s.io
name: kops:system:kubelet-api-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet-api

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: LimitRange
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: limit-range.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: limit-range.addons.k8s.io
name: limits
namespace: default
spec:
limits:
- defaultRequest:
cpu: 100m
type: Container

View File

@@ -0,0 +1,127 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metadata-proxy.addons.k8s.io
addonmanager.kubernetes.io/mode: Reconcile
app.kubernetes.io/managed-by: kops
k8s-addon: metadata-proxy.addons.k8s.io
k8s-app: metadata-proxy
kubernetes.io/cluster-service: "true"
name: metadata-proxy
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metadata-proxy.addons.k8s.io
addonmanager.kubernetes.io/mode: Reconcile
app.kubernetes.io/managed-by: kops
k8s-addon: metadata-proxy.addons.k8s.io
k8s-app: metadata-proxy
kubernetes.io/cluster-service: "true"
version: v0.12
name: metadata-proxy-v0.12
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metadata-proxy
version: v0.12
template:
metadata:
creationTimestamp: null
labels:
k8s-app: metadata-proxy
kops.k8s.io/managed-by: kops
kubernetes.io/cluster-service: "true"
version: v0.12
spec:
containers:
- args:
- -addr=169.254.169.252:988
image: k8s.gcr.io/metadata-proxy:v0.1.12
name: metadata-proxy
resources:
limits:
cpu: 30m
memory: 25Mi
requests:
cpu: 30m
memory: 25Mi
securityContext:
privileged: true
- command:
- /monitor
- --stackdriver-prefix=custom.googleapis.com/addons
- --source=metadata_proxy:http://127.0.0.1:989?whitelisted=request_count
- --pod-id=$(POD_NAME)
- --namespace-id=$(POD_NAMESPACE)
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: k8s.gcr.io/prometheus-to-sd:v0.5.0
name: prometheus-to-sd-exporter
resources:
limits:
cpu: 2m
memory: 20Mi
requests:
cpu: 2m
memory: 20Mi
dnsPolicy: Default
hostNetwork: true
initContainers:
- command:
- /bin/sh
- -c
- |
set -e
set -x
if (ip link show ens4); then
PRIMARY_DEV=ens4
else
PRIMARY_DEV=eth0
fi
ip addr add dev lo 169.254.169.252/32
iptables -w -t nat -I PREROUTING -p tcp -d 169.254.169.254 ! -i "${PRIMARY_DEV}" --dport 80 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 169.254.169.252:988
iptables -w -t nat -I PREROUTING -p tcp -d 169.254.169.254 ! -i "${PRIMARY_DEV}" --dport 8080 -m comment --comment "metadata-concealment: bridge traffic to metadata server goes to metadata proxy" -j DNAT --to-destination 169.254.169.252:987
image: k8s.gcr.io/k8s-custom-iptables:1.0
imagePullPolicy: Always
name: update-ipdtables
securityContext:
privileged: true
volumeMounts:
- mountPath: /host
name: host
nodeSelector:
cloud.google.com/metadata-proxy-ready: "true"
kubernetes.io/os: linux
priorityClassName: system-node-critical
serviceAccountName: metadata-proxy
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: host
updateStrategy:
type: RollingUpdate

View File

@ -0,0 +1,267 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/metrics
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
k8s-app: metrics-server
template:
metadata:
creationTimestamp: null
labels:
k8s-app: metrics-server
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- --secure-port=4443
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-preferred-address-types=Hostname
- --cert-dir=/tmp
- --kubelet-insecure-tls
image: registry.k8s.io/metrics-server/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 50m
memory: 128Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: metrics-server

View File

@ -0,0 +1,14 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-gce.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
app.kubernetes.io/managed-by: kops
k8s-addon: storage-gce.addons.k8s.io
kubernetes.io/cluster-service: "true"
name: standard
parameters:
type: pd-standard
provisioner: kubernetes.io/gce-pd

View File

@ -0,0 +1,282 @@
APIServerConfig:
KubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: external
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: registry.k8s.io/kube-apiserver:v1.26.0-alpha.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm
XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ==
-----END RSA PUBLIC KEY-----
-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF
Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ==
-----END RSA PUBLIC KEY-----
Assets:
amd64:
- 0eeb400fd028f5848c6d63c88b63148867bc36773e80ff9a9509c59e41859f51@https://storage.googleapis.com/kubernetes-release/release/v1.26.0-alpha.0/bin/linux/amd64/kubelet
- 908abb954a0d131e5b702f4faecaa310d19ca217c09bb90a340f24a2b5e2a567@https://storage.googleapis.com/kubernetes-release/release/v1.26.0-alpha.0/bin/linux/amd64/kubectl
- 145b8e00db6ce8629a172b48bc622b7bd08a58b573c5c8d77870b8898b74dfe6@https://storage.googleapis.com/kubernetes-release/release/v1.26.0-alpha.0/bin/linux/amd64/mounter
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- dd1f4730daf728822aea3ba35a440e14b1dfa8f1db97288a59a8666676a13637@https://github.com/containerd/containerd/releases/download/v1.6.10/containerd-1.6.10-linux-amd64.tar.gz
- db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64
- f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64
- 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64
arm64:
- c01a2ce56a4484354a7db11abac8166fa45215855662726b43639072608ecbfa@https://storage.googleapis.com/kubernetes-release/release/v1.26.0-alpha.0/bin/linux/arm64/kubelet
- de553916b8607682b10cd6f6c333204b5f0186a10e2007a44528542845ffb28c@https://storage.googleapis.com/kubernetes-release/release/v1.26.0-alpha.0/bin/linux/arm64/kubectl
- 626bcf9f2757357c2eba171634b22b2f6fc3998a973750ca9a9d602a50b74593@https://storage.googleapis.com/kubernetes-release/release/v1.26.0-alpha.0/bin/linux/arm64/mounter
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 6d655e80a843f480e1c1cead18479185251581ff2d4a2e2e5eb88ad5b5e3d937@https://github.com/containerd/containerd/releases/download/v1.6.10/containerd-1.6.10-linux-arm64.tar.gz
- dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64
- 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64
- 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64
CAs:
apiserver-aggregator-ca: |
-----BEGIN CERTIFICATE-----
MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX
DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX
WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk
CzMeMdr4
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX
DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN
QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW
HLtkTXH8
-----END CERTIFICATE-----
etcd-clients-ca: |
-----BEGIN CERTIFICATE-----
MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx
NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep
uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s
x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY
qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx
NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep
uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s
x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E
YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co=
-----END CERTIFICATE-----
etcd-manager-ca-events: |
-----BEGIN CERTIFICATE-----
MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN
MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H
g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6
CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O
sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs
GS/VUw==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN
MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL
DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW
LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE
hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV
cPfVNg==
-----END CERTIFICATE-----
etcd-manager-ca-main: |
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx
MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm
ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx
GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu
Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP
vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP
DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9
t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd
-----END CERTIFICATE-----
etcd-peers-ca-events: |
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx
MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY
xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O
Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB
DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW
03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh
cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI
J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb
-----END CERTIFICATE-----
etcd-peers-ca-main: |
-----BEGIN CERTIFICATE-----
MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3
MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG
SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA
aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf
OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt
uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3
MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG
SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt
naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC
qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K
G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo=
-----END CERTIFICATE-----
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
ClusterName: minimal.example.com
FileAssets:
- content: |
apiVersion: kubescheduler.config.k8s.io/v1beta2
clientConnection:
kubeconfig: /var/lib/kube-scheduler/kubeconfig
kind: KubeSchedulerConfiguration
path: /var/lib/kube-scheduler/config.yaml
Hooks:
- null
- null
KeypairIDs:
apiserver-aggregator-ca: "6980187172486667078076483355"
etcd-clients-ca: "6979622252718071085282986282"
etcd-manager-ca-events: "6982279354000777253151890266"
etcd-manager-ca-main: "6982279354000936168671127624"
etcd-peers-ca-events: "6982279353999767935825892873"
etcd-peers-ca-main: "6982279353998887468930183660"
kubernetes-ca: "6982820025135291416230495506"
service-account: "2"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hairpinMode: promiscuous-bridge
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
nodeLabels:
kops.k8s.io/kops-controller-pki: ""
node-role.kubernetes.io/control-plane: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
podInfraContainerImage: registry.k8s.io/pause:3.6
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
taints:
- node-role.kubernetes.io/control-plane=:NoSchedule
UpdatePolicy: automatic
channels:
- memfs://tests/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
runc:
version: 1.1.4
version: 1.6.10
etcdManifests:
- memfs://tests/minimal.example.com/manifests/etcd/main-master-us-test1-a.yaml
- memfs://tests/minimal.example.com/manifests/etcd/events-master-us-test1-a.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml

View File

@ -0,0 +1,50 @@
Assets:
amd64:
- 0eeb400fd028f5848c6d63c88b63148867bc36773e80ff9a9509c59e41859f51@https://storage.googleapis.com/kubernetes-release/release/v1.26.0-alpha.0/bin/linux/amd64/kubelet
- 908abb954a0d131e5b702f4faecaa310d19ca217c09bb90a340f24a2b5e2a567@https://storage.googleapis.com/kubernetes-release/release/v1.26.0-alpha.0/bin/linux/amd64/kubectl
- 145b8e00db6ce8629a172b48bc622b7bd08a58b573c5c8d77870b8898b74dfe6@https://storage.googleapis.com/kubernetes-release/release/v1.26.0-alpha.0/bin/linux/amd64/mounter
- 962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz
- dd1f4730daf728822aea3ba35a440e14b1dfa8f1db97288a59a8666676a13637@https://github.com/containerd/containerd/releases/download/v1.6.10/containerd-1.6.10-linux-amd64.tar.gz
- db772be63147a4e747b4fe286c7c16a2edc4a8458bd3092ea46aaee77750e8ce@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64
arm64:
- c01a2ce56a4484354a7db11abac8166fa45215855662726b43639072608ecbfa@https://storage.googleapis.com/kubernetes-release/release/v1.26.0-alpha.0/bin/linux/arm64/kubelet
- de553916b8607682b10cd6f6c333204b5f0186a10e2007a44528542845ffb28c@https://storage.googleapis.com/kubernetes-release/release/v1.26.0-alpha.0/bin/linux/arm64/kubectl
- 626bcf9f2757357c2eba171634b22b2f6fc3998a973750ca9a9d602a50b74593@https://storage.googleapis.com/kubernetes-release/release/v1.26.0-alpha.0/bin/linux/arm64/mounter
- ef17764ffd6cdcb16d76401bac1db6acc050c9b088f1be5efa0e094ea3b01df0@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz
- 6d655e80a843f480e1c1cead18479185251581ff2d4a2e2e5eb88ad5b5e3d937@https://github.com/containerd/containerd/releases/download/v1.6.10/containerd-1.6.10-linux-arm64.tar.gz
- dbb71e737eaef454a406ce21fd021bd8f1b35afb7635016745992bbd7c17a223@https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.arm64
CAs: {}
ClusterName: minimal.example.com
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "6982820025135291416230495506"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hairpinMode: promiscuous-bridge
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
nodeLabels:
node-role.kubernetes.io/node: ""
podInfraContainerImage: registry.k8s.io/pause:3.6
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
UpdatePolicy: automatic
channels:
- memfs://tests/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
runc:
version: 1.1.4
version: 1.6.10

View File

@ -0,0 +1,263 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
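# Best-effort kernel network buffer tuning; '|| true' keeps provisioning going if a sysctl is unavailable.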
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
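# Check a file's SHA256 against the expected hash; returns non-zero on mismatch. args: file, expected-hash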
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
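# Select the nodeup URL and hash for the host architecture, download the binary, and launch it.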
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
gcpPDCSIDriver:
enabled: true
manageStorageClasses: true
multizone: true
nodeTags: minimal-example-com-k8s-io-role-node
containerRuntime: containerd
containerd:
logLevel: info
runc:
version: 1.1.4
version: 1.6.10
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
cpuRequest: 100m
memoryRequest: 100Mi
version: 3.5.4
main:
cpuRequest: 200m
memoryRequest: 100Mi
version: 3.5.4
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: Node,RBAC
bindAddress: 0.0.0.0
cloudProvider: external
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: registry.k8s.io/kube-apiserver:v1.26.0-alpha.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: external
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: registry.k8s.io/kube-controller-manager:v1.26.0-alpha.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.26.0-alpha.0
logLevel: 2
kubeScheduler:
image: registry.k8s.io/kube-scheduler:v1.26.0-alpha.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hairpinMode: promiscuous-bridge
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
podInfraContainerImage: registry.k8s.io/pause:3.6
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hairpinMode: promiscuous-bridge
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
podInfraContainerImage: registry.k8s.io/pause:3.6
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: gce
ConfigBase: memfs://tests/minimal.example.com
InstanceGroupName: master-us-test1-a
InstanceGroupRole: ControlPlane
NodeupConfigHash: hJUKy2VMF8YE8BPKo+avsmDWMMvM8KusFg5VQ+0wfQc=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -0,0 +1,197 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return 0
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return 0
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
gcpPDCSIDriver:
enabled: true
manageStorageClasses: true
multizone: true
nodeTags: minimal-example-com-k8s-io-role-node
containerRuntime: containerd
containerd:
logLevel: info
runc:
version: 1.1.4
version: 1.6.10
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: registry.k8s.io/kube-proxy:v1.26.0-alpha.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: external
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hairpinMode: promiscuous-bridge
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
podInfraContainerImage: registry.k8s.io/pause:3.6
podManifestPath: /etc/kubernetes/manifests
protectKernelDefaults: true
registerSchedulable: true
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: gce
ConfigServer:
CACertificates: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
server: https://kops-controller.internal.minimal.example.com:3988/
InstanceGroupName: nodes
InstanceGroupRole: Node
NodeupConfigHash: XzKPrg4qnYbIhZ3FBpoSK85yYbyotxjBGv6XZfaU+7o=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -0,0 +1,100 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: minimal.example.com
spec:
certManager:
enabled: true
clusterAutoscaler:
enabled: true
metricsServer:
enabled: true
api:
dns: {}
authorization:
rbac: {}
channel: stable
cloudConfig: {}
cloudProvider: gce
configBase: memfs://tests/minimal.example.com
etcdClusters:
- cpuRequest: 200m
etcdMembers:
- instanceGroup: master-us-test1-a
name: a
memoryRequest: 100Mi
name: main
- cpuRequest: 100m
etcdMembers:
- instanceGroup: master-us-test1-a
name: a
memoryRequest: 100Mi
name: events
iam:
allowContainerRegistry: true
legacy: false
kubelet:
anonymousAuth: false
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: v1.26.0-alpha.0
masterPublicName: api.minimal.example.com
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
project: testproject
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 10.0.16.0/20
name: us-test1
region: us-test1
type: Public
topology:
dns:
type: Public
masters: public
nodes: public
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: master-us-test1-a
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20221018
machineType: e2-medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test1
zones:
- us-test1-a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: nodes
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20221018
machineType: n1-standard-2
maxSize: 1
minSize: 1
role: Node
subnets:
- us-test1
zones:
- us-test1-a

View File

@ -0,0 +1,588 @@
locals {
cluster_name = "minimal.example.com"
project = "testproject"
region = "us-test1"
}
output "cluster_name" {
value = "minimal.example.com"
}
output "project" {
value = "testproject"
}
output "region" {
value = "us-test1"
}
provider "google" {
project = "testproject"
region = "us-test1"
}
provider "aws" {
alias = "files"
region = "us-test-1"
}
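# The aliased "files" AWS provider is used by the aws_s3_object resources below to upload the cluster spec, addon manifests, and nodeup configs.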
resource "aws_s3_object" "cluster-completed-spec" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_cluster-completed.spec_content")
key = "tests/minimal.example.com/cluster-completed.spec"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "etcd-cluster-spec-events" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-events_content")
key = "tests/minimal.example.com/backups/etcd/events/control/etcd-cluster-spec"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "etcd-cluster-spec-main" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_etcd-cluster-spec-main_content")
key = "tests/minimal.example.com/backups/etcd/main/control/etcd-cluster-spec"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "kops-version-txt" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_kops-version.txt_content")
key = "tests/minimal.example.com/kops-version.txt"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "manifests-etcdmanager-events-master-us-test1-a" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-events-master-us-test1-a_content")
key = "tests/minimal.example.com/manifests/etcd/events-master-us-test1-a.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "manifests-etcdmanager-main-master-us-test1-a" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_manifests-etcdmanager-main-master-us-test1-a_content")
key = "tests/minimal.example.com/manifests/etcd/main-master-us-test1-a.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "manifests-static-kube-apiserver-healthcheck" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_manifests-static-kube-apiserver-healthcheck_content")
key = "tests/minimal.example.com/manifests/static/kube-apiserver-healthcheck.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-bootstrap" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-bootstrap_content")
key = "tests/minimal.example.com/addons/bootstrap-channel.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-certmanager-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-certmanager.io-k8s-1.16_content")
key = "tests/minimal.example.com/addons/certmanager.io/k8s-1.16.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-cluster-autoscaler-addons-k8s-io-k8s-1-15" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-cluster-autoscaler.addons.k8s.io-k8s-1.15_content")
key = "tests/minimal.example.com/addons/cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-coredns-addons-k8s-io-k8s-1-12" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-coredns.addons.k8s.io-k8s-1.12_content")
key = "tests/minimal.example.com/addons/coredns.addons.k8s.io/k8s-1.12.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-dns-controller-addons-k8s-io-k8s-1-12" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-dns-controller.addons.k8s.io-k8s-1.12_content")
key = "tests/minimal.example.com/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-gcp-cloud-controller-addons-k8s-io-k8s-1-23" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-gcp-cloud-controller.addons.k8s.io-k8s-1.23_content")
key = "tests/minimal.example.com/addons/gcp-cloud-controller.addons.k8s.io/k8s-1.23.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-gcp-pd-csi-driver-addons-k8s-io-k8s-1-23" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-gcp-pd-csi-driver.addons.k8s.io-k8s-1.23_content")
key = "tests/minimal.example.com/addons/gcp-pd-csi-driver.addons.k8s.io/k8s-1.23.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-kops-controller-addons-k8s-io-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-kops-controller.addons.k8s.io-k8s-1.16_content")
key = "tests/minimal.example.com/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-kubelet-api-rbac-addons-k8s-io-k8s-1-9" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-kubelet-api.rbac.addons.k8s.io-k8s-1.9_content")
key = "tests/minimal.example.com/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-limit-range-addons-k8s-io" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-limit-range.addons.k8s.io_content")
key = "tests/minimal.example.com/addons/limit-range.addons.k8s.io/v1.5.0.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-metadata-proxy-addons-k8s-io-v0-1-12" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-metadata-proxy.addons.k8s.io-v0.1.12_content")
key = "tests/minimal.example.com/addons/metadata-proxy.addons.k8s.io/v0.1.12.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-metrics-server-addons-k8s-io-k8s-1-11" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-metrics-server.addons.k8s.io-k8s-1.11_content")
key = "tests/minimal.example.com/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-storage-gce-addons-k8s-io-v1-7-0" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-storage-gce.addons.k8s.io-v1.7.0_content")
key = "tests/minimal.example.com/addons/storage-gce.addons.k8s.io/v1.7.0.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "nodeupconfig-master-us-test1-a" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_nodeupconfig-master-us-test1-a_content")
key = "tests/minimal.example.com/igconfig/control-plane/master-us-test1-a/nodeupconfig.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "nodeupconfig-nodes" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_nodeupconfig-nodes_content")
key = "tests/minimal.example.com/igconfig/node/nodes/nodeupconfig.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "google_compute_disk" "a-etcd-events-minimal-example-com" {
labels = {
"k8s-io-cluster-name" = "minimal-example-com"
"k8s-io-etcd-events" = "a-2fa"
"k8s-io-role-master" = "master"
}
name = "a-etcd-events-minimal-example-com"
size = 20
type = "pd-ssd"
zone = "us-test1-a"
}
resource "google_compute_disk" "a-etcd-main-minimal-example-com" {
labels = {
"k8s-io-cluster-name" = "minimal-example-com"
"k8s-io-etcd-main" = "a-2fa"
"k8s-io-role-master" = "master"
}
name = "a-etcd-main-minimal-example-com"
size = 20
type = "pd-ssd"
zone = "us-test1-a"
}
resource "google_compute_firewall" "kubernetes-master-https-ipv6-minimal-example-com" {
allow {
ports = ["443"]
protocol = "tcp"
}
disabled = false
name = "kubernetes-master-https-ipv6-minimal-example-com"
network = google_compute_network.minimal-example-com.name
source_ranges = ["::/0"]
target_tags = ["minimal-example-com-k8s-io-role-control-plane", "minimal-example-com-k8s-io-role-master"]
}
resource "google_compute_firewall" "kubernetes-master-https-minimal-example-com" {
allow {
ports = ["443"]
protocol = "tcp"
}
disabled = false
name = "kubernetes-master-https-minimal-example-com"
network = google_compute_network.minimal-example-com.name
source_ranges = ["0.0.0.0/0"]
target_tags = ["minimal-example-com-k8s-io-role-control-plane", "minimal-example-com-k8s-io-role-master"]
}
resource "google_compute_firewall" "master-to-master-minimal-example-com" {
allow {
protocol = "tcp"
}
allow {
protocol = "udp"
}
allow {
protocol = "icmp"
}
allow {
protocol = "esp"
}
allow {
protocol = "ah"
}
allow {
protocol = "sctp"
}
disabled = false
name = "master-to-master-minimal-example-com"
network = google_compute_network.minimal-example-com.name
source_tags = ["minimal-example-com-k8s-io-role-control-plane", "minimal-example-com-k8s-io-role-master"]
target_tags = ["minimal-example-com-k8s-io-role-control-plane", "minimal-example-com-k8s-io-role-master"]
}
resource "google_compute_firewall" "master-to-node-minimal-example-com" {
allow {
protocol = "tcp"
}
allow {
protocol = "udp"
}
allow {
protocol = "icmp"
}
allow {
protocol = "esp"
}
allow {
protocol = "ah"
}
allow {
protocol = "sctp"
}
disabled = false
name = "master-to-node-minimal-example-com"
network = google_compute_network.minimal-example-com.name
source_tags = ["minimal-example-com-k8s-io-role-control-plane", "minimal-example-com-k8s-io-role-master"]
target_tags = ["minimal-example-com-k8s-io-role-node"]
}
resource "google_compute_firewall" "node-to-master-minimal-example-com" {
allow {
ports = ["443"]
protocol = "tcp"
}
allow {
ports = ["3988"]
protocol = "tcp"
}
disabled = false
name = "node-to-master-minimal-example-com"
network = google_compute_network.minimal-example-com.name
source_tags = ["minimal-example-com-k8s-io-role-node"]
target_tags = ["minimal-example-com-k8s-io-role-control-plane", "minimal-example-com-k8s-io-role-master"]
}
resource "google_compute_firewall" "node-to-node-minimal-example-com" {
allow {
protocol = "tcp"
}
allow {
protocol = "udp"
}
allow {
protocol = "icmp"
}
allow {
protocol = "esp"
}
allow {
protocol = "ah"
}
allow {
protocol = "sctp"
}
disabled = false
name = "node-to-node-minimal-example-com"
network = google_compute_network.minimal-example-com.name
source_tags = ["minimal-example-com-k8s-io-role-node"]
target_tags = ["minimal-example-com-k8s-io-role-node"]
}
resource "google_compute_firewall" "nodeport-external-to-node-ipv6-minimal-example-com" {
allow {
ports = ["30000-32767"]
protocol = "tcp"
}
allow {
ports = ["30000-32767"]
protocol = "udp"
}
disabled = true
name = "nodeport-external-to-node-ipv6-minimal-example-com"
network = google_compute_network.minimal-example-com.name
source_ranges = ["::/0"]
target_tags = ["minimal-example-com-k8s-io-role-node"]
}
resource "google_compute_firewall" "nodeport-external-to-node-minimal-example-com" {
allow {
ports = ["30000-32767"]
protocol = "tcp"
}
allow {
ports = ["30000-32767"]
protocol = "udp"
}
disabled = true
name = "nodeport-external-to-node-minimal-example-com"
network = google_compute_network.minimal-example-com.name
source_ranges = ["0.0.0.0/0"]
target_tags = ["minimal-example-com-k8s-io-role-node"]
}
resource "google_compute_firewall" "ssh-external-to-master-ipv6-minimal-example-com" {
allow {
ports = ["22"]
protocol = "tcp"
}
disabled = false
name = "ssh-external-to-master-ipv6-minimal-example-com"
network = google_compute_network.minimal-example-com.name
source_ranges = ["::/0"]
target_tags = ["minimal-example-com-k8s-io-role-control-plane", "minimal-example-com-k8s-io-role-master"]
}
resource "google_compute_firewall" "ssh-external-to-master-minimal-example-com" {
allow {
ports = ["22"]
protocol = "tcp"
}
disabled = false
name = "ssh-external-to-master-minimal-example-com"
network = google_compute_network.minimal-example-com.name
source_ranges = ["0.0.0.0/0"]
target_tags = ["minimal-example-com-k8s-io-role-control-plane", "minimal-example-com-k8s-io-role-master"]
}
resource "google_compute_firewall" "ssh-external-to-node-ipv6-minimal-example-com" {
allow {
ports = ["22"]
protocol = "tcp"
}
disabled = false
name = "ssh-external-to-node-ipv6-minimal-example-com"
network = google_compute_network.minimal-example-com.name
source_ranges = ["::/0"]
target_tags = ["minimal-example-com-k8s-io-role-node"]
}
resource "google_compute_firewall" "ssh-external-to-node-minimal-example-com" {
allow {
ports = ["22"]
protocol = "tcp"
}
disabled = false
name = "ssh-external-to-node-minimal-example-com"
network = google_compute_network.minimal-example-com.name
source_ranges = ["0.0.0.0/0"]
target_tags = ["minimal-example-com-k8s-io-role-node"]
}
resource "google_compute_instance_group_manager" "a-master-us-test1-a-minimal-example-com" {
base_instance_name = "master-us-test1-a"
name = "a-master-us-test1-a-minimal-example-com"
target_size = 1
version {
instance_template = google_compute_instance_template.master-us-test1-a-minimal-example-com.self_link
}
zone = "us-test1-a"
}
resource "google_compute_instance_group_manager" "a-nodes-minimal-example-com" {
base_instance_name = "nodes"
name = "a-nodes-minimal-example-com"
target_size = 1
version {
instance_template = google_compute_instance_template.nodes-minimal-example-com.self_link
}
zone = "us-test1-a"
}
resource "google_compute_instance_template" "master-us-test1-a-minimal-example-com" {
can_ip_forward = true
disk {
auto_delete = true
boot = true
device_name = "persistent-disks-0"
disk_name = ""
disk_size_gb = 64
disk_type = "pd-standard"
interface = ""
mode = "READ_WRITE"
source = ""
source_image = "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20221018"
type = "PERSISTENT"
}
labels = {
"k8s-io-cluster-name" = "minimal-example-com"
"k8s-io-instance-group" = "master-us-test1-a"
"k8s-io-role-control-plane" = ""
"k8s-io-role-master" = ""
}
machine_type = "e2-medium"
metadata = {
"cluster-name" = "minimal.example.com"
"kops-k8s-io-instance-group-name" = "master-us-test1-a"
"ssh-keys" = "admin: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ=="
"startup-script" = file("${path.module}/data/google_compute_instance_template_master-us-test1-a-minimal-example-com_metadata_startup-script")
}
name_prefix = "master-us-test1-a-minimal-e8ua4m-"
network_interface {
access_config {
}
network = google_compute_network.minimal-example-com.name
subnetwork = google_compute_subnetwork.us-test1-minimal-example-com.name
}
scheduling {
automatic_restart = true
on_host_maintenance = "MIGRATE"
preemptible = false
provisioning_model = "STANDARD"
}
service_account {
email = google_service_account.control-plane.email
scopes = ["https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/devstorage.read_write", "https://www.googleapis.com/auth/ndev.clouddns.readwrite"]
}
tags = ["minimal-example-com-k8s-io-role-control-plane", "minimal-example-com-k8s-io-role-master"]
}
resource "google_compute_instance_template" "nodes-minimal-example-com" {
can_ip_forward = true
disk {
auto_delete = true
boot = true
device_name = "persistent-disks-0"
disk_name = ""
disk_size_gb = 128
disk_type = "pd-standard"
interface = ""
mode = "READ_WRITE"
source = ""
source_image = "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2004-focal-v20221018"
type = "PERSISTENT"
}
labels = {
"k8s-io-cluster-name" = "minimal-example-com"
"k8s-io-instance-group" = "nodes"
"k8s-io-role-node" = ""
}
machine_type = "n1-standard-2"
metadata = {
"cluster-name" = "minimal.example.com"
"kops-k8s-io-instance-group-name" = "nodes"
"ssh-keys" = "admin: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ=="
"startup-script" = file("${path.module}/data/google_compute_instance_template_nodes-minimal-example-com_metadata_startup-script")
}
name_prefix = "nodes-minimal-example-com-"
network_interface {
access_config {
}
network = google_compute_network.minimal-example-com.name
subnetwork = google_compute_subnetwork.us-test1-minimal-example-com.name
}
scheduling {
automatic_restart = true
on_host_maintenance = "MIGRATE"
preemptible = false
provisioning_model = "STANDARD"
}
service_account {
email = google_service_account.node.email
scopes = ["https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/devstorage.read_only"]
}
tags = ["minimal-example-com-k8s-io-role-node"]
}
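# Custom-mode VPC with a single /20 subnetwork in the us-test1 region.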
resource "google_compute_network" "minimal-example-com" {
auto_create_subnetworks = false
name = "minimal-example-com"
}
resource "google_compute_subnetwork" "us-test1-minimal-example-com" {
ip_cidr_range = "10.0.16.0/20"
name = "us-test1-minimal-example-com"
network = google_compute_network.minimal-example-com.name
region = "us-test1"
}
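# Per-role service accounts and the project-level IAM bindings that grant them their roles.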
resource "google_project_iam_binding" "serviceaccount-control-plane" {
members = ["serviceAccount:control-plane-minimal-e-rabo9p@testproject.iam.gserviceaccount.com"]
project = "testproject"
role = "roles/container.serviceAgent"
}
resource "google_project_iam_binding" "serviceaccount-nodes" {
members = ["serviceAccount:node-minimal-example-com@testproject.iam.gserviceaccount.com"]
project = "testproject"
role = "roles/compute.viewer"
}
resource "google_service_account" "control-plane" {
account_id = "control-plane-minimal-e-rabo9p"
description = "kubernetes control-plane instances"
display_name = "control-plane"
project = "testproject"
}
resource "google_service_account" "node" {
account_id = "node-minimal-example-com"
description = "kubernetes worker nodes"
display_name = "node"
project = "testproject"
}
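# Minimum Terraform and Google provider versions required by the generated configuration.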
terraform {
required_version = ">= 0.15.0"
required_providers {
google = {
"source" = "hashicorp/google"
"version" = ">= 2.19.0"
}
}
}

View File

@@ -159,6 +159,7 @@ kubeAPIServer:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@@ -260,7 +261,7 @@ CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: ControlPlane
NodeupConfigHash: 3eXdkP9halY6nXFlu9HMIb4qMRUqvOb4E6JPXLopbs0=
NodeupConfigHash: wdfWmOUE63NZlYzHfc06zl6Skj6oCTXtb2gRMJrn3Kk=
__EOF_KUBE_ENV

View File

@@ -84,6 +84,7 @@ spec:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
@@ -195,6 +196,8 @@ spec:
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
masterPublicName: api.minimal.example.com
metricsServer:
enabled: true
networkCIDR: 172.20.0.0/16
networking:
amazonvpc: {}

View File

@@ -46,6 +46,14 @@ spec:
selector:
k8s-addon: cluster-autoscaler.addons.k8s.io
version: 9.99.0
- id: k8s-1.11
manifest: metrics-server.addons.k8s.io/k8s-1.11.yaml
manifestHash: dcc45685fd1de2514d806f6e96f36bfc6fb18af68a8de6a9e5def5af833b1f43
name: metrics-server.addons.k8s.io
needsPKI: true
selector:
k8s-app: metrics-server
version: 9.99.0
- id: k8s-1.16
manifest: certmanager.io/k8s-1.16.yaml
manifestHash: 79bc70f8f9b7a91e97830ecaa8968a51e0c5b78318444cb5a44935e8f9f73aa1

View File

@@ -0,0 +1,267 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/metrics
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
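# Deployment: two replicas spread across zones and hosts, serving on port 4443 and scraping kubelets without TLS verification (--kubelet-insecure-tls).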
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
replicas: 2
selector:
matchLabels:
k8s-app: metrics-server
template:
metadata:
creationTimestamp: null
labels:
k8s-app: metrics-server
kops.k8s.io/managed-by: kops
spec:
containers:
- args:
- --secure-port=4443
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-preferred-address-types=Hostname
- --cert-dir=/tmp
- --kubelet-insecure-tls
image: registry.k8s.io/metrics-server/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 4443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 50m
memory: 128Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
topologySpreadConstraints:
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: topology.kubernetes.io/zone
whenUnsatisfiable: ScheduleAnyway
- labelSelector:
matchLabels:
k8s-app: metrics-server
maxSkew: 1
topologyKey: kubernetes.io/hostname
whenUnsatisfiable: DoNotSchedule
volumes:
- emptyDir: {}
name: tmp-dir
---
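# Registers v1beta1.metrics.k8s.io with the API aggregation layer; the apiserver skips TLS verification to the metrics-server service.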
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100
---
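# Keeps at least one metrics-server replica available during voluntary disruptions.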
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: metrics-server.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
minAvailable: 1
selector:
matchLabels:
k8s-app: metrics-server

View File

@@ -18,6 +18,7 @@ APIServerConfig:
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
enableAggregatorRouting: true
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:

View File

@@ -12,6 +12,8 @@ spec:
enabled: true
podAnnotations:
testAnnotation: testAnnotation
metricsServer:
enabled: true
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable

View File

@@ -645,6 +645,14 @@ resource "aws_s3_object" "minimal-example-com-addons-limit-range-addons-k8s-io"
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-metrics-server-addons-k8s-io-k8s-1-11" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-metrics-server.addons.k8s.io-k8s-1.11_content")
key = "clusters.example.com/minimal.example.com/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml"
provider = aws.files
server_side_encryption = "AES256"
}
resource "aws_s3_object" "minimal-example-com-addons-networking-amazon-vpc-routed-eni-k8s-1-16" {
bucket = "testingBucket"
content = file("${path.module}/data/aws_s3_object_minimal.example.com-addons-networking.amazon-vpc-routed-eni-k8s-1.16_content")