Remove code for no-longer-supported k8s 1.21

John Gardiner Myers 2022-12-24 00:32:28 -08:00
parent b820f4ac59
commit 005ec38972
49 changed files with 56 additions and 5929 deletions

View File

@ -46,8 +46,6 @@ var MagicTimestamp = metav1.Time{Time: time.Date(2017, 1, 1, 0, 0, 0, 0, time.UT
// TestCreateClusterMinimal runs kops create cluster minimal.example.com --zones us-test-1a
func TestCreateClusterMinimal(t *testing.T) {
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.20", "v1alpha2")
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.21", "v1alpha2")
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.22", "v1alpha2")
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.23", "v1alpha2")
runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/minimal-1.24", "v1alpha2")

View File

@ -113,12 +113,7 @@ func (b *KubeSchedulerBuilder) Build(c *fi.NodeupModelBuilderContext) error {
} else {
// We didn't get a kubescheduler configuration; warn as we're aiming to move this to generation in the kops CLI
klog.Warningf("using embedded kubescheduler configuration")
var config *SchedulerConfig
if b.IsKubernetesGTE("1.22") {
config = NewSchedulerConfig("kubescheduler.config.k8s.io/v1beta2")
} else {
config = NewSchedulerConfig("kubescheduler.config.k8s.io/v1beta1")
}
config := NewSchedulerConfig("kubescheduler.config.k8s.io/v1beta2")
kubeSchedulerConfig, err := configbuilder.BuildConfigYaml(&kubeScheduler, config)
if err != nil {

View File

@ -32,7 +32,7 @@ spec:
iam: {}
kubelet:
anonymousAuth: false
kubernetesVersion: v1.21.0
kubernetesVersion: v1.26.0
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:

View File

@ -30,7 +30,7 @@ spec:
iam: {}
kubelet:
anonymousAuth: false
kubernetesVersion: v1.21.0
kubernetesVersion: v1.26.0
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:

View File

@ -21,7 +21,7 @@ spec:
iam: {}
kubelet:
anonymousAuth: false
kubernetesVersion: v1.21.0
kubernetesVersion: v1.26.0
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:

View File

@ -26,7 +26,7 @@ func UseKopsControllerForNodeBootstrap(cluster *kops.Cluster) bool {
case kops.CloudProviderAWS:
return true
case kops.CloudProviderGCE:
return cluster.IsKubernetesGTE("1.22")
return true
case kops.CloudProviderHetzner:
return true
default:
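For context, a standalone sketch of how the simplified node-bootstrap check behaves after this change; the CloudProvider type, constants, and default branch below are local stand-ins for the kops API, not the actual code:

package main

import "fmt"

// CloudProvider is a local stand-in for the kops.CloudProvider type; the
// constants are assumptions for illustration only.
type CloudProvider string

const (
	CloudProviderAWS     CloudProvider = "aws"
	CloudProviderGCE     CloudProvider = "gce"
	CloudProviderHetzner CloudProvider = "hetzner"
)

// useKopsControllerForNodeBootstrap mirrors the simplified switch above:
// GCE no longer needs a Kubernetes >= 1.22 check, so AWS, GCE, and Hetzner
// all use kops-controller for node bootstrap. The default branch is an
// assumption, since the hunk is truncated before it.
func useKopsControllerForNodeBootstrap(provider CloudProvider) bool {
	switch provider {
	case CloudProviderAWS, CloudProviderGCE, CloudProviderHetzner:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(useKopsControllerForNodeBootstrap(CloudProviderGCE)) // true
}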

View File

@ -374,8 +374,6 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
if requiresSubnetCIDR && strict {
if !strings.Contains(c.Spec.Networking.NonMasqueradeCIDR, ":") || s.IPv6CIDR == "" {
allErrs = append(allErrs, field.Required(fieldSubnet.Child("cidr"), "subnet did not have a cidr set"))
} else if c.IsKubernetesLT("1.22") {
allErrs = append(allErrs, field.Required(fieldSubnet.Child("cidr"), "IPv6-only subnets require Kubernetes 1.22+"))
}
}
} else {

View File

@ -415,10 +415,6 @@ func validateTopology(c *kops.Cluster, topology *kops.TopologySpec, fieldPath *f
allErrs = append(allErrs, field.Required(fieldPath.Child("nodes"), ""))
} else {
allErrs = append(allErrs, IsValidValue(fieldPath.Child("nodes"), &topology.Nodes, kops.SupportedTopologies)...)
if topology.Nodes == "private" && c.Spec.IsIPv6Only() && c.IsKubernetesLT("1.22") {
allErrs = append(allErrs, field.Forbidden(fieldPath.Child("nodes"), "private topology in IPv6 clusters requires Kubernetes 1.22+"))
}
}
if topology.Bastion != nil {

View File

@ -197,7 +197,7 @@ func NewConfig(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) (*Confi
}
func UsesInstanceIDForNodeName(cluster *kops.Cluster) bool {
return cluster.Spec.ExternalCloudControllerManager != nil && cluster.IsKubernetesGTE("1.22") && cluster.Spec.GetCloudProvider() == kops.CloudProviderAWS
return cluster.Spec.ExternalCloudControllerManager != nil && cluster.Spec.GetCloudProvider() == kops.CloudProviderAWS
}
func filterFileAssets(f []kops.FileAssetSpec, role kops.InstanceGroupRole) []kops.FileAssetSpec {
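A minimal sketch of the simplified predicate, with a plain boolean and string standing in for the kops cluster-spec fields referenced above:

package main

import "fmt"

// usesInstanceIDForNodeName sketches the simplified check: once 1.21 is out
// of support, the node name is derived from the instance ID whenever an
// external cloud controller manager is configured on AWS, with no Kubernetes
// version gate.
func usesInstanceIDForNodeName(hasExternalCCM bool, cloudProvider string) bool {
	return hasExternalCCM && cloudProvider == "aws"
}

func main() {
	fmt.Println(usesInstanceIDForNodeName(true, "aws")) // true
}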

View File

@ -279,7 +279,7 @@ func (b *NetworkModelBuilder) Build(c *fi.CloudupModelBuilderContext) error {
Tags: tags,
}
if b.Cluster.Spec.ExternalCloudControllerManager != nil && b.Cluster.IsKubernetesGTE("1.22") {
if b.Cluster.Spec.ExternalCloudControllerManager != nil {
subnet.ResourceBasedNaming = fi.PtrTo(true)
}

View File

@ -85,8 +85,6 @@ func (b *AWSCloudControllerManagerOptionsBuilder) BuildOptions(o interface{}) er
if eccm.Image == "" {
// See https://us.gcr.io/k8s-artifacts-prod/provider-aws/cloud-controller-manager
switch b.KubernetesVersion.Minor {
case 21:
eccm.Image = "registry.k8s.io/provider-aws/cloud-controller-manager:v1.21.6"
case 22:
eccm.Image = "registry.k8s.io/provider-aws/cloud-controller-manager:v1.22.7"
case 23:
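The same selection can be sketched as a table lookup; only the v1.22.7 tag is taken from the hunk above, and the newer entries are deliberately omitted rather than guessed:

package main

import "fmt"

// ccmImageForMinor sketches the mapping that remains after the 1.21 entry is
// dropped from the switch on the Kubernetes minor version.
func ccmImageForMinor(minor int) (string, bool) {
	images := map[int]string{
		22: "registry.k8s.io/provider-aws/cloud-controller-manager:v1.22.7",
		// 23 and later map to newer tags in the remaining switch cases.
	}
	image, ok := images[minor]
	return image, ok
}

func main() {
	if image, ok := ccmImageForMinor(22); ok {
		fmt.Println(image)
	}
}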

View File

@ -37,7 +37,7 @@ func (b *AWSEBSCSIDriverOptionsBuilder) BuildOptions(o interface{}) error {
if aws.EBSCSIDriver == nil {
aws.EBSCSIDriver = &kops.EBSCSIDriverSpec{
Enabled: fi.PtrTo(b.IsKubernetesGTE("1.22")),
Enabled: fi.PtrTo(true),
}
}
c := aws.EBSCSIDriver

View File

@ -43,8 +43,6 @@ func (b *ClusterAutoscalerOptionsBuilder) BuildOptions(o interface{}) error {
v, err := util.ParseKubernetesVersion(clusterSpec.KubernetesVersion)
if err == nil {
switch v.Minor {
case 21:
image = "registry.k8s.io/autoscaling/cluster-autoscaler:v1.21.3"
case 22:
image = "registry.k8s.io/autoscaling/cluster-autoscaler:v1.22.3"
case 23:

View File

@ -42,11 +42,7 @@ func (b *EtcdOptionsBuilder) BuildOptions(o interface{}) error {
// Ensure the version is set
if c.Version == "" {
// We run the k8s-recommended versions of etcd
if b.IsKubernetesGTE("1.22") {
c.Version = DefaultEtcd3Version_1_22
} else {
c.Version = DefaultEtcd3Version_1_20
}
c.Version = DefaultEtcd3Version_1_22
}
}

View File

@ -203,7 +203,7 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error {
clusterSpec.Kubelet.CgroupDriver = "systemd"
}
if b.IsKubernetesGTE("1.22") && clusterSpec.Kubelet.ProtectKernelDefaults == nil {
if clusterSpec.Kubelet.ProtectKernelDefaults == nil {
clusterSpec.Kubelet.ProtectKernelDefaults = fi.PtrTo(true)
}

View File

@ -87,11 +87,7 @@ func (b *KubeSchedulerBuilder) buildSchedulerConfig() ([]byte, error) {
} else {
config = &unstructured.Unstructured{}
config.SetKind("KubeSchedulerConfiguration")
if b.IsKubernetesGTE("1.22") {
config.SetAPIVersion("kubescheduler.config.k8s.io/v1beta2")
} else {
config.SetAPIVersion("kubescheduler.config.k8s.io/v1beta1")
}
config.SetAPIVersion("kubescheduler.config.k8s.io/v1beta2")
// We need to store the object, because we are often called repeatedly (until we converge)
b.AdditionalObjects = append(b.AdditionalObjects, kubemanifest.NewObject(config.Object))
}
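A runnable sketch of the same pattern using the apimachinery unstructured helpers; only the SetKind/SetAPIVersion calls come from the hunk above, and the YAML marshalling is added here just to show the resulting stub:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/yaml"
)

func main() {
	// With 1.21 support removed, the generated KubeSchedulerConfiguration
	// stub always uses the v1beta2 API version.
	config := &unstructured.Unstructured{}
	config.SetKind("KubeSchedulerConfiguration")
	config.SetAPIVersion("kubescheduler.config.k8s.io/v1beta2")

	out, err := yaml.Marshal(config.Object)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}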

View File

@ -3,4 +3,4 @@ kind: Cluster
metadata:
name: minimal.example.com
spec:
kubernetesVersion: v1.21.0
kubernetesVersion: v1.26.0

View File

@ -3,7 +3,7 @@ Zones:
- us-test-1a
CloudProvider: aws
Networking: cni
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0
# We specify SSHAccess but _not_ AdminAccess
SSHAccess:
- 1.2.3.4/32

View File

@ -9,4 +9,4 @@ ControlPlaneZones:
- us-test-1c
CloudProvider: aws
Networking: cni
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0

View File

@ -4,4 +4,4 @@ Zones:
ControlPlaneCount: 3
CloudProvider: aws
Networking: cni
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0

View File

@ -5,4 +5,4 @@ Zones:
ControlPlaneCount: 5
CloudProvider: aws
Networking: cni
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0

View File

@ -6,4 +6,4 @@ Networking: cni
Topology: private
Bastion: true
Egress: i-09123456
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0

View File

@ -1,91 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
name: minimal.example.com
spec:
api:
dns: {}
authorization:
rbac: {}
channel: stable
cloudProvider: aws
configBase: memfs://tests/minimal.example.com
etcdClusters:
- cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: control-plane-us-test-1a
name: a
memoryRequest: 100Mi
name: main
- cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: control-plane-us-test-1a
name: a
memoryRequest: 100Mi
name: events
iam:
allowContainerRegistry: true
legacy: false
kubelet:
anonymousAuth: false
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: v1.21.0
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
topology:
dns:
type: Public
masters: public
nodes: public
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: control-plane-us-test-1a
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20221206
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: nodes-us-test-1a
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20221206
machineType: t2.medium
maxSize: 1
minSize: 1
role: Node
subnets:
- us-test-1a

View File

@ -1,6 +0,0 @@
ClusterName: minimal.example.com
Zones:
- us-test-1a
CloudProvider: aws
Networking: cni
KubernetesVersion: v1.21.0

View File

@ -1,91 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
name: minimal.example.com
spec:
api:
dns: {}
authorization:
rbac: {}
channel: stable
cloudProvider: aws
configBase: memfs://tests/minimal.example.com
etcdClusters:
- cpuRequest: 200m
etcdMembers:
- encryptedVolume: true
instanceGroup: control-plane-us-test-1a
name: a
memoryRequest: 100Mi
name: main
- cpuRequest: 100m
etcdMembers:
- encryptedVolume: true
instanceGroup: control-plane-us-test-1a
name: a
memoryRequest: 100Mi
name: events
iam:
allowContainerRegistry: true
legacy: false
kubelet:
anonymousAuth: false
kubernetesApiAccess:
- 0.0.0.0/0
- ::/0
kubernetesVersion: v1.21.0
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
- ::/0
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
topology:
dns:
type: Public
masters: public
nodes: public
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: control-plane-us-test-1a
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20221206
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal.example.com
name: nodes-us-test-1a
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20221206
machineType: t2.medium
maxSize: 1
minSize: 1
role: Node
subnets:
- us-test-1a

View File

@ -1,6 +0,0 @@
ClusterName: minimal.example.com
Zones:
- us-test-1a
CloudProvider: aws
Networking: cni
KubernetesVersion: v1.21.0

View File

@ -6,4 +6,4 @@ Networking: cni
Topology: private
Bastion: true
Egress: nat-09123456
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0

View File

@ -3,6 +3,6 @@ Zones:
- us-test-1a
CloudProvider: aws
Networking: cni
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0
Sets:
- cluster.spec.nodePortAccess=1.2.3.4/32,10.20.30.0/24

View File

@ -11,5 +11,5 @@ NodeSecurityGroups:
ControlPlaneSecurityGroups:
- sg-exampleid3
- sg-exampleid4
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0
cloudLabels: "Owner=John Doe,dn=\"cn=John Doe: dc=example dc=com\", foo/bar=fib+baz"

View File

@ -11,7 +11,7 @@ NodeSecurityGroups:
ControlPlaneSecurityGroups:
- sg-exampleid3
- sg-exampleid4
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0
cloudLabels: "Owner=John Doe,dn=\"cn=John Doe: dc=example dc=com\", foo/bar=fib+baz"
Project: testproject
GCEServiceAccount: test-account@testproject.iam.gserviceaccount.com

View File

@ -9,4 +9,4 @@ SubnetIDs:
- subnet-1
UtilitySubnetIDs:
- subnet-2
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0

View File

@ -6,4 +6,4 @@ Networking: cni
NetworkID: vpc-12345678
SubnetIDs:
- subnet-1
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0

View File

@ -5,4 +5,4 @@ CloudProvider: aws
Networking: cni
SubnetIDs:
- subnet-1
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0

View File

@ -4,4 +4,4 @@ Zones:
CloudProvider: aws
Networking: cni
NetworkID: vpc-12345678
KubernetesVersion: v1.21.0
KubernetesVersion: v1.26.0

View File

@ -1,855 +0,0 @@
# Pulled and modified from: https://docs.projectcalico.org/v3.13/manifests/canal.yaml
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "{{ if .Networking.Canal.TyphaReplicas }}calico-typha{{ else }}none{{ end }}"
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# Configure the MTU to use
{{- if .Networking.Canal.MTU }}
veth_mtu: "{{ .Networking.Canal.MTU }}"
{{- else }}
veth_mtu: "{{- if eq GetCloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}"
{{- end }}
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "{{ .Networking.NonMasqueradeCIDR }}",
"Backend": {
"Type": "vxlan"
}
}
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMConfig
plural: ipamconfigs
singular: ipamconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMHandle
plural: ipamhandles
singular: ipamhandle
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkSet
plural: networksets
singular: networkset
---
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
# Pod CIDR auto-detection on kubeadm needs access to config maps.
- apiGroups: [""]
resources:
- configmaps
verbs:
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
---
# Flannel ClusterRole
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- apiGroups: [""]
resources:
- nodes
verbs:
- list
- watch
- apiGroups: [""]
resources:
- nodes/status
verbs:
- patch
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: canal-flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: canal-calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
{{ if .Networking.Canal.TyphaReplicas -}}
---
# Source: calico/templates/calico-typha.yaml
# This manifest creates a Service, which will be backed by Calico's Typha daemon.
# Typha sits in between Felix and the API server, reducing Calico's load on the API server.
apiVersion: v1
kind: Service
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
spec:
ports:
- port: 5473
protocol: TCP
targetPort: calico-typha
name: calico-typha
selector:
k8s-app: calico-typha
---
# This manifest creates a Deployment of Typha to back the above service.
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
spec:
# Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the
# typha_service_name variable in the canal-config ConfigMap above.
#
# We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential
# (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In
# production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade.
replicas: {{ or .Networking.Canal.TyphaReplicas 0 }}
revisionHistoryLimit: 2
selector:
matchLabels:
k8s-app: calico-typha
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-typha
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Since Calico can't network a pod until Typha is up, we need to run Typha itself
# as a host-networked pod.
serviceAccountName: canal
priorityClassName: system-cluster-critical
# fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573
securityContext:
fsGroup: 65534
containers:
- image: calico/typha:v3.13.4
name: calico-typha
ports:
- containerPort: 5473
name: calico-typha
protocol: TCP
env:
# Enable "info" logging by default. Can be set to "debug" to increase verbosity.
- name: TYPHA_LOGSEVERITYSCREEN
value: "info"
# Disable logging to file and syslog since those don't make sense in Kubernetes.
- name: TYPHA_LOGFILEPATH
value: "none"
- name: TYPHA_LOGSEVERITYSYS
value: "none"
# Monitor the Kubernetes API to find the number of running instances and rebalance
# connections.
- name: TYPHA_CONNECTIONREBALANCINGMODE
value: "kubernetes"
- name: TYPHA_DATASTORETYPE
value: "kubernetes"
- name: TYPHA_HEALTHENABLED
value: "true"
- name: TYPHA_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.TyphaPrometheusMetricsEnabled "false" }}"
- name: TYPHA_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.TyphaPrometheusMetricsPort "9093" }}"
livenessProbe:
httpGet:
path: /liveness
port: 9098
host: localhost
periodSeconds: 30
initialDelaySeconds: 30
securityContext:
runAsNonRoot: true
allowPrivilegeEscalation: false
readinessProbe:
httpGet:
path: /readiness
port: 9098
host: localhost
periodSeconds: 10
---
# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: calico-typha
namespace: kube-system
labels:
k8s-app: calico-typha
spec:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: calico-typha
{{- end }}
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the canal container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: canal
spec:
nodeSelector:
kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure canal gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: canal
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.13.4
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-canal.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: canal-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
securityContext:
privileged: true
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: calico/pod2daemon-flexvol:v3.13.4
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
securityContext:
privileged: true
containers:
# Runs canal container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.13.4
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Configure route aggregation based on pod CIDR.
- name: USE_POD_CIDR
value: "true"
{{- if .Networking.Canal.TyphaReplicas }}
# Typha support: controlled by the ConfigMap.
- name: FELIX_TYPHAK8SSERVICENAME
valueFrom:
configMapKeyRef:
name: canal-config
key: typha_service_name
{{- end }}
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,canal"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# No IP address needed.
- name: IP
value: ""
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: canal-config
key: veth_mtu
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "{{- or .Networking.Canal.LogSeveritySys "info" }}"
- name: FELIX_HEALTHENABLED
value: "true"
# kops additions
# Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
- name: FELIX_CHAININSERTMODE
value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
# Set Felix iptables binary variant, Legacy or NFT
- name: FELIX_IPTABLESBACKEND
value: "{{- or .Networking.Canal.IptablesBackend "Auto" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
securityContext:
privileged: true
resources:
requests:
cpu: {{ or .Networking.Canal.CPURequest "100m" }}
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
timeoutSeconds: 10
readinessProbe:
httpGet:
path: /readiness
port: 9099
host: localhost
periodSeconds: 10
timeoutSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
{{- if not (WithDefaultBool .Networking.Canal.FlanneldIptablesForwardRules true) }}
- name: FLANNELD_IPTABLES_FORWARD_RULES
value: "false"
{{- end }}
volumeMounts:
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by canal.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used by flannel.
- name: flannel-cfg
configMap:
name: canal-config
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system

View File

@ -954,17 +954,6 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.CloudupModelBuilderContext)
id := "k8s-1.25"
location := key + "/" + id + ".yaml"
addon := addons.Add(&channelsapi.AddonSpec{
Name: fi.PtrTo(key),
Selector: networkingSelector(),
Manifest: fi.PtrTo(location),
Id: id,
})
addon.BuildPrune = true
} else if b.IsKubernetesGTE("v1.22.0") {
id := "k8s-1.22"
location := key + "/" + id + ".yaml"
addon := addons.Add(&channelsapi.AddonSpec{
Name: fi.PtrTo(key),
Selector: networkingSelector(),
@ -973,7 +962,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.CloudupModelBuilderContext)
})
addon.BuildPrune = true
} else {
id := "k8s-1.16"
id := "k8s-1.22"
location := key + "/" + id + ".yaml"
addon := addons.Add(&channelsapi.AddonSpec{
@ -993,17 +982,6 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.CloudupModelBuilderContext)
id := "k8s-1.25"
location := key + "/" + id + ".yaml"
addon := addons.Add(&channelsapi.AddonSpec{
Name: fi.PtrTo(key),
Selector: networkingSelector(),
Manifest: fi.PtrTo(location),
Id: id,
})
addon.BuildPrune = true
} else if b.IsKubernetesGTE("v1.22.0") {
id := "k8s-1.22"
location := key + "/" + id + ".yaml"
addon := addons.Add(&channelsapi.AddonSpec{
Name: fi.PtrTo(key),
Selector: networkingSelector(),
@ -1012,7 +990,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.CloudupModelBuilderContext)
})
addon.BuildPrune = true
} else {
id := "k8s-1.16"
id := "k8s-1.22"
location := key + "/" + id + ".yaml"
addon := addons.Add(&channelsapi.AddonSpec{
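With the k8s-1.16 manifests gone, the addon id selection collapses to a two-way choice. A sketch under the assumption that isKubernetesGTE125 mirrors b.IsKubernetesGTE("v1.25.0"), with a placeholder addon key:

package main

import "fmt"

// addonManifestLocation sketches the post-1.21 selection: clusters at 1.25 or
// newer get the k8s-1.25 manifest, everything else falls back to k8s-1.22.
func addonManifestLocation(key string, isKubernetesGTE125 bool) string {
	id := "k8s-1.22"
	if isKubernetesGTE125 {
		id = "k8s-1.25"
	}
	return key + "/" + id + ".yaml"
}

func main() {
	fmt.Println(addonManifestLocation("example-addon.addons.k8s.io", false))
}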

View File

@ -34,9 +34,6 @@ import (
// https://github.com/kubernetes/kubernetes/issues/30338
const (
// defaultCNIAssetAmd64K8s_15 is the CNI tarball for k8s >= 1.15
defaultCNIAssetAmd64K8s_15 = "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz"
defaultCNIAssetArm64K8s_15 = "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz"
// defaultCNIAssetAmd64K8s_22 is the CNI tarball for k8s >= 1.22
defaultCNIAssetAmd64K8s_22 = "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz"
defaultCNIAssetArm64K8s_22 = "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz"
@ -75,18 +72,10 @@ func findCNIAssets(c *kopsapi.Cluster, assetBuilder *assets.AssetBuilder, arch a
switch arch {
case architectures.ArchitectureAmd64:
if c.IsKubernetesLT("1.22") {
cniAssetURL = defaultCNIAssetAmd64K8s_15
} else {
cniAssetURL = defaultCNIAssetAmd64K8s_22
}
cniAssetURL = defaultCNIAssetAmd64K8s_22
klog.V(2).Infof("Adding default AMD64 CNI plugin binaries asset: %s", cniAssetURL)
case architectures.ArchitectureArm64:
if c.IsKubernetesLT("1.22") {
cniAssetURL = defaultCNIAssetArm64K8s_15
} else {
cniAssetURL = defaultCNIAssetArm64K8s_22
}
cniAssetURL = defaultCNIAssetArm64K8s_22
klog.V(2).Infof("Adding default ARM64 CNI plugin binaries asset: %s", cniAssetURL)
default:
return nil, nil, fmt.Errorf("unknown arch for CNI plugin binaries asset: %s", arch)
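A standalone sketch of the version-independent lookup that remains; the URLs are the v0.9.1 tarballs named above, and the plain arch strings stand in for the architectures constants:

package main

import "fmt"

// defaultCNIAssetURL sketches the selection left behind: both architectures
// now always get the v0.9.1 plugin tarballs, and only an unknown architecture
// is an error.
func defaultCNIAssetURL(arch string) (string, error) {
	switch arch {
	case "amd64":
		return "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz", nil
	case "arm64":
		return "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-arm64-v0.9.1.tgz", nil
	default:
		return "", fmt.Errorf("unknown arch for CNI plugin binaries asset: %s", arch)
	}
}

func main() {
	url, err := defaultCNIAssetURL("amd64")
	if err != nil {
		panic(err)
	}
	fmt.Println(url)
}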

View File

@ -49,28 +49,6 @@ func Test_FindCNIAssetFromEnvironmentVariable(t *testing.T) {
}
}
func Test_FindCNIAssetFromDefaults118(t *testing.T) {
desiredCNIVersionURL := "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz"
desiredCNIVersionHash := "sha256:977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8"
cluster := &api.Cluster{}
cluster.Spec.KubernetesVersion = "v1.18.0"
assetBuilder := assets.NewAssetBuilder(cluster, false)
cniAsset, cniAssetHash, err := findCNIAssets(cluster, assetBuilder, architectures.ArchitectureAmd64)
if err != nil {
t.Errorf("Unable to parse CNI version %s", err)
}
if cniAsset.String() != desiredCNIVersionURL {
t.Errorf("Expected default CNI version %q, but got %q instead", desiredCNIVersionURL, cniAsset)
}
if cniAssetHash.String() != desiredCNIVersionHash {
t.Errorf("Expected default CNI version hash %q, but got %q instead", desiredCNIVersionHash, cniAssetHash)
}
}
func Test_FindCNIAssetFromDefaults122(t *testing.T) {
desiredCNIVersionURL := "https://storage.googleapis.com/k8s-artifacts-cni/release/v0.9.1/cni-plugins-linux-amd64-v0.9.1.tgz"
desiredCNIVersionHash := "sha256:962100bbc4baeaaa5748cdbfce941f756b1531c2eadb290129401498bfac21e7"

View File

@ -880,17 +880,15 @@ func setupControlPlane(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubne
g.Spec.Zones = []string{zone}
}
if cluster.IsKubernetesGTE("1.22") {
if cloudProvider == api.CloudProviderAWS {
g.Spec.InstanceMetadata = &api.InstanceMetadataOptions{
HTTPPutResponseHopLimit: fi.PtrTo(int64(3)),
HTTPTokens: fi.PtrTo("required"),
}
}
if cluster.IsKubernetesGTE("1.26") && fi.ValueOf(cluster.Spec.IAM.UseServiceAccountExternalPermissions) {
g.Spec.InstanceMetadata.HTTPPutResponseHopLimit = fi.PtrTo(int64(1))
if cloudProvider == api.CloudProviderAWS {
g.Spec.InstanceMetadata = &api.InstanceMetadataOptions{
HTTPPutResponseHopLimit: fi.PtrTo(int64(3)),
HTTPTokens: fi.PtrTo("required"),
}
}
if cluster.IsKubernetesGTE("1.26") && fi.ValueOf(cluster.Spec.IAM.UseServiceAccountExternalPermissions) {
g.Spec.InstanceMetadata.HTTPPutResponseHopLimit = fi.PtrTo(int64(1))
}
g.Spec.MachineType = opt.ControlPlaneSize
g.Spec.Image = opt.ControlPlaneImage
@ -1011,12 +1009,10 @@ func setupNodes(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubnetMap ma
g.Spec.Zones = []string{zone}
}
if cluster.IsKubernetesGTE("1.22") {
if cloudProvider == api.CloudProviderAWS {
g.Spec.InstanceMetadata = &api.InstanceMetadataOptions{
HTTPPutResponseHopLimit: fi.PtrTo(int64(1)),
HTTPTokens: fi.PtrTo("required"),
}
if cloudProvider == api.CloudProviderAWS {
g.Spec.InstanceMetadata = &api.InstanceMetadataOptions{
HTTPPutResponseHopLimit: fi.PtrTo(int64(1)),
HTTPTokens: fi.PtrTo("required"),
}
}
@ -1080,12 +1076,10 @@ func setupAPIServers(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubnetM
g.Spec.Zones = []string{zone}
}
if cluster.IsKubernetesGTE("1.22") {
if cloudProvider == api.CloudProviderAWS {
g.Spec.InstanceMetadata = &api.InstanceMetadataOptions{
HTTPPutResponseHopLimit: fi.PtrTo(int64(1)),
HTTPTokens: fi.PtrTo("required"),
}
if cloudProvider == api.CloudProviderAWS {
g.Spec.InstanceMetadata = &api.InstanceMetadataOptions{
HTTPPutResponseHopLimit: fi.PtrTo(int64(1)),
HTTPTokens: fi.PtrTo("required"),
}
}
@ -1284,11 +1278,9 @@ func setupTopology(opt *NewClusterOptions, cluster *api.Cluster, allZones sets.S
bastionGroup.Spec.Zones = allZones.List()
}
if cluster.IsKubernetesGTE("1.22") {
bastionGroup.Spec.InstanceMetadata = &api.InstanceMetadataOptions{
HTTPPutResponseHopLimit: fi.PtrTo(int64(1)),
HTTPTokens: fi.PtrTo("required"),
}
bastionGroup.Spec.InstanceMetadata = &api.InstanceMetadataOptions{
HTTPPutResponseHopLimit: fi.PtrTo(int64(1)),
HTTPTokens: fi.PtrTo("required"),
}
bastionGroup.Spec.Image = opt.BastionImage
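Across these hunks the instance-metadata defaults are now applied unconditionally. A sketch with a local struct standing in for kops' InstanceMetadataOptions; the separate 1.26 rule that lowers the control-plane hop limit when external service-account permissions are used is left out:

package main

import "fmt"

// instanceMetadataOptions is a local stand-in for kops' InstanceMetadataOptions.
type instanceMetadataOptions struct {
	HTTPPutResponseHopLimit int64
	HTTPTokens              string
}

// defaultInstanceMetadata sketches the defaults that remain once the
// IsKubernetesGTE("1.22") guards are gone: IMDSv2 tokens are always required,
// with a hop limit of 3 for control-plane instances and 1 for nodes, API
// servers, and bastions.
func defaultInstanceMetadata(controlPlane bool) instanceMetadataOptions {
	hopLimit := int64(1)
	if controlPlane {
		hopLimit = 3
	}
	return instanceMetadataOptions{
		HTTPPutResponseHopLimit: hopLimit,
		HTTPTokens:              "required",
	}
}

func main() {
	fmt.Printf("%+v\n", defaultInstanceMetadata(true))
}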

View File

@ -654,7 +654,7 @@ func (tf *TemplateFunctions) KopsControllerConfig() (string, error) {
Region: tf.Region,
}
if cluster.Spec.ExternalCloudControllerManager != nil && cluster.IsKubernetesGTE("1.22") {
if cluster.Spec.ExternalCloudControllerManager != nil {
config.Server.UseInstanceIDForNodeName = true
}

View File

@ -22,7 +22,7 @@ spec:
name: master-us-test-1a
name: events
iam: {}
kubernetesVersion: v1.21.0
kubernetesVersion: v1.26.0
masterPublicName: api.minimal.example.com
additionalSans:
- proxy.api.minimal.example.com

View File

@ -21,7 +21,7 @@ spec:
name: master-us-test-1a
name: events
iam: {}
kubernetesVersion: v1.21.0
kubernetesVersion: v1.26.0
masterPublicName: api.minimal.example.com
additionalSans:
- proxy.api.minimal.example.com

View File

@ -26,7 +26,7 @@ spec:
cloudControllerManager:
cloudProvider: aws
iam: {}
kubernetesVersion: v1.21.0
kubernetesVersion: v1.26.0
masterPublicName: api.minimal.example.com
additionalSans:
- proxy.api.minimal.example.com

View File

@ -25,7 +25,7 @@ spec:
name: master-us-test-1a
name: events
iam: {}
kubernetesVersion: v1.21.0
kubernetesVersion: v1.26.0
masterPublicName: api.minimal.example.com
additionalSans:
- proxy.api.minimal.example.com

View File

@ -29,7 +29,7 @@ spec:
name: master-us-test-1a
name: events
iam: {}
kubernetesVersion: v1.21.0
kubernetesVersion: v1.26.0
masterPublicName: api.minimal.example.com
additionalSans:
- proxy.api.minimal.example.com

View File

@ -19,7 +19,7 @@ spec:
name: master-us-test-1a
name: events
iam: {}
kubernetesVersion: v1.21.0
kubernetesVersion: v1.26.0
kubeDNS:
provider: CoreDNS
tolerations:

View File

@ -21,7 +21,7 @@ spec:
name: master-us-test-1a
name: events
iam: {}
kubernetesVersion: v1.21.0
kubernetesVersion: v1.26.0
masterPublicName: api.minimal.example.com
additionalSans:
- proxy.api.minimal.example.com

View File

@ -19,7 +19,7 @@ spec:
name: master-us-test-1a
name: events
iam: {}
kubernetesVersion: v1.21.0
kubernetesVersion: v1.22.0
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking: