Merge pull request #12623 from johngmyers/cilium-ipv6-ipam

Never masquerade IPv6 with Cilium
Kubernetes Prow Robot 2021-10-29 05:56:51 -07:00 committed by GitHub
commit 5bfdefb43c
33 changed files with 68 additions and 104 deletions

View File

@ -5217,11 +5217,6 @@ spec:
podCIDR:
description: PodCIDR is the CIDR from which we allocate IPs for pods
type: string
-podCIDRFromCloud:
-description: PodCIDRFromCloud determines if the Node's podCIDR should
-be set by the cloud provider. This requires ipv6 enabled and that
-instances can be given full ipv6 prefixes.
-type: boolean
project:
description: Project is the cloud project we should use, required
on GCE

View File

@ -28,7 +28,7 @@ type PrefixBuilder struct {
var _ fi.ModelBuilder = &PrefixBuilder{}
func (b *PrefixBuilder) Build(c *fi.ModelBuilderContext) error {
-if !b.Cluster.Spec.PodCIDRFromCloud {
+if !b.Cluster.Spec.IsKopsControllerIPAM() {
return nil
}
c.AddTask(&nodetasks.Prefix{

View File

@ -112,9 +112,6 @@ type ClusterSpec struct {
ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty"`
// PodCIDR is the CIDR from which we allocate IPs for pods
PodCIDR string `json:"podCIDR,omitempty"`
-// PodCIDRFromCloud determines if the Node's podCIDR should be set by the cloud provider.
-// This requires ipv6 enabled and that instances can be given full ipv6 prefixes.
-PodCIDRFromCloud bool `json:"podCIDRFromCloud,omitempty"`
// NonMasqueradeCIDR is the CIDR for the internal k8s network (on which pods & services live)
// It cannot overlap ServiceClusterIPRange
NonMasqueradeCIDR string `json:"nonMasqueradeCIDR,omitempty"`
@ -835,6 +832,10 @@ func (c *ClusterSpec) IsIPv6Only() bool {
return utils.IsIPv6CIDR(c.NonMasqueradeCIDR)
}
+func (c *ClusterSpec) IsKopsControllerIPAM() bool {
+return c.IsIPv6Only()
+}
// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
// Name of the environment variable. Must be a C_IDENTIFIER.
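
Note: the new IsKopsControllerIPAM helper above delegates to IsIPv6Only, which checks utils.IsIPv6CIDR(c.NonMasqueradeCIDR). That utility is not part of this diff; a minimal, self-contained sketch of the kind of check it performs, using only the standard library, might look like:

// Sketch only: the real helper lives elsewhere in the kops tree and is not
// shown in this diff; this illustrates the check IsIPv6Only relies on.
package main

import (
	"fmt"
	"net"
)

// isIPv6CIDR reports whether cidr parses as a CIDR whose address is IPv6.
func isIPv6CIDR(cidr string) bool {
	ip, _, err := net.ParseCIDR(cidr)
	if err != nil {
		return false
	}
	// To4 returns nil for addresses that cannot be represented in 4 bytes.
	return ip.To4() == nil
}

func main() {
	fmt.Println(isIPv6CIDR("::/0"))          // true: the IPv6 fixtures' nonMasqueradeCIDR
	fmt.Println(isIPv6CIDR("100.64.0.0/10")) // false: the old IPv4 value
}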

View File

@ -107,9 +107,6 @@ type ClusterSpec struct {
ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty"`
// PodCIDR is the CIDR from which we allocate IPs for pods
PodCIDR string `json:"podCIDR,omitempty"`
-// PodCIDRFromCloud determines if the Node's podCIDR should be set by the cloud provider.
-// This requires ipv6 enabled and that instances can be given full ipv6 prefixes.
-PodCIDRFromCloud bool `json:"podCIDRFromCloud,omitempty"`
//MasterIPRange string `json:",omitempty"`
// NonMasqueradeCIDR is the CIDR for the internal k8s network (on which pods & services live)
// It cannot overlap ServiceClusterIPRange

View File

@ -2433,7 +2433,6 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
out.ClusterDNSDomain = in.ClusterDNSDomain
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.PodCIDR = in.PodCIDR
-out.PodCIDRFromCloud = in.PodCIDRFromCloud
out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
out.SSHAccess = in.SSHAccess
out.NodePortAccess = in.NodePortAccess
@ -2846,7 +2845,6 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec,
out.ClusterDNSDomain = in.ClusterDNSDomain
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.PodCIDR = in.PodCIDR
-out.PodCIDRFromCloud = in.PodCIDRFromCloud
out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
out.SSHAccess = in.SSHAccess
out.NodePortAccess = in.NodePortAccess

View File

@ -108,9 +108,6 @@ type ClusterSpec struct {
ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty"`
// PodCIDR is the CIDR from which we allocate IPs for pods
PodCIDR string `json:"podCIDR,omitempty"`
-// PodCIDRFromCloud determines if the Node's podCIDR should be set by the cloud provider.
-// This requires ipv6 enabled and that instances can be given full ipv6 prefixes.
-PodCIDRFromCloud bool `json:"podCIDRFromCloud,omitempty"`
//MasterIPRange string `json:",omitempty"`
// NonMasqueradeCIDR is the CIDR for the internal k8s network (on which pods & services live)
// It cannot overlap ServiceClusterIPRange

View File

@ -2362,7 +2362,6 @@ func autoConvert_v1alpha3_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
out.ClusterDNSDomain = in.ClusterDNSDomain
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.PodCIDR = in.PodCIDR
-out.PodCIDRFromCloud = in.PodCIDRFromCloud
out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
out.SSHAccess = in.SSHAccess
out.NodePortAccess = in.NodePortAccess
@ -2767,7 +2766,6 @@ func autoConvert_kops_ClusterSpec_To_v1alpha3_ClusterSpec(in *kops.ClusterSpec,
out.ClusterDNSDomain = in.ClusterDNSDomain
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.PodCIDR = in.PodCIDR
-out.PodCIDRFromCloud = in.PodCIDRFromCloud
out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
out.SSHAccess = in.SSHAccess
out.NodePortAccess = in.NodePortAccess

View File

@ -36,7 +36,6 @@ import (
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/pkg/model/iam"
"k8s.io/kops/upup/pkg/fi"
@ -263,13 +262,6 @@ func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *fie
}
}
-if spec.PodCIDRFromCloud {
-if !featureflag.AWSIPv6.Enabled() {
-allErrs = append(allErrs, field.Forbidden(fieldPath.Child("podCIDRFromCloud", "serviceAccountExternalPermissions"), "podCIDRFromCloud requires the AWSIPv6 feature flag to be enabled"))
-}
-}
return allErrs
}

View File

@ -116,7 +116,7 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error {
}
if c.Tunnel == "" {
-if c.Ipam == "eni" || clusterSpec.PodCIDRFromCloud {
+if c.Ipam == "eni" || clusterSpec.IsIPv6Only() {
c.Tunnel = "disabled"
} else {
c.Tunnel = "vxlan"

View File

@ -116,9 +116,9 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error
// Doesn't seem to be any real downside to always doing a leader election
kcm.LeaderElection = &kops.LeaderElectionConfiguration{LeaderElect: fi.Bool(true)}
-kcm.AllocateNodeCIDRs = fi.Bool(!clusterSpec.PodCIDRFromCloud)
+kcm.AllocateNodeCIDRs = fi.Bool(!clusterSpec.IsKopsControllerIPAM())
-if kcm.ClusterCIDR == "" && !clusterSpec.PodCIDRFromCloud {
+if kcm.ClusterCIDR == "" && !clusterSpec.IsKopsControllerIPAM() {
kcm.ClusterCIDR = clusterSpec.PodCIDR
}
@ -163,7 +163,7 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error
if fi.BoolValue(clusterSpec.KubeAPIServer.EnableBootstrapAuthToken) {
changes = append(changes, "tokencleaner")
}
-if clusterSpec.PodCIDRFromCloud {
+if clusterSpec.IsKopsControllerIPAM() {
changes = append(changes, "-nodeipam")
}
if len(changes) != 0 {
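
For context: kube-controller-manager's --controllers flag accepts "*" for the default controller set and "-name" entries to disable individual controllers, so appending "-nodeipam" above turns off the in-tree node IPAM controller when kops-controller takes over IPAM. A minimal sketch of how a changes list like this could be rendered into that flag (the controllersFlag helper is illustrative, not kops code):

// Sketch only: shows the flag shape that a changes slice such as
// []string{"-nodeipam"} produces for kube-controller-manager.
package main

import (
	"fmt"
	"strings"
)

func controllersFlag(changes []string) string {
	// "*" keeps the default controllers enabled; entries like "-nodeipam"
	// then subtract individual controllers from that set.
	return "--controllers=" + strings.Join(append([]string{"*"}, changes...), ",")
}

func main() {
	fmt.Println(controllersFlag([]string{"-nodeipam"}))
	// --controllers=*,-nodeipam
}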

View File

@ -99,7 +99,7 @@ func (b *KubeProxyOptionsBuilder) BuildOptions(o interface{}) error {
func (*KubeProxyOptionsBuilder) needsClusterCIDR(clusterSpec *kops.ClusterSpec) bool {
// If we use podCIDR from cloud, we should not set cluster cidr.
-if clusterSpec.PodCIDRFromCloud {
+if clusterSpec.IsKopsControllerIPAM() {
return false
}

View File

@ -768,7 +768,7 @@ func (b *PolicyBuilder) addNodeupPermissions(p *Policy, enableHookSupport bool)
"ec2:DescribeInstanceTypes",
)
-if b.Cluster.Spec.PodCIDRFromCloud {
+if b.Cluster.Spec.IsKopsControllerIPAM() {
p.unconditionalAction.Insert(
"ec2:AssignIpv6Addresses",
)
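
The ec2:AssignIpv6Addresses permission kept above covers the EC2 call that delegates IPv6 prefixes to a node's network interface. A sketch of that call using aws-sdk-go, under the assumption that the real invocation lives in kops' nodeup Prefix task (not shown here); the ENI ID is illustrative:

// Sketch only: the sort of call this IAM permission allows.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ec2.New(sess)

	// Ask EC2 to delegate one IPv6 prefix (a /80) to this network interface.
	out, err := svc.AssignIpv6Addresses(&ec2.AssignIpv6AddressesInput{
		NetworkInterfaceId: aws.String("eni-0123456789abcdef0"), // illustrative
		Ipv6PrefixCount:    aws.Int64(1),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.AssignedIpv6Prefixes)
}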

View File

@ -127,6 +127,9 @@ cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
+nodeIPFamilies:
+- ipv6
+- ipv4
containerRuntime: containerd
containerd:
logLevel: info
@ -146,7 +149,7 @@ kubeAPIServer:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
-bindAddress: 0.0.0.0
+bindAddress: '::'
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
@ -179,7 +182,7 @@ kubeAPIServer:
securePort: 443
serviceAccountIssuer: https://api.internal.minimal-ipv6.example.com
serviceAccountJWKSURI: https://api.internal.minimal-ipv6.example.com/openid/v1/jwks
-serviceClusterIPRange: 100.64.0.0/13
+serviceClusterIPRange: fd00:5e4f:ce::/108
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: false
@ -210,7 +213,7 @@ kubelet:
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
-clusterDNS: 100.64.0.10
+clusterDNS: fd00:5e4f:ce::a
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
@ -218,14 +221,14 @@ kubelet:
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
-nonMasqueradeCIDR: 100.64.0.0/10
+nonMasqueradeCIDR: ::/0
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
-clusterDNS: 100.64.0.10
+clusterDNS: fd00:5e4f:ce::a
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
@ -233,7 +236,7 @@ masterKubelet:
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
-nonMasqueradeCIDR: 100.64.0.0/10
+nonMasqueradeCIDR: ::/0
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
@ -244,7 +247,7 @@ CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal-ipv6.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
-NodeupConfigHash: yrj4teDAp1g5GdFmTTsqdvkO9tGX3EVpiqHmwEPCLw4=
+NodeupConfigHash: E/2UN3GhVLTRD3ByNw5y9wNYgvXfvosAyFhP1X5utsw=
__EOF_KUBE_ENV

View File

@ -127,6 +127,9 @@ cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
+nodeIPFamilies:
+- ipv6
+- ipv4
containerRuntime: containerd
containerd:
logLevel: info
@ -143,7 +146,7 @@ kubelet:
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
-clusterDNS: 100.64.0.10
+clusterDNS: fd00:5e4f:ce::a
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
@ -151,7 +154,7 @@ kubelet:
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
-nonMasqueradeCIDR: 100.64.0.0/10
+nonMasqueradeCIDR: ::/0
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
@ -161,7 +164,7 @@ CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal-ipv6.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
-NodeupConfigHash: oLOgAfNuXTV6ZrJSk0ddFu5+Jr/oeJ7LLMCtUQygY1w=
+NodeupConfigHash: Mqfc35n7HWWI03aEiC/9tG99xKZd6sr0kJSCwJvzhKA=
__EOF_KUBE_ENV

View File

@ -15,6 +15,9 @@ spec:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
+nodeIPFamilies:
+- ipv6
+- ipv4
cloudProvider: aws
clusterDNSDomain: cluster.local
configBase: memfs://clusters.example.com/minimal-ipv6.example.com
@ -53,7 +56,7 @@ spec:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
-bindAddress: 0.0.0.0
+bindAddress: '::'
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
@ -86,7 +89,7 @@ spec:
securePort: 443
serviceAccountIssuer: https://api.internal.minimal-ipv6.example.com
serviceAccountJWKSURI: https://api.internal.minimal-ipv6.example.com/openid/v1/jwks
-serviceClusterIPRange: 100.64.0.0/13
+serviceClusterIPRange: fd00:5e4f:ce::/108
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: false
@ -116,7 +119,9 @@ spec:
memoryRequest: 5Mi
provider: CoreDNS
replicas: 2
-serverIP: 100.64.0.10
+serverIP: fd00:5e4f:ce::a
+upstreamNameservers:
+- fd00:ec2::253
kubeProxy:
cpuRequest: 100m
hostnameOverride: '@aws'
@ -132,7 +137,7 @@ spec:
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
-clusterDNS: 100.64.0.10
+clusterDNS: fd00:5e4f:ce::a
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
@ -140,7 +145,7 @@ spec:
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
-nonMasqueradeCIDR: 100.64.0.0/10
+nonMasqueradeCIDR: ::/0
podManifestPath: /etc/kubernetes/manifests
kubernetesApiAccess:
- 0.0.0.0/0
@ -152,7 +157,7 @@ spec:
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
-clusterDNS: 100.64.0.10
+clusterDNS: fd00:5e4f:ce::a
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
@ -160,18 +165,16 @@ spec:
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
-nonMasqueradeCIDR: 100.64.0.0/10
+nonMasqueradeCIDR: ::/0
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
masterPublicName: api.minimal-ipv6.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
-nonMasqueradeCIDR: 100.64.0.0/10
-podCIDR: 100.96.0.0/11
-podCIDRFromCloud: true
+nonMasqueradeCIDR: ::/0
secretStore: memfs://clusters.example.com/minimal-ipv6.example.com/secrets
-serviceClusterIPRange: 100.64.0.0/13
+serviceClusterIPRange: fd00:5e4f:ce::/108
sshAccess:
- 0.0.0.0/0
- ::/0

View File

@ -20,7 +20,7 @@ spec:
version: 9.99.0
- id: k8s-1.12
manifest: coredns.addons.k8s.io/k8s-1.12.yaml
-manifestHash: 88ffe1a3752cf290450cc94bd53aea49a665e411dbf4cfe9c1a2cc5b027f12ef
+manifestHash: e31327420b42b8d1b813625c65601166c52b054ae9ac95a57048d72e70b7033c
name: coredns.addons.k8s.io
selector:
k8s-addon: coredns.addons.k8s.io

View File

@ -81,7 +81,7 @@ data:
ttl 30
}
prometheus :9153
-forward . /etc/resolv.conf {
+forward . fd00:ec2::253 {
max_concurrent 1000
}
cache 30
@ -226,7 +226,7 @@ metadata:
namespace: kube-system
resourceVersion: "0"
spec:
-clusterIP: 100.64.0.10
+clusterIP: fd00:5e4f:ce::a
ports:
- name: dns
port: 53

View File

@ -6,7 +6,7 @@ APIServerConfig:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
-bindAddress: 0.0.0.0
+bindAddress: '::'
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
@ -39,7 +39,7 @@ APIServerConfig:
securePort: 443
serviceAccountIssuer: https://api.internal.minimal-ipv6.example.com
serviceAccountJWKSURI: https://api.internal.minimal-ipv6.example.com/openid/v1/jwks
-serviceClusterIPRange: 100.64.0.0/13
+serviceClusterIPRange: fd00:5e4f:ce::/108
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
@ -235,7 +235,7 @@ KubeletConfig:
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
-clusterDNS: 100.64.0.10
+clusterDNS: fd00:5e4f:ce::a
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
@ -249,7 +249,7 @@ KubeletConfig:
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
-nonMasqueradeCIDR: 100.64.0.0/10
+nonMasqueradeCIDR: ::/0
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
UpdatePolicy: automatic

View File

@ -42,7 +42,7 @@ KubeletConfig:
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
-clusterDNS: 100.64.0.10
+clusterDNS: fd00:5e4f:ce::a
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
@ -53,7 +53,7 @@ KubeletConfig:
nodeLabels:
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
-nonMasqueradeCIDR: 100.64.0.0/10
+nonMasqueradeCIDR: ::/0
podManifestPath: /etc/kubernetes/manifests
UpdatePolicy: automatic
channels:

View File

@ -35,8 +35,7 @@ spec:
networkCIDR: 172.20.0.0/16
networking:
cni: {}
-nonMasqueradeCIDR: 100.64.0.0/10
-podCIDRFromCloud: true
+nonMasqueradeCIDR: ::/0
topology:
masters: public
nodes: public

View File

@ -54,7 +54,7 @@ spec:
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 35f45e466345bbf440198f73fe9c6ab8f87ae8ed7ab714c9930dd76a5fdd60f0
+manifestHash: 3560289593c612da551bb62ce4e04c12ff4524d0a58d90d6def8df5d05a4298e
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -45,7 +45,7 @@ data:
enable-endpoint-health-checking: "true"
enable-ipv4: "true"
enable-ipv6: "false"
-enable-ipv6-masquerade: "true"
+enable-ipv6-masquerade: "false"
enable-l7-proxy: "true"
enable-node-port: "false"
enable-remote-node-identity: "true"

View File

@ -54,7 +54,7 @@ spec:
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: d3bfdf14497029e5668a72dab8413a302db8899ce951a99661922c2f52af135b
+manifestHash: 0b45bffaea8cbfd5a8c163753a5783501b605e3a787bba0af9562bee6a4cb52c
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -45,7 +45,7 @@ data:
enable-endpoint-health-checking: "true"
enable-ipv4: "true"
enable-ipv6: "false"
-enable-ipv6-masquerade: "true"
+enable-ipv6-masquerade: "false"
enable-l7-proxy: "true"
enable-node-port: "false"
enable-remote-node-identity: "true"

View File

@ -151,7 +151,7 @@ data:
# enable-bpf-masquerade enables masquerading packets from endpoints leaving
# the host with BPF instead of iptables. (default false)
-enable-bpf-masquerade: "{{ .EnableBPFMasquerade }}"
+enable-bpf-masquerade: "{{ and (WithDefaultBool .EnableBPFMasquerade false) (not IsIPv6Only) }}"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
@ -221,8 +221,8 @@ data:
# - none
# - auto (automatically detect the container runtime)
#
-masquerade: "{{- if WithDefaultBool .DisableMasquerade false -}}false{{- else -}}true{{- end -}}"
-enable-ipv6-masquerade: "{{- if WithDefaultBool .DisableMasquerade false -}}false{{- else -}}true{{- end -}}"
+masquerade: "{{- not (or IsIPv6Only (WithDefaultBool .DisableMasquerade false) ) -}}"
+enable-ipv6-masquerade: "false"
install-iptables-rules: "{{- if .IPTablesRulesNoinstall -}}false{{- else -}}true{{- end -}}"
auto-direct-node-routes: "{{ .AutoDirectNodeRoutes }}"
{{ if .EnableHostReachableServices }}
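
WithDefaultBool and IsIPv6Only are helpers that kops registers with Go's text/template engine before rendering this manifest; that wiring is not part of this diff. A minimal, self-contained sketch of how such helpers evaluate the new enable-bpf-masquerade expression (the IsIPv6Only stub is hard-coded here for illustration):

// Sketch only: illustrative FuncMap wiring, not the actual kops code.
package main

import (
	"os"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// WithDefaultBool unwraps an optional bool, falling back to a default.
		"WithDefaultBool": func(v *bool, def bool) bool {
			if v == nil {
				return def
			}
			return *v
		},
		// The real IsIPv6Only would consult the cluster spec.
		"IsIPv6Only": func() bool { return true },
	}

	tmpl := template.Must(template.New("cilium").Funcs(funcs).Parse(
		`enable-bpf-masquerade: "{{ and (WithDefaultBool .EnableBPFMasquerade false) (not IsIPv6Only) }}"` + "\n"))

	data := struct{ EnableBPFMasquerade *bool }{}
	_ = tmpl.Execute(os.Stdout, data) // enable-bpf-masquerade: "false"
}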

View File

@ -326,15 +326,9 @@ func (c *populateClusterSpec) assignSubnets(cluster *kopsapi.Cluster) error {
cluster.Spec.KubeControllerManager = &kopsapi.KubeControllerManagerConfig{}
}
-if cluster.Spec.PodCIDR == "" && nmOnes > 0 {
+if cluster.Spec.PodCIDR == "" && nmBits == 32 {
// Allocate as big a range as possible: the NonMasqueradeCIDR mask + 1, with a '1' in the extra bit
ip := nonMasqueradeCIDR.IP.Mask(nonMasqueradeCIDR.Mask)
-if nmBits > 32 && nmOnes < 95 {
-// The maximum size of an IPv6 ClusterCIDR is /64, but a /112 node CIDR gives far more addresses
-// than Kubernetes can handle on a node and is more visually pleasing.
-// Technically, the maximum size of an IPv4 ClusterCIDR is /8, but nobody has a /7 to allocate.
-nmOnes = 95
-}
ip[nmOnes/8] |= 128 >> (nmOnes % 8)
cidr := net.IPNet{IP: ip, Mask: net.CIDRMask(nmOnes+1, nmBits)}
cluster.Spec.PodCIDR = cidr.String()
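
A worked example of the retained IPv4 branch: for NonMasqueradeCIDR 100.64.0.0/10, nmOnes is 10, so the code sets the bit just past the /10 prefix and widens the mask by one, yielding 100.96.0.0/11 — the podCIDR seen in the old IPv4 fixtures. A runnable sketch of exactly that arithmetic:

// Sketch only: reproduces the "mask + 1, with a '1' in the extra bit" math.
package main

import (
	"fmt"
	"net"
)

func main() {
	_, nonMasqueradeCIDR, _ := net.ParseCIDR("100.64.0.0/10")
	nmOnes, nmBits := nonMasqueradeCIDR.Mask.Size() // 10, 32

	ip := nonMasqueradeCIDR.IP.Mask(nonMasqueradeCIDR.Mask)
	// Set the bit just past the existing prefix: bit 10 turns 100.64.0.0
	// into 100.96.0.0.
	ip[nmOnes/8] |= 128 >> (nmOnes % 8)

	cidr := net.IPNet{IP: ip, Mask: net.CIDRMask(nmOnes+1, nmBits)}
	fmt.Println(cidr.String()) // 100.96.0.0/11
}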
@ -342,7 +336,7 @@ func (c *populateClusterSpec) assignSubnets(cluster *kopsapi.Cluster) error {
}
if cluster.Spec.ServiceClusterIPRange == "" {
-if nmBits > 32 && nmOnes == 0 {
+if nmBits > 32 {
cluster.Spec.ServiceClusterIPRange = "fd00:5e4f:ce::/108"
} else {
// Allocate from the '0' subnet; but only carve off 1/4 of that (i.e. add 1 + 2 bits to the netmask)

View File

@ -77,24 +77,8 @@ func TestPopulateCluster_Subnets(t *testing.T) {
ExpectedServiceClusterIPRange: "10.0.0.0/12",
},
{
-NonMasqueradeCIDR: "fd00:10:96::/96",
-ExpectedClusterCIDR: "fd00:10:96::8000:0/97",
-ExpectedServiceClusterIPRange: "fd00:10:96::/108",
-},
-{
-NonMasqueradeCIDR: "fd00:10:96::/95",
-ExpectedClusterCIDR: "fd00:10:96::1:0:0/96",
-ExpectedServiceClusterIPRange: "fd00:10:96::/108",
-},
-{
-NonMasqueradeCIDR: "fd00:10:96::/94",
-ExpectedClusterCIDR: "fd00:10:96::1:0:0/96",
-ExpectedServiceClusterIPRange: "fd00:10:96::/108",
-},
-{
-NonMasqueradeCIDR: "fd00:10:96::/106",
-ExpectedClusterCIDR: "fd00:10:96::20:0/107",
-ExpectedServiceClusterIPRange: "fd00:10:96::/109",
+NonMasqueradeCIDR: "::/0",
+ExpectedServiceClusterIPRange: "fd00:5e4f:ce::/108",
},
}
for _, tc := range tests {

View File

@ -569,7 +569,7 @@ func (tf *TemplateFunctions) KopsControllerConfig() (string, error) {
}
}
-if tf.Cluster.Spec.PodCIDRFromCloud {
+if tf.Cluster.Spec.IsKopsControllerIPAM() {
config.EnableCloudIPAM = true
}

View File

@ -61,7 +61,7 @@ spec:
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 0f2a1d439e26214d53ea5a403d87f2ef7e7168bf0048f9be8c5b7d5e1cc7d963
+manifestHash: 3508e7d209ec49e2bff9a94f205b1cb5425d3bef6c47e5ecf16877ecc8345ee9
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -68,7 +68,7 @@ spec:
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 0f2a1d439e26214d53ea5a403d87f2ef7e7168bf0048f9be8c5b7d5e1cc7d963
+manifestHash: 3508e7d209ec49e2bff9a94f205b1cb5425d3bef6c47e5ecf16877ecc8345ee9
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -61,7 +61,7 @@ spec:
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 0f2a1d439e26214d53ea5a403d87f2ef7e7168bf0048f9be8c5b7d5e1cc7d963
+manifestHash: 3508e7d209ec49e2bff9a94f205b1cb5425d3bef6c47e5ecf16877ecc8345ee9
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -75,7 +75,7 @@ spec:
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 0f2a1d439e26214d53ea5a403d87f2ef7e7168bf0048f9be8c5b7d5e1cc7d963
+manifestHash: 3508e7d209ec49e2bff9a94f205b1cb5425d3bef6c47e5ecf16877ecc8345ee9
name: networking.cilium.io
needsRollingUpdate: all
selector:

View File

@ -68,7 +68,7 @@ spec:
version: 9.99.0
- id: k8s-1.16
manifest: networking.cilium.io/k8s-1.16-v1.10.yaml
-manifestHash: 0f2a1d439e26214d53ea5a403d87f2ef7e7168bf0048f9be8c5b7d5e1cc7d963
+manifestHash: 3508e7d209ec49e2bff9a94f205b1cb5425d3bef6c47e5ecf16877ecc8345ee9
name: networking.cilium.io
needsRollingUpdate: all
selector: