Merge pull request #14706 from johngmyers/v1alpha3-networking

v1alpha3: move networking fields under networking
Commit e5a835d287 by Kubernetes Prow Robot, 2022-12-05 21:34:38 -08:00 (committed via GitHub)
139 changed files with 1420 additions and 1224 deletions

View File

@ -614,11 +614,11 @@ func RunCreateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Cr
}
if c.NetworkCIDR != "" {
cluster.Spec.NetworkCIDR = c.NetworkCIDR
cluster.Spec.Networking.NetworkCIDR = c.NetworkCIDR
}
if c.DisableSubnetTags {
cluster.Spec.TagSubnets = fi.PtrTo(false)
cluster.Spec.Networking.TagSubnets = fi.PtrTo(false)
}
if c.APIPublicName != "" {

View File

@ -292,12 +292,12 @@ func completeClusterSubnet(f commandutils.Factory, excludeSubnets *[]string) fun
var requiredType kopsapi.SubnetType
var subnets []string
alreadySelected := sets.NewString(*excludeSubnets...)
for _, subnet := range cluster.Spec.Subnets {
for _, subnet := range cluster.Spec.Networking.Subnets {
if alreadySelected.Has(subnet.Name) {
requiredType = subnet.Type
}
}
for _, subnet := range cluster.Spec.Subnets {
for _, subnet := range cluster.Spec.Networking.Subnets {
if !alreadySelected.Has(subnet.Name) && subnet.Type != kopsapi.SubnetTypeUtility &&
(subnet.Type == requiredType || requiredType == "") {
subnets = append(subnets, subnet.Name)

View File

@ -222,7 +222,7 @@ func clusterOutputTable(clusters []*kopsapi.Cluster, out io.Writer) error {
})
t.AddColumn("ZONES", func(c *kopsapi.Cluster) string {
zones := sets.NewString()
for _, s := range c.Spec.Subnets {
for _, s := range c.Spec.Networking.Subnets {
if s.Zone != "" {
zones.Insert(s.Zone)
}

View File

@ -242,18 +242,18 @@ func RunToolboxInstanceSelector(ctx context.Context, f commandutils.Factory, out
return fmt.Errorf("cannot select instance types from non-aws cluster")
}
firstClusterSubnet := strings.ReplaceAll(cluster.Spec.Subnets[0].Name, "utility-", "")
firstClusterSubnet := strings.ReplaceAll(cluster.Spec.Networking.Subnets[0].Name, "utility-", "")
region := firstClusterSubnet[:len(firstClusterSubnet)-1]
igSubnets := []string{}
for _, clusterSubnet := range cluster.Spec.Subnets {
for _, clusterSubnet := range cluster.Spec.Networking.Subnets {
igSubnets = append(igSubnets, clusterSubnet.Name)
}
if commandline.Flags[subnets] != nil {
userSubnets := *commandline.StringSliceMe(commandline.Flags[subnets])
dryRun := *commandline.BoolMe(commandline.Flags[dryRun])
err := validateUserSubnets(userSubnets, cluster.Spec.Subnets)
err := validateUserSubnets(userSubnets, cluster.Spec.Networking.Subnets)
if err != nil && !dryRun {
return err
}
@ -389,7 +389,7 @@ func retrieveClusterRefs(ctx context.Context, f commandutils.Factory, clusterNam
return nil, nil, nil, err
}
if len(cluster.Spec.Subnets) == 0 {
if len(cluster.Spec.Networking.Subnets) == 0 {
return nil, nil, nil, fmt.Errorf("configuration must include Subnets")
}

View File

@ -427,7 +427,7 @@ func usesBastion(instanceGroups []*kops.InstanceGroup) bool {
}
func findBastionPublicName(c *kops.Cluster) string {
topology := c.Spec.Topology
topology := c.Spec.Networking.Topology
if topology == nil {
return ""
}

View File

@ -1575,7 +1575,8 @@ the removal of fields no longer in use.
| v1alpha2 Field | New Field |
|--------------------------------------------------------|----------------------------------------------------------------|
| additionalSans | api.AdditionalSANs |
| additionalNetworkCIDRs | networking.additionalNetworkCIDRs |
| additionalSans | api.additionalSANs |
| api.loadBalancer.subnets.allocationId | api.loadBalancer.subnets.allocationID |
| api.loadBalancer.useForInternalApi | api.loadBalancer.useForInternalAPI |
| cloudConfig.azure | cloudProvider.azure |
@ -1584,11 +1585,13 @@ the removal of fields no longer in use.
| cloudConfig.openstack | cloudProvider.openstack |
| cloudProvider (string) | cloudProvider (map) |
| DisableSubnetTags | tagSubnets (value inverted) |
| egressProxy | networking.egressProxy |
| etcdClusters[\*].etcdMembers[\*].kmsKeyId | etcdClusters[\*].etcdMembers[\*].kmsKeyID |
| etcdClusters[\*].etcdMembers[\*].volumeIops | etcdClusters[\*].etcdMembers[\*].volumeIOPS |
| externalDns | externalDNS |
| externalDns.disable: true | externalDNS.provider: none |
| hooks[\*].disabled | hooks[\*].enabled (value inverted) |
| isolateMasters | networking.isolateControlPlane |
| kubeAPIServer.authorizationRbacSuperUser | kubeAPIServer.authorizationRBACSuperUser |
| kubeAPIServer.authorizationWebhookCacheAuthorizedTtl | kubeAPIServer.authorizationWebhookCacheAuthorizedTTL |
| kubeAPIServer.authorizationWebhookCacheUnauthorizedTtl | kubeAPIServer.authorizationWebhookCacheUnauthorizedTTL |
@ -1603,6 +1606,8 @@ the removal of fields no longer in use.
| masterKubelet.authenticationTokenWebhookCacheTtl | controlPlaneKubelet.authenticationTokenWebhookCacheTTL |
| masterKubelet.clientCaFile | controlPlaneKubelet.clientCAFile |
| masterPublicName | api.publicName |
| networkCIDR | networking.networkCIDR |
| networkID | networking.networkID |
| networking.amazonvpc | networking.amazonVPC |
| networking.amazonvpc.imageName | networking.amazonVPC.image |
| networking.amazonvpc.initImageName | networking.amazonVPC.initImage |
@ -1612,6 +1617,12 @@ the removal of fields no longer in use.
| networking.cilium.toFqdnsDnsRejectResponseCode | networking.cilium.toFQDNsDNSRejectResponseCode |
| networking.cilium.toFqdnsEnablePoller | networking.cilium.toFQDNsEnablePoller |
| networking.kuberouter | networking.kubeRouter |
| nonMasqueradeCIDR | networking.nonMasqueradeCIDR |
| podCIDR | networking.podCIDR |
| project | cloudProvider.gce.project |
| topology.bastion.bastionPublicName | topology.bastion.publicName |
| topology.dns.type | topology.dns |
| serviceClusterIPRange | networking.serviceClusterIPRange |
| subnets | networking.subnets |
| tagSubnets | networking.tagSubnets |
| topology | networking.topology |
| topology.bastion.bastionPublicName | networking.topology.bastion.publicName |
| topology.dns.type | networking.topology.dns |
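
To make the relocation concrete, the following is a minimal, self-contained Go sketch of the before/after access paths for a couple of the moved fields. The types are simplified stand-ins, not the real kops API (which lives in k8s.io/kops/pkg/apis/kops), and the CIDR and subnet name are illustrative only.

```go
package main

import "fmt"

// Simplified stand-ins for the kops API types; the real structs
// carry many more fields.
type ClusterSubnetSpec struct{ Name, Zone string }

type NetworkingSpec struct {
	NetworkCIDR string
	Subnets     []ClusterSubnetSpec
}

type ClusterSpec struct {
	// Before this change, Subnets and NetworkCIDR sat directly on
	// ClusterSpec; afterwards they are reached through Networking.
	Networking NetworkingSpec
}

func main() {
	spec := ClusterSpec{
		Networking: NetworkingSpec{
			NetworkCIDR: "172.20.0.0/16",
			Subnets:     []ClusterSubnetSpec{{Name: "us-east-1a", Zone: "us-east-1a"}},
		},
	}
	// Old access path: spec.Subnets, spec.NetworkCIDR
	// New access path:
	fmt.Println(spec.Networking.NetworkCIDR, spec.Networking.Subnets[0].Name)
}
```

Note that the inverted-value entries change sense as well as location: v1alpha2 `DisableSubnetTags: true` becomes `networking.tagSubnets: false` under the new API.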

View File

@ -40,13 +40,16 @@ func up(ctx context.Context) error {
AWS: &api.AWSSpec{},
},
ConfigBase: registryBase.Join(cluster.ObjectMeta.Name).Path(),
Topology: &api.TopologySpec{},
Networking: api.NetworkingSpec{
Topology: &api.TopologySpec{
ControlPlane: api.TopologyPublic,
Nodes: api.TopologyPublic,
},
},
}
cluster.Spec.Topology.ControlPlane = api.TopologyPublic
cluster.Spec.Topology.Nodes = api.TopologyPublic
for _, z := range nodeZones {
cluster.Spec.Subnets = append(cluster.Spec.Subnets, api.ClusterSubnetSpec{
cluster.Spec.Networking.Subnets = append(cluster.Spec.Networking.Subnets, api.ClusterSubnetSpec{
Name: z,
Zone: z,
Type: api.SubnetTypePublic,

View File

@ -216,7 +216,7 @@ func (b *CloudConfigBuilder) build(c *fi.ModelBuilderContext, inTree bool) error
requireGlobal = false
var region string
for _, subnet := range b.Cluster.Spec.Subnets {
for _, subnet := range b.Cluster.Spec.Networking.Subnets {
if subnet.Region != "" {
region = subnet.Region
break
@ -226,7 +226,7 @@ func (b *CloudConfigBuilder) build(c *fi.ModelBuilderContext, inTree bool) error
return fmt.Errorf("on Azure, subnets must include Regions")
}
vnetName := b.Cluster.Spec.NetworkID
vnetName := b.Cluster.Spec.Networking.NetworkID
if vnetName == "" {
vnetName = b.Cluster.Name
}

View File

@ -50,11 +50,13 @@ func TestBuildAzure(t *testing.T) {
RouteTableName: routeTableName,
},
},
NetworkID: vnetName,
Subnets: []kops.ClusterSubnetSpec{
{
Name: "test-subnet",
Region: "eastus",
Networking: kops.NetworkingSpec{
NetworkID: vnetName,
Subnets: []kops.ClusterSubnetSpec{
{
Name: "test-subnet",
Region: "eastus",
},
},
},
},
@ -127,7 +129,9 @@ func TestBuildAWSCustomNodeIPFamilies(t *testing.T) {
ExternalCloudControllerManager: &kops.CloudControllerManagerConfig{
CloudProvider: string(kops.CloudProviderAWS),
},
NonMasqueradeCIDR: "::/0",
Networking: kops.NetworkingSpec{
NonMasqueradeCIDR: "::/0",
},
},
}

View File

@ -70,7 +70,7 @@ func (b *ContainerdBuilder) Build(c *fi.ModelBuilderContext) error {
// Using containerd with Kubenet requires special configuration.
// This is a temporary backwards-compatible solution for kubenet users and will be deprecated when Kubenet is deprecated:
// https://github.com/containerd/containerd/blob/master/docs/cri/config.md#cni-config-template
if components.UsesKubenet(b.Cluster.Spec.Networking) {
if components.UsesKubenet(&b.Cluster.Spec.Networking) {
b.buildCNIConfigTemplateFile(c)
if err := b.buildIPMasqueradeRules(c); err != nil {
return err
@ -407,12 +407,12 @@ iptables -w -t nat -A IP-MASQ -d {{.NonMasqueradeCIDR}} -m comment --comment "ip
iptables -w -t nat -A IP-MASQ -m comment --comment "ip-masq: outbound traffic is subject to MASQUERADE (must be last in chain)" -j MASQUERADE
`
if b.Cluster.Spec.NonMasqueradeCIDR == "" {
if b.Cluster.Spec.Networking.NonMasqueradeCIDR == "" {
// We could fall back to the pod CIDR, which is likely more correct anyway
return fmt.Errorf("NonMasqueradeCIDR is not set")
}
script = strings.ReplaceAll(script, "{{.NonMasqueradeCIDR}}", b.Cluster.Spec.NonMasqueradeCIDR)
script = strings.ReplaceAll(script, "{{.NonMasqueradeCIDR}}", b.Cluster.Spec.Networking.NonMasqueradeCIDR)
c.AddTask(&nodetasks.File{
Path: "/opt/kops/bin/cni-iptables-setup",
@ -500,7 +500,7 @@ func (b *ContainerdBuilder) buildContainerdConfig() (string, error) {
config.SetPath([]string{"plugins", "io.containerd.grpc.v1.cri", "containerd", "runtimes", "runc", "runtime_type"}, "io.containerd.runc.v2")
// only enable systemd cgroups for kubernetes >= 1.20
config.SetPath([]string{"plugins", "io.containerd.grpc.v1.cri", "containerd", "runtimes", "runc", "options", "SystemdCgroup"}, true)
if components.UsesKubenet(cluster.Spec.Networking) {
if components.UsesKubenet(&cluster.Spec.Networking) {
// Using containerd with Kubenet requires special configuration.
// This is a temporary backwards-compatible solution for kubenet users and will be deprecated when Kubenet is deprecated:
// https://github.com/containerd/containerd/blob/master/docs/cri/config.md#cni-config-template

View File

@ -187,7 +187,7 @@ func TestContainerdConfig(t *testing.T) {
ContainerRuntime: "containerd",
Containerd: &kops.ContainerdConfig{},
KubernetesVersion: "1.21.0",
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Kubenet: &kops.KubenetNetworkingSpec{},
},
},

View File

@ -398,7 +398,7 @@ func (b *KubeAPIServerBuilder) writeServerCertificate(c *fi.ModelBuilderContext,
}
}
if b.CloudProvider == kops.CloudProviderOpenstack {
if b.Cluster.Spec.Topology != nil && b.Cluster.Spec.Topology.ControlPlane == kops.TopologyPrivate {
if b.Cluster.Spec.Networking.Topology != nil && b.Cluster.Spec.Networking.Topology.ControlPlane == kops.TopologyPrivate {
instanceAddress, err := getInstanceAddress()
if err != nil {
return err
@ -617,7 +617,7 @@ func (b *KubeAPIServerBuilder) buildPod(kubeAPIServer *kops.KubeAPIServerConfig)
container := &v1.Container{
Name: "kube-apiserver",
Image: image,
Env: proxy.GetProxyEnvVars(b.Cluster.Spec.EgressProxy),
Env: proxy.GetProxyEnvVars(b.Cluster.Spec.Networking.EgressProxy),
LivenessProbe: &v1.Probe{
ProbeHandler: v1.ProbeHandler{
HTTPGet: probeAction,

View File

@ -218,7 +218,7 @@ func (b *KubeControllerManagerBuilder) buildPod(kcm *kops.KubeControllerManagerC
container := &v1.Container{
Name: "kube-controller-manager",
Image: image,
Env: proxy.GetProxyEnvVars(b.Cluster.Spec.EgressProxy),
Env: proxy.GetProxyEnvVars(b.Cluster.Spec.Networking.EgressProxy),
LivenessProbe: &v1.Probe{
ProbeHandler: v1.ProbeHandler{
HTTPGet: &v1.HTTPGetAction{

View File

@ -51,7 +51,7 @@ func (b *KubeProxyBuilder) Build(c *fi.ModelBuilderContext) error {
if b.IsMaster {
// If this is a master that is not isolated, run it as a normal node also (start kube-proxy etc)
// This lets e.g. daemonset pods communicate with other pods in the system
if fi.ValueOf(b.Cluster.Spec.IsolateMasters) {
if fi.ValueOf(b.Cluster.Spec.Networking.IsolateControlPlane) {
klog.V(2).Infof("Running on Master with IsolateMaster=true; skipping kube-proxy installation")
return nil
}

View File

@ -243,7 +243,7 @@ func (b *KubeSchedulerBuilder) buildPod(kubeScheduler *kops.KubeSchedulerConfig)
container := &v1.Container{
Name: "kube-scheduler",
Image: image,
Env: proxy.GetProxyEnvVars(b.Cluster.Spec.EgressProxy),
Env: proxy.GetProxyEnvVars(b.Cluster.Spec.Networking.EgressProxy),
LivenessProbe: &v1.Probe{
ProbeHandler: v1.ProbeHandler{HTTPGet: healthAction},
InitialDelaySeconds: 15,

View File

@ -150,7 +150,7 @@ func (b *KubeletBuilder) Build(c *fi.ModelBuilderContext) error {
}
}
if components.UsesCNI(b.Cluster.Spec.Networking) {
if components.UsesCNI(&b.Cluster.Spec.Networking) {
c.AddTask(&nodetasks.File{
Path: b.CNIConfDir(),
Type: nodetasks.FileType_Directory,
@ -530,7 +530,7 @@ func (b *KubeletBuilder) buildKubeletConfigSpec() (*kops.KubeletConfigSpec, erro
c.BootstrapKubeconfig = ""
}
if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.AmazonVPC != nil {
if b.Cluster.Spec.Networking.AmazonVPC != nil {
sess := session.Must(session.NewSession())
metadata := ec2metadata.New(sess)

View File

@ -313,7 +313,7 @@ func (t *ProtokubeBuilder) buildEnvFile() (*nodetasks.File, error) {
envVars["SCW_DEFAULT_ZONE"] = os.Getenv("SCW_DEFAULT_ZONE")
}
for _, envVar := range proxy.GetProxyEnvVars(t.Cluster.Spec.EgressProxy) {
for _, envVar := range proxy.GetProxyEnvVars(t.Cluster.Spec.Networking.EgressProxy) {
envVars[envVar.Name] = envVar.Value
}

View File

@ -68,23 +68,6 @@ type ClusterSpec struct {
ContainerRuntime string `json:"containerRuntime,omitempty"`
// The version of kubernetes to install (optional, and can be a "spec" like stable)
KubernetesVersion string `json:"kubernetesVersion,omitempty"`
// Configuration of subnets we are targeting
Subnets []ClusterSubnetSpec `json:"subnets,omitempty"`
// NetworkCIDR is the CIDR used for the AWS VPC / DO / GCE Network, or otherwise allocated to k8s
// This is a real CIDR, not the internal k8s network
// On AWS, it maps to the VPC CIDR. It is not required on GCE.
// On DO, it maps to the VPC CIDR.
NetworkCIDR string `json:"networkCIDR,omitempty"`
// AdditionalNetworkCIDRs is a list of additional CIDRs used for the AWS VPC
// or otherwise allocated to k8s. These are real CIDRs, not the internal k8s network.
// On AWS, they map to any additional CIDRs added to a VPC.
AdditionalNetworkCIDRs []string `json:"additionalNetworkCIDRs,omitempty"`
// NetworkID is an identifier of a network, if we want to reuse/share an existing network (e.g. an AWS VPC)
NetworkID string `json:"networkID,omitempty"`
// Topology defines the type of network topology to use on the cluster - default public
// This is heavily weighted towards AWS for the time being, but should also be agnostic enough
// to port out to GCE later if needed
Topology *TopologySpec `json:"topology,omitempty"`
// SecretStore is the VFS path to where secrets are stored
SecretStore string `json:"secretStore,omitempty"`
// KeyStore is the VFS path to where SSL keys and certificates are stored
@ -102,29 +85,12 @@ type ClusterSpec struct {
DNSControllerGossipConfig *DNSControllerGossipConfig `json:"dnsControllerGossipConfig,omitempty"`
// ClusterDNSDomain is the suffix we use for internal DNS names (normally cluster.local)
ClusterDNSDomain string `json:"clusterDNSDomain,omitempty"`
// ServiceClusterIPRange is the CIDR, from the internal network, where we allocate IPs for services
ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty"`
// PodCIDR is the CIDR from which we allocate IPs for pods
PodCIDR string `json:"podCIDR,omitempty"`
// NonMasqueradeCIDR is the CIDR for the internal k8s network (on which pods & services live)
// It cannot overlap ServiceClusterIPRange
NonMasqueradeCIDR string `json:"nonMasqueradeCIDR,omitempty"`
// SSHAccess is a list of the CIDRs that can access SSH.
SSHAccess []string `json:"sshAccess,omitempty"`
// NodePortAccess is a list of the CIDRs that can access the node ports range (30000-32767).
NodePortAccess []string `json:"nodePortAccess,omitempty"`
// HTTPProxy defines connection information to support use of a private cluster behind a forward HTTP Proxy
EgressProxy *EgressProxySpec `json:"egressProxy,omitempty"`
// SSHKeyName specifies a preexisting SSH key to use
SSHKeyName *string `json:"sshKeyName,omitempty"`
// IsolateMasters determines whether we should lock down masters so that they are not on the pod network.
// true is the kube-up behaviour, but it is very surprising: it means that daemonsets only work on the master
// if they have hostNetwork=true.
// false is now the default, and it will:
// * give the master a normal PodCIDR
// * run kube-proxy on the master
// * enable debugging handlers on the master, so kubectl logs works
IsolateMasters *bool `json:"isolateMasters,omitempty"`
// UpdatePolicy determines the policy for applying upgrades automatically.
// Valid values:
// 'automatic' (default): apply updates automatically (apply OS security upgrades, avoiding rebooting when possible)
@ -168,8 +134,8 @@ type ClusterSpec struct {
// AWSLoadbalancerControllerConfig determines the AWS LB controller configuration.
AWSLoadBalancerController *AWSLoadBalancerControllerConfig `json:"awsLoadBalancerController,omitempty"`
// Networking configuration
Networking *NetworkingSpec `json:"networking,omitempty"`
// Networking configures networking.
Networking NetworkingSpec `json:"networking,omitempty"`
// API controls how the Kubernetes API is exposed.
API APISpec `json:"api,omitempty"`
// Authentication field controls how the cluster is configured for authentication
@ -188,8 +154,6 @@ type ClusterSpec struct {
IAM *IAMSpec `json:"iam,omitempty"`
// EncryptionConfig controls if encryption is enabled
EncryptionConfig *bool `json:"encryptionConfig,omitempty"`
// TagSubnets controls if tags are added to subnets to enable use by load balancers (AWS only). Default: true.
TagSubnets *bool `json:"tagSubnets,omitempty"`
// Target allows for us to nest extra config for targets such as terraform
Target *TargetSpec `json:"target,omitempty"`
// UseHostCertificates will mount /etc/ssl/certs to inside needed containers.
@ -714,6 +678,7 @@ const (
)
// ClusterSubnetSpec defines a subnet
// TODO: move to networking.go
type ClusterSubnetSpec struct {
// Name is the name of the subnet
Name string `json:"name,omitempty"`
@ -783,16 +748,10 @@ func (t *TerraformSpec) IsEmpty() bool {
// store them (i.e. we don't need to 'lock them')
func (c *Cluster) FillDefaults() error {
// Topology support
if c.Spec.Topology == nil {
c.Spec.Topology = &TopologySpec{ControlPlane: TopologyPublic, Nodes: TopologyPublic, DNS: DNSTypePublic}
if c.Spec.Networking.Topology == nil {
c.Spec.Networking.Topology = &TopologySpec{ControlPlane: TopologyPublic, Nodes: TopologyPublic, DNS: DNSTypePublic}
}
if c.Spec.Networking == nil {
c.Spec.Networking = &NetworkingSpec{}
}
c.fillClusterSpecNetworkingSpec()
if c.Spec.Channel == "" {
c.Spec.Channel = DefaultChannel
}
@ -804,41 +763,9 @@ func (c *Cluster) FillDefaults() error {
return nil
}
// fillClusterSpecNetworkingSpec defaults the networking model to Kubenet when no plugin has been selected
func (c *Cluster) fillClusterSpecNetworkingSpec() {
if c.Spec.Networking.Kubenet != nil {
// OK
} else if c.Spec.Networking.CNI != nil {
// OK
} else if c.Spec.Networking.External != nil {
// OK
} else if c.Spec.Networking.Kopeio != nil {
// OK
} else if c.Spec.Networking.Weave != nil {
// OK
} else if c.Spec.Networking.Flannel != nil {
// OK
} else if c.Spec.Networking.Calico != nil {
// OK
} else if c.Spec.Networking.Canal != nil {
// OK
} else if c.Spec.Networking.KubeRouter != nil {
// OK
} else if c.Spec.Networking.AmazonVPC != nil {
// OK
} else if c.Spec.Networking.Cilium != nil {
// OK
} else if c.Spec.Networking.GCE != nil {
// OK
} else {
// No networking model selected; choose Kubenet
c.Spec.Networking.Kubenet = &KubenetNetworkingSpec{}
}
}
// SharedVPC is a simple helper function which makes the templates for a shared VPC clearer
func (c *Cluster) SharedVPC() bool {
return c.Spec.NetworkID != ""
return c.Spec.Networking.NetworkID != ""
}
// IsKubernetesGTE checks if the version is >= the specified version.
@ -894,21 +821,21 @@ func (c *Cluster) IsGossip() bool {
}
func (c *Cluster) UsesPublicDNS() bool {
if c.Spec.Topology == nil || c.Spec.Topology.DNS == "" || c.Spec.Topology.DNS == DNSTypePublic {
if c.Spec.Networking.Topology == nil || c.Spec.Networking.Topology.DNS == "" || c.Spec.Networking.Topology.DNS == DNSTypePublic {
return true
}
return false
}
func (c *Cluster) UsesPrivateDNS() bool {
if c.Spec.Topology != nil && c.Spec.Topology.DNS == DNSTypePrivate {
if c.Spec.Networking.Topology != nil && c.Spec.Networking.Topology.DNS == DNSTypePrivate {
return true
}
return false
}
func (c *Cluster) UsesNoneDNS() bool {
if c.Spec.Topology != nil && c.Spec.Topology.DNS == DNSTypeNone {
if c.Spec.Networking.Topology != nil && c.Spec.Networking.Topology.DNS == DNSTypeNone {
return true
}
return false
@ -919,7 +846,7 @@ func (c *Cluster) APIInternalName() string {
}
func (c *ClusterSpec) IsIPv6Only() bool {
return utils.IsIPv6CIDR(c.NonMasqueradeCIDR)
return utils.IsIPv6CIDR(c.Networking.NonMasqueradeCIDR)
}
func (c *ClusterSpec) IsKopsControllerIPAM() bool {

View File

@ -36,7 +36,7 @@ func TestUseCiliumEtcd(t *testing.T) {
Name: "cilium",
},
},
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
Version: "v1.8",
},
@ -53,7 +53,6 @@ func TestUseCiliumEtcd(t *testing.T) {
Name: "cilium",
},
},
Networking: &kops.NetworkingSpec{},
},
},
expected: false,
@ -66,7 +65,7 @@ func TestUseCiliumEtcd(t *testing.T) {
Name: "calico",
},
},
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
Version: "v1.8",
},

View File

@ -25,7 +25,7 @@ import (
// FindSubnet returns the subnet with the specified name, or returns nil
func FindSubnet(c *kops.Cluster, subnetName string) *kops.ClusterSubnetSpec {
for _, subnet := range c.Spec.Subnets {
for _, subnet := range c.Spec.Networking.Subnets {
if subnet.Name == subnetName {
return &subnet
}

View File

@ -27,9 +27,11 @@ import (
func Test_FindSubnet(t *testing.T) {
cluster := &kops.Cluster{
Spec: kops.ClusterSpec{
Subnets: []kops.ClusterSubnetSpec{
{Name: "a"},
{Name: "b"},
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{Name: "a"},
{Name: "b"},
},
},
},
}
@ -78,9 +80,11 @@ func Test_FindSubnet(t *testing.T) {
func Test_FindZonesForInstanceGroup(t *testing.T) {
cluster := &kops.Cluster{
Spec: kops.ClusterSpec{
Subnets: []kops.ClusterSubnetSpec{
{Name: "zonea", Zone: "zonea"},
{Name: "zoneb", Zone: "zoneb"},
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{Name: "zonea", Zone: "zonea"},
{Name: "zoneb", Zone: "zoneb"},
},
},
},
}

View File

@ -18,8 +18,51 @@ package kops
import "k8s.io/apimachinery/pkg/api/resource"
// NetworkingSpec allows selection and configuration of a networking plugin
// NetworkingSpec configures networking.
type NetworkingSpec struct {
// NetworkID is the cloud provider's identifier of the existing network (for example, AWS VPC) the cluster should use.
// If not specified, kOps will create a new network.
NetworkID string `json:"networkID,omitempty"`
// NetworkCIDR is the primary IPv4 CIDR used for the cloud provider's network.
// It is not required on GCE.
// On DO, it maps to the VPC CIDR.
NetworkCIDR string `json:"networkCIDR,omitempty"`
// AdditionalNetworkCIDRs is a list of additional CIDRs used for the AWS VPC
// or otherwise allocated to k8s. These are real CIDRs, not the internal k8s network.
// On AWS, they map to any additional CIDRs added to a VPC.
AdditionalNetworkCIDRs []string `json:"additionalNetworkCIDRs,omitempty"`
// Subnets are the subnets that the cluster can use.
Subnets []ClusterSubnetSpec `json:"subnets,omitempty"`
// TagSubnets controls if tags are added to subnets to enable use by load balancers (AWS only). Default: true.
TagSubnets *bool `json:"tagSubnets,omitempty"`
// Topology defines the type of network topology to use on the cluster - default public
// This is heavily weighted towards AWS for the time being, but should also be agnostic enough
// to port out to GCE later if needed
Topology *TopologySpec `json:"topology,omitempty"`
// HTTPProxy defines connection information to support use of a private cluster behind a forward HTTP Proxy
EgressProxy *EgressProxySpec `json:"egressProxy,omitempty"`
// NonMasqueradeCIDR is the CIDR for the internal k8s network (on which pods & services live)
// It cannot overlap ServiceClusterIPRange
NonMasqueradeCIDR string `json:"nonMasqueradeCIDR,omitempty"`
// PodCIDR is the CIDR from which we allocate IPs for pods
PodCIDR string `json:"podCIDR,omitempty"`
// ServiceClusterIPRange is the CIDR, from the internal network, where we allocate IPs for services
ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty"`
// IsolateControlPlane determines whether we should lock down masters so that they are not on the pod network.
// true is the kube-up behaviour, but it is very surprising: it means that daemonsets only work on the master
// if they have hostNetwork=true.
// false is now the default, and it will:
// * give the master a normal PodCIDR
// * run kube-proxy on the master
// * enable debugging handlers on the master, so kubectl logs works
IsolateControlPlane *bool `json:"isolateControlPlane,omitempty"`
// The following specify the selection and configuration of a networking plugin.
// Exactly one of the fields must be non-null.
Classic *ClassicNetworkingSpec `json:"classic,omitempty"`
Kubenet *KubenetNetworkingSpec `json:"kubenet,omitempty"`
External *ExternalNetworkingSpec `json:"external,omitempty"`

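The doc comment above requires exactly one of the plugin fields to be non-null. As a hedged illustration of how such an invariant can be enforced, here is a minimal sketch using hypothetical trimmed-down types; the real NetworkingSpec has many more plugin fields, and kops performs its actual validation elsewhere.

```go
package main

import "fmt"

// Hypothetical, trimmed-down plugin selectors mirroring the pattern
// in NetworkingSpec.
type KubenetSpec struct{}
type CiliumSpec struct{}
type CalicoSpec struct{}

type NetworkingSpec struct {
	Kubenet *KubenetSpec
	Cilium  *CiliumSpec
	Calico  *CalicoSpec
}

// selectedPlugins counts the non-nil plugin fields so a validator can
// enforce the "exactly one" rule from the doc comment.
func selectedPlugins(n *NetworkingSpec) int {
	count := 0
	if n.Kubenet != nil {
		count++
	}
	if n.Cilium != nil {
		count++
	}
	if n.Calico != nil {
		count++
	}
	return count
}

func main() {
	n := &NetworkingSpec{Cilium: &CiliumSpec{}}
	if selectedPlugins(n) != 1 {
		fmt.Println("exactly one networking plugin must be set")
		return
	}
	fmt.Println("networking spec ok")
}
```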
View File

@ -66,6 +66,7 @@ type ClusterSpec struct {
// The version of kubernetes to install (optional, and can be a "spec" like stable)
KubernetesVersion string `json:"kubernetesVersion,omitempty"`
// Configuration of subnets we are targeting
// +k8s:conversion-gen=false
Subnets []ClusterSubnetSpec `json:"subnets,omitempty"`
// Project is the cloud project we should use, required on GCE
// +k8s:conversion-gen=false
@ -79,16 +80,20 @@ type ClusterSpec struct {
// NetworkCIDR is the CIDR used for the AWS VPC / GCE Network, or otherwise allocated to k8s
// This is a real CIDR, not the internal k8s network
// On AWS, it maps to the VPC CIDR. It is not required on GCE.
// +k8s:conversion-gen=false
NetworkCIDR string `json:"networkCIDR,omitempty"`
// AdditionalNetworkCIDRs is a list of additional CIDRs used for the AWS VPC
// or otherwise allocated to k8s. These are real CIDRs, not the internal k8s network.
// On AWS, they map to any additional CIDRs added to a VPC.
// +k8s:conversion-gen=false
AdditionalNetworkCIDRs []string `json:"additionalNetworkCIDRs,omitempty"`
// NetworkID is an identifier of a network, if we want to reuse/share an existing network (e.g. an AWS VPC)
// +k8s:conversion-gen=false
NetworkID string `json:"networkID,omitempty"`
// Topology defines the type of network topology to use on the cluster - default public
// This is heavily weighted towards AWS for the time being, but should also be agnostic enough
// to port out to GCE later if needed
// +k8s:conversion-gen=false
Topology *TopologySpec `json:"topology,omitempty"`
// SecretStore is the VFS path to where secrets are stored
SecretStore string `json:"secretStore,omitempty"`
@ -111,12 +116,15 @@ type ClusterSpec struct {
// ClusterDNSDomain is the suffix we use for internal DNS names (normally cluster.local)
ClusterDNSDomain string `json:"clusterDNSDomain,omitempty"`
// ServiceClusterIPRange is the CIDR, from the internal network, where we allocate IPs for services
// +k8s:conversion-gen=false
ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty"`
// PodCIDR is the CIDR from which we allocate IPs for pods
// +k8s:conversion-gen=false
PodCIDR string `json:"podCIDR,omitempty"`
// MasterIPRange string `json:",omitempty"`
// NonMasqueradeCIDR is the CIDR for the internal k8s network (on which pods & services live)
// It cannot overlap ServiceClusterIPRange
// +k8s:conversion-gen=false
NonMasqueradeCIDR string `json:"nonMasqueradeCIDR,omitempty"`
// SSHAccess determines the permitted access to SSH
// Currently only a single CIDR is supported (though a richer grammar could be added in future)
@ -124,6 +132,7 @@ type ClusterSpec struct {
// NodePortAccess is a list of the CIDRs that can access the node ports range (30000-32767).
NodePortAccess []string `json:"nodePortAccess,omitempty"`
// HTTPProxy defines connection information to support use of a private cluster behind a forward HTTP Proxy
// +k8s:conversion-gen=false
EgressProxy *EgressProxySpec `json:"egressProxy,omitempty"`
// SSHKeyName specifies a preexisting SSH key to use
SSHKeyName *string `json:"sshKeyName,omitempty"`
@ -138,6 +147,7 @@ type ClusterSpec struct {
// * give the master a normal PodCIDR
// * run kube-proxy on the master
// * enable debugging handlers on the master, so kubectl logs works
// +k8s:conversion-gen=false
IsolateMasters *bool `json:"isolateMasters,omitempty"`
// UpdatePolicy determines the policy for applying upgrades automatically.
// Valid values:
@ -183,7 +193,9 @@ type ClusterSpec struct {
AWSLoadBalancerController *AWSLoadBalancerControllerConfig `json:"awsLoadBalancerController,omitempty"`
// Networking configuration
Networking *NetworkingSpec `json:"networking,omitempty"`
// +k8s:conversion-gen=false
LegacyNetworking *NetworkingSpec `json:"networking,omitempty"`
Networking NetworkingSpec `json:"-"`
// API field controls how the API is exposed outside the cluster
// +k8s:conversion-gen=false
LegacyAPI *APISpec `json:"api,omitempty"`
@ -205,6 +217,7 @@ type ClusterSpec struct {
// EncryptionConfig holds the encryption config
EncryptionConfig *bool `json:"encryptionConfig,omitempty"`
// DisableSubnetTags controls if subnets are tagged in AWS
// +k8s:conversion-gen=false
TagSubnets *bool `json:"DisableSubnetTags,omitempty"`
// Target allows for us to nest extra config for targets such as terraform
Target *TargetSpec `json:"target,omitempty"`

View File

@ -95,6 +95,11 @@ func Convert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *kops
}
out.ExternalPolicies = &policies
}
if in.LegacyNetworking != nil {
if err := autoConvert_v1alpha2_NetworkingSpec_To_kops_NetworkingSpec(in.LegacyNetworking, &out.Networking, s); err != nil {
return err
}
}
if in.LegacyAPI != nil {
if err := autoConvert_v1alpha2_APISpec_To_kops_APISpec(in.LegacyAPI, &out.API, s); err != nil {
return err
@ -139,17 +144,57 @@ func Convert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *kops
string(kops.CloudProviderScaleway),
})
}
if in.TagSubnets != nil {
out.TagSubnets = values.Bool(!*in.TagSubnets)
}
for i, hook := range in.Hooks {
if hook.Enabled != nil {
out.Hooks[i].Enabled = values.Bool(!*hook.Enabled)
}
}
if in.Subnets != nil {
in, out := &in.Subnets, &out.Networking.Subnets
*out = make([]kops.ClusterSubnetSpec, len(*in))
for i := range *in {
if err := Convert_v1alpha2_ClusterSubnetSpec_To_kops_ClusterSubnetSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Networking.Subnets = nil
}
out.API.PublicName = in.MasterPublicName
out.Networking.NetworkCIDR = in.NetworkCIDR
out.Networking.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs
out.Networking.NetworkID = in.NetworkID
if in.Topology != nil {
in, out := &in.Topology, &out.Networking.Topology
*out = new(kops.TopologySpec)
if err := Convert_v1alpha2_TopologySpec_To_kops_TopologySpec(*in, *out, s); err != nil {
return err
}
} else {
out.Networking.Topology = nil
}
out.Networking.ServiceClusterIPRange = in.ServiceClusterIPRange
out.Networking.PodCIDR = in.PodCIDR
out.Networking.NonMasqueradeCIDR = in.NonMasqueradeCIDR
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.Networking.EgressProxy
*out = new(kops.EgressProxySpec)
if err := Convert_v1alpha2_EgressProxySpec_To_kops_EgressProxySpec(*in, *out, s); err != nil {
return err
}
} else {
out.Networking.EgressProxy = nil
}
if in.IsolateMasters != nil {
in, out := &in.IsolateMasters, &out.Networking.IsolateControlPlane
*out = new(bool)
**out = **in
}
out.API.AdditionalSANs = in.AdditionalSANs
out.API.Access = in.KubernetesAPIAccess
if in.TagSubnets != nil {
out.Networking.TagSubnets = values.Bool(!*in.TagSubnets)
}
return nil
}
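A note on the recurring `in, out := &in.X, &out.Y` lines in the hand-written conversion above: they deliberately shadow the function parameters with pointers to the specific fields being copied, which keeps each copy block self-contained and matches the style of the generated converters. A minimal, self-contained illustration of the idiom (the types are hypothetical):

```go
package main

import "fmt"

type src struct{ Topology *string }
type dst struct{ Topology *string }

func convert(in *src, out *dst) {
	if in.Topology != nil {
		// Shadow the parameters with pointers to the fields of
		// interest; within this block, `in` and `out` refer only
		// to the Topology fields.
		in, out := &in.Topology, &out.Topology
		*out = new(string)
		**out = **in
	} else {
		out.Topology = nil
	}
}

func main() {
	topo := "public"
	var d dst
	convert(&src{Topology: &topo}, &d)
	fmt.Println(*d.Topology) // prints "public"
}
```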
@ -177,6 +222,13 @@ func Convert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec, out
}
out.ExternalPolicies = &policies
}
out.LegacyNetworking = &NetworkingSpec{}
if err := autoConvert_kops_NetworkingSpec_To_v1alpha2_NetworkingSpec(&in.Networking, out.LegacyNetworking, s); err != nil {
return err
}
if out.LegacyNetworking.IsEmpty() {
out.LegacyNetworking = nil
}
out.LegacyAPI = &APISpec{}
if err := autoConvert_kops_APISpec_To_v1alpha2_APISpec(&in.API, out.LegacyAPI, s); err != nil {
return err
@ -209,8 +261,48 @@ func Convert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec, out
return err
}
}
if in.TagSubnets != nil {
out.TagSubnets = values.Bool(!*in.TagSubnets)
if in.Networking.Subnets != nil {
in, out := &in.Networking.Subnets, &out.Subnets
*out = make([]ClusterSubnetSpec, len(*in))
for i := range *in {
if err := Convert_kops_ClusterSubnetSpec_To_v1alpha2_ClusterSubnetSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subnets = nil
}
out.NetworkCIDR = in.Networking.NetworkCIDR
out.AdditionalNetworkCIDRs = in.Networking.AdditionalNetworkCIDRs
out.NetworkID = in.Networking.NetworkID
if in.Networking.Topology != nil {
in, out := &in.Networking.Topology, &out.Topology
*out = new(TopologySpec)
if err := Convert_kops_TopologySpec_To_v1alpha2_TopologySpec(*in, *out, s); err != nil {
return err
}
} else {
out.Topology = nil
}
out.ServiceClusterIPRange = in.Networking.ServiceClusterIPRange
out.PodCIDR = in.Networking.PodCIDR
out.NonMasqueradeCIDR = in.Networking.NonMasqueradeCIDR
if in.Networking.EgressProxy != nil {
in, out := &in.Networking.EgressProxy, &out.EgressProxy
*out = new(EgressProxySpec)
if err := Convert_kops_EgressProxySpec_To_v1alpha2_EgressProxySpec(*in, *out, s); err != nil {
return err
}
} else {
out.EgressProxy = nil
}
if in.Networking.IsolateControlPlane != nil {
in, out := &in.Networking.IsolateControlPlane, &out.IsolateMasters
*out = new(bool)
**out = **in
}
if in.Networking.TagSubnets != nil {
out.TagSubnets = values.Bool(!*in.Networking.TagSubnets)
}
for i, hook := range in.Hooks {
if hook.Enabled != nil {

View File

@ -85,11 +85,11 @@ func SetDefaults_ClusterSpec(obj *ClusterSpec) {
obj.Authorization.AlwaysAllow = &AlwaysAllowAuthorizationSpec{}
}
if obj.Networking != nil {
if obj.Networking.Flannel != nil {
if obj.LegacyNetworking != nil {
if obj.LegacyNetworking.Flannel != nil {
// Populate with legacy default value; new clusters will be created with "vxlan" by
// "create cluster."
rebindIfEmpty(&obj.Networking.Flannel.Backend, "udp")
rebindIfEmpty(&obj.LegacyNetworking.Flannel.Backend, "udp")
}
}
}

View File

@ -16,10 +16,24 @@ limitations under the License.
package v1alpha2
import "k8s.io/apimachinery/pkg/api/resource"
import (
"k8s.io/apimachinery/pkg/api/resource"
)
// NetworkingSpec allows selection and configuration of a networking plugin
type NetworkingSpec struct {
NetworkID string `json:"-"`
NetworkCIDR string `json:"-"`
AdditionalNetworkCIDRs []string `json:"-"`
Subnets []ClusterSubnetSpec `json:"-"`
TagSubnets *bool `json:"-"`
Topology *TopologySpec `json:"-"`
EgressProxy *EgressProxySpec `json:"-"`
NonMasqueradeCIDR string `json:"-"`
PodCIDR string `json:"-"`
ServiceClusterIPRange string `json:"-"`
IsolateControlPlane *bool `json:"-"`
Classic *ClassicNetworkingSpec `json:"classic,omitempty"`
Kubenet *KubenetNetworkingSpec `json:"kubenet,omitempty"`
External *ExternalNetworkingSpec `json:"external,omitempty"`
@ -37,6 +51,12 @@ type NetworkingSpec struct {
GCE *GCENetworkingSpec `json:"gce,omitempty"`
}
func (s *NetworkingSpec) IsEmpty() bool {
return s.Classic == nil && s.Kubenet == nil && s.External == nil && s.CNI == nil && s.Kopeio == nil &&
s.Weave == nil && s.Flannel == nil && s.Calico == nil && s.Canal == nil && s.KubeRouter == nil &&
s.Romana == nil && s.AmazonVPC == nil && s.Cilium == nil && s.LyftVPC == nil && s.GCE == nil
}
// ClassicNetworkingSpec is the specification of classic networking mode, integrated into kubernetes.
// Support has been removed since Kubernetes 1.4.
type ClassicNetworkingSpec struct{}
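
The `json:"-"` tags above carry the compatibility story: the moved fields exist on the v1alpha2 Go struct so the generated NetworkingSpec converters have matching peer fields to copy, but they are never read from or written to serialized v1alpha2 output, and IsEmpty lets the cluster-spec converter drop a legacy networking block that selects no plugin. A small self-contained sketch of the `json:"-"` behavior, with illustrative types rather than the kops ones:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative struct: one field hidden from JSON with `json:"-"`,
// one field serialized normally.
type networkingSketch struct {
	NetworkID string    `json:"-"`                 // visible to Go code, never serialized
	Kubenet   *struct{} `json:"kubenet,omitempty"` // serialized when non-nil
}

func main() {
	n := networkingSketch{NetworkID: "vpc-0123", Kubenet: &struct{}{}}
	b, err := json.Marshal(n)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // prints {"kubenet":{}}; NetworkID is dropped
}
```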

View File

@ -2439,32 +2439,14 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
}
out.ContainerRuntime = in.ContainerRuntime
out.KubernetesVersion = in.KubernetesVersion
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]kops.ClusterSubnetSpec, len(*in))
for i := range *in {
if err := Convert_v1alpha2_ClusterSubnetSpec_To_kops_ClusterSubnetSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subnets = nil
}
// INFO: in.Subnets opted out of conversion generation
// INFO: in.Project opted out of conversion generation
// INFO: in.MasterPublicName opted out of conversion generation
// INFO: in.MasterInternalName opted out of conversion generation
out.NetworkCIDR = in.NetworkCIDR
out.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs
out.NetworkID = in.NetworkID
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(kops.TopologySpec)
if err := Convert_v1alpha2_TopologySpec_To_kops_TopologySpec(*in, *out, s); err != nil {
return err
}
} else {
out.Topology = nil
}
// INFO: in.NetworkCIDR opted out of conversion generation
// INFO: in.AdditionalNetworkCIDRs opted out of conversion generation
// INFO: in.NetworkID opted out of conversion generation
// INFO: in.Topology opted out of conversion generation
out.SecretStore = in.SecretStore
out.KeyStore = in.KeyStore
out.ConfigStore = in.ConfigStore
@ -2480,23 +2462,15 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
}
// INFO: in.AdditionalSANs opted out of conversion generation
out.ClusterDNSDomain = in.ClusterDNSDomain
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.PodCIDR = in.PodCIDR
out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
// INFO: in.ServiceClusterIPRange opted out of conversion generation
// INFO: in.PodCIDR opted out of conversion generation
// INFO: in.NonMasqueradeCIDR opted out of conversion generation
out.SSHAccess = in.SSHAccess
out.NodePortAccess = in.NodePortAccess
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(kops.EgressProxySpec)
if err := Convert_v1alpha2_EgressProxySpec_To_kops_EgressProxySpec(*in, *out, s); err != nil {
return err
}
} else {
out.EgressProxy = nil
}
// INFO: in.EgressProxy opted out of conversion generation
out.SSHKeyName = in.SSHKeyName
// INFO: in.KubernetesAPIAccess opted out of conversion generation
out.IsolateMasters = in.IsolateMasters
// INFO: in.IsolateMasters opted out of conversion generation
out.UpdatePolicy = in.UpdatePolicy
out.ExternalPolicies = in.ExternalPolicies
out.AdditionalPolicies = in.AdditionalPolicies
@ -2684,14 +2658,9 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
} else {
out.AWSLoadBalancerController = nil
}
if in.Networking != nil {
in, out := &in.Networking, &out.Networking
*out = new(kops.NetworkingSpec)
if err := Convert_v1alpha2_NetworkingSpec_To_kops_NetworkingSpec(*in, *out, s); err != nil {
return err
}
} else {
out.Networking = nil
// INFO: in.LegacyNetworking opted out of conversion generation
if err := Convert_v1alpha2_NetworkingSpec_To_kops_NetworkingSpec(&in.Networking, &out.Networking, s); err != nil {
return err
}
// INFO: in.LegacyAPI opted out of conversion generation
if err := Convert_v1alpha2_APISpec_To_kops_APISpec(&in.API, &out.API, s); err != nil {
@ -2755,7 +2724,7 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
out.IAM = nil
}
out.EncryptionConfig = in.EncryptionConfig
out.TagSubnets = in.TagSubnets
// INFO: in.TagSubnets opted out of conversion generation
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(kops.TargetSpec)
@ -2859,29 +2828,6 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec,
}
out.ContainerRuntime = in.ContainerRuntime
out.KubernetesVersion = in.KubernetesVersion
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]ClusterSubnetSpec, len(*in))
for i := range *in {
if err := Convert_kops_ClusterSubnetSpec_To_v1alpha2_ClusterSubnetSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subnets = nil
}
out.NetworkCIDR = in.NetworkCIDR
out.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs
out.NetworkID = in.NetworkID
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(TopologySpec)
if err := Convert_kops_TopologySpec_To_v1alpha2_TopologySpec(*in, *out, s); err != nil {
return err
}
} else {
out.Topology = nil
}
out.SecretStore = in.SecretStore
out.KeyStore = in.KeyStore
out.ConfigStore = in.ConfigStore
@ -2896,22 +2842,9 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec,
out.DNSControllerGossipConfig = nil
}
out.ClusterDNSDomain = in.ClusterDNSDomain
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.PodCIDR = in.PodCIDR
out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
out.SSHAccess = in.SSHAccess
out.NodePortAccess = in.NodePortAccess
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(EgressProxySpec)
if err := Convert_kops_EgressProxySpec_To_v1alpha2_EgressProxySpec(*in, *out, s); err != nil {
return err
}
} else {
out.EgressProxy = nil
}
out.SSHKeyName = in.SSHKeyName
out.IsolateMasters = in.IsolateMasters
out.UpdatePolicy = in.UpdatePolicy
out.ExternalPolicies = in.ExternalPolicies
out.AdditionalPolicies = in.AdditionalPolicies
@ -3099,14 +3032,8 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec,
} else {
out.AWSLoadBalancerController = nil
}
if in.Networking != nil {
in, out := &in.Networking, &out.Networking
*out = new(NetworkingSpec)
if err := Convert_kops_NetworkingSpec_To_v1alpha2_NetworkingSpec(*in, *out, s); err != nil {
return err
}
} else {
out.Networking = nil
if err := Convert_kops_NetworkingSpec_To_v1alpha2_NetworkingSpec(&in.Networking, &out.Networking, s); err != nil {
return err
}
if err := Convert_kops_APISpec_To_v1alpha2_APISpec(&in.API, &out.API, s); err != nil {
return err
@ -3169,7 +3096,6 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec,
out.IAM = nil
}
out.EncryptionConfig = in.EncryptionConfig
out.TagSubnets = in.TagSubnets
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(TargetSpec)
@ -6035,6 +5961,43 @@ func Convert_kops_NTPConfig_To_v1alpha2_NTPConfig(in *kops.NTPConfig, out *NTPCo
}
func autoConvert_v1alpha2_NetworkingSpec_To_kops_NetworkingSpec(in *NetworkingSpec, out *kops.NetworkingSpec, s conversion.Scope) error {
out.NetworkID = in.NetworkID
out.NetworkCIDR = in.NetworkCIDR
out.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]kops.ClusterSubnetSpec, len(*in))
for i := range *in {
if err := Convert_v1alpha2_ClusterSubnetSpec_To_kops_ClusterSubnetSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subnets = nil
}
out.TagSubnets = in.TagSubnets
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(kops.TopologySpec)
if err := Convert_v1alpha2_TopologySpec_To_kops_TopologySpec(*in, *out, s); err != nil {
return err
}
} else {
out.Topology = nil
}
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(kops.EgressProxySpec)
if err := Convert_v1alpha2_EgressProxySpec_To_kops_EgressProxySpec(*in, *out, s); err != nil {
return err
}
} else {
out.EgressProxy = nil
}
out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
out.PodCIDR = in.PodCIDR
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.IsolateControlPlane = in.IsolateControlPlane
if in.Classic != nil {
in, out := &in.Classic, &out.Classic
*out = new(kops.ClassicNetworkingSpec)
@ -6179,6 +6142,43 @@ func Convert_v1alpha2_NetworkingSpec_To_kops_NetworkingSpec(in *NetworkingSpec,
}
func autoConvert_kops_NetworkingSpec_To_v1alpha2_NetworkingSpec(in *kops.NetworkingSpec, out *NetworkingSpec, s conversion.Scope) error {
out.NetworkID = in.NetworkID
out.NetworkCIDR = in.NetworkCIDR
out.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]ClusterSubnetSpec, len(*in))
for i := range *in {
if err := Convert_kops_ClusterSubnetSpec_To_v1alpha2_ClusterSubnetSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subnets = nil
}
out.TagSubnets = in.TagSubnets
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(TopologySpec)
if err := Convert_kops_TopologySpec_To_v1alpha2_TopologySpec(*in, *out, s); err != nil {
return err
}
} else {
out.Topology = nil
}
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(EgressProxySpec)
if err := Convert_kops_EgressProxySpec_To_v1alpha2_EgressProxySpec(*in, *out, s); err != nil {
return err
}
} else {
out.EgressProxy = nil
}
out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
out.PodCIDR = in.PodCIDR
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.IsolateControlPlane = in.IsolateControlPlane
if in.Classic != nil {
in, out := &in.Classic, &out.Classic
*out = new(ClassicNetworkingSpec)

View File

@ -1256,11 +1256,12 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(AWSLoadBalancerControllerConfig)
(*in).DeepCopyInto(*out)
}
if in.Networking != nil {
in, out := &in.Networking, &out.Networking
if in.LegacyNetworking != nil {
in, out := &in.LegacyNetworking, &out.LegacyNetworking
*out = new(NetworkingSpec)
(*in).DeepCopyInto(*out)
}
in.Networking.DeepCopyInto(&out.Networking)
if in.LegacyAPI != nil {
in, out := &in.LegacyAPI, &out.LegacyAPI
*out = new(APISpec)
@ -4291,6 +4292,38 @@ func (in *NTPConfig) DeepCopy() *NTPConfig {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkingSpec) DeepCopyInto(out *NetworkingSpec) {
*out = *in
if in.AdditionalNetworkCIDRs != nil {
in, out := &in.AdditionalNetworkCIDRs, &out.AdditionalNetworkCIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]ClusterSubnetSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TagSubnets != nil {
in, out := &in.TagSubnets, &out.TagSubnets
*out = new(bool)
**out = **in
}
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(TopologySpec)
(*in).DeepCopyInto(*out)
}
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(EgressProxySpec)
**out = **in
}
if in.IsolateControlPlane != nil {
in, out := &in.IsolateControlPlane, &out.IsolateControlPlane
*out = new(bool)
**out = **in
}
if in.Classic != nil {
in, out := &in.Classic, &out.Classic
*out = new(ClassicNetworkingSpec)

View File

@ -64,22 +64,6 @@ type ClusterSpec struct {
ContainerRuntime string `json:"containerRuntime,omitempty"`
// The version of kubernetes to install (optional, and can be a "spec" like stable)
KubernetesVersion string `json:"kubernetesVersion,omitempty"`
// Configuration of subnets we are targeting
Subnets []ClusterSubnetSpec `json:"subnets,omitempty"`
// NetworkCIDR is the CIDR used for the AWS VPC / GCE Network, or otherwise allocated to k8s
// This is a real CIDR, not the internal k8s network
// On AWS, it maps to the VPC CIDR. It is not required on GCE.
NetworkCIDR string `json:"networkCIDR,omitempty"`
// AdditionalNetworkCIDRs is a list of additional CIDRs used for the AWS VPC
// or otherwise allocated to k8s. These are real CIDRs, not the internal k8s network.
// On AWS, they map to any additional CIDRs added to a VPC.
AdditionalNetworkCIDRs []string `json:"additionalNetworkCIDRs,omitempty"`
// NetworkID is an identifier of a network, if we want to reuse/share an existing network (e.g. an AWS VPC)
NetworkID string `json:"networkID,omitempty"`
// Topology defines the type of network topology to use on the cluster - default public
// This is heavily weighted towards AWS for the time being, but should also be agnostic enough
// to port out to GCE later if needed
Topology *TopologySpec `json:"topology,omitempty"`
// SecretStore is the VFS path to where secrets are stored
SecretStore string `json:"secretStore,omitempty"`
// KeyStore is the VFS path to where SSL keys and certificates are stored
@ -97,31 +81,13 @@ type ClusterSpec struct {
DNSControllerGossipConfig *DNSControllerGossipConfig `json:"dnsControllerGossipConfig,omitempty"`
// ClusterDNSDomain is the suffix we use for internal DNS names (normally cluster.local)
ClusterDNSDomain string `json:"clusterDNSDomain,omitempty"`
// ServiceClusterIPRange is the CIDR, from the internal network, where we allocate IPs for services
ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty"`
// PodCIDR is the CIDR from which we allocate IPs for pods
PodCIDR string `json:"podCIDR,omitempty"`
// MasterIPRange string `json:",omitempty"`
// NonMasqueradeCIDR is the CIDR for the internal k8s network (on which pods & services live)
// It cannot overlap ServiceClusterIPRange
NonMasqueradeCIDR string `json:"nonMasqueradeCIDR,omitempty"`
// SSHAccess determines the permitted access to SSH
// Currently only a single CIDR is supported (though a richer grammar could be added in future)
SSHAccess []string `json:"sshAccess,omitempty"`
// NodePortAccess is a list of the CIDRs that can access the node ports range (30000-32767).
NodePortAccess []string `json:"nodePortAccess,omitempty"`
// HTTPProxy defines connection information to support use of a private cluster behind a forward HTTP Proxy
EgressProxy *EgressProxySpec `json:"egressProxy,omitempty"`
// SSHKeyName specifies a preexisting SSH key to use
SSHKeyName *string `json:"sshKeyName,omitempty"`
// IsolateMasters determines whether we should lock down masters so that they are not on the pod network.
// true is the kube-up behaviour, but it is very surprising: it means that daemonsets only work on the master
// if they have hostNetwork=true.
// false is now the default, and it will:
// * give the master a normal PodCIDR
// * run kube-proxy on the master
// * enable debugging handlers on the master, so kubectl logs works
IsolateMasters *bool `json:"isolateMasters,omitempty"`
// UpdatePolicy determines the policy for applying upgrades automatically.
// Valid values:
// 'automatic' (default): apply updates automatically (apply OS security upgrades, avoiding rebooting when possible)
@ -166,7 +132,7 @@ type ClusterSpec struct {
AWSLoadBalancerController *AWSLoadBalancerControllerConfig `json:"awsLoadBalancerController,omitempty"`
// Networking configuration
Networking *NetworkingSpec `json:"networking,omitempty"`
Networking NetworkingSpec `json:"networking,omitempty"`
// API controls how the Kubernetes API is exposed.
API APISpec `json:"api,omitempty"`
// Authentication field controls how the cluster is configured for authentication
@ -184,8 +150,6 @@ type ClusterSpec struct {
IAM *IAMSpec `json:"iam,omitempty"`
// EncryptionConfig holds the encryption config
EncryptionConfig *bool `json:"encryptionConfig,omitempty"`
// TagSubnets controls if tags are added to subnets to enable use by load balancers (AWS only). Default: true.
TagSubnets *bool `json:"tagSubnets,omitempty"`
// Target allows for us to nest extra config for targets such as terraform
Target *TargetSpec `json:"target,omitempty"`
// UseHostCertificates will mount /etc/ssl/certs to inside needed containers.

View File

@ -34,21 +34,21 @@ func SetDefaults_ClusterSpec(obj *ClusterSpec) {
return true
}
if obj.Topology == nil {
obj.Topology = &TopologySpec{}
if obj.Networking.Topology == nil {
obj.Networking.Topology = &TopologySpec{}
}
rebindIfEmpty(&obj.Topology.ControlPlane, TopologyPublic)
rebindIfEmpty(&obj.Networking.Topology.ControlPlane, TopologyPublic)
rebindIfEmpty(&obj.Topology.Nodes, TopologyPublic)
rebindIfEmpty(&obj.Networking.Topology.Nodes, TopologyPublic)
if obj.Topology.DNS == "" {
obj.Topology.DNS = DNSTypePublic
if obj.Networking.Topology.DNS == "" {
obj.Networking.Topology.DNS = DNSTypePublic
}
if obj.CloudProvider.Openstack == nil {
if obj.API.DNS == nil && obj.API.LoadBalancer == nil {
switch obj.Topology.ControlPlane {
switch obj.Networking.Topology.ControlPlane {
case TopologyPublic:
obj.API.DNS = &DNSAccessSpec{}
@ -56,7 +56,7 @@ func SetDefaults_ClusterSpec(obj *ClusterSpec) {
obj.API.LoadBalancer = &LoadBalancerAccessSpec{}
default:
klog.Infof("unknown controlPlane topology type: %q", obj.Topology.ControlPlane)
klog.Infof("unknown controlPlane topology type: %q", obj.Networking.Topology.ControlPlane)
}
}
@ -78,11 +78,9 @@ func SetDefaults_ClusterSpec(obj *ClusterSpec) {
obj.Authorization.AlwaysAllow = &AlwaysAllowAuthorizationSpec{}
}
if obj.Networking != nil {
if obj.Networking.Flannel != nil {
// Populate with legacy default value; new clusters will be created with "vxlan" by
// "create cluster."
rebindIfEmpty(&obj.Networking.Flannel.Backend, "udp")
}
if obj.Networking.Flannel != nil {
// Populate with legacy default value; new clusters will be created with "vxlan" by
// "create cluster."
rebindIfEmpty(&obj.Networking.Flannel.Backend, "udp")
}
}

View File

@ -22,8 +22,51 @@ import (
"k8s.io/kops/pkg/apis/kops"
)
// NetworkingSpec allows selection and configuration of a networking plugin
// NetworkingSpec configures networking.
type NetworkingSpec struct {
// NetworkID is the cloud provider's identifier of the existing network (for example, AWS VPC) the cluster should use.
// If not specified, kOps will create a new network.
NetworkID string `json:"networkID,omitempty"`
// NetworkCIDR is the primary IPv4 CIDR used for the cloud provider's network.
// It is not required on GCE.
// On DO, it maps to the VPC CIDR.
NetworkCIDR string `json:"networkCIDR,omitempty"`
// AdditionalNetworkCIDRs is a list of additional CIDRs used for the AWS VPC
// or otherwise allocated to k8s. These are real CIDRs, not the internal k8s network.
// On AWS, they map to any additional CIDRs added to a VPC.
AdditionalNetworkCIDRs []string `json:"additionalNetworkCIDRs,omitempty"`
// Subnets are the subnets that the cluster can use.
Subnets []ClusterSubnetSpec `json:"subnets,omitempty"`
// TagSubnets controls if tags are added to subnets to enable use by load balancers (AWS only). Default: true.
TagSubnets *bool `json:"tagSubnets,omitempty"`
// Topology defines the type of network topology to use on the cluster - default public
// This is heavily weighted towards AWS for the time being, but should also be agnostic enough
// to port out to GCE later if needed
Topology *TopologySpec `json:"topology,omitempty"`
// HTTPProxy defines connection information to support use of a private cluster behind a forward HTTP Proxy
EgressProxy *EgressProxySpec `json:"egressProxy,omitempty"`
// NonMasqueradeCIDR is the CIDR for the internal k8s network (on which pods & services live)
// It cannot overlap ServiceClusterIPRange
NonMasqueradeCIDR string `json:"nonMasqueradeCIDR,omitempty"`
// PodCIDR is the CIDR from which we allocate IPs for pods
PodCIDR string `json:"podCIDR,omitempty"`
// ServiceClusterIPRange is the CIDR, from the internal network, where we allocate IPs for services
ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty"`
// IsolateControlPlane determines whether we should lock down masters so that they are not on the pod network.
// true is the kube-up behaviour, but it is very surprising: it means that daemonsets only work on the master
// if they have hostNetwork=true.
// false is now the default, and it will:
// * give the master a normal PodCIDR
// * run kube-proxy on the master
// * enable debugging handlers on the master, so kubectl logs works
IsolateControlPlane *bool `json:"isolateControlPlane,omitempty"`
// The following specify the selection and configuration of a networking plugin.
// Exactly one of the fields must be non-null.
Classic *kops.ClassicNetworkingSpec `json:"-"`
Kubenet *KubenetNetworkingSpec `json:"kubenet,omitempty"`
External *ExternalNetworkingSpec `json:"external,omitempty"`

View File

@ -2549,29 +2549,6 @@ func autoConvert_v1alpha3_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
}
out.ContainerRuntime = in.ContainerRuntime
out.KubernetesVersion = in.KubernetesVersion
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]kops.ClusterSubnetSpec, len(*in))
for i := range *in {
if err := Convert_v1alpha3_ClusterSubnetSpec_To_kops_ClusterSubnetSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subnets = nil
}
out.NetworkCIDR = in.NetworkCIDR
out.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs
out.NetworkID = in.NetworkID
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(kops.TopologySpec)
if err := Convert_v1alpha3_TopologySpec_To_kops_TopologySpec(*in, *out, s); err != nil {
return err
}
} else {
out.Topology = nil
}
out.SecretStore = in.SecretStore
out.KeyStore = in.KeyStore
out.ConfigStore = in.ConfigStore
@ -2586,22 +2563,9 @@ func autoConvert_v1alpha3_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
out.DNSControllerGossipConfig = nil
}
out.ClusterDNSDomain = in.ClusterDNSDomain
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.PodCIDR = in.PodCIDR
out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
out.SSHAccess = in.SSHAccess
out.NodePortAccess = in.NodePortAccess
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(kops.EgressProxySpec)
if err := Convert_v1alpha3_EgressProxySpec_To_kops_EgressProxySpec(*in, *out, s); err != nil {
return err
}
} else {
out.EgressProxy = nil
}
out.SSHKeyName = in.SSHKeyName
out.IsolateMasters = in.IsolateMasters
out.UpdatePolicy = in.UpdatePolicy
out.ExternalPolicies = in.ExternalPolicies
out.AdditionalPolicies = in.AdditionalPolicies
@ -2789,14 +2753,8 @@ func autoConvert_v1alpha3_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
} else {
out.AWSLoadBalancerController = nil
}
if in.Networking != nil {
in, out := &in.Networking, &out.Networking
*out = new(kops.NetworkingSpec)
if err := Convert_v1alpha3_NetworkingSpec_To_kops_NetworkingSpec(*in, *out, s); err != nil {
return err
}
} else {
out.Networking = nil
if err := Convert_v1alpha3_NetworkingSpec_To_kops_NetworkingSpec(&in.Networking, &out.Networking, s); err != nil {
return err
}
if err := Convert_v1alpha3_APISpec_To_kops_APISpec(&in.API, &out.API, s); err != nil {
return err
@ -2851,7 +2809,6 @@ func autoConvert_v1alpha3_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
out.IAM = nil
}
out.EncryptionConfig = in.EncryptionConfig
out.TagSubnets = in.TagSubnets
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(kops.TargetSpec)
@ -2962,29 +2919,6 @@ func autoConvert_kops_ClusterSpec_To_v1alpha3_ClusterSpec(in *kops.ClusterSpec,
}
out.ContainerRuntime = in.ContainerRuntime
out.KubernetesVersion = in.KubernetesVersion
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]ClusterSubnetSpec, len(*in))
for i := range *in {
if err := Convert_kops_ClusterSubnetSpec_To_v1alpha3_ClusterSubnetSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subnets = nil
}
out.NetworkCIDR = in.NetworkCIDR
out.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs
out.NetworkID = in.NetworkID
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(TopologySpec)
if err := Convert_kops_TopologySpec_To_v1alpha3_TopologySpec(*in, *out, s); err != nil {
return err
}
} else {
out.Topology = nil
}
out.SecretStore = in.SecretStore
out.KeyStore = in.KeyStore
out.ConfigStore = in.ConfigStore
@ -2999,22 +2933,9 @@ func autoConvert_kops_ClusterSpec_To_v1alpha3_ClusterSpec(in *kops.ClusterSpec,
out.DNSControllerGossipConfig = nil
}
out.ClusterDNSDomain = in.ClusterDNSDomain
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.PodCIDR = in.PodCIDR
out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
out.SSHAccess = in.SSHAccess
out.NodePortAccess = in.NodePortAccess
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(EgressProxySpec)
if err := Convert_kops_EgressProxySpec_To_v1alpha3_EgressProxySpec(*in, *out, s); err != nil {
return err
}
} else {
out.EgressProxy = nil
}
out.SSHKeyName = in.SSHKeyName
out.IsolateMasters = in.IsolateMasters
out.UpdatePolicy = in.UpdatePolicy
out.ExternalPolicies = in.ExternalPolicies
out.AdditionalPolicies = in.AdditionalPolicies
@ -3202,14 +3123,8 @@ func autoConvert_kops_ClusterSpec_To_v1alpha3_ClusterSpec(in *kops.ClusterSpec,
} else {
out.AWSLoadBalancerController = nil
}
if in.Networking != nil {
in, out := &in.Networking, &out.Networking
*out = new(NetworkingSpec)
if err := Convert_kops_NetworkingSpec_To_v1alpha3_NetworkingSpec(*in, *out, s); err != nil {
return err
}
} else {
out.Networking = nil
if err := Convert_kops_NetworkingSpec_To_v1alpha3_NetworkingSpec(&in.Networking, &out.Networking, s); err != nil {
return err
}
if err := Convert_kops_APISpec_To_v1alpha3_APISpec(&in.API, &out.API, s); err != nil {
return err
@ -3264,7 +3179,6 @@ func autoConvert_kops_ClusterSpec_To_v1alpha3_ClusterSpec(in *kops.ClusterSpec,
out.IAM = nil
}
out.EncryptionConfig = in.EncryptionConfig
out.TagSubnets = in.TagSubnets
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(TargetSpec)
@ -6194,6 +6108,43 @@ func Convert_kops_NTPConfig_To_v1alpha3_NTPConfig(in *kops.NTPConfig, out *NTPCo
}
func autoConvert_v1alpha3_NetworkingSpec_To_kops_NetworkingSpec(in *NetworkingSpec, out *kops.NetworkingSpec, s conversion.Scope) error {
out.NetworkID = in.NetworkID
out.NetworkCIDR = in.NetworkCIDR
out.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]kops.ClusterSubnetSpec, len(*in))
for i := range *in {
if err := Convert_v1alpha3_ClusterSubnetSpec_To_kops_ClusterSubnetSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subnets = nil
}
out.TagSubnets = in.TagSubnets
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(kops.TopologySpec)
if err := Convert_v1alpha3_TopologySpec_To_kops_TopologySpec(*in, *out, s); err != nil {
return err
}
} else {
out.Topology = nil
}
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(kops.EgressProxySpec)
if err := Convert_v1alpha3_EgressProxySpec_To_kops_EgressProxySpec(*in, *out, s); err != nil {
return err
}
} else {
out.EgressProxy = nil
}
out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
out.PodCIDR = in.PodCIDR
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.IsolateControlPlane = in.IsolateControlPlane
out.Classic = in.Classic
if in.Kubenet != nil {
in, out := &in.Kubenet, &out.Kubenet
@ -6314,6 +6265,43 @@ func Convert_v1alpha3_NetworkingSpec_To_kops_NetworkingSpec(in *NetworkingSpec,
}
func autoConvert_kops_NetworkingSpec_To_v1alpha3_NetworkingSpec(in *kops.NetworkingSpec, out *NetworkingSpec, s conversion.Scope) error {
out.NetworkID = in.NetworkID
out.NetworkCIDR = in.NetworkCIDR
out.AdditionalNetworkCIDRs = in.AdditionalNetworkCIDRs
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]ClusterSubnetSpec, len(*in))
for i := range *in {
if err := Convert_kops_ClusterSubnetSpec_To_v1alpha3_ClusterSubnetSpec(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Subnets = nil
}
out.TagSubnets = in.TagSubnets
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(TopologySpec)
if err := Convert_kops_TopologySpec_To_v1alpha3_TopologySpec(*in, *out, s); err != nil {
return err
}
} else {
out.Topology = nil
}
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(EgressProxySpec)
if err := Convert_kops_EgressProxySpec_To_v1alpha3_EgressProxySpec(*in, *out, s); err != nil {
return err
}
} else {
out.EgressProxy = nil
}
out.NonMasqueradeCIDR = in.NonMasqueradeCIDR
out.PodCIDR = in.PodCIDR
out.ServiceClusterIPRange = in.ServiceClusterIPRange
out.IsolateControlPlane = in.IsolateControlPlane
out.Classic = in.Classic
if in.Kubenet != nil {
in, out := &in.Kubenet, &out.Kubenet

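The nil branches disappear from the generated converters because Networking changed from a pointer field to a value field: conversion can take the address of the embedded value directly, with no allocation or nil check. A sketch of the pattern with stand-in types (not the real generated code):

package main

import "fmt"

type v1alpha3Networking struct{ NetworkID string }
type kopsNetworking struct{ NetworkID string }

type v1alpha3Spec struct{ Networking v1alpha3Networking } // was *v1alpha3Networking
type kopsSpec struct{ Networking kopsNetworking }

func convertNetworking(in *v1alpha3Networking, out *kopsNetworking) error {
	out.NetworkID = in.NetworkID
	return nil
}

func convertSpec(in *v1alpha3Spec, out *kopsSpec) error {
	// Before: if in.Networking != nil { allocate, convert } else { out.Networking = nil }
	// After: one unconditional call on the embedded values.
	return convertNetworking(&in.Networking, &out.Networking)
}

func main() {
	in := v1alpha3Spec{Networking: v1alpha3Networking{NetworkID: "vpc-12345678"}}
	var out kopsSpec
	if err := convertSpec(&in, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Networking.NetworkID)
}
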
View File

@ -1018,23 +1018,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(GossipConfig)
(*in).DeepCopyInto(*out)
}
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]ClusterSubnetSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AdditionalNetworkCIDRs != nil {
in, out := &in.AdditionalNetworkCIDRs, &out.AdditionalNetworkCIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(TopologySpec)
(*in).DeepCopyInto(*out)
}
if in.DNSControllerGossipConfig != nil {
in, out := &in.DNSControllerGossipConfig, &out.DNSControllerGossipConfig
*out = new(DNSControllerGossipConfig)
@ -1050,21 +1033,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(EgressProxySpec)
**out = **in
}
if in.SSHKeyName != nil {
in, out := &in.SSHKeyName, &out.SSHKeyName
*out = new(string)
**out = **in
}
if in.IsolateMasters != nil {
in, out := &in.IsolateMasters, &out.IsolateMasters
*out = new(bool)
**out = **in
}
if in.UpdatePolicy != nil {
in, out := &in.UpdatePolicy, &out.UpdatePolicy
*out = new(string)
@ -1204,11 +1177,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(AWSLoadBalancerControllerConfig)
(*in).DeepCopyInto(*out)
}
if in.Networking != nil {
in, out := &in.Networking, &out.Networking
*out = new(NetworkingSpec)
(*in).DeepCopyInto(*out)
}
in.Networking.DeepCopyInto(&out.Networking)
in.API.DeepCopyInto(&out.API)
if in.Authentication != nil {
in, out := &in.Authentication, &out.Authentication
@ -1254,11 +1223,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(bool)
**out = **in
}
if in.TagSubnets != nil {
in, out := &in.TagSubnets, &out.TagSubnets
*out = new(bool)
**out = **in
}
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(TargetSpec)
@ -4238,6 +4202,38 @@ func (in *NTPConfig) DeepCopy() *NTPConfig {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkingSpec) DeepCopyInto(out *NetworkingSpec) {
*out = *in
if in.AdditionalNetworkCIDRs != nil {
in, out := &in.AdditionalNetworkCIDRs, &out.AdditionalNetworkCIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]ClusterSubnetSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TagSubnets != nil {
in, out := &in.TagSubnets, &out.TagSubnets
*out = new(bool)
**out = **in
}
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(TopologySpec)
(*in).DeepCopyInto(*out)
}
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(EgressProxySpec)
**out = **in
}
if in.IsolateControlPlane != nil {
in, out := &in.IsolateControlPlane, &out.IsolateControlPlane
*out = new(bool)
**out = **in
}
if in.Classic != nil {
in, out := &in.Classic, &out.Classic
*out = new(kops.ClassicNetworkingSpec)

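The generated DeepCopyInto re-allocates every slice and pointer field, so a copy never aliases the original. A trimmed sketch of that guarantee (stand-in type; the generated code above handles many more fields):

package main

import "fmt"

type NetworkingSpec struct {
	AdditionalNetworkCIDRs []string
	TagSubnets             *bool
}

// DeepCopyInto mirrors the generated pattern: copy scalars wholesale,
// then re-allocate slices and pointers.
func (in *NetworkingSpec) DeepCopyInto(out *NetworkingSpec) {
	*out = *in
	if in.AdditionalNetworkCIDRs != nil {
		out.AdditionalNetworkCIDRs = make([]string, len(in.AdditionalNetworkCIDRs))
		copy(out.AdditionalNetworkCIDRs, in.AdditionalNetworkCIDRs)
	}
	if in.TagSubnets != nil {
		v := *in.TagSubnets
		out.TagSubnets = &v
	}
}

func main() {
	t := true
	orig := NetworkingSpec{AdditionalNetworkCIDRs: []string{"10.1.0.0/16"}, TagSubnets: &t}
	var cp NetworkingSpec
	orig.DeepCopyInto(&cp)
	cp.AdditionalNetworkCIDRs[0] = "10.2.0.0/16"
	*cp.TagSubnets = false
	fmt.Println(orig.AdditionalNetworkCIDRs[0], *orig.TagSubnets) // 10.1.0.0/16 true
}
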
View File

@ -48,15 +48,15 @@ func awsValidateCluster(c *kops.Cluster) field.ErrorList {
allErrs = append(allErrs, awsValidateIAMAuthenticator(field.NewPath("spec", "authentication", "aws"), c.Spec.Authentication.AWS)...)
}
for i, subnet := range c.Spec.Subnets {
f := field.NewPath("spec", "subnets").Index(i)
for i, subnet := range c.Spec.Networking.Subnets {
f := field.NewPath("spec", "networking", "subnets").Index(i)
if subnet.AdditionalRoutes != nil {
if len(subnet.ID) > 0 {
allErrs = append(allErrs, field.Invalid(f, subnet, "additional routes cannot be added if the subnet is shared"))
} else if subnet.Type != kops.SubnetTypePrivate {
allErrs = append(allErrs, field.Invalid(f, subnet, "additional routes can only be added on private subnets"))
}
allErrs = append(allErrs, awsValidateAdditionalRoutes(f.Child("additionalRoutes"), subnet.AdditionalRoutes, c.Spec.NetworkCIDR)...)
allErrs = append(allErrs, awsValidateAdditionalRoutes(f.Child("additionalRoutes"), subnet.AdditionalRoutes, c.Spec.Networking.NetworkCIDR)...)
}
}
@ -307,7 +307,7 @@ func awsValidateLoadBalancerSubnets(fieldPath *field.Path, spec kops.ClusterSpec
if subnet.Name == "" {
allErrs = append(allErrs, field.Required(fieldPath.Index(i).Child("name"), "subnet name can't be empty"))
} else {
for _, cs := range spec.Subnets {
for _, cs := range spec.Networking.Subnets {
if subnet.Name == cs.Name {
clusterSubnet = &cs
break
@ -390,7 +390,7 @@ func awsValidateAdditionalRoutes(fieldPath *field.Path, routes []kops.RouteSpec,
_, clusterNet, errClusterNet := net.ParseCIDR(cidr)
if errClusterNet != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "networkCIDR"), cidr, "Invalid cluster cidr"))
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "networking", "networkCIDR"), cidr, "Invalid cluster cidr"))
} else {
for i, r := range routes {
f := fieldPath.Index(i)

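The updated error paths are built with the apimachinery field.Path helpers, which render exactly the strings asserted in the tests below:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	f := field.NewPath("spec", "networking", "subnets").Index(0)
	fmt.Println(f)                                    // spec.networking.subnets[0]
	fmt.Println(f.Child("additionalRoutes").Index(1)) // spec.networking.subnets[0].additionalRoutes[1]
}
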
View File

@ -586,7 +586,7 @@ func TestLoadBalancerSubnets(t *testing.T) {
cluster.Spec.API.LoadBalancer.Type = kops.LoadBalancerType(*test.lbType)
}
for _, s := range test.clusterSubnets {
cluster.Spec.Subnets = append(cluster.Spec.Subnets, kops.ClusterSubnetSpec{
cluster.Spec.Networking.Subnets = append(cluster.Spec.Networking.Subnets, kops.ClusterSubnetSpec{
Name: s,
CIDR: cidr,
})
@ -744,7 +744,7 @@ func TestAWSAdditionalRoutes(t *testing.T) {
Target: "pcx-abcdef",
},
},
expected: []string{"Invalid value::spec.networkCIDR"},
expected: []string{"Invalid value::spec.networking.networkCIDR"},
},
{ // bad cidr
clusterCidr: "100.64.0.0/10",
@ -755,7 +755,7 @@ func TestAWSAdditionalRoutes(t *testing.T) {
Target: "pcx-abcdef",
},
},
expected: []string{"Invalid value::spec.subnets[0].additionalRoutes[0].cidr"},
expected: []string{"Invalid value::spec.networking.subnets[0].additionalRoutes[0].cidr"},
},
{ // bad target
clusterCidr: "100.64.0.0/10",
@ -766,7 +766,7 @@ func TestAWSAdditionalRoutes(t *testing.T) {
Target: "unknown-abcdef",
},
},
expected: []string{"Invalid value::spec.subnets[0].additionalRoutes[0].target"},
expected: []string{"Invalid value::spec.networking.subnets[0].additionalRoutes[0].target"},
},
{ // target more specific
clusterCidr: "100.64.0.0/10",
@ -777,7 +777,7 @@ func TestAWSAdditionalRoutes(t *testing.T) {
Target: "pcx-abcdef",
},
},
expected: []string{"Forbidden::spec.subnets[0].additionalRoutes[0].target"},
expected: []string{"Forbidden::spec.networking.subnets[0].additionalRoutes[0].target"},
},
{ // duplicates cidr
clusterCidr: "100.64.0.0/10",
@ -792,7 +792,7 @@ func TestAWSAdditionalRoutes(t *testing.T) {
Target: "tgw-abcdef",
},
},
expected: []string{"Duplicate value::spec.subnets[0].additionalRoutes[1].cidr"},
expected: []string{"Duplicate value::spec.networking.subnets[0].additionalRoutes[1].cidr"},
},
{ // shared subnet
clusterCidr: "100.64.0.0/10",
@ -804,7 +804,7 @@ func TestAWSAdditionalRoutes(t *testing.T) {
Target: "pcx-abcdef",
},
},
expected: []string{"Invalid value::spec.subnets[0]"},
expected: []string{"Invalid value::spec.networking.subnets[0]"},
},
{ // not a private subnet
clusterCidr: "100.64.0.0/10",
@ -815,19 +815,21 @@ func TestAWSAdditionalRoutes(t *testing.T) {
Target: "pcx-abcdef",
},
},
expected: []string{"Invalid value::spec.subnets[0]"},
expected: []string{"Invalid value::spec.networking.subnets[0]"},
},
}
for _, test := range tests {
cluster := kops.Cluster{
Spec: kops.ClusterSpec{
NetworkCIDR: test.clusterCidr,
Subnets: []kops.ClusterSubnetSpec{
{
ID: test.providerId,
Type: test.subnetType,
AdditionalRoutes: test.route,
Networking: kops.NetworkingSpec{
NetworkCIDR: test.clusterCidr,
Subnets: []kops.ClusterSubnetSpec{
{
ID: test.providerId,
Type: test.subnetType,
AdditionalRoutes: test.route,
},
},
},
},

View File

@ -28,8 +28,8 @@ func gceValidateCluster(c *kops.Cluster) field.ErrorList {
fieldSpec := field.NewPath("spec")
region := ""
for i, subnet := range c.Spec.Subnets {
f := fieldSpec.Child("subnets").Index(i)
for i, subnet := range c.Spec.Networking.Subnets {
f := fieldSpec.Child("networking", "subnets").Index(i)
if subnet.Zone != "" {
allErrs = append(allErrs, field.Invalid(f.Child("zone"), subnet.Zone, "zones should not be specified for GCE subnets, as GCE subnets are regional"))
}

View File

@ -235,14 +235,14 @@ func CrossValidateInstanceGroup(g *kops.InstanceGroup, cluster *kops.Cluster, cl
// Check that instance groups are defined in subnets that are defined in the cluster
{
clusterSubnets := make(map[string]*kops.ClusterSubnetSpec)
for i := range cluster.Spec.Subnets {
s := &cluster.Spec.Subnets[i]
for i := range cluster.Spec.Networking.Subnets {
s := &cluster.Spec.Networking.Subnets[i]
clusterSubnets[s.Name] = s
}
for i, z := range g.Spec.Subnets {
if clusterSubnets[z] == nil {
allErrs = append(allErrs, field.NotFound(field.NewPath("spec", "subnets").Index(i), z))
allErrs = append(allErrs, field.NotFound(field.NewPath("spec", "networking", "subnets").Index(i), z))
}
}
}

View File

@ -78,8 +78,8 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
optionTaken = true
requiresNetworkCIDR = false
requiresSubnetCIDR = false
if c.Spec.NetworkCIDR != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networkCIDR"), "networkCIDR should not be set on GCE"))
if c.Spec.Networking.NetworkCIDR != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networking", "networkCIDR"), "networkCIDR should not be set on GCE"))
}
}
if c.Spec.CloudProvider.Hetzner != nil {
@ -114,9 +114,9 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
requiresNetworkCIDR = false
}
if requiresSubnets && len(c.Spec.Subnets) == 0 {
if requiresSubnets && len(c.Spec.Networking.Subnets) == 0 {
// TODO: Auto choose zones from region?
allErrs = append(allErrs, field.Required(fieldSpec.Child("subnets"), "must configure at least one subnet (use --zones)"))
allErrs = append(allErrs, field.Required(fieldSpec.Child("networking", "subnets"), "must configure at least one subnet (use --zones)"))
}
if strict && c.Spec.Kubelet == nil {
@ -148,23 +148,23 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
var networkCIDR *net.IPNet
var err error
{
if c.Spec.NetworkCIDR == "" {
if c.Spec.Networking.NetworkCIDR == "" {
if requiresNetworkCIDR {
allErrs = append(allErrs, field.Required(fieldSpec.Child("networkCIDR"), "Cluster did not have networkCIDR set"))
allErrs = append(allErrs, field.Required(fieldSpec.Child("networking", "networkCIDR"), "Cluster did not have networkCIDR set"))
}
} else {
_, networkCIDR, err = net.ParseCIDR(c.Spec.NetworkCIDR)
_, networkCIDR, err = net.ParseCIDR(c.Spec.Networking.NetworkCIDR)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networkCIDR"), c.Spec.NetworkCIDR, "Cluster had an invalid networkCIDR"))
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networking", "networkCIDR"), c.Spec.Networking.NetworkCIDR, "Cluster had an invalid networkCIDR"))
}
if c.Spec.GetCloudProvider() == kops.CloudProviderDO {
// verify if the NetworkCIDR is in a private range as per RFC1918
if !networkCIDR.IP.IsPrivate() {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networkCIDR"), c.Spec.NetworkCIDR, "Cluster had a networkCIDR outside the private IP range"))
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networking", "networkCIDR"), c.Spec.Networking.NetworkCIDR, "Cluster had a networkCIDR outside the private IP range"))
}
// verify that networkID is not also specified; on DO, NetworkID and NetworkCIDR are mutually exclusive.
if c.Spec.NetworkID != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networkCIDR"), "DO doesn't support specifying both NetworkID and NetworkCIDR together"))
if c.Spec.Networking.NetworkID != "" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networking", "networkCIDR"), "DO doesn't support specifying both NetworkID and NetworkCIDR together"))
}
}
}
@ -173,21 +173,19 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
// Check AdditionalNetworkCIDRs
var additionalNetworkCIDRs []*net.IPNet
{
if len(c.Spec.AdditionalNetworkCIDRs) > 0 {
for _, AdditionalNetworkCIDR := range c.Spec.AdditionalNetworkCIDRs {
_, IPNetAdditionalNetworkCIDR, err := net.ParseCIDR(AdditionalNetworkCIDR)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("additionalNetworkCIDRs"), AdditionalNetworkCIDR, "Cluster had an invalid additionalNetworkCIDRs"))
}
additionalNetworkCIDRs = append(additionalNetworkCIDRs, IPNetAdditionalNetworkCIDR)
for _, AdditionalNetworkCIDR := range c.Spec.Networking.AdditionalNetworkCIDRs {
_, IPNetAdditionalNetworkCIDR, err := net.ParseCIDR(AdditionalNetworkCIDR)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networking", "additionalNetworkCIDRs"), AdditionalNetworkCIDR, "Cluster had an invalid additionalNetworkCIDRs"))
}
additionalNetworkCIDRs = append(additionalNetworkCIDRs, IPNetAdditionalNetworkCIDR)
}
}
// nonMasqueradeCIDR is essentially deprecated, and we're moving to cluster-cidr instead (which is better named pod-cidr)
nonMasqueradeCIDRRequired := true
serviceClusterMustBeSubnetOfNonMasqueradeCIDR := true
if c.Spec.Networking != nil && c.Spec.Networking.GCE != nil {
if c.Spec.Networking.GCE != nil {
nonMasqueradeCIDRRequired = false
serviceClusterMustBeSubnetOfNonMasqueradeCIDR = false
}
@ -195,28 +193,28 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
// Check NonMasqueradeCIDR
var nonMasqueradeCIDR *net.IPNet
{
nonMasqueradeCIDRString := c.Spec.NonMasqueradeCIDR
nonMasqueradeCIDRString := c.Spec.Networking.NonMasqueradeCIDR
if nonMasqueradeCIDRString == "" {
if nonMasqueradeCIDRRequired {
allErrs = append(allErrs, field.Required(fieldSpec.Child("nonMasqueradeCIDR"), "Cluster did not have nonMasqueradeCIDR set"))
allErrs = append(allErrs, field.Required(fieldSpec.Child("networking", "nonMasqueradeCIDR"), "Cluster did not have nonMasqueradeCIDR set"))
}
} else {
_, nonMasqueradeCIDR, err = net.ParseCIDR(nonMasqueradeCIDRString)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("nonMasqueradeCIDR"), nonMasqueradeCIDRString, "Cluster had an invalid nonMasqueradeCIDR"))
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networking", "nonMasqueradeCIDR"), nonMasqueradeCIDRString, "Cluster had an invalid nonMasqueradeCIDR"))
} else {
if strings.Contains(nonMasqueradeCIDRString, ":") && nonMasqueradeCIDRString != "::/0" {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("nonMasqueradeCIDR"), "IPv6 clusters must have a nonMasqueradeCIDR of \"::/0\""))
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networking", "nonMasqueradeCIDR"), "IPv6 clusters must have a nonMasqueradeCIDR of \"::/0\""))
}
if networkCIDR != nil && subnet.Overlap(nonMasqueradeCIDR, networkCIDR) && c.Spec.Networking != nil && c.Spec.Networking.AmazonVPC == nil && (c.Spec.Networking.Cilium == nil || c.Spec.Networking.Cilium.IPAM != kops.CiliumIpamEni) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("nonMasqueradeCIDR"), fmt.Sprintf("nonMasqueradeCIDR %q cannot overlap with networkCIDR %q", nonMasqueradeCIDRString, c.Spec.NetworkCIDR)))
if networkCIDR != nil && subnet.Overlap(nonMasqueradeCIDR, networkCIDR) && c.Spec.Networking.AmazonVPC == nil && (c.Spec.Networking.Cilium == nil || c.Spec.Networking.Cilium.IPAM != kops.CiliumIpamEni) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networking", "nonMasqueradeCIDR"), fmt.Sprintf("nonMasqueradeCIDR %q cannot overlap with networkCIDR %q", nonMasqueradeCIDRString, c.Spec.Networking.NetworkCIDR)))
}
if c.Spec.ContainerRuntime == "docker" && c.Spec.Kubelet != nil && fi.ValueOf(c.Spec.Kubelet.NetworkPluginName) == "kubenet" {
if fi.ValueOf(c.Spec.Kubelet.NonMasqueradeCIDR) != nonMasqueradeCIDRString {
if strict || c.Spec.Kubelet.NonMasqueradeCIDR != nil {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubelet", "nonMasqueradeCIDR"), "kubelet nonMasqueradeCIDR did not match cluster nonMasqueradeCIDR"))
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubelet", "networking", "nonMasqueradeCIDR"), "kubelet nonMasqueradeCIDR did not match cluster nonMasqueradeCIDR"))
}
}
if fi.ValueOf(c.Spec.ControlPlaneKubelet.NonMasqueradeCIDR) != nonMasqueradeCIDRString {
@ -232,18 +230,18 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
// Check ServiceClusterIPRange
var serviceClusterIPRange *net.IPNet
{
serviceClusterIPRangeString := c.Spec.ServiceClusterIPRange
serviceClusterIPRangeString := c.Spec.Networking.ServiceClusterIPRange
if serviceClusterIPRangeString == "" {
if strict {
allErrs = append(allErrs, field.Required(fieldSpec.Child("serviceClusterIPRange"), "Cluster did not have serviceClusterIPRange set"))
allErrs = append(allErrs, field.Required(fieldSpec.Child("networking", "serviceClusterIPRange"), "Cluster did not have serviceClusterIPRange set"))
}
} else {
_, serviceClusterIPRange, err = net.ParseCIDR(serviceClusterIPRangeString)
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("serviceClusterIPRange"), serviceClusterIPRangeString, "Cluster had an invalid serviceClusterIPRange"))
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("networking", "serviceClusterIPRange"), serviceClusterIPRangeString, "Cluster had an invalid serviceClusterIPRange"))
} else {
if nonMasqueradeCIDR != nil && serviceClusterMustBeSubnetOfNonMasqueradeCIDR && !subnet.BelongsTo(nonMasqueradeCIDR, serviceClusterIPRange) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("serviceClusterIPRange"), fmt.Sprintf("serviceClusterIPRange %q must be a subnet of nonMasqueradeCIDR %q", serviceClusterIPRangeString, c.Spec.NonMasqueradeCIDR)))
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("networking", "serviceClusterIPRange"), fmt.Sprintf("serviceClusterIPRange %q must be a subnet of nonMasqueradeCIDR %q", serviceClusterIPRangeString, c.Spec.Networking.NonMasqueradeCIDR)))
}
if c.Spec.KubeAPIServer != nil && c.Spec.KubeAPIServer.ServiceClusterIPRange != serviceClusterIPRangeString {
@ -264,7 +262,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeControllerManager", "clusterCIDR"), clusterCIDRString, "cluster had an invalid kubeControllerManager.clusterCIDR"))
} else if nonMasqueradeCIDR != nil && !subnet.BelongsTo(nonMasqueradeCIDR, clusterCIDR) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeControllerManager", "clusterCIDR"), fmt.Sprintf("kubeControllerManager.clusterCIDR %q must be a subnet of nonMasqueradeCIDR %q", clusterCIDRString, c.Spec.NonMasqueradeCIDR)))
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeControllerManager", "clusterCIDR"), fmt.Sprintf("kubeControllerManager.clusterCIDR %q must be a subnet of nonMasqueradeCIDR %q", clusterCIDRString, c.Spec.Networking.NonMasqueradeCIDR)))
}
}
}
@ -278,7 +276,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
allErrs = append(allErrs, field.Invalid(fieldSpec.Child("kubeDNS", "serverIP"), address, "Cluster had an invalid kubeDNS.serverIP"))
} else {
if serviceClusterIPRange != nil && !serviceClusterIPRange.Contains(ip) {
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeDNS", "serverIP"), fmt.Sprintf("ServiceClusterIPRange %q must contain the DNS Server IP %q", c.Spec.ServiceClusterIPRange, address)))
allErrs = append(allErrs, field.Forbidden(fieldSpec.Child("kubeDNS", "serverIP"), fmt.Sprintf("ServiceClusterIPRange %q must contain the DNS Server IP %q", c.Spec.Networking.ServiceClusterIPRange, address)))
}
if !featureflag.ExperimentalClusterDNS.Enabled() {
if isExperimentalClusterDNS(c.Spec.Kubelet, c.Spec.KubeDNS) {
@ -370,11 +368,11 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
// Check that the subnet CIDRs are all consistent
{
for i, s := range c.Spec.Subnets {
fieldSubnet := fieldSpec.Child("subnets").Index(i)
for i, s := range c.Spec.Networking.Subnets {
fieldSubnet := fieldSpec.Child("networking", "subnets").Index(i)
if s.CIDR == "" {
if requiresSubnetCIDR && strict {
if !strings.Contains(c.Spec.NonMasqueradeCIDR, ":") || s.IPv6CIDR == "" {
if !strings.Contains(c.Spec.Networking.NonMasqueradeCIDR, ":") || s.IPv6CIDR == "" {
allErrs = append(allErrs, field.Required(fieldSubnet.Child("cidr"), "subnet did not have a cidr set"))
} else if c.IsKubernetesLT("1.22") {
allErrs = append(allErrs, field.Required(fieldSubnet.Child("cidr"), "IPv6-only subnets require Kubernetes 1.22+"))
@ -385,7 +383,7 @@ func ValidateCluster(c *kops.Cluster, strict bool) field.ErrorList {
if err != nil {
allErrs = append(allErrs, field.Invalid(fieldSubnet.Child("cidr"), s.CIDR, "subnet had an invalid cidr"))
} else if networkCIDR != nil && !validateSubnetCIDR(networkCIDR, additionalNetworkCIDRs, subnetCIDR) {
allErrs = append(allErrs, field.Forbidden(fieldSubnet.Child("cidr"), fmt.Sprintf("subnet %q had a cidr %q that was not a subnet of the networkCIDR %q", s.Name, s.CIDR, c.Spec.NetworkCIDR)))
allErrs = append(allErrs, field.Forbidden(fieldSubnet.Child("cidr"), fmt.Sprintf("subnet %q had a cidr %q that was not a subnet of the networkCIDR %q", s.Name, s.CIDR, c.Spec.Networking.NetworkCIDR)))
}
}
}

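The subnet-consistency check boils down to CIDR containment: each subnet CIDR must fall inside the networkCIDR or one of the additionalNetworkCIDRs. A standard-library sketch of the containment test (kops uses its internal subnet package; belongsTo here is an illustrative equivalent, not the kops implementation):

package main

import (
	"fmt"
	"net"
)

// belongsTo reports whether inner is fully contained in outer:
// outer must contain inner's base address and have a prefix no longer than inner's.
func belongsTo(outer, inner *net.IPNet) bool {
	outerOnes, _ := outer.Mask.Size()
	innerOnes, _ := inner.Mask.Size()
	return outer.Contains(inner.IP) && outerOnes <= innerOnes
}

func main() {
	_, network, _ := net.ParseCIDR("172.20.0.0/16")
	_, subnetCIDR, _ := net.ParseCIDR("172.20.32.0/19")
	fmt.Println(belongsTo(network, subnetCIDR)) // true: a valid subnet of the networkCIDR
}
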
View File

@ -26,12 +26,12 @@ func openstackValidateCluster(c *kops.Cluster) (errList field.ErrorList) {
return errList
}
if c.Spec.CloudProvider.Openstack.Router == nil || c.Spec.CloudProvider.Openstack.Router.ExternalNetwork == nil {
topology := c.Spec.Topology
topology := c.Spec.Networking.Topology
if topology == nil || topology.Nodes == kops.TopologyPublic {
errList = append(errList, field.Forbidden(field.NewPath("spec", "topology", "nodes"), "Public topology requires an external network"))
errList = append(errList, field.Forbidden(field.NewPath("spec", "networking", "topology", "nodes"), "Public topology requires an external network"))
}
if topology == nil || topology.ControlPlane == kops.TopologyPublic {
errList = append(errList, field.Forbidden(field.NewPath("spec", "topology", "controlPlane"), "Public topology requires an external network"))
errList = append(errList, field.Forbidden(field.NewPath("spec", "networking", "topology", "controlPlane"), "Public topology requires an external network"))
}
}
return errList

View File

@ -36,8 +36,8 @@ func Test_ValidateTopology(t *testing.T) {
},
},
ExpectedErrors: []string{
"Forbidden::spec.topology.nodes",
"Forbidden::spec.topology.controlPlane",
"Forbidden::spec.networking.topology.nodes",
"Forbidden::spec.networking.topology.controlPlane",
},
},
{
@ -49,8 +49,8 @@ func Test_ValidateTopology(t *testing.T) {
},
},
ExpectedErrors: []string{
"Forbidden::spec.topology.nodes",
"Forbidden::spec.topology.controlPlane",
"Forbidden::spec.networking.topology.nodes",
"Forbidden::spec.networking.topology.controlPlane",
},
},
{
@ -58,9 +58,11 @@ func Test_ValidateTopology(t *testing.T) {
CloudProvider: kops.CloudProviderSpec{
Openstack: &kops.OpenstackSpec{},
},
Topology: &kops.TopologySpec{
ControlPlane: kops.TopologyPrivate,
Nodes: kops.TopologyPrivate,
Networking: kops.NetworkingSpec{
Topology: &kops.TopologySpec{
ControlPlane: kops.TopologyPrivate,
Nodes: kops.TopologyPrivate,
},
},
},
ExpectedErrors: []string{},

View File

@ -80,7 +80,7 @@ func newValidateCluster(cluster *kops.Cluster) field.ErrorList {
func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
allErrs = append(allErrs, validateSubnets(spec, fieldPath.Child("subnets"))...)
allErrs = append(allErrs, validateSubnets(spec, fieldPath.Child("networking", "subnets"))...)
// SSHAccess
for i, cidr := range spec.SSHAccess {
@ -116,12 +116,12 @@ func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *fie
}
// AdditionalNetworkCIDRs
for i, cidr := range spec.AdditionalNetworkCIDRs {
allErrs = append(allErrs, validateCIDR(cidr, fieldPath.Child("additionalNetworkCIDRs").Index(i))...)
for i, cidr := range spec.Networking.AdditionalNetworkCIDRs {
allErrs = append(allErrs, validateCIDR(cidr, fieldPath.Child("networking", "additionalNetworkCIDRs").Index(i))...)
}
if spec.Topology != nil {
allErrs = append(allErrs, validateTopology(c, spec.Topology, fieldPath.Child("topology"))...)
if spec.Networking.Topology != nil {
allErrs = append(allErrs, validateTopology(c, spec.Networking.Topology, fieldPath.Child("networking", "topology"))...)
}
// UpdatePolicy
@ -158,11 +158,9 @@ func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *fie
allErrs = append(allErrs, validateKubelet(spec.ControlPlaneKubelet, c, fieldPath.Child("controlPlaneKubelet"))...)
}
if spec.Networking != nil {
allErrs = append(allErrs, validateNetworking(c, spec.Networking, fieldPath.Child("networking"))...)
if spec.Networking.Calico != nil {
allErrs = append(allErrs, validateNetworkingCalico(&c.Spec, spec.Networking.Calico, fieldPath.Child("networking", "calico"))...)
}
allErrs = append(allErrs, validateNetworking(c, &spec.Networking, fieldPath.Child("networking"))...)
if spec.Networking.Calico != nil {
allErrs = append(allErrs, validateNetworkingCalico(&c.Spec, spec.Networking.Calico, fieldPath.Child("networking", "calico"))...)
}
if spec.NodeAuthorization != nil {
@ -259,7 +257,7 @@ func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *fie
if lbSpec.Type == kops.LoadBalancerTypeInternal {
var hasPrivate bool
for _, subnet := range spec.Subnets {
for _, subnet := range spec.Networking.Subnets {
if subnet.Type == kops.SubnetTypePrivate {
hasPrivate = true
break
@ -438,7 +436,7 @@ func validateTopology(c *kops.Cluster, topology *kops.TopologySpec, fieldPath *f
func validateSubnets(cluster *kops.ClusterSpec, fieldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
subnets := cluster.Subnets
subnets := cluster.Networking.Subnets
// cannot be empty
if len(subnets) == 0 {
@ -1601,7 +1599,7 @@ func validateNodeLocalDNS(spec *kops.ClusterSpec, fldpath *field.Path) field.Err
}
}
if (spec.KubeProxy != nil && spec.KubeProxy.ProxyMode == "ipvs") || (spec.Networking != nil && spec.Networking.Cilium != nil) {
if (spec.KubeProxy != nil && spec.KubeProxy.ProxyMode == "ipvs") || spec.Networking.Cilium != nil {
if spec.Kubelet != nil && spec.Kubelet.ClusterDNS != "" && spec.Kubelet.ClusterDNS != spec.KubeDNS.NodeLocalDNS.LocalIP {
allErrs = append(allErrs, field.Forbidden(fldpath.Child("kubelet", "clusterDNS"), "Kubelet ClusterDNS must be set to the default IP address for LocalIP"))
}

View File

@ -190,7 +190,9 @@ func TestValidateSubnets(t *testing.T) {
CloudProvider: kops.CloudProviderSpec{
AWS: &kops.AWSSpec{},
},
Subnets: g.Input,
Networking: kops.NetworkingSpec{
Subnets: g.Input,
},
}
errs := validateSubnets(cluster, field.NewPath("subnets"))
@ -384,13 +386,10 @@ func Test_Validate_Networking_Flannel(t *testing.T) {
},
}
for _, g := range grid {
networking := &kops.NetworkingSpec{}
networking.Flannel = &g.Input
cluster := &kops.Cluster{}
cluster.Spec.Networking = networking
cluster.Spec.Networking.Flannel = &g.Input
errs := validateNetworking(cluster, networking, field.NewPath("networking"))
errs := validateNetworking(cluster, &cluster.Spec.Networking, field.NewPath("networking"))
testErrors(t, g.Input, errs, g.ExpectedErrors)
}
}
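
These test updates also show a side benefit of the pointer-to-value change: the zero value of Networking is always present, so a plugin can be assigned without first allocating a NetworkingSpec. A sketch with stand-in types:

package main

import "fmt"

type FlannelNetworkingSpec struct{ Backend string }
type NetworkingSpec struct{ Flannel *FlannelNetworkingSpec }
type ClusterSpec struct{ Networking NetworkingSpec }
type Cluster struct{ Spec ClusterSpec }

func main() {
	cluster := &Cluster{}
	// With a pointer field this would have needed
	// cluster.Spec.Networking = &NetworkingSpec{} first.
	cluster.Spec.Networking.Flannel = &FlannelNetworkingSpec{Backend: "vxlan"}
	fmt.Println(cluster.Spec.Networking.Flannel.Backend)
}
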
@ -437,8 +436,10 @@ func Test_Validate_AdditionalPolicies(t *testing.T) {
clusterSpec := &kops.ClusterSpec{
KubernetesVersion: "1.17.0",
AdditionalPolicies: &g.Input,
Subnets: []kops.ClusterSubnetSpec{
{Name: "subnet1", Type: kops.SubnetTypePublic},
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{Name: "subnet1", Type: kops.SubnetTypePublic},
},
},
EtcdClusters: []kops.EtcdClusterSpec{
{
@ -640,7 +641,9 @@ func Test_Validate_Calico(t *testing.T) {
Description: "encapsulation none with IPv4",
Input: caliInput{
Cluster: &kops.ClusterSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
Networking: kops.NetworkingSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
},
},
Calico: &kops.CalicoNetworkingSpec{
EncapsulationMode: "none",
@ -652,7 +655,9 @@ func Test_Validate_Calico(t *testing.T) {
Description: "encapsulation mode IPIP for IPv6",
Input: caliInput{
Cluster: &kops.ClusterSpec{
NonMasqueradeCIDR: "::/0",
Networking: kops.NetworkingSpec{
NonMasqueradeCIDR: "::/0",
},
},
Calico: &kops.CalicoNetworkingSpec{
EncapsulationMode: "ipip",
@ -664,7 +669,9 @@ func Test_Validate_Calico(t *testing.T) {
Description: "encapsulation mode VXLAN for IPv6",
Input: caliInput{
Cluster: &kops.ClusterSpec{
NonMasqueradeCIDR: "::/0",
Networking: kops.NetworkingSpec{
NonMasqueradeCIDR: "::/0",
},
},
Calico: &kops.CalicoNetworkingSpec{
EncapsulationMode: "vxlan",
@ -711,7 +718,9 @@ func Test_Validate_Calico(t *testing.T) {
Description: "Calico IPIP encapsulation mode (explicit) with IPIP IPPool mode (always)",
Input: caliInput{
Cluster: &kops.ClusterSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
Networking: kops.NetworkingSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
},
},
Calico: &kops.CalicoNetworkingSpec{
EncapsulationMode: "ipip",
@ -723,7 +732,9 @@ func Test_Validate_Calico(t *testing.T) {
Description: "Calico IPIP encapsulation mode (explicit) with IPIP IPPool mode (cross-subnet)",
Input: caliInput{
Cluster: &kops.ClusterSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
Networking: kops.NetworkingSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
},
},
Calico: &kops.CalicoNetworkingSpec{
EncapsulationMode: "ipip",
@ -735,7 +746,9 @@ func Test_Validate_Calico(t *testing.T) {
Description: "Calico IPIP encapsulation mode (explicit) with IPIP IPPool mode (never)",
Input: caliInput{
Cluster: &kops.ClusterSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
Networking: kops.NetworkingSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
},
},
Calico: &kops.CalicoNetworkingSpec{
EncapsulationMode: "ipip",
@ -747,7 +760,9 @@ func Test_Validate_Calico(t *testing.T) {
Description: "Calico VXLAN encapsulation mode with IPIP IPPool mode",
Input: caliInput{
Cluster: &kops.ClusterSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
Networking: kops.NetworkingSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
},
},
Calico: &kops.CalicoNetworkingSpec{
EncapsulationMode: "vxlan",
@ -760,7 +775,9 @@ func Test_Validate_Calico(t *testing.T) {
Description: "Calico VXLAN encapsulation mode with IPIP IPPool mode (always)",
Input: caliInput{
Cluster: &kops.ClusterSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
Networking: kops.NetworkingSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
},
},
Calico: &kops.CalicoNetworkingSpec{
EncapsulationMode: "vxlan",
@ -773,7 +790,9 @@ func Test_Validate_Calico(t *testing.T) {
Description: "Calico VXLAN encapsulation mode with IPIP IPPool mode (cross-subnet)",
Input: caliInput{
Cluster: &kops.ClusterSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
Networking: kops.NetworkingSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
},
},
Calico: &kops.CalicoNetworkingSpec{
EncapsulationMode: "vxlan",
@ -786,7 +805,9 @@ func Test_Validate_Calico(t *testing.T) {
Description: "Calico VXLAN encapsulation mode with IPIP IPPool mode (never)",
Input: caliInput{
Cluster: &kops.ClusterSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
Networking: kops.NetworkingSpec{
NonMasqueradeCIDR: "100.64.0.0/10",
},
},
Calico: &kops.CalicoNetworkingSpec{
EncapsulationMode: "vxlan",
@ -798,7 +819,9 @@ func Test_Validate_Calico(t *testing.T) {
Description: "Calico IPv6 without encapsulation",
Input: caliInput{
Cluster: &kops.ClusterSpec{
NonMasqueradeCIDR: "::/0",
Networking: kops.NetworkingSpec{
NonMasqueradeCIDR: "::/0",
},
},
Calico: &kops.CalicoNetworkingSpec{
EncapsulationMode: "none",
@ -938,7 +961,7 @@ func Test_Validate_Cilium(t *testing.T) {
},
}
for _, g := range grid {
g.Spec.Networking = &kops.NetworkingSpec{
g.Spec.Networking = kops.NetworkingSpec{
Cilium: &g.Cilium,
}
if g.Spec.KubernetesVersion == "" {
@ -1142,7 +1165,7 @@ func Test_Validate_NodeLocalDNS(t *testing.T) {
Enabled: fi.PtrTo(true),
},
},
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{},
},
},
@ -1163,7 +1186,7 @@ func Test_Validate_NodeLocalDNS(t *testing.T) {
LocalIP: "169.254.20.10",
},
},
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{},
},
},

View File

@ -1115,23 +1115,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(GossipConfig)
(*in).DeepCopyInto(*out)
}
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]ClusterSubnetSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AdditionalNetworkCIDRs != nil {
in, out := &in.AdditionalNetworkCIDRs, &out.AdditionalNetworkCIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(TopologySpec)
(*in).DeepCopyInto(*out)
}
if in.DNSControllerGossipConfig != nil {
in, out := &in.DNSControllerGossipConfig, &out.DNSControllerGossipConfig
*out = new(DNSControllerGossipConfig)
@ -1147,21 +1130,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(EgressProxySpec)
**out = **in
}
if in.SSHKeyName != nil {
in, out := &in.SSHKeyName, &out.SSHKeyName
*out = new(string)
**out = **in
}
if in.IsolateMasters != nil {
in, out := &in.IsolateMasters, &out.IsolateMasters
*out = new(bool)
**out = **in
}
if in.UpdatePolicy != nil {
in, out := &in.UpdatePolicy, &out.UpdatePolicy
*out = new(string)
@ -1301,11 +1274,7 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(AWSLoadBalancerControllerConfig)
(*in).DeepCopyInto(*out)
}
if in.Networking != nil {
in, out := &in.Networking, &out.Networking
*out = new(NetworkingSpec)
(*in).DeepCopyInto(*out)
}
in.Networking.DeepCopyInto(&out.Networking)
in.API.DeepCopyInto(&out.API)
if in.Authentication != nil {
in, out := &in.Authentication, &out.Authentication
@ -1351,11 +1320,6 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(bool)
**out = **in
}
if in.TagSubnets != nil {
in, out := &in.TagSubnets, &out.TagSubnets
*out = new(bool)
**out = **in
}
if in.Target != nil {
in, out := &in.Target, &out.Target
*out = new(TargetSpec)
@ -4456,6 +4420,38 @@ func (in *NTPConfig) DeepCopy() *NTPConfig {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkingSpec) DeepCopyInto(out *NetworkingSpec) {
*out = *in
if in.AdditionalNetworkCIDRs != nil {
in, out := &in.AdditionalNetworkCIDRs, &out.AdditionalNetworkCIDRs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]ClusterSubnetSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.TagSubnets != nil {
in, out := &in.TagSubnets, &out.TagSubnets
*out = new(bool)
**out = **in
}
if in.Topology != nil {
in, out := &in.Topology, &out.Topology
*out = new(TopologySpec)
(*in).DeepCopyInto(*out)
}
if in.EgressProxy != nil {
in, out := &in.EgressProxy, &out.EgressProxy
*out = new(EgressProxySpec)
**out = **in
}
if in.IsolateControlPlane != nil {
in, out := &in.IsolateControlPlane, &out.IsolateControlPlane
*out = new(bool)
**out = **in
}
if in.Classic != nil {
in, out := &in.Classic, &out.Classic
*out = new(ClassicNetworkingSpec)

View File

@ -173,7 +173,7 @@ func NewConfig(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) (*Confi
config.UpdatePolicy = kops.UpdatePolicyAutomatic
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.AmazonVPC != nil {
if cluster.Spec.Networking.AmazonVPC != nil {
config.DefaultMachineType = fi.PtrTo(strings.Split(instanceGroup.Spec.MachineType, ",")[0])
}

View File

@ -195,7 +195,7 @@ func TestSetClusterFields(t *testing.T) {
Input: kops.Cluster{},
Output: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
IPAM: "on",
},
@ -210,7 +210,7 @@ func TestSetClusterFields(t *testing.T) {
Input: kops.Cluster{},
Output: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
EnableHostReachableServices: true,
},
@ -225,7 +225,7 @@ func TestSetClusterFields(t *testing.T) {
Input: kops.Cluster{},
Output: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
EnableNodePort: true,
},
@ -240,7 +240,7 @@ func TestSetClusterFields(t *testing.T) {
Input: kops.Cluster{},
Output: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
Masquerade: fi.PtrTo(false),
},
@ -268,7 +268,7 @@ func TestSetClusterFields(t *testing.T) {
Input: kops.Cluster{},
Output: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
AgentPrometheusPort: 1234,
},
@ -316,7 +316,7 @@ func TestSetCiliumFields(t *testing.T) {
KubeProxy: &kops.KubeProxyConfig{
Enabled: fi.PtrTo(false),
},
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
IPAM: "eni",
EnableNodePort: true,

View File

@ -243,7 +243,7 @@ func TestUnsetClusterFields(t *testing.T) {
},
Input: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
IPAM: "on",
},
@ -252,7 +252,7 @@ func TestUnsetClusterFields(t *testing.T) {
},
Output: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{},
},
},
@ -264,7 +264,7 @@ func TestUnsetClusterFields(t *testing.T) {
},
Input: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
EnableHostReachableServices: true,
},
@ -273,7 +273,7 @@ func TestUnsetClusterFields(t *testing.T) {
},
Output: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{},
},
},
@ -285,7 +285,7 @@ func TestUnsetClusterFields(t *testing.T) {
},
Input: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
EnableNodePort: true,
},
@ -294,7 +294,7 @@ func TestUnsetClusterFields(t *testing.T) {
},
Output: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{},
},
},
@ -306,7 +306,7 @@ func TestUnsetClusterFields(t *testing.T) {
},
Input: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
Masquerade: fi.PtrTo(false),
},
@ -315,7 +315,7 @@ func TestUnsetClusterFields(t *testing.T) {
},
Output: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{},
},
},
@ -344,7 +344,7 @@ func TestUnsetClusterFields(t *testing.T) {
},
Input: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
AgentPrometheusPort: 1234,
},
@ -353,7 +353,7 @@ func TestUnsetClusterFields(t *testing.T) {
},
Output: kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{},
},
},
@ -396,7 +396,7 @@ func TestUnsetCiliumFields(t *testing.T) {
KubeProxy: &kops.KubeProxyConfig{
Enabled: fi.PtrTo(false),
},
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{
IPAM: "eni",
EnableNodePort: true,
@ -408,7 +408,7 @@ func TestUnsetCiliumFields(t *testing.T) {
Output: kops.Cluster{
Spec: kops.ClusterSpec{
KubeProxy: &kops.KubeProxyConfig{},
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Cilium: &kops.CiliumNetworkingSpec{},
},
},

View File

@ -25,10 +25,12 @@ import (
func TestRenderInstanceGroupZones(t *testing.T) {
cluster := &kops.Cluster{
Spec: kops.ClusterSpec{
Subnets: []kops.ClusterSubnetSpec{
{Name: "subnet1", Zone: "subnet1zone"},
{Name: "subnet2", Zone: "subnet2zone"},
{Name: "subnet3", Zone: "subnet3zone"},
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{Name: "subnet1", Zone: "subnet1zone"},
{Name: "subnet2", Zone: "subnet2zone"},
{Name: "subnet3", Zone: "subnet3zone"},
},
},
},
}

View File

@ -50,8 +50,8 @@ func getTestSetupOS(t *testing.T) (*RollingUpdateCluster, *openstack.MockCloud)
inCluster.Spec.CloudProvider.Openstack = &kopsapi.OpenstackSpec{}
inCluster.Name = "test.k8s.local"
inCluster.Spec.Topology.ControlPlane = kopsapi.TopologyPrivate
inCluster.Spec.Topology.Nodes = kopsapi.TopologyPrivate
inCluster.Spec.Networking.Topology.ControlPlane = kopsapi.TopologyPrivate
inCluster.Spec.Networking.Topology.Nodes = kopsapi.TopologyPrivate
err := cloudup.PerformAssignments(inCluster, mockcloud)
if err != nil {
@ -190,12 +190,12 @@ func makeGroupOS(t *testing.T, groups map[string]*cloudinstances.CloudInstanceGr
func getGroupsAllNeedUpdateOS(t *testing.T, c *RollingUpdateCluster) (map[string]*cloudinstances.CloudInstanceGroup, *kopsapi.InstanceGroupList) {
groups := make(map[string]*cloudinstances.CloudInstanceGroup)
igList := &kopsapi.InstanceGroupList{}
makeGroupOS(t, groups, igList, c, c.Cluster.Spec.Subnets[0].Name, kopsapi.InstanceGroupRoleNode, 3, 3)
makeGroupOS(t, groups, igList, c, c.Cluster.Spec.Subnets[1].Name, kopsapi.InstanceGroupRoleNode, 3, 3)
makeGroupOS(t, groups, igList, c, c.Cluster.Spec.Subnets[0].Name, kopsapi.InstanceGroupRoleControlPlane, 1, 1)
makeGroupOS(t, groups, igList, c, c.Cluster.Spec.Subnets[1].Name, kopsapi.InstanceGroupRoleControlPlane, 1, 1)
makeGroupOS(t, groups, igList, c, c.Cluster.Spec.Subnets[2].Name, kopsapi.InstanceGroupRoleControlPlane, 1, 1)
makeGroupOS(t, groups, igList, c, c.Cluster.Spec.Subnets[0].Name, kopsapi.InstanceGroupRoleBastion, 1, 1)
makeGroupOS(t, groups, igList, c, c.Cluster.Spec.Networking.Subnets[0].Name, kopsapi.InstanceGroupRoleNode, 3, 3)
makeGroupOS(t, groups, igList, c, c.Cluster.Spec.Networking.Subnets[1].Name, kopsapi.InstanceGroupRoleNode, 3, 3)
makeGroupOS(t, groups, igList, c, c.Cluster.Spec.Networking.Subnets[0].Name, kopsapi.InstanceGroupRoleControlPlane, 1, 1)
makeGroupOS(t, groups, igList, c, c.Cluster.Spec.Networking.Subnets[1].Name, kopsapi.InstanceGroupRoleControlPlane, 1, 1)
makeGroupOS(t, groups, igList, c, c.Cluster.Spec.Networking.Subnets[2].Name, kopsapi.InstanceGroupRoleControlPlane, 1, 1)
makeGroupOS(t, groups, igList, c, c.Cluster.Spec.Networking.Subnets[0].Name, kopsapi.InstanceGroupRoleBastion, 1, 1)
return groups, igList
}

View File

@ -64,7 +64,7 @@ func BuildKubecfg(cluster *kops.Cluster, keyStore fi.Keystore, secretStore fi.Se
// careful that we aren't accessing the API over DirectConnect (or a VPN).
// We differentiate using the heuristic that if we have an internal ELB
// we are likely connected directly to the VPC.
privateDNS := cluster.Spec.Topology != nil && cluster.Spec.Topology.DNS == kops.DNSTypePrivate
privateDNS := cluster.Spec.Networking.Topology != nil && cluster.Spec.Networking.Topology.DNS == kops.DNSTypePrivate
internalELB := cluster.Spec.API.LoadBalancer != nil && cluster.Spec.API.LoadBalancer.Type == kops.LoadBalancerTypeInternal
if privateDNS && !internalELB {
useELBName = true

View File

@ -70,7 +70,7 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
if len(lbSpec.Subnets) != 0 {
// Subnets have been explicitly set
for _, subnet := range lbSpec.Subnets {
for _, clusterSubnet := range b.Cluster.Spec.Subnets {
for _, clusterSubnet := range b.Cluster.Spec.Networking.Subnets {
if subnet.Name == clusterSubnet.Name {
elbSubnet := b.LinkToSubnet(&clusterSubnet)
elbSubnets = append(elbSubnets, elbSubnet)
@ -92,8 +92,8 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
} else {
// Compute the subnets - only one per zone, and then break ties based on chooseBestSubnetForELB
subnetsByZone := make(map[string][]*kops.ClusterSubnetSpec)
for i := range b.Cluster.Spec.Subnets {
subnet := &b.Cluster.Spec.Subnets[i]
for i := range b.Cluster.Spec.Networking.Subnets {
subnet := &b.Cluster.Spec.Networking.Subnets[i]
switch subnet.Type {
case kops.SubnetTypePublic, kops.SubnetTypeUtility:
@ -542,9 +542,9 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
Protocol: fi.PtrTo("tcp"),
SecurityGroup: masterGroup.Task,
ToPort: fi.PtrTo(int64(443)),
CIDR: fi.PtrTo(b.Cluster.Spec.NetworkCIDR),
CIDR: fi.PtrTo(b.Cluster.Spec.Networking.NetworkCIDR),
})
for _, cidr := range b.Cluster.Spec.AdditionalNetworkCIDRs {
for _, cidr := range b.Cluster.Spec.Networking.AdditionalNetworkCIDRs {
c.AddTask(&awstasks.SecurityGroupRule{
Name: fi.PtrTo(fmt.Sprintf("https-lb-to-master%s-%s", suffix, cidr)),
Lifecycle: b.SecurityLifecycle,
@ -569,9 +569,9 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
Protocol: fi.PtrTo("tcp"),
SecurityGroup: masterGroup.Task,
ToPort: fi.PtrTo(int64(wellknownports.KopsControllerPort)),
CIDR: fi.PtrTo(b.Cluster.Spec.NetworkCIDR),
CIDR: fi.PtrTo(b.Cluster.Spec.Networking.NetworkCIDR),
})
for _, cidr := range b.Cluster.Spec.AdditionalNetworkCIDRs {
for _, cidr := range b.Cluster.Spec.Networking.AdditionalNetworkCIDRs {
c.AddTask(&awstasks.SecurityGroupRule{
Name: fi.PtrTo(fmt.Sprintf("kops-controller-lb-to-master%s-%s", suffix, cidr)),
Lifecycle: b.SecurityLifecycle,

View File

@ -222,7 +222,7 @@ func (b *AutoscalingGroupModelBuilder) buildLaunchTemplateTask(c *fi.ModelBuilde
}
// @step: add an IPv6 address
for _, clusterSubnet := range b.Cluster.Spec.Subnets {
for _, clusterSubnet := range b.Cluster.Spec.Networking.Subnets {
for _, igSubnet := range ig.Spec.Subnets {
if clusterSubnet.Name != igSubnet {
continue

View File

@ -74,7 +74,7 @@ func TestRootVolumeOptimizationFlag(t *testing.T) {
CloudProvider: kops.CloudProviderSpec{
AWS: &kops.AWSSpec{},
},
Networking: &kops.NetworkingSpec{},
Networking: kops.NetworkingSpec{},
KubernetesVersion: "1.20.0",
},
},
@ -135,7 +135,7 @@ func TestAPIServerAdditionalSecurityGroupsWithNLB(t *testing.T) {
igs := make([]*kops.InstanceGroup, _roleCount)
// NB: (*AutoscalingGroupModelBuilder).buildLaunchTemplateTask expects there to be at least
// one subnet specified in each InstanceGroup.
subnets := []string{cluster.Spec.Subnets[0].Name}
subnets := []string{cluster.Spec.Networking.Subnets[0].Name}
igs[roleBastion] = &kops.InstanceGroup{
ObjectMeta: v1.ObjectMeta{
Name: "bastion1",

View File

@ -101,19 +101,19 @@ func (b *BastionModelBuilder) Build(c *fi.ModelBuilderContext) error {
var bastionLoadBalancerType kops.LoadBalancerType
{
// Check if we requested a public or internal NLB
if b.Cluster.Spec.Topology != nil && b.Cluster.Spec.Topology.Bastion != nil && b.Cluster.Spec.Topology.Bastion.LoadBalancer != nil {
if b.Cluster.Spec.Topology.Bastion.LoadBalancer.Type != "" {
switch b.Cluster.Spec.Topology.Bastion.LoadBalancer.Type {
if b.Cluster.Spec.Networking.Topology != nil && b.Cluster.Spec.Networking.Topology.Bastion != nil && b.Cluster.Spec.Networking.Topology.Bastion.LoadBalancer != nil {
if b.Cluster.Spec.Networking.Topology.Bastion.LoadBalancer.Type != "" {
switch b.Cluster.Spec.Networking.Topology.Bastion.LoadBalancer.Type {
case kops.LoadBalancerTypeInternal:
bastionLoadBalancerType = "Internal"
case kops.LoadBalancerTypePublic:
bastionLoadBalancerType = "Public"
default:
return fmt.Errorf("unhandled bastion LoadBalancer type %q", b.Cluster.Spec.Topology.Bastion.LoadBalancer.Type)
return fmt.Errorf("unhandled bastion LoadBalancer type %q", b.Cluster.Spec.Networking.Topology.Bastion.LoadBalancer.Type)
}
} else {
// Default to Public
b.Cluster.Spec.Topology.Bastion.LoadBalancer.Type = kops.LoadBalancerTypePublic
b.Cluster.Spec.Networking.Topology.Bastion.LoadBalancer.Type = kops.LoadBalancerTypePublic
bastionLoadBalancerType = "Public"
}
} else {
@ -159,8 +159,8 @@ func (b *BastionModelBuilder) Build(c *fi.ModelBuilderContext) error {
{
// Compute the subnets - only one per zone, and then break ties based on chooseBestSubnetForNLB
subnetsByZone := make(map[string][]*kops.ClusterSubnetSpec)
for i := range b.Cluster.Spec.Subnets {
subnet := &b.Cluster.Spec.Subnets[i]
for i := range b.Cluster.Spec.Networking.Subnets {
subnet := &b.Cluster.Spec.Networking.Subnets[i]
switch subnet.Type {
case kops.SubnetTypePublic, kops.SubnetTypeUtility:
@ -311,8 +311,8 @@ func (b *BastionModelBuilder) Build(c *fi.ModelBuilderContext) error {
}
publicName := ""
if b.Cluster.Spec.Topology != nil && b.Cluster.Spec.Topology.Bastion != nil {
publicName = b.Cluster.Spec.Topology.Bastion.PublicName
if b.Cluster.Spec.Networking.Topology != nil && b.Cluster.Spec.Networking.Topology.Bastion != nil {
publicName = b.Cluster.Spec.Networking.Topology.Bastion.PublicName
}
if publicName != "" {
// Here we implement the bastion CNAME logic
@ -347,7 +347,7 @@ func (b *BastionModelBuilder) Build(c *fi.ModelBuilderContext) error {
func useIPv6ForBastion(b *BastionModelBuilder) bool {
for _, ig := range b.InstanceGroups {
for _, igSubnetName := range ig.Spec.Subnets {
for _, clusterSubnet := range b.Cluster.Spec.Subnets {
for _, clusterSubnet := range b.Cluster.Spec.Networking.Subnets {
if igSubnetName != clusterSubnet.Name {
continue
}
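
Aside: the defaulting decision above is easier to read in isolation. A hedged sketch of the same logic with stand-in types, not the kops API. (The real builder also writes the default back into the spec; this sketch only computes it.)

    package main

    import "fmt"

    type LoadBalancerType string

    const (
        LoadBalancerTypePublic   LoadBalancerType = "Public"
        LoadBalancerTypeInternal LoadBalancerType = "Internal"
    )

    // bastionLBType mirrors the switch above: an explicit type wins,
    // an empty type defaults to Public, anything else is an error.
    func bastionLBType(configured LoadBalancerType) (LoadBalancerType, error) {
        switch configured {
        case LoadBalancerTypeInternal, LoadBalancerTypePublic:
            return configured, nil
        case "":
            return LoadBalancerTypePublic, nil
        default:
            return "", fmt.Errorf("unhandled bastion LoadBalancer type %q", configured)
        }
    }

    func main() {
        lbType, err := bastionLBType("")
        fmt.Println(lbType, err) // Public <nil>
    }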

View File

@ -37,8 +37,8 @@ func (b *AWSModelContext) LinkToSubnet(z *kops.ClusterSubnetSpec) *awstasks.Subn
func (b *AWSModelContext) LinkToPublicSubnetInZone(zoneName string) (*awstasks.Subnet, error) {
var matches []*kops.ClusterSubnetSpec
for i := range b.Cluster.Spec.Subnets {
z := &b.Cluster.Spec.Subnets[i]
for i := range b.Cluster.Spec.Networking.Subnets {
z := &b.Cluster.Spec.Networking.Subnets[i]
if z.Zone != zoneName {
continue
}
@ -60,8 +60,8 @@ func (b *AWSModelContext) LinkToPublicSubnetInZone(zoneName string) (*awstasks.S
func (b *AWSModelContext) LinkToUtilitySubnetInZone(zoneName string) (*awstasks.Subnet, error) {
var matches []*kops.ClusterSubnetSpec
for i := range b.Cluster.Spec.Subnets {
s := &b.Cluster.Spec.Subnets[i]
for i := range b.Cluster.Spec.Networking.Subnets {
s := &b.Cluster.Spec.Networking.Subnets[i]
if s.Zone != zoneName {
continue
}
@ -82,8 +82,8 @@ func (b *AWSModelContext) LinkToUtilitySubnetInZone(zoneName string) (*awstasks.
}
func (b *AWSModelContext) LinkToPrivateSubnetsInZone(zoneName string) ([]*awstasks.Subnet, error) {
var matches []*kops.ClusterSubnetSpec
for i := range b.Cluster.Spec.Subnets {
s := &b.Cluster.Spec.Subnets[i]
for i := range b.Cluster.Spec.Networking.Subnets {
s := &b.Cluster.Spec.Networking.Subnets[i]
if s.Zone != zoneName {
continue
}
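
Aside: the same lookup shape repeats here for public, utility, and private subnets. A generic sketch (hypothetical helper, stand-in types):

    package main

    import "fmt"

    type SubnetType string

    type ClusterSubnetSpec struct {
        Name string
        Zone string
        Type SubnetType
    }

    // subnetsInZone collects subnets of the given types in one zone.
    // Iterating by index, as the code above does, yields stable pointers
    // into the backing slice instead of pointers to a loop variable.
    func subnetsInZone(subnets []ClusterSubnetSpec, zone string, types ...SubnetType) []*ClusterSubnetSpec {
        var matches []*ClusterSubnetSpec
        for i := range subnets {
            s := &subnets[i]
            if s.Zone != zone {
                continue
            }
            for _, t := range types {
                if s.Type == t {
                    matches = append(matches, s)
                    break
                }
            }
        }
        return matches
    }

    func main() {
        subnets := []ClusterSubnetSpec{
            {Name: "us-test-1a", Zone: "us-test-1a", Type: "Public"},
            {Name: "private-us-test-1a", Zone: "us-test-1a", Type: "Private"},
        }
        for _, s := range subnetsInZone(subnets, "us-test-1a", "Private") {
            fmt.Println(s.Name)
        }
    }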

View File

@ -44,7 +44,7 @@ func (b *DNSModelBuilder) ensureDNSZone(c *fi.ModelBuilderContext) error {
Lifecycle: b.Lifecycle,
}
topology := b.Cluster.Spec.Topology
topology := b.Cluster.Spec.Networking.Topology
if topology != nil {
switch topology.DNS {
case kops.DNSTypePublic:

View File

@ -213,7 +213,7 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
}
// For AmazonVPC networking, pods running on nodes may need to reach pods on the master(s)
if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.AmazonVPC != nil {
if b.Cluster.Spec.Networking.AmazonVPC != nil {
// Nodes can talk to masters
for _, src := range nodeGroups {
for _, dest := range masterGroups {

View File

@ -79,15 +79,15 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
// Used only for Terraform rendering.
// Direct rendering is handled via the VPCAmazonIPv6CIDRBlock task
t.AmazonIPv6 = fi.PtrTo(true)
t.AssociateExtraCIDRBlocks = b.Cluster.Spec.AdditionalNetworkCIDRs
t.AssociateExtraCIDRBlocks = b.Cluster.Spec.Networking.AdditionalNetworkCIDRs
}
if b.Cluster.Spec.NetworkID != "" {
t.ID = fi.PtrTo(b.Cluster.Spec.NetworkID)
if b.Cluster.Spec.Networking.NetworkID != "" {
t.ID = fi.PtrTo(b.Cluster.Spec.Networking.NetworkID)
}
if b.Cluster.Spec.NetworkCIDR != "" {
t.CIDR = fi.PtrTo(b.Cluster.Spec.NetworkCIDR)
if b.Cluster.Spec.Networking.NetworkCIDR != "" {
t.CIDR = fi.PtrTo(b.Cluster.Spec.Networking.NetworkCIDR)
}
c.AddTask(t)
@ -103,7 +103,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
})
// Associate additional CIDR blocks with the VPC
for _, cidr := range b.Cluster.Spec.AdditionalNetworkCIDRs {
for _, cidr := range b.Cluster.Spec.Networking.AdditionalNetworkCIDRs {
c.AddTask(&awstasks.VPCCIDRBlock{
Name: fi.PtrTo(cidr),
Lifecycle: b.Lifecycle,
@ -143,13 +143,13 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
allPrivateSubnetsUnmanaged := true
allSubnetsShared := true
allSubnetsSharedInZone := make(map[string]bool)
for i := range b.Cluster.Spec.Subnets {
subnetSpec := &b.Cluster.Spec.Subnets[i]
for i := range b.Cluster.Spec.Networking.Subnets {
subnetSpec := &b.Cluster.Spec.Networking.Subnets[i]
allSubnetsSharedInZone[subnetSpec.Zone] = true
}
for i := range b.Cluster.Spec.Subnets {
subnetSpec := &b.Cluster.Spec.Subnets[i]
for i := range b.Cluster.Spec.Networking.Subnets {
subnetSpec := &b.Cluster.Spec.Networking.Subnets[i]
sharedSubnet := subnetSpec.ID != ""
if !sharedSubnet {
allSubnetsShared = false
@ -217,20 +217,20 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
infoByZone := make(map[string]*zoneInfo)
haveDualStack := map[string]bool{}
for _, subnetSpec := range b.Cluster.Spec.Subnets {
for _, subnetSpec := range b.Cluster.Spec.Networking.Subnets {
if subnetSpec.Type == kops.SubnetTypeDualStack {
haveDualStack[subnetSpec.Zone] = true
}
}
for i := range b.Cluster.Spec.Subnets {
subnetSpec := &b.Cluster.Spec.Subnets[i]
for i := range b.Cluster.Spec.Networking.Subnets {
subnetSpec := &b.Cluster.Spec.Networking.Subnets[i]
sharedSubnet := subnetSpec.ID != ""
subnetName := subnetSpec.Name + "." + b.ClusterName()
tags := map[string]string{}
// Apply tags so that Kubernetes knows which subnets should be used for internal/external ELBs
if b.Cluster.Spec.TagSubnets == nil || *b.Cluster.Spec.TagSubnets {
if b.Cluster.Spec.Networking.TagSubnets == nil || *b.Cluster.Spec.Networking.TagSubnets {
klog.V(2).Infof("applying subnet tags")
tags = b.CloudTags(subnetName, sharedSubnet)
tags["SubnetType"] = string(subnetSpec.Type)
@ -241,7 +241,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
// AWS ALB controller won't provision any internal ELBs unless this tag is set.
// So we add this to public subnets as well if we do not expect any private subnets.
if b.Cluster.Spec.Topology.Nodes == kops.TopologyPublic {
if b.Cluster.Spec.Networking.Topology.Nodes == kops.TopologyPublic {
tags[aws.TagNameSubnetInternalELB] = "1"
}
@ -579,7 +579,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
return err
}
for _, subnetSpec := range b.Cluster.Spec.Subnets {
for _, subnetSpec := range b.Cluster.Spec.Networking.Subnets {
for _, subnet := range subnets {
if strings.HasPrefix(*subnet.Name, subnetSpec.Name) {
err := addAdditionalRoutes(subnetSpec.AdditionalRoutes, subnetSpec.Name, rt, b.Lifecycle, c)
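
Aside: TagSubnets shows the kops convention for optional booleans, where a nil *bool means "use the default", here true. A small sketch of the read side (fi.PtrTo in the real tree plays the role of ptrTo):

    package main

    import "fmt"

    // ptrTo mirrors fi.PtrTo: return the address of a copy of v.
    func ptrTo[T any](v T) *T { return &v }

    // tagSubnetsEnabled reproduces the check above:
    // unset (nil) means true; an explicit value wins.
    func tagSubnetsEnabled(tagSubnets *bool) bool {
        return tagSubnets == nil || *tagSubnets
    }

    func main() {
        fmt.Println(tagSubnetsEnabled(nil))          // true (default)
        fmt.Println(tagSubnetsEnabled(ptrTo(false))) // false, e.g. --disable-subnet-tags
    }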

View File

@ -668,8 +668,8 @@ func (b *SpotInstanceGroupModelBuilder) buildSubnets(ig *kops.InstanceGroup) ([]
func (b *SpotInstanceGroupModelBuilder) buildPublicIPOpts(ig *kops.InstanceGroup) (*bool, error) {
subnetMap := make(map[string]*kops.ClusterSubnetSpec)
for i := range b.Cluster.Spec.Subnets {
subnet := &b.Cluster.Spec.Subnets[i]
for i := range b.Cluster.Spec.Networking.Subnets {
subnet := &b.Cluster.Spec.Networking.Subnets[i]
subnetMap[subnet.Name] = subnet
}

View File

@ -42,7 +42,7 @@ func TestSubnetForLoadbalancer(t *testing.T) {
b := APILoadBalancerModelBuilder{
AzureModelContext: newTestAzureModelContext(),
}
b.Cluster.Spec.Subnets = []kops.ClusterSubnetSpec{
b.Cluster.Spec.Networking.Subnets = []kops.ClusterSubnetSpec{
{
Name: "master",
Type: kops.SubnetTypePrivate,

View File

@ -40,7 +40,7 @@ func (c *AzureModelContext) LinkToVirtualNetwork() *azuretasks.VirtualNetwork {
// NameForVirtualNetwork returns the name of the Azure Virtual Network object the cluster is located in.
func (c *AzureModelContext) NameForVirtualNetwork() string {
networkName := c.Cluster.Spec.NetworkID
networkName := c.Cluster.Spec.Networking.NetworkID
if networkName == "" {
networkName = c.ClusterName()
}

View File

@ -35,13 +35,13 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
Name: fi.PtrTo(b.NameForVirtualNetwork()),
Lifecycle: b.Lifecycle,
ResourceGroup: b.LinkToResourceGroup(),
CIDR: fi.PtrTo(b.Cluster.Spec.NetworkCIDR),
CIDR: fi.PtrTo(b.Cluster.Spec.Networking.NetworkCIDR),
Tags: map[string]*string{},
Shared: fi.PtrTo(b.Cluster.SharedVPC()),
}
c.AddTask(networkTask)
for _, subnetSpec := range b.Cluster.Spec.Subnets {
for _, subnetSpec := range b.Cluster.Spec.Networking.Subnets {
subnetTask := &azuretasks.Subnet{
Name: fi.PtrTo(subnetSpec.Name),
Lifecycle: b.Lifecycle,

View File

@ -55,14 +55,15 @@ func newTestCluster() *kops.Cluster {
RouteTableName: "test-route-table",
},
},
Networking: &kops.NetworkingSpec{},
NetworkID: "test-virtual-network",
NetworkCIDR: "10.0.0.0/8",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "test-subnet",
CIDR: "10.0.1.0/24",
Type: kops.SubnetTypePrivate,
Networking: kops.NetworkingSpec{
NetworkID: "test-virtual-network",
NetworkCIDR: "10.0.0.0/8",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "test-subnet",
CIDR: "10.0.1.0/24",
Type: kops.SubnetTypePrivate,
},
},
},
},

View File

@ -37,7 +37,7 @@ func TestVMScaleSetModelBuilder_Build(t *testing.T) {
Lifecycle: fi.LifecycleSync,
Cluster: &kops.Cluster{
Spec: kops.ClusterSpec{
Networking: &kops.NetworkingSpec{},
Networking: kops.NetworkingSpec{},
},
},
},

View File

@ -370,7 +370,7 @@ func (b *BootstrapScript) Run(c *fi.Context) error {
}
nodeupScript.ProxyEnv = func() (string, error) {
return b.createProxyEnv(c.Cluster.Spec.EgressProxy)
return b.createProxyEnv(c.Cluster.Spec.Networking.EgressProxy)
}
nodeupScript.ClusterSpec = func() (string, error) {

View File

@ -219,10 +219,6 @@ func makeTestCluster(hookSpecRoles []kops.InstanceGroupRole, fileAssetSpecRoles
AWS: &kops.AWSSpec{},
},
KubernetesVersion: "1.20.0",
Subnets: []kops.ClusterSubnetSpec{
{Name: "test", Zone: "eu-west-1a"},
},
NonMasqueradeCIDR: "10.100.0.0/16",
EtcdClusters: []kops.EtcdClusterSpec{
{
Name: "main",
@ -246,7 +242,6 @@ func makeTestCluster(hookSpecRoles []kops.InstanceGroupRole, fileAssetSpecRoles
Image: "gcr.io/etcd-development/etcd:v3.1.11",
},
},
NetworkCIDR: "10.79.0.0/24",
CloudConfig: &kops.CloudConfiguration{
NodeTags: fi.PtrTo("something"),
},
@ -281,13 +276,19 @@ func makeTestCluster(hookSpecRoles []kops.InstanceGroupRole, fileAssetSpecRoles
ControlPlaneKubelet: &kops.KubeletConfigSpec{
KubeconfigPath: "/etc/kubernetes/config.cfg",
},
EgressProxy: &kops.EgressProxySpec{
HTTPProxy: kops.HTTPProxy{
Host: "example.com",
Port: 80,
Networking: kops.NetworkingSpec{
NetworkCIDR: "10.79.0.0/24",
Subnets: []kops.ClusterSubnetSpec{
{Name: "test", Zone: "eu-west-1a"},
},
EgressProxy: &kops.EgressProxySpec{
HTTPProxy: kops.HTTPProxy{
Host: "example.com",
Port: 80,
},
},
NonMasqueradeCIDR: "10.100.0.0/16",
},
Networking: &kops.NetworkingSpec{},
Hooks: []kops.HookSpec{
{
ExecContainer: &kops.ExecContainerAction{

View File

@ -130,7 +130,7 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error {
}
c.AllowPrivileged = fi.PtrTo(true)
c.ServiceClusterIPRange = clusterSpec.ServiceClusterIPRange
c.ServiceClusterIPRange = clusterSpec.Networking.ServiceClusterIPRange
c.EtcdServers = nil
c.EtcdServersOverrides = nil

View File

@ -55,23 +55,21 @@ func (b *AWSCloudControllerManagerOptionsBuilder) BuildOptions(o interface{}) er
eccm.ClusterName = b.ClusterName
eccm.ClusterCIDR = clusterSpec.NonMasqueradeCIDR
eccm.ClusterCIDR = clusterSpec.Networking.NonMasqueradeCIDR
eccm.AllocateNodeCIDRs = fi.PtrTo(true)
eccm.ConfigureCloudRoutes = fi.PtrTo(false)
// TODO: we want to consolidate this with the logic from KCM
networking := clusterSpec.Networking
if networking == nil {
eccm.ConfigureCloudRoutes = fi.PtrTo(true)
} else if networking.Kubenet != nil {
networking := &clusterSpec.Networking
if networking.Kubenet != nil {
eccm.ConfigureCloudRoutes = fi.PtrTo(true)
} else if networking.GCE != nil {
eccm.ConfigureCloudRoutes = fi.PtrTo(false)
eccm.CIDRAllocatorType = fi.PtrTo("CloudAllocator")
if eccm.ClusterCIDR == "" {
eccm.ClusterCIDR = clusterSpec.PodCIDR
eccm.ClusterCIDR = clusterSpec.Networking.PodCIDR
}
} else if networking.External != nil {
eccm.ConfigureCloudRoutes = fi.PtrTo(false)

View File

@ -31,7 +31,7 @@ func buildContainerdCluster(version string) *kopsapi.Cluster {
AWS: &kopsapi.AWSSpec{},
},
KubernetesVersion: version,
Networking: &kopsapi.NetworkingSpec{
Networking: kopsapi.NetworkingSpec{
Kubenet: &kopsapi.KubenetNetworkingSpec{},
},
},

View File

@ -79,9 +79,9 @@ func UsesCNI(networking *kops.NetworkingSpec) bool {
}
func WellKnownServiceIP(clusterSpec *kops.ClusterSpec, id int) (net.IP, error) {
_, cidr, err := net.ParseCIDR(clusterSpec.ServiceClusterIPRange)
_, cidr, err := net.ParseCIDR(clusterSpec.Networking.ServiceClusterIPRange)
if err != nil {
return nil, fmt.Errorf("error parsing ServiceClusterIPRange %q: %v", clusterSpec.ServiceClusterIPRange, err)
return nil, fmt.Errorf("error parsing ServiceClusterIPRange %q: %v", clusterSpec.Networking.ServiceClusterIPRange, err)
}
ip4 := cidr.IP.To4()
@ -107,7 +107,7 @@ func WellKnownServiceIP(clusterSpec *kops.ClusterSpec, id int) (net.IP, error) {
return serviceIP, nil
}
return nil, fmt.Errorf("unexpected IP address type for ServiceClusterIPRange: %s", clusterSpec.ServiceClusterIPRange)
return nil, fmt.Errorf("unexpected IP address type for ServiceClusterIPRange: %s", clusterSpec.Networking.ServiceClusterIPRange)
}
func IsBaseURL(kubernetesVersion string) bool {
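
Aside: WellKnownServiceIP reserves fixed offsets inside the service CIDR (offset 10 is the usual kube-dns address). A simplified IPv4-only sketch of the arithmetic:

    package main

    import (
        "encoding/binary"
        "fmt"
        "net"
    )

    // serviceIP returns the id-th address in an IPv4 service CIDR,
    // a stripped-down version of the WellKnownServiceIP logic above.
    func serviceIP(serviceClusterIPRange string, id int) (net.IP, error) {
        _, cidr, err := net.ParseCIDR(serviceClusterIPRange)
        if err != nil {
            return nil, fmt.Errorf("error parsing ServiceClusterIPRange %q: %w", serviceClusterIPRange, err)
        }
        ip4 := cidr.IP.To4()
        if ip4 == nil {
            return nil, fmt.Errorf("this sketch handles only IPv4, got %q", serviceClusterIPRange)
        }
        out := make(net.IP, net.IPv4len)
        binary.BigEndian.PutUint32(out, binary.BigEndian.Uint32(ip4)+uint32(id))
        return out, nil
    }

    func main() {
        ip, _ := serviceIP("100.64.0.0/13", 10)
        fmt.Println(ip) // 100.64.0.10
    }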

View File

@ -55,7 +55,7 @@ func (b *GCPCloudControllerManagerOptionsBuilder) BuildOptions(options interface
ccmConfig.AllocateNodeCIDRs = fi.PtrTo(true)
ccmConfig.CIDRAllocatorType = fi.PtrTo("CloudAllocator")
if ccmConfig.ClusterCIDR == "" {
ccmConfig.ClusterCIDR = clusterSpec.PodCIDR
ccmConfig.ClusterCIDR = clusterSpec.Networking.PodCIDR
}
if ccmConfig.Image == "" {
// TODO: Implement CCM image publishing

View File

@ -48,7 +48,7 @@ func (b *HetznerCloudControllerManagerOptionsBuilder) BuildOptions(o interface{}
LeaderElect: fi.PtrTo(false),
}
eccm.ClusterCIDR = clusterSpec.NonMasqueradeCIDR
eccm.ClusterCIDR = clusterSpec.Networking.NonMasqueradeCIDR
eccm.AllocateNodeCIDRs = fi.PtrTo(true)
eccm.ConfigureCloudRoutes = fi.PtrTo(false)

View File

@ -131,7 +131,7 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error
kcm.AllocateNodeCIDRs = fi.PtrTo(!clusterSpec.IsKopsControllerIPAM())
if kcm.ClusterCIDR == "" && !clusterSpec.IsKopsControllerIPAM() {
kcm.ClusterCIDR = clusterSpec.PodCIDR
kcm.ClusterCIDR = clusterSpec.Networking.PodCIDR
}
if utils.IsIPv6CIDR(kcm.ClusterCIDR) {
@ -145,10 +145,8 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error
kcm.NodeCIDRMaskSize = fi.PtrTo(int32(clusterSize + nodeSize))
}
networking := clusterSpec.Networking
if networking == nil {
kcm.ConfigureCloudRoutes = fi.PtrTo(true)
} else if networking.Kubenet != nil {
networking := &clusterSpec.Networking
if networking.Kubenet != nil {
kcm.ConfigureCloudRoutes = fi.PtrTo(true)
} else if networking.GCE != nil {
kcm.ConfigureCloudRoutes = fi.PtrTo(false)

View File

@ -150,7 +150,7 @@ func Test_Build_KCM_Builder_CIDR_Mask_Size(t *testing.T) {
},
}
c.Spec.PodCIDR = tc.PodCIDR
c.Spec.Networking.PodCIDR = tc.PodCIDR
c.Spec.KubeControllerManager = &api.KubeControllerManagerConfig{
ClusterCIDR: tc.ClusterCIDR,
}

View File

@ -17,7 +17,6 @@ limitations under the License.
package components
import (
"fmt"
"strings"
"time"
@ -115,9 +114,9 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error {
clusterSpec.Kubelet.KubeconfigPath = kubeconfigPath
clusterSpec.ControlPlaneKubelet.KubeconfigPath = kubeconfigPath
// IsolateMasters enables the legacy behaviour, where master pods run on a separate network
// IsolateControlPlane enables the legacy behaviour, where master pods run on a separate network
// In newer versions of Kubernetes, most of that functionality has been removed.
if fi.ValueOf(clusterSpec.IsolateMasters) {
if fi.ValueOf(clusterSpec.Networking.IsolateControlPlane) {
clusterSpec.ControlPlaneKubelet.EnableDebuggingHandlers = fi.PtrTo(false)
clusterSpec.ControlPlaneKubelet.HairpinMode = "none"
}
@ -168,14 +167,11 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error {
}
if clusterSpec.ContainerRuntime == "docker" || clusterSpec.ContainerRuntime == "" {
networking := clusterSpec.Networking
if networking == nil {
return fmt.Errorf("no networking mode set")
}
networking := &clusterSpec.Networking
if UsesKubenet(networking) && b.IsKubernetesLT("1.24") {
clusterSpec.Kubelet.NetworkPluginName = fi.PtrTo("kubenet")
clusterSpec.Kubelet.NetworkPluginMTU = fi.PtrTo(int32(9001))
clusterSpec.Kubelet.NonMasqueradeCIDR = fi.PtrTo(clusterSpec.NonMasqueradeCIDR)
clusterSpec.Kubelet.NonMasqueradeCIDR = fi.PtrTo(networking.NonMasqueradeCIDR)
}
}
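
Aside: for kubenet on Kubernetes older than 1.24, the kubelet fields above are pinned to fixed values (9001 is the AWS jumbo-frame MTU). A sketch of that defaulting with stand-in types:

    package main

    import "fmt"

    type kubeletConfig struct {
        NetworkPluginName *string
        NetworkPluginMTU  *int32
        NonMasqueradeCIDR *string
    }

    func ptrTo[T any](v T) *T { return &v }

    // applyKubenetDefaults mirrors the branch above: the fields are set
    // only when kubenet is selected and Kubernetes is older than 1.24.
    func applyKubenetDefaults(k *kubeletConfig, usesKubenet, kubernetesLT124 bool, nonMasqueradeCIDR string) {
        if !usesKubenet || !kubernetesLT124 {
            return
        }
        k.NetworkPluginName = ptrTo("kubenet")
        k.NetworkPluginMTU = ptrTo(int32(9001))
        k.NonMasqueradeCIDR = ptrTo(nonMasqueradeCIDR)
    }

    func main() {
        var k kubeletConfig
        applyKubenetDefaults(&k, true, true, "100.64.0.0/10")
        fmt.Println(*k.NetworkPluginName, *k.NetworkPluginMTU, *k.NonMasqueradeCIDR)
    }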

View File

@ -27,10 +27,11 @@ import (
func buildKubeletTestCluster() *kops.Cluster {
return &kops.Cluster{
Spec: kops.ClusterSpec{
KubernetesVersion: "1.6.2",
ServiceClusterIPRange: "10.10.0.0/16",
Kubelet: &kops.KubeletConfigSpec{},
Networking: &kops.NetworkingSpec{},
KubernetesVersion: "1.6.2",
Kubelet: &kops.KubeletConfigSpec{},
Networking: kops.NetworkingSpec{
ServiceClusterIPRange: "10.10.0.0/16",
},
},
}
}

View File

@ -8,7 +8,8 @@ spec:
alwaysAllow: {}
cloudProvider: {}
kubernetesVersion: v1.24.0
topology:
controlPlane: public
dns: Public
nodes: public
networking:
topology:
controlPlane: public
dns: Public
nodes: public

View File

@ -8,7 +8,8 @@ spec:
alwaysAllow: {}
cloudProvider: {}
kubernetesVersion: v1.21.0
topology:
controlPlane: public
dns: Public
nodes: public
networking:
topology:
controlPlane: public
dns: Public
nodes: public

View File

@ -10,7 +10,8 @@ spec:
kubeScheduler:
maxPersistentVolumes: 20
kubernetesVersion: v1.24.0
topology:
controlPlane: public
dns: Public
nodes: public
networking:
topology:
controlPlane: public
dns: Public
nodes: public

View File

@ -39,11 +39,7 @@ func (b *NetworkingOptionsBuilder) BuildOptions(o interface{}) error {
options.Kubelet = &kops.KubeletConfigSpec{}
}
networking := clusterSpec.Networking
if networking == nil {
return fmt.Errorf("networking not set")
}
networking := &clusterSpec.Networking
if b.Context.IsKubernetesLT("1.24") {
if UsesCNI(networking) {
options.Kubelet.NetworkPluginName = fi.PtrTo("cni")

View File

@ -61,8 +61,8 @@ func (b *KopsModelContext) GatherSubnets(ig *kops.InstanceGroup) ([]*kops.Cluste
for _, subnetName := range ig.Spec.Subnets {
var matches []*kops.ClusterSubnetSpec
for i := range b.Cluster.Spec.Subnets {
clusterSubnet := &b.Cluster.Spec.Subnets[i]
for i := range b.Cluster.Spec.Networking.Subnets {
clusterSubnet := &b.Cluster.Spec.Networking.Subnets[i]
if clusterSubnet.Name == subnetName {
matches = append(matches, clusterSubnet)
}
@ -252,7 +252,7 @@ func (b *KopsModelContext) UseBootstrapTokens() bool {
// UsesBastionDns checks if we should use a specific name for the bastion dns
func (b *KopsModelContext) UsesBastionDns() bool {
if b.Cluster.Spec.Topology.Bastion != nil && b.Cluster.Spec.Topology.Bastion.PublicName != "" {
if b.Cluster.Spec.Networking.Topology.Bastion != nil && b.Cluster.Spec.Networking.Topology.Bastion.PublicName != "" {
return true
}
return false
@ -347,7 +347,7 @@ func (b *KopsModelContext) UseIPv6ForAPI() bool {
break
}
for _, igSubnetName := range ig.Spec.Subnets {
for _, clusterSubnet := range b.Cluster.Spec.Subnets {
for _, clusterSubnet := range b.Cluster.Spec.Networking.Subnets {
if igSubnetName != clusterSubnet.Name {
continue
}
@ -389,10 +389,10 @@ func (b *KopsModelContext) UseServiceAccountExternalPermissions() bool {
// NetworkingIsCalico returns true if we are using calico networking
func (b *KopsModelContext) NetworkingIsCalico() bool {
return b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Calico != nil
return b.Cluster.Spec.Networking.Calico != nil
}
// NetworkingIsCilium returns true if we are using cilium networking
func (b *KopsModelContext) NetworkingIsCilium() bool {
return b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Cilium != nil
return b.Cluster.Spec.Networking.Cilium != nil
}

View File

@ -61,17 +61,17 @@ func (b *APILoadBalancerModelBuilder) Build(c *fi.ModelBuilderContext) error {
// Create LoadBalancer for API LB
loadbalancer := &dotasks.LoadBalancer{
Name: fi.PtrTo(loadbalancerName),
Region: fi.PtrTo(b.Cluster.Spec.Subnets[0].Region),
Region: fi.PtrTo(b.Cluster.Spec.Networking.Subnets[0].Region),
DropletTag: fi.PtrTo(clusterMasterTag),
Lifecycle: b.Lifecycle,
}
if b.Cluster.Spec.NetworkID != "" {
loadbalancer.VPCUUID = fi.PtrTo(b.Cluster.Spec.NetworkID)
} else if b.Cluster.Spec.NetworkCIDR != "" {
if b.Cluster.Spec.Networking.NetworkID != "" {
loadbalancer.VPCUUID = fi.PtrTo(b.Cluster.Spec.Networking.NetworkID)
} else if b.Cluster.Spec.Networking.NetworkCIDR != "" {
vpcName := "vpc-" + clusterName
loadbalancer.VPCName = fi.PtrTo(vpcName)
loadbalancer.NetworkCIDR = fi.PtrTo(b.Cluster.Spec.NetworkCIDR)
loadbalancer.NetworkCIDR = fi.PtrTo(b.Cluster.Spec.Networking.NetworkCIDR)
}
c.AddTask(loadbalancer)

View File

@ -62,7 +62,7 @@ func (d *DropletBuilder) Build(c *fi.ModelBuilderContext) error {
Lifecycle: d.Lifecycle,
// kops on DigitalOcean currently supports only one region
Region: fi.PtrTo(d.Cluster.Spec.Subnets[0].Region),
Region: fi.PtrTo(d.Cluster.Spec.Networking.Subnets[0].Region),
Size: fi.PtrTo(ig.Spec.MachineType),
Image: fi.PtrTo(ig.Spec.Image),
SSHKey: fi.PtrTo(sshKeyFingerPrint),
@ -81,14 +81,14 @@ func (d *DropletBuilder) Build(c *fi.ModelBuilderContext) error {
droplet.Tags = append(droplet.Tags, do.TagKubernetesInstanceGroup+":"+ig.Name)
}
if d.Cluster.Spec.NetworkID != "" {
droplet.VPCUUID = fi.PtrTo(d.Cluster.Spec.NetworkID)
} else if d.Cluster.Spec.NetworkCIDR != "" {
if d.Cluster.Spec.Networking.NetworkID != "" {
droplet.VPCUUID = fi.PtrTo(d.Cluster.Spec.Networking.NetworkID)
} else if d.Cluster.Spec.Networking.NetworkCIDR != "" {
// Since networkCIDR was specified as part of the request, a VPC with this CIDR is guaranteed to exist before
// the droplet is created, so the droplet can be associated with that VPC's UUID.
vpcName := "vpc-" + clusterName
droplet.VPCName = fi.PtrTo(vpcName)
droplet.NetworkCIDR = fi.PtrTo(d.Cluster.Spec.NetworkCIDR)
droplet.NetworkCIDR = fi.PtrTo(d.Cluster.Spec.Networking.NetworkCIDR)
}
userData, err := d.BootstrapScriptBuilder.ResourceNodeUp(c, ig)
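
Aside: the droplet builder and the load balancer builder resolve a VPC identically: an explicit NetworkID wins, otherwise a NetworkCIDR implies a VPC named after the cluster, otherwise the region's default VPC is used. A hedged sketch of that precedence (hypothetical helper, not part of kops):

    package main

    import "fmt"

    // vpcRef is a hypothetical value describing which VPC to attach to.
    type vpcRef struct {
        UUID string // explicit VPC by ID
        Name string // VPC named after the cluster
        CIDR string // CIDR the named VPC is expected to cover
    }

    func vpcForCluster(networkID, networkCIDR, clusterName string) vpcRef {
        if networkID != "" {
            return vpcRef{UUID: networkID}
        }
        if networkCIDR != "" {
            return vpcRef{Name: "vpc-" + clusterName, CIDR: networkCIDR}
        }
        return vpcRef{} // empty: fall back to the region's default VPC
    }

    func main() {
        fmt.Printf("%+v\n", vpcForCluster("", "10.100.0.0/16", "example.k8s.local"))
    }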

View File

@ -33,7 +33,7 @@ var _ fi.ModelBuilder = &NetworkModelBuilder{}
func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
ipRange := b.Cluster.Spec.NetworkCIDR
ipRange := b.Cluster.Spec.Networking.NetworkCIDR
if ipRange == "" {
// no cidr specified, use the default vpc in DO that's always available
return nil
@ -45,7 +45,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
// Create a separate vpc for this cluster.
vpc := &dotasks.VPC{
Name: fi.PtrTo(vpcName),
Region: fi.PtrTo(b.Cluster.Spec.Subnets[0].Region),
Region: fi.PtrTo(b.Cluster.Spec.Networking.Subnets[0].Region),
Lifecycle: b.Lifecycle,
IPRange: fi.PtrTo(ipRange),
}

View File

@ -149,7 +149,7 @@ func createInternalLB(b *APILoadBalancerBuilder, c *fi.ModelBuilderContext) erro
Lifecycle: fi.LifecycleExistsAndWarnIfChanges,
}
// TODO: automatically associate forwarding rule to subnets if no subnets are specified here.
if subnetNotSpecified(sn, b.Cluster.Spec.Subnets) {
if subnetNotSpecified(sn, b.Cluster.Spec.Networking.Subnets) {
c.AddTask(subnet)
}
c.AddTask(&gcetasks.ForwardingRule{

View File

@ -92,7 +92,7 @@ func (b *AutoscalingGroupModelBuilder) buildInstanceTemplate(c *fi.ModelBuilderC
Preemptible: fi.PtrTo(fi.ValueOf(ig.Spec.GCPProvisioningModel) == "SPOT"),
GCPProvisioningModel: ig.Spec.GCPProvisioningModel,
HasExternalIP: fi.PtrTo(b.Cluster.Spec.Topology.ControlPlane == kops.TopologyPublic),
HasExternalIP: fi.PtrTo(b.Cluster.Spec.Networking.Topology.ControlPlane == kops.TopologyPublic),
Scopes: []string{
"compute-rw",

View File

@ -33,10 +33,10 @@ type GCEModelContext struct {
// LinkToNetwork returns the GCE Network object the cluster is located in
func (c *GCEModelContext) LinkToNetwork() (*gcetasks.Network, error) {
if c.Cluster.Spec.NetworkID == "" {
if c.Cluster.Spec.Networking.NetworkID == "" {
return &gcetasks.Network{Name: s(c.SafeTruncatedClusterName())}, nil
}
name, project, err := gce.ParseNameAndProjectFromNetworkID(c.Cluster.Spec.NetworkID)
name, project, err := gce.ParseNameAndProjectFromNetworkID(c.Cluster.Spec.Networking.NetworkID)
if err != nil {
return nil, err
}
@ -133,11 +133,11 @@ func (c *GCEModelContext) NameForFirewallRule(id string) string {
}
func (c *GCEModelContext) NetworkingIsIPAlias() bool {
return c.Cluster.Spec.Networking != nil && c.Cluster.Spec.Networking.GCE != nil
return c.Cluster.Spec.Networking.GCE != nil
}
func (c *GCEModelContext) NetworkingIsGCERoutes() bool {
return c.Cluster.Spec.Networking != nil && c.Cluster.Spec.Networking.Kubenet != nil
return c.Cluster.Spec.Networking.Kubenet != nil
}
// LinkToServiceAccount returns a link to the GCE ServiceAccount object for VMs in the given role

View File

@ -136,7 +136,7 @@ func (b *FirewallModelBuilder) Build(c *fi.ModelBuilderContext) error {
if b.NetworkingIsIPAlias() || b.NetworkingIsGCERoutes() {
// When using IP alias or custom routes, SourceTags for identifying traffic don't work, and we must recognize by CIDR
if b.Cluster.Spec.PodCIDR == "" {
if b.Cluster.Spec.Networking.PodCIDR == "" {
return fmt.Errorf("expected PodCIDR to be set for IPAlias / kubenet")
}
@ -148,7 +148,7 @@ func (b *FirewallModelBuilder) Build(c *fi.ModelBuilderContext) error {
Name: s(b.NameForFirewallRule("pod-cidrs-to-node")),
Lifecycle: b.Lifecycle,
Network: network,
SourceRanges: []string{b.Cluster.Spec.PodCIDR},
SourceRanges: []string{b.Cluster.Spec.Networking.PodCIDR},
TargetTags: []string{b.GCETagForRole(kops.InstanceGroupRoleNode)},
Allowed: allProtocols,
})

View File

@ -34,7 +34,7 @@ type NetworkModelBuilder struct {
var _ fi.ModelBuilder = &NetworkModelBuilder{}
func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
sharedNetwork := b.Cluster.Spec.NetworkID != ""
sharedNetwork := b.Cluster.Spec.Networking.NetworkID != ""
network, err := b.LinkToNetwork()
if err != nil {
@ -51,8 +51,8 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
}
c.AddTask(network)
for i := range b.Cluster.Spec.Subnets {
subnet := &b.Cluster.Spec.Subnets[i]
for i := range b.Cluster.Spec.Networking.Subnets {
subnet := &b.Cluster.Spec.Networking.Subnets[i]
sharedSubnet := subnet.ID != ""
@ -80,8 +80,8 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
// All the CIDRs must be valid RFC1918 IP addresses, which makes conversion from the "pure kubenet" 100.64.0.0 GCE range difficult
t.CIDR = s(subnet.CIDR)
t.SecondaryIpRanges[b.NameForIPAliasRange("pods")] = b.Cluster.Spec.PodCIDR
t.SecondaryIpRanges[b.NameForIPAliasRange("services")] = b.Cluster.Spec.ServiceClusterIPRange
t.SecondaryIpRanges[b.NameForIPAliasRange("pods")] = b.Cluster.Spec.Networking.PodCIDR
t.SecondaryIpRanges[b.NameForIPAliasRange("services")] = b.Cluster.Spec.Networking.ServiceClusterIPRange
}
c.AddTask(t)
@ -96,8 +96,8 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
var subnetworks []*gcetasks.Subnet
for i := range b.Cluster.Spec.Subnets {
subnet := &b.Cluster.Spec.Subnets[i]
for i := range b.Cluster.Spec.Networking.Subnets {
subnet := &b.Cluster.Spec.Networking.Subnets[i]
// Only need to deal with private subnets
if subnet.Type != kops.SubnetTypeDualStack && subnet.Type != kops.SubnetTypePrivate {
continue

View File

@ -36,17 +36,17 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
Lifecycle: b.Lifecycle,
}
if b.Cluster.Spec.NetworkID == "" {
network.IPRange = b.Cluster.Spec.NetworkCIDR
if b.Cluster.Spec.Networking.NetworkID == "" {
network.IPRange = b.Cluster.Spec.Networking.NetworkCIDR
network.Region = b.Region
network.Subnets = []string{
b.Cluster.Spec.NetworkCIDR,
b.Cluster.Spec.Networking.NetworkCIDR,
}
network.Labels = map[string]string{
hetzner.TagKubernetesClusterName: b.ClusterName(),
}
} else {
network.ID = fi.PtrTo(b.Cluster.Spec.NetworkID)
network.ID = fi.PtrTo(b.Cluster.Spec.Networking.NetworkID)
}
c.AddTask(network)

View File

@ -365,15 +365,15 @@ func (r *NodeRoleAPIServer) BuildAWSPolicy(b *PolicyBuilder) (*Policy, error) {
addECRPermissions(p)
}
if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.AmazonVPC != nil {
if b.Cluster.Spec.Networking.AmazonVPC != nil {
addAmazonVPCCNIPermissions(p)
}
if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Cilium != nil && b.Cluster.Spec.Networking.Cilium.IPAM == kops.CiliumIpamEni {
if b.Cluster.Spec.Networking.Cilium != nil && b.Cluster.Spec.Networking.Cilium.IPAM == kops.CiliumIpamEni {
addCiliumEniPermissions(p)
}
if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Calico != nil && b.Cluster.Spec.Networking.Calico.AWSSrcDstCheck != "DoNothing" && !b.Cluster.Spec.IsIPv6Only() {
if b.Cluster.Spec.Networking.Calico != nil && b.Cluster.Spec.Networking.Calico.AWSSrcDstCheck != "DoNothing" && !b.Cluster.Spec.IsIPv6Only() {
addCalicoSrcDstCheckPermissions(p)
}
@ -444,15 +444,15 @@ func (r *NodeRoleMaster) BuildAWSPolicy(b *PolicyBuilder) (*Policy, error) {
addECRPermissions(p)
}
if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.AmazonVPC != nil {
if b.Cluster.Spec.Networking.AmazonVPC != nil {
addAmazonVPCCNIPermissions(p)
}
if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Cilium != nil && b.Cluster.Spec.Networking.Cilium.IPAM == kops.CiliumIpamEni {
if b.Cluster.Spec.Networking.Cilium != nil && b.Cluster.Spec.Networking.Cilium.IPAM == kops.CiliumIpamEni {
addCiliumEniPermissions(p)
}
if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Calico != nil && b.Cluster.Spec.Networking.Calico.AWSSrcDstCheck != "DoNothing" && !b.Cluster.Spec.IsIPv6Only() {
if b.Cluster.Spec.Networking.Calico != nil && b.Cluster.Spec.Networking.Calico.AWSSrcDstCheck != "DoNothing" && !b.Cluster.Spec.IsIPv6Only() {
addCalicoSrcDstCheckPermissions(p)
}
@ -474,11 +474,11 @@ func (r *NodeRoleNode) BuildAWSPolicy(b *PolicyBuilder) (*Policy, error) {
addECRPermissions(p)
}
if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.AmazonVPC != nil {
if b.Cluster.Spec.Networking.AmazonVPC != nil {
addAmazonVPCCNIPermissions(p)
}
if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Calico != nil && b.Cluster.Spec.Networking.Calico.AWSSrcDstCheck != "DoNothing" && !b.Cluster.Spec.IsIPv6Only() {
if b.Cluster.Spec.Networking.Calico != nil && b.Cluster.Spec.Networking.Calico.AWSSrcDstCheck != "DoNothing" && !b.Cluster.Spec.IsIPv6Only() {
addCalicoSrcDstCheckPermissions(p)
}
@ -712,19 +712,17 @@ func ReadableStatePaths(cluster *kops.Cluster, role Subject) ([]string, error) {
paths = append(paths, "/pki/private/kubelet/*")
}
networkingSpec := cluster.Spec.Networking
networkingSpec := &cluster.Spec.Networking
if networkingSpec != nil {
// @check if kube-router is enabled and permit access to the private key
if networkingSpec.KubeRouter != nil {
paths = append(paths, "/pki/private/kube-router/*")
}
// @check if kube-router is enabled and permit access to the private key
if networkingSpec.KubeRouter != nil {
paths = append(paths, "/pki/private/kube-router/*")
}
// @check if cilium is enabled as the CNI provider and permit access to the cilium etcd client TLS certificate by default
// As long as the Cilium Etcd cluster exists, we should do this
if networkingSpec.Cilium != nil && model.UseCiliumEtcd(cluster) {
paths = append(paths, "/pki/private/etcd-client-cilium/*")
}
// @check if cilium is enabled as the CNI provider and permit access to the cilium etcd client TLS certificate by default
// As long as the Cilium Etcd cluster exists, we should do this
if networkingSpec.Cilium != nil && model.UseCiliumEtcd(cluster) {
paths = append(paths, "/pki/private/etcd-client-cilium/*")
}
}
}

View File

@ -171,7 +171,7 @@ func TestPolicyGeneration(t *testing.T) {
Enabled: fi.PtrTo(true),
},
},
Networking: &kops.NetworkingSpec{
Networking: kops.NetworkingSpec{
Kubenet: &kops.KubenetNetworkingSpec{},
},
},

View File

@ -57,7 +57,7 @@ func (c *OpenstackModelContext) UseVIPACL() bool {
}
func (c *OpenstackModelContext) GetNetworkName() (string, error) {
if c.Cluster.Spec.NetworkID == "" {
if c.Cluster.Spec.Networking.NetworkID == "" {
return c.ClusterName(), nil
}
@ -66,7 +66,7 @@ func (c *OpenstackModelContext) GetNetworkName() (string, error) {
return "", err
}
network, err := osCloud.GetNetwork(c.Cluster.Spec.NetworkID)
network, err := osCloud.GetNetwork(c.Cluster.Spec.Networking.NetworkID)
if err != nil {
return "", err
}
@ -74,7 +74,7 @@ func (c *OpenstackModelContext) GetNetworkName() (string, error) {
}
func (c *OpenstackModelContext) findSubnetClusterSpec(subnet string) (string, error) {
for _, sp := range c.Cluster.Spec.Subnets {
for _, sp := range c.Cluster.Spec.Networking.Subnets {
if sp.Name == subnet {
name, err := c.findSubnetNameByID(sp.ID, sp.Name)
if err != nil {

View File

@ -299,7 +299,7 @@ func (b *FirewallModelBuilder) addHTTPSRules(c *fi.ModelBuilderContext, sgMap ma
EtherType: s(IPV4),
PortRangeMin: i(443),
PortRangeMax: i(443),
RemoteIPPrefix: s(b.Cluster.Spec.NetworkCIDR),
RemoteIPPrefix: s(b.Cluster.Spec.Networking.NetworkCIDR),
})
}
}
@ -412,43 +412,41 @@ func (b *FirewallModelBuilder) addCNIRules(c *fi.ModelBuilderContext, sgMap map[
// allow cadvisor
tcpPorts = append(tcpPorts, 4194)
if b.Cluster.Spec.Networking != nil {
if b.Cluster.Spec.Networking.Kopeio != nil {
// VXLAN over UDP
// https://tools.ietf.org/html/rfc7348
udpPorts = append(udpPorts, 4789)
}
if b.Cluster.Spec.Networking.Kopeio != nil {
// VXLAN over UDP
// https://tools.ietf.org/html/rfc7348
udpPorts = append(udpPorts, 4789)
}
if b.Cluster.Spec.Networking.Cilium != nil {
if b.Cluster.Spec.Networking.Cilium != nil {
udpPorts = append(udpPorts, 8472)
tcpPorts = append(tcpPorts, 4240)
}
if b.Cluster.Spec.Networking.Weave != nil {
udpPorts = append(udpPorts, 6783)
tcpPorts = append(tcpPorts, 6783)
udpPorts = append(udpPorts, 6784)
}
if b.Cluster.Spec.Networking.Flannel != nil {
switch b.Cluster.Spec.Networking.Flannel.Backend {
case "", "udp":
udpPorts = append(udpPorts, 8285)
case "vxlan":
udpPorts = append(udpPorts, 8472)
tcpPorts = append(tcpPorts, 4240)
default:
klog.Warningf("unknown flannel networking backend %q", b.Cluster.Spec.Networking.Flannel.Backend)
}
}
if b.Cluster.Spec.Networking.Weave != nil {
udpPorts = append(udpPorts, 6783)
tcpPorts = append(tcpPorts, 6783)
udpPorts = append(udpPorts, 6784)
}
if b.Cluster.Spec.Networking.Calico != nil {
tcpPorts = append(tcpPorts, 179)
protocols = append(protocols, ProtocolIPEncap)
}
if b.Cluster.Spec.Networking.Flannel != nil {
switch b.Cluster.Spec.Networking.Flannel.Backend {
case "", "udp":
udpPorts = append(udpPorts, 8285)
case "vxlan":
udpPorts = append(udpPorts, 8472)
default:
klog.Warningf("unknown flannel networking backend %q", b.Cluster.Spec.Networking.Flannel.Backend)
}
}
if b.Cluster.Spec.Networking.Calico != nil {
tcpPorts = append(tcpPorts, 179)
protocols = append(protocols, ProtocolIPEncap)
}
if b.Cluster.Spec.Networking.KubeRouter != nil {
protocols = append(protocols, ProtocolIPEncap)
}
if b.Cluster.Spec.Networking.KubeRouter != nil {
protocols = append(protocols, ProtocolIPEncap)
}
masterName := b.SecurityGroupName(kops.InstanceGroupRoleControlPlane)
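
Aside: addCNIRules is essentially a table from CNI choice to the ports that must be open between nodes. A condensed sketch of that table with presence-only stand-in types (Calico and kube-router additionally need the IP-in-IP protocol, which this port-only sketch omits):

    package main

    import "fmt"

    type flannelSpec struct{ Backend string }

    type networkingSpec struct {
        Kopeio  *struct{}
        Cilium  *struct{}
        Weave   *struct{}
        Flannel *flannelSpec
        Calico  *struct{}
    }

    // cniPorts returns the TCP and UDP ports the selected CNI needs,
    // mirroring the cases in addCNIRules above.
    func cniPorts(n *networkingSpec) (tcp, udp []int) {
        if n.Kopeio != nil {
            udp = append(udp, 4789) // VXLAN over UDP, RFC 7348
        }
        if n.Cilium != nil {
            udp = append(udp, 8472) // VXLAN
            tcp = append(tcp, 4240) // health checks
        }
        if n.Weave != nil {
            tcp = append(tcp, 6783)
            udp = append(udp, 6783, 6784)
        }
        if n.Flannel != nil {
            switch n.Flannel.Backend {
            case "", "udp":
                udp = append(udp, 8285)
            case "vxlan":
                udp = append(udp, 8472)
                tcp = append(tcp, 4240)
            }
        }
        if n.Calico != nil {
            tcp = append(tcp, 179) // BGP
        }
        return tcp, udp
    }

    func main() {
        tcp, udp := cniPorts(&networkingSpec{Cilium: &struct{}{}})
        fmt.Println(tcp, udp) // [4240] [8472]
    }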

View File

@ -43,7 +43,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
{
t := &openstacktasks.Network{
Name: s(netName),
ID: s(b.Cluster.Spec.NetworkID),
ID: s(b.Cluster.Spec.Networking.NetworkID),
Tag: s(clusterName),
Lifecycle: b.Lifecycle,
}
@ -59,7 +59,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
needRouter = false
}
routerName := strings.Replace(clusterName, ".", "-", -1)
for _, sp := range b.Cluster.Spec.Subnets {
for _, sp := range b.Cluster.Spec.Networking.Subnets {
// assumes that we do not need to create routers if we use existing subnets
if sp.ID != "" {
needRouter = false

View File

@ -175,7 +175,7 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.ModelBuilderContext, sg *
Name: instanceName,
Lifecycle: b.Lifecycle,
GroupName: s(groupName),
Region: fi.PtrTo(b.Cluster.Spec.Subnets[0].Region),
Region: fi.PtrTo(b.Cluster.Spec.Networking.Subnets[0].Region),
Flavor: fi.PtrTo(ig.Spec.MachineType),
Image: fi.PtrTo(ig.Spec.Image),
SSHKey: fi.PtrTo(sshKeyName),
@ -206,7 +206,7 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.ModelBuilderContext, sg *
instanceTask.FloatingIP = t
case kops.InstanceGroupRoleControlPlane:
if b.Cluster.Spec.Topology == nil || b.Cluster.Spec.Topology.ControlPlane != kops.TopologyPrivate {
if b.Cluster.Spec.Networking.Topology == nil || b.Cluster.Spec.Networking.Topology.ControlPlane != kops.TopologyPrivate {
t := &openstacktasks.FloatingIP{
Name: fi.PtrTo(fmt.Sprintf("%s-%s", "fip", *instanceTask.Name)),
Lifecycle: b.Lifecycle,
@ -216,7 +216,7 @@ func (b *ServerGroupModelBuilder) buildInstances(c *fi.ModelBuilderContext, sg *
instanceTask.FloatingIP = t
}
default:
if b.Cluster.Spec.Topology == nil || b.Cluster.Spec.Topology.Nodes != kops.TopologyPrivate {
if b.Cluster.Spec.Networking.Topology == nil || b.Cluster.Spec.Networking.Topology.Nodes != kops.TopologyPrivate {
t := &openstacktasks.FloatingIP{
Name: fi.PtrTo(fmt.Sprintf("%s-%s", "fip", *instanceTask.Name)),
Lifecycle: b.Lifecycle,
@ -272,7 +272,7 @@ func (b *ServerGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
if b.Cluster.Spec.CloudProvider.Openstack.Loadbalancer != nil {
var lbSubnetName string
var err error
for _, sp := range b.Cluster.Spec.Subnets {
for _, sp := range b.Cluster.Spec.Networking.Subnets {
if sp.Type == kops.SubnetTypeDualStack || sp.Type == kops.SubnetTypePrivate {
lbSubnetName, err = b.findSubnetNameByID(sp.ID, sp.Name)
if err != nil {

View File

@ -63,10 +63,12 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
},
},
},
},
@ -123,19 +125,21 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
},
{
Name: "utility-subnet",
Region: "region",
},
},
{
Name: "utility-subnet",
Region: "region",
Topology: &kops.TopologySpec{
Nodes: "private",
},
},
Topology: &kops.TopologySpec{
Nodes: "private",
},
},
},
instanceGroups: []*kops.InstanceGroup{
@ -211,18 +215,20 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet-a",
Region: "region",
},
{
Name: "subnet-b",
Region: "region",
},
{
Name: "subnet-c",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet-a",
Region: "region",
},
{
Name: "subnet-b",
Region: "region",
},
{
Name: "subnet-c",
Region: "region",
},
},
},
},
@ -335,10 +341,12 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
},
},
},
},
@ -395,25 +403,27 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
},
Topology: &kops.TopologySpec{
ControlPlane: kops.TopologyPrivate,
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet-a",
Region: "region",
Type: kops.SubnetTypePrivate,
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet-a",
Region: "region",
Type: kops.SubnetTypePrivate,
},
{
Name: "subnet-b",
Region: "region",
Type: kops.SubnetTypePrivate,
},
{
Name: "subnet-c",
Region: "region",
Type: kops.SubnetTypePrivate,
},
},
{
Name: "subnet-b",
Region: "region",
Type: kops.SubnetTypePrivate,
},
{
Name: "subnet-c",
Region: "region",
Type: kops.SubnetTypePrivate,
Topology: &kops.TopologySpec{
ControlPlane: kops.TopologyPrivate,
},
},
},
@ -523,18 +533,20 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet-a",
Region: "region",
},
{
Name: "subnet-b",
Region: "region",
},
{
Name: "subnet-c",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet-a",
Region: "region",
},
{
Name: "subnet-b",
Region: "region",
},
{
Name: "subnet-c",
Region: "region",
},
},
},
},
@ -647,18 +659,20 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet-a",
Region: "region",
},
{
Name: "subnet-b",
Region: "region",
},
{
Name: "subnet-c",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet-a",
Region: "region",
},
{
Name: "subnet-b",
Region: "region",
},
{
Name: "subnet-c",
Region: "region",
},
},
},
},
@ -731,10 +745,12 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
},
},
},
},
@ -793,14 +809,16 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
},
{
Name: "utility-subnet",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
},
{
Name: "utility-subnet",
Region: "region",
},
},
},
},
@ -878,10 +896,12 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
},
},
},
},
@ -924,10 +944,12 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
},
},
},
},
@ -972,10 +994,12 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
},
},
},
},
@ -1018,10 +1042,12 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
},
},
},
CloudLabels: map[string]string{
@ -1064,10 +1090,12 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
},
},
},
},
@ -1110,10 +1138,12 @@ func getServerGroupModelBuilderTestInput() []serverGroupModelBuilderTestInput {
},
},
KubernetesVersion: "1.24.0",
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
Networking: kops.NetworkingSpec{
Subnets: []kops.ClusterSubnetSpec{
{
Name: "subnet",
Region: "region",
},
},
},
},
@ -1181,9 +1211,6 @@ func RunGoldenTest(t *testing.T, basedir string, testCase serverGroupModelBuilde
testutils.SetupMockOpenstack()
clusterLifecycle := fi.LifecycleSync
if testCase.cluster.Spec.Networking == nil {
testCase.cluster.Spec.Networking = &kops.NetworkingSpec{}
}
bootstrapScriptBuilder := &model.BootstrapScriptBuilder{
KopsModelContext: &model.KopsModelContext{
IAMModelContext: iam.IAMModelContext{Cluster: testCase.cluster},

View File

@ -29,7 +29,7 @@ func BuildMinimalCluster(clusterName string) *kops.Cluster {
c := &kops.Cluster{}
c.ObjectMeta.Name = clusterName
c.Spec.KubernetesVersion = "1.23.2"
c.Spec.Subnets = []kops.ClusterSubnetSpec{
c.Spec.Networking.Subnets = []kops.ClusterSubnetSpec{
{Name: "subnet-us-test-1a", Zone: "us-test-1a", CIDR: "172.20.1.0/24", Type: kops.SubnetTypePrivate},
}
@ -41,22 +41,20 @@ func BuildMinimalCluster(clusterName string) *kops.Cluster {
c.Spec.SSHAccess = []string{"0.0.0.0/0"}
// Default to public topology
c.Spec.Topology = &kops.TopologySpec{
c.Spec.Networking.Topology = &kops.TopologySpec{
ControlPlane: kops.TopologyPublic,
Nodes: kops.TopologyPublic,
DNS: kops.DNSTypePublic,
}
c.Spec.Networking = &kops.NetworkingSpec{}
c.Spec.NetworkCIDR = "172.20.0.0/16"
c.Spec.Subnets = []kops.ClusterSubnetSpec{
c.Spec.Networking.NetworkCIDR = "172.20.0.0/16"
c.Spec.Networking.Subnets = []kops.ClusterSubnetSpec{
{Name: "subnet-us-test-1a", Zone: "us-test-1a", CIDR: "172.20.1.0/24", Type: kops.SubnetTypePublic},
{Name: "subnet-us-test-1b", Zone: "us-test-1b", CIDR: "172.20.2.0/24", Type: kops.SubnetTypePublic},
{Name: "subnet-us-test-1c", Zone: "us-test-1c", CIDR: "172.20.3.0/24", Type: kops.SubnetTypePublic},
}
c.Spec.NonMasqueradeCIDR = "100.64.0.0/10"
c.Spec.Networking.NonMasqueradeCIDR = "100.64.0.0/10"
c.Spec.CloudProvider.AWS = &kops.AWSSpec{}
c.Spec.ConfigBase = "memfs://unittest-bucket/" + clusterName
@ -72,7 +70,7 @@ func BuildMinimalCluster(clusterName string) *kops.Cluster {
func addEtcdClusters(c *kops.Cluster) {
subnetNames := sets.NewString()
for _, z := range c.Spec.Subnets {
for _, z := range c.Spec.Networking.Subnets {
subnetNames.Insert(z.Name)
}
etcdZones := subnetNames.List()

View File

@ -118,7 +118,7 @@ func CreateAddons(channel *kops.Channel, kubernetesVersion *semver.Version, clus
return addons, nil
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.Kopeio != nil {
if cluster.Spec.Networking.Kopeio != nil {
// TODO: Check that we haven't manually loaded a kopeio-networking operator
// TODO: Check that we haven't manually created a kopeio-networking CRD

View File

@ -43,6 +43,7 @@ spec:
cloudProvider: {}
kubeControllerManager: {}
kubelet: {}
networking: {}
`
if yamlString != expected {
diffString := diff.FormatDiff(expected, yamlString)

View File

@ -52,7 +52,7 @@ func (t *Tester) setSkipRegexFlag() error {
// https://github.com/kubernetes/kubernetes/issues/113964
skipRegex += "|LoadBalancers.should.be.able.to.preserve.UDP.traffic"
networking := cluster.Spec.Networking
networking := cluster.Spec.LegacyNetworking
switch {
case networking.Kubenet != nil, networking.Canal != nil, networking.Weave != nil, networking.Cilium != nil:
skipRegex += "|Services.*rejected.*endpoints"

Some files were not shown because too many files have changed in this diff.