mirror of https://github.com/kubernetes/kops.git
Replace convenience functions with fi.* alternatives
parent 8f0aa33131
commit bd7176f45f
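For context: the removed convenience wrappers s(...) and i64(...) (the package model helper file deleted below) were thin aliases for the pointer helpers in k8s.io/kops/upup/pkg/fi, so every call site in this commit switches to the fi.* form directly. A minimal sketch of what such helpers presumably look like, for illustration only (the real definitions live in the fi package):

// Illustrative sketch of the fi pointer helpers; not the verbatim kops source.
package fi

// String returns a pointer to a copy of the given string value.
func String(v string) *string {
	return &v
}

// Int64 returns a pointer to a copy of the given int64 value.
func Int64(v int64) *int64 {
	return &v
}

// Bool returns a pointer to a copy of the given bool value.
func Bool(v bool) *bool {
	return &v
}

With helpers of this shape, a call site such as InstanceGroup: s("ig-1") becomes InstanceGroup: fi.String("ig-1"), and i64(int64(x)) becomes fi.Int64(int64(x)), with no change in behavior.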
@@ -187,7 +187,7 @@ func makeTestCluster(hookSpecRoles []kops.InstanceGroupRole, fileAssetSpecRoles
 Members: []kops.EtcdMemberSpec{
 	{
 		Name: "test",
-		InstanceGroup: s("ig-1"),
+		InstanceGroup: fi.String("ig-1"),
 	},
 },
 Version: "3.1.11",
@@ -197,7 +197,7 @@ func makeTestCluster(hookSpecRoles []kops.InstanceGroupRole, fileAssetSpecRoles
 Members: []kops.EtcdMemberSpec{
 	{
 		Name: "test",
-		InstanceGroup: s("ig-1"),
+		InstanceGroup: fi.String("ig-1"),
 	},
 },
 Version: "3.1.11",
@@ -206,14 +206,14 @@ func makeTestCluster(hookSpecRoles []kops.InstanceGroupRole, fileAssetSpecRoles
 },
 NetworkCIDR: "10.79.0.0/24",
 CloudConfig: &kops.CloudConfiguration{
-	NodeTags: s("something"),
+	NodeTags: fi.String("something"),
 },
 ContainerRuntime: "docker",
 Containerd: &kops.ContainerdConfig{
-	LogLevel: s("info"),
+	LogLevel: fi.String("info"),
 },
 Docker: &kops.DockerConfig{
-	LogLevel: s("INFO"),
+	LogLevel: fi.String("INFO"),
 },
 KubeAPIServer: &kops.KubeAPIServerConfig{
 	Image: "CoreOS",
@@ -1,31 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package model
-
-import (
-	"k8s.io/kops/upup/pkg/fi"
-)
-
-// s is a helper that builds a *string from a string value
-func s(v string) *string {
-	return fi.String(v)
-}
-
-// i64 is a helper that builds a *int64 from an int64 value
-func i64(v int64) *int64 {
-	return fi.Int64(v)
-}
@@ -78,11 +78,11 @@ func (b *FirewallModelBuilder) buildNodeRules(c *fi.ModelBuilderContext) ([]Secu
 // Allow full egress
 {
 	t := &awstasks.SecurityGroupRule{
-		Name: s("node-egress" + src.Suffix),
+		Name: fi.String("node-egress" + src.Suffix),
 		Lifecycle: b.Lifecycle,
 		SecurityGroup: src.Task,
 		Egress: fi.Bool(true),
-		CIDR: s("0.0.0.0/0"),
+		CIDR: fi.String("0.0.0.0/0"),
 	}
 	b.AddDirectionalGroupRule(c, t)
 }
@@ -92,7 +92,7 @@ func (b *FirewallModelBuilder) buildNodeRules(c *fi.ModelBuilderContext) ([]Secu
 suffix := JoinSuffixes(src, dest)
 
 t := &awstasks.SecurityGroupRule{
-	Name: s("all-node-to-node" + suffix),
+	Name: fi.String("all-node-to-node" + suffix),
 	Lifecycle: b.Lifecycle,
 	SecurityGroup: dest.Task,
 	SourceGroup: src.Task,
@@ -159,25 +159,25 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
 
 for _, r := range udpRanges {
 	t := &awstasks.SecurityGroupRule{
-		Name: s(fmt.Sprintf("node-to-master-udp-%d-%d%s", r.From, r.To, suffix)),
+		Name: fi.String(fmt.Sprintf("node-to-master-udp-%d-%d%s", r.From, r.To, suffix)),
 		Lifecycle: b.Lifecycle,
 		SecurityGroup: masterGroup.Task,
 		SourceGroup: nodeGroup.Task,
-		FromPort: i64(int64(r.From)),
-		ToPort: i64(int64(r.To)),
-		Protocol: s("udp"),
+		FromPort: fi.Int64(int64(r.From)),
+		ToPort: fi.Int64(int64(r.To)),
+		Protocol: fi.String("udp"),
 	}
 	b.AddDirectionalGroupRule(c, t)
 }
 for _, r := range tcpRanges {
 	t := &awstasks.SecurityGroupRule{
-		Name: s(fmt.Sprintf("node-to-master-tcp-%d-%d%s", r.From, r.To, suffix)),
+		Name: fi.String(fmt.Sprintf("node-to-master-tcp-%d-%d%s", r.From, r.To, suffix)),
 		Lifecycle: b.Lifecycle,
 		SecurityGroup: masterGroup.Task,
 		SourceGroup: nodeGroup.Task,
-		FromPort: i64(int64(r.From)),
-		ToPort: i64(int64(r.To)),
-		Protocol: s("tcp"),
+		FromPort: fi.Int64(int64(r.From)),
+		ToPort: fi.Int64(int64(r.To)),
+		Protocol: fi.String("tcp"),
 	}
 	b.AddDirectionalGroupRule(c, t)
 }
@@ -192,11 +192,11 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
 	}
 
 	t := &awstasks.SecurityGroupRule{
-		Name: s(fmt.Sprintf("node-to-master-protocol-%s%s", name, suffix)),
+		Name: fi.String(fmt.Sprintf("node-to-master-protocol-%s%s", name, suffix)),
 		Lifecycle: b.Lifecycle,
 		SecurityGroup: masterGroup.Task,
 		SourceGroup: nodeGroup.Task,
-		Protocol: s(awsName),
+		Protocol: fi.String(awsName),
 	}
 	b.AddDirectionalGroupRule(c, t)
 }
@@ -211,7 +211,7 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
 suffix := JoinSuffixes(src, dest)
 
 t := &awstasks.SecurityGroupRule{
-	Name: s("all-nodes-to-master" + suffix),
+	Name: fi.String("all-nodes-to-master" + suffix),
 	Lifecycle: b.Lifecycle,
 	SecurityGroup: dest.Task,
 	SourceGroup: src.Task,
@@ -238,11 +238,11 @@ func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext, nodeG
 // Allow full egress
 {
 	t := &awstasks.SecurityGroupRule{
-		Name: s("master-egress" + src.Suffix),
+		Name: fi.String("master-egress" + src.Suffix),
 		Lifecycle: b.Lifecycle,
 		SecurityGroup: src.Task,
 		Egress: fi.Bool(true),
-		CIDR: s("0.0.0.0/0"),
+		CIDR: fi.String("0.0.0.0/0"),
 	}
 	b.AddDirectionalGroupRule(c, t)
 }
@@ -252,7 +252,7 @@ func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext, nodeG
 suffix := JoinSuffixes(src, dest)
 
 t := &awstasks.SecurityGroupRule{
-	Name: s("all-master-to-master" + suffix),
+	Name: fi.String("all-master-to-master" + suffix),
 	Lifecycle: b.Lifecycle,
 	SecurityGroup: dest.Task,
 	SourceGroup: src.Task,
@@ -265,7 +265,7 @@ func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext, nodeG
 suffix := JoinSuffixes(src, dest)
 
 t := &awstasks.SecurityGroupRule{
-	Name: s("all-master-to-node" + suffix),
+	Name: fi.String("all-master-to-node" + suffix),
 	Lifecycle: b.Lifecycle,
 	SecurityGroup: dest.Task,
 	SourceGroup: src.Task,
@@ -288,9 +288,9 @@ func (b *KopsModelContext) GetSecurityGroups(role kops.InstanceGroupRole) ([]Sec
 if role == kops.InstanceGroupRoleMaster {
 	name := b.SecurityGroupName(role)
 	baseGroup = &awstasks.SecurityGroup{
-		Name: s(name),
+		Name: fi.String(name),
 		VPC: b.LinkToVPC(),
-		Description: s("Security group for masters"),
+		Description: fi.String("Security group for masters"),
 		RemoveExtraRules: []string{
 			"port=22", // SSH
 			"port=443", // k8s api
@@ -310,18 +310,18 @@ func (b *KopsModelContext) GetSecurityGroups(role kops.InstanceGroupRole) ([]Sec
 } else if role == kops.InstanceGroupRoleNode {
 	name := b.SecurityGroupName(role)
 	baseGroup = &awstasks.SecurityGroup{
-		Name: s(name),
+		Name: fi.String(name),
 		VPC: b.LinkToVPC(),
-		Description: s("Security group for nodes"),
+		Description: fi.String("Security group for nodes"),
 		RemoveExtraRules: []string{"port=22"},
 	}
 	baseGroup.Tags = b.CloudTags(name, false)
 } else if role == kops.InstanceGroupRoleBastion {
 	name := b.SecurityGroupName(role)
 	baseGroup = &awstasks.SecurityGroup{
-		Name: s(name),
+		Name: fi.String(name),
 		VPC: b.LinkToVPC(),
-		Description: s("Security group for bastion"),
+		Description: fi.String("Security group for bastion"),
 		RemoveExtraRules: []string{"port=22"},
 	}
 	baseGroup.Tags = b.CloudTags(name, false)
@@ -155,7 +155,7 @@ func (b *IAMModelBuilder) buildIAMRole(role iam.Subject, iamName string, c *fi.M
 }
 
 iamRole := &awstasks.IAMRole{
-	Name: s(iamName),
+	Name: fi.String(iamName),
 	Lifecycle: b.Lifecycle,
 
 	RolePolicyDocument: rolePolicy,
@@ -164,10 +164,10 @@ func (b *IAMModelBuilder) buildIAMRole(role iam.Subject, iamName string, c *fi.M
 
 if isServiceAccount {
 	// e.g. kube-system-dns-controller
-	iamRole.ExportWithID = s(roleKey)
+	iamRole.ExportWithID = fi.String(roleKey)
 } else {
 	// e.g. nodes
-	iamRole.ExportWithID = s(roleKey + "s")
+	iamRole.ExportWithID = fi.String(roleKey + "s")
 }
 
 if b.Cluster.Spec.IAM != nil && b.Cluster.Spec.IAM.PermissionsBoundary != nil {
@@ -199,7 +199,7 @@ func (b *IAMModelBuilder) buildIAMRolePolicy(role iam.Subject, iamName string, i
 }
 
 t := &awstasks.IAMRolePolicy{
-	Name: s(iamName),
+	Name: fi.String(iamName),
 	Lifecycle: b.Lifecycle,
 
 	Role: iamRole,
@@ -252,7 +252,7 @@ func (b *IAMModelBuilder) buildIAMTasks(role iam.Subject, iamName string, c *fi.
 var iamInstanceProfile *awstasks.IAMInstanceProfile
 {
 	iamInstanceProfile = &awstasks.IAMInstanceProfile{
-		Name: s(iamName),
+		Name: fi.String(iamName),
 		Lifecycle: b.Lifecycle,
 		Shared: fi.Bool(shared),
 		Tags: b.CloudTags(iamName, false),
@@ -262,7 +262,7 @@ func (b *IAMModelBuilder) buildIAMTasks(role iam.Subject, iamName string, c *fi.
 
 {
 	iamInstanceProfileRole := &awstasks.IAMInstanceProfileRole{
-		Name: s(iamName),
+		Name: fi.String(iamName),
 		Lifecycle: b.Lifecycle,
 
 		InstanceProfile: iamInstanceProfile,
@@ -283,7 +283,7 @@ func (b *IAMModelBuilder) buildIAMTasks(role iam.Subject, iamName string, c *fi.
 
 name := fmt.Sprintf("%s-policyoverride", roleKey)
 t := &awstasks.IAMRolePolicy{
-	Name: s(name),
+	Name: fi.String(name),
 	Lifecycle: b.Lifecycle,
 	Role: iamRole,
 	Managed: true,
@@ -305,7 +305,7 @@ func (b *IAMModelBuilder) buildIAMTasks(role iam.Subject, iamName string, c *fi.
 additionalPolicyName := "additional." + iamName
 
 t := &awstasks.IAMRolePolicy{
-	Name: s(additionalPolicyName),
+	Name: fi.String(additionalPolicyName),
 	Lifecycle: b.Lifecycle,
 
 	Role: iamRole,
@@ -183,19 +183,19 @@ func (b *MasterVolumeBuilder) addAWSVolume(c *fi.ModelBuilderContext, name strin
 encrypted := fi.BoolValue(m.EncryptedVolume)
 
 t := &awstasks.EBSVolume{
-	Name: s(name),
+	Name: fi.String(name),
 	Lifecycle: b.Lifecycle,
 
-	AvailabilityZone: s(zone),
+	AvailabilityZone: fi.String(zone),
 	SizeGB: fi.Int64(int64(volumeSize)),
-	VolumeType: s(volumeType),
+	VolumeType: fi.String(volumeType),
 	KmsKeyId: m.KmsKeyId,
 	Encrypted: fi.Bool(encrypted),
 	Tags: tags,
 }
 switch volumeType {
 case ec2.VolumeTypeGp3:
-	t.VolumeThroughput = i64(int64(volumeThroughput))
+	t.VolumeThroughput = fi.Int64(int64(volumeThroughput))
 	fallthrough
 case ec2.VolumeTypeIo1, ec2.VolumeTypeIo2:
 	t.VolumeIops = fi.Int64(int64(volumeIops))
@@ -223,10 +223,10 @@ func (b *MasterVolumeBuilder) addDOVolume(c *fi.ModelBuilderContext, name string
 tags[do.TagKubernetesClusterNamePrefix] = do.SafeClusterName(b.Cluster.ObjectMeta.Name)
 
 t := &dotasks.Volume{
-	Name: s(name),
+	Name: fi.String(name),
 	Lifecycle: b.Lifecycle,
 	SizeGB: fi.Int64(int64(volumeSize)),
-	Region: s(zone),
+	Region: fi.String(zone),
 	Tags: tags,
 }
 
@@ -268,12 +268,12 @@ func (b *MasterVolumeBuilder) addGCEVolume(c *fi.ModelBuilderContext, name strin
 }
 
 t := &gcetasks.Disk{
-	Name: s(name),
+	Name: fi.String(name),
 	Lifecycle: b.Lifecycle,
 
-	Zone: s(zone),
+	Zone: fi.String(zone),
 	SizeGB: fi.Int64(int64(volumeSize)),
-	VolumeType: s(volumeType),
+	VolumeType: fi.String(volumeType),
 	Labels: tags,
 }
 
@@ -299,9 +299,9 @@ func (b *MasterVolumeBuilder) addOpenstackVolume(c *fi.ModelBuilderContext, name
 	zone = fi.StringValue(b.Cluster.Spec.CloudConfig.Openstack.BlockStorage.OverrideAZ)
 }
 t := &openstacktasks.Volume{
-	Name: s(name),
-	AvailabilityZone: s(zone),
-	VolumeType: s(volumeType),
+	Name: fi.String(name),
+	AvailabilityZone: fi.String(zone),
+	VolumeType: fi.String(volumeType),
 	SizeGB: fi.Int64(int64(volumeSize)),
 	Tags: tags,
 	Lifecycle: b.Lifecycle,
@@ -338,10 +338,10 @@ func (b *MasterVolumeBuilder) addALIVolume(c *fi.ModelBuilderContext, name strin
 
 t := &alitasks.Disk{
 	Lifecycle: b.Lifecycle,
-	Name: s(name),
-	ZoneId: s(zone),
+	Name: fi.String(name),
+	ZoneId: fi.String(zone),
 	SizeGB: fi.Int(int(volumeSize)),
-	DiskCategory: s(volumeType),
+	DiskCategory: fi.String(volumeType),
 	Encrypted: fi.Bool(encrypted),
 	Tags: tags,
 }
@@ -259,7 +259,7 @@ func (b *KopsModelContext) NamePrivateRouteTableInZone(zoneName string) string {
 }
 
 func (b *KopsModelContext) LinkToPrivateRouteTableInZone(zoneName string) *awstasks.RouteTable {
-	return &awstasks.RouteTable{Name: s(b.NamePrivateRouteTableInZone(zoneName))}
+	return &awstasks.RouteTable{Name: fi.String(b.NamePrivateRouteTableInZone(zoneName))}
 }
 
 func (b *KopsModelContext) InstanceName(ig *kops.InstanceGroup, suffix string) string {
@@ -57,7 +57,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 	vpcTags = nil
 }
 t := &awstasks.VPC{
-	Name: s(vpcName),
+	Name: fi.String(vpcName),
 	Lifecycle: b.Lifecycle,
 	Shared: fi.Bool(sharedVPC),
 	EnableDNSSupport: fi.Bool(true),
@@ -77,11 +77,11 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 }
 
 if b.Cluster.Spec.NetworkID != "" {
-	t.ID = s(b.Cluster.Spec.NetworkID)
+	t.ID = fi.String(b.Cluster.Spec.NetworkID)
 }
 
 if b.Cluster.Spec.NetworkCIDR != "" {
-	t.CIDR = s(b.Cluster.Spec.NetworkCIDR)
+	t.CIDR = fi.String(b.Cluster.Spec.NetworkCIDR)
 }
 
 c.AddTask(t)
@@ -90,11 +90,11 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 if !sharedVPC {
 	for _, cidr := range b.Cluster.Spec.AdditionalNetworkCIDRs {
 		c.AddTask(&awstasks.VPCCIDRBlock{
-			Name: s(cidr),
+			Name: fi.String(cidr),
 			Lifecycle: b.Lifecycle,
 			VPC: b.LinkToVPC(),
 			Shared: fi.Bool(sharedVPC),
-			CIDRBlock: s(cidr),
+			CIDRBlock: fi.String(cidr),
 		})
 	}
 }
@@ -102,22 +102,22 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 // TODO: would be good to create these as shared, to verify them
 if !sharedVPC {
 	dhcp := &awstasks.DHCPOptions{
-		Name: s(b.ClusterName()),
+		Name: fi.String(b.ClusterName()),
 		Lifecycle: b.Lifecycle,
-		DomainNameServers: s("AmazonProvidedDNS"),
+		DomainNameServers: fi.String("AmazonProvidedDNS"),
 
 		Tags: tags,
 		Shared: fi.Bool(sharedVPC),
 	}
 	if b.Region == "us-east-1" {
-		dhcp.DomainName = s("ec2.internal")
+		dhcp.DomainName = fi.String("ec2.internal")
 	} else {
-		dhcp.DomainName = s(b.Region + ".compute.internal")
+		dhcp.DomainName = fi.String(b.Region + ".compute.internal")
 	}
 	c.AddTask(dhcp)
 
 	c.AddTask(&awstasks.VPCDHCPOptionsAssociation{
-		Name: s(b.ClusterName()),
+		Name: fi.String(b.ClusterName()),
 		Lifecycle: b.Lifecycle,
 		VPC: b.LinkToVPC(),
 		DHCPOptions: dhcp,
@@ -150,7 +150,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 if !allSubnetsUnmanaged {
 	// The internet gateway is the main entry point to the cluster.
 	igw := &awstasks.InternetGateway{
-		Name: s(b.ClusterName()),
+		Name: fi.String(b.ClusterName()),
 		Lifecycle: b.Lifecycle,
 		VPC: b.LinkToVPC(),
 		Shared: fi.Bool(sharedVPC),
@@ -166,7 +166,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 routeTableTags := b.CloudTags(vpcName, sharedRouteTable)
 routeTableTags[awsup.TagNameKopsRole] = "public"
 publicRouteTable = &awstasks.RouteTable{
-	Name: s(b.ClusterName()),
+	Name: fi.String(b.ClusterName()),
 	Lifecycle: b.Lifecycle,
 
 	VPC: b.LinkToVPC(),
@@ -178,9 +178,9 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 
 // TODO: Validate when allSubnetsShared
 c.AddTask(&awstasks.Route{
-	Name: s("0.0.0.0/0"),
+	Name: fi.String("0.0.0.0/0"),
 	Lifecycle: b.Lifecycle,
-	CIDR: s("0.0.0.0/0"),
+	CIDR: fi.String("0.0.0.0/0"),
 	RouteTable: publicRouteTable,
 	InternetGateway: igw,
 })
@@ -216,18 +216,18 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 }
 
 subnet := &awstasks.Subnet{
-	Name: s(subnetName),
-	ShortName: s(subnetSpec.Name),
+	Name: fi.String(subnetName),
+	ShortName: fi.String(subnetSpec.Name),
 	Lifecycle: b.Lifecycle,
 	VPC: b.LinkToVPC(),
-	AvailabilityZone: s(subnetSpec.Zone),
-	CIDR: s(subnetSpec.CIDR),
+	AvailabilityZone: fi.String(subnetSpec.Zone),
+	CIDR: fi.String(subnetSpec.CIDR),
 	Shared: fi.Bool(sharedSubnet),
 	Tags: tags,
 }
 
 if subnetSpec.ProviderID != "" {
-	subnet.ID = s(subnetSpec.ProviderID)
+	subnet.ID = fi.String(subnetSpec.ProviderID)
 }
 c.AddTask(subnet)
 
@@ -235,7 +235,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 case kops.SubnetTypePublic, kops.SubnetTypeUtility:
 	if !sharedSubnet && !isUnmanaged(subnetSpec) {
 		c.AddTask(&awstasks.RouteTableAssociation{
-			Name: s(subnetSpec.Name + "." + b.ClusterName()),
+			Name: fi.String(subnetSpec.Name + "." + b.ClusterName()),
 			Lifecycle: b.Lifecycle,
 			RouteTable: publicRouteTable,
 			Subnet: subnet,
@@ -250,7 +250,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 //
 // Map the Private subnet to the Private route table
 c.AddTask(&awstasks.RouteTableAssociation{
-	Name: s("private-" + subnetSpec.Name + "." + b.ClusterName()),
+	Name: fi.String("private-" + subnetSpec.Name + "." + b.ClusterName()),
 	Lifecycle: b.Lifecycle,
 	RouteTable: b.LinkToPrivateRouteTableInZone(subnetSpec.Zone),
 	Subnet: subnet,
@@ -309,10 +309,10 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 if strings.HasPrefix(egress, "nat-") {
 
 	ngw = &awstasks.NatGateway{
-		Name: s(zone + "." + b.ClusterName()),
+		Name: fi.String(zone + "." + b.ClusterName()),
 		Lifecycle: b.Lifecycle,
 		Subnet: utilitySubnet,
-		ID: s(egress),
+		ID: fi.String(egress),
 		AssociatedRouteTable: b.LinkToPrivateRouteTableInZone(zone),
 		// If we're here, it means this NatGateway was specified, so we are Shared
 		Shared: fi.Bool(true),
@@ -324,8 +324,8 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 } else if strings.HasPrefix(egress, "eipalloc-") {
 
 	eip := &awstasks.ElasticIP{
-		Name: s(zone + "." + b.ClusterName()),
-		ID: s(egress),
+		Name: fi.String(zone + "." + b.ClusterName()),
+		ID: fi.String(egress),
 		Lifecycle: b.Lifecycle,
 		AssociatedNatGatewayRouteTable: b.LinkToPrivateRouteTableInZone(zone),
 		Shared: fi.Bool(true),
@@ -334,7 +334,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 c.AddTask(eip)
 
 ngw = &awstasks.NatGateway{
-	Name: s(zone + "." + b.ClusterName()),
+	Name: fi.String(zone + "." + b.ClusterName()),
 	Lifecycle: b.Lifecycle,
 	Subnet: utilitySubnet,
 	ElasticIP: eip,
@@ -346,9 +346,9 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 } else if strings.HasPrefix(egress, "i-") {
 
 	in = &awstasks.Instance{
-		Name: s(egress),
+		Name: fi.String(egress),
 		Lifecycle: b.Lifecycle,
-		ID: s(egress),
+		ID: fi.String(egress),
 		Shared: fi.Bool(true),
 		Tags: nil, // We don't need to add tags here
 	}
@@ -368,13 +368,13 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 // subnet needs a NGW, lets create it. We tie it to a subnet
 // so we can track it in AWS
 eip := &awstasks.ElasticIP{
-	Name: s(zone + "." + b.ClusterName()),
+	Name: fi.String(zone + "." + b.ClusterName()),
 	Lifecycle: b.Lifecycle,
 	AssociatedNatGatewayRouteTable: b.LinkToPrivateRouteTableInZone(zone),
 }
 
 if publicIP != "" {
-	eip.PublicIP = s(publicIP)
+	eip.PublicIP = fi.String(publicIP)
 	eip.Tags = b.CloudTags(*eip.Name, true)
 } else {
 	eip.Tags = b.CloudTags(*eip.Name, false)
@@ -391,7 +391,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 
 //var ngw = &awstasks.NatGateway{}
 ngw = &awstasks.NatGateway{
-	Name: s(zone + "." + b.ClusterName()),
+	Name: fi.String(zone + "." + b.ClusterName()),
 	Lifecycle: b.Lifecycle,
 	Subnet: utilitySubnet,
 	ElasticIP: eip,
@@ -410,7 +410,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 routeTableTags := b.CloudTags(b.NamePrivateRouteTableInZone(zone), routeTableShared)
 routeTableTags[awsup.TagNameKopsRole] = "private-" + zone
 rt := &awstasks.RouteTable{
-	Name: s(b.NamePrivateRouteTableInZone(zone)),
+	Name: fi.String(b.NamePrivateRouteTableInZone(zone)),
 	VPC: b.LinkToVPC(),
 	Lifecycle: b.Lifecycle,
 
@@ -427,9 +427,9 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 if in != nil {
 
 	r = &awstasks.Route{
-		Name: s("private-" + zone + "-0.0.0.0/0"),
+		Name: fi.String("private-" + zone + "-0.0.0.0/0"),
 		Lifecycle: b.Lifecycle,
-		CIDR: s("0.0.0.0/0"),
+		CIDR: fi.String("0.0.0.0/0"),
 		RouteTable: rt,
 		Instance: in,
 	}
@@ -437,9 +437,9 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
 } else {

 	r = &awstasks.Route{
-		Name: s("private-" + zone + "-0.0.0.0/0"),
+		Name: fi.String("private-" + zone + "-0.0.0.0/0"),
 		Lifecycle: b.Lifecycle,
-		CIDR: s("0.0.0.0/0"),
+		CIDR: fi.String("0.0.0.0/0"),
 		RouteTable: rt,
 		// Only one of these will be not nil
 		NatGateway: ngw,
@@ -39,7 +39,7 @@ func (b *SSHKeyModelBuilder) Build(c *fi.ModelBuilderContext) error {
 	return err
 }
 t := &awstasks.SSHKey{
-	Name: s(name),
+	Name: fi.String(name),
 	Lifecycle: b.Lifecycle,
 	Tags: b.CloudTags(b.ClusterName(), false),
 	Shared: fi.StringValue(b.Cluster.Spec.SSHKeyName) != "",