mirror of https://github.com/kubernetes/kops.git
Merge pull request #10157 from rifelpet/acm-nlb
Setup a second NLB listener when an AWS ACM certificate is used
Commit e43efbe102
@ -217,6 +217,12 @@ func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *fie
		if featureflag.Spotinst.Enabled() && spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork {
			allErrs = append(allErrs, field.Forbidden(fieldPath, "cannot use NLB together with spotinst"))
		}
		if spec.API.LoadBalancer.SSLCertificate != "" && spec.API.LoadBalancer.Class != kops.LoadBalancerClassNetwork && c.IsKubernetesGTE("1.19") {
			allErrs = append(allErrs, field.Forbidden(fieldPath, "sslCertificate requires network loadbalancer for K8s 1.19+ see https://github.com/kubernetes/kops/blob/master/permalinks/acm_nlb.md"))
		}
		if spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork && spec.API.LoadBalancer.UseForInternalApi && spec.API.LoadBalancer.Type == kops.LoadBalancerTypeInternal {
			allErrs = append(allErrs, field.Forbidden(fieldPath, "useForInternalApi cannot be used with internal NLB due lack of hairpinning support"))
		}
	}

	return allErrs
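Taken together, the three new checks only allow spec.api.loadBalancer.sslCertificate when the load balancer class is Network (and never together with Spotinst, or with useForInternalApi on an internal NLB). A minimal sketch of a spec the new validation accepts, using the field names from the hunk above; the import path is an assumption, not taken from the PR:

package main

import (
	"fmt"

	"k8s.io/kops/pkg/apis/kops" // assumed import path for the kops API types
)

func main() {
	// On k8s 1.19+, a certificate on the API load balancer now requires the Network class.
	spec := &kops.ClusterSpec{
		API: &kops.AccessSpec{
			LoadBalancer: &kops.LoadBalancerAccessSpec{
				Class:          kops.LoadBalancerClassNetwork,
				SSLCertificate: "arn:aws:acm:us-test-1:000000000000:certificate/123456789012-1234-1234-1234-12345678",
			},
		},
	}
	fmt.Println(spec.API.LoadBalancer.Class, spec.API.LoadBalancer.SSLCertificate)
}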
@ -99,11 +99,17 @@ func BuildKubecfg(cluster *kops.Cluster, keyStore fi.Keystore, secretStore fi.Se
	b := NewKubeconfigBuilder()

	// Use the secondary load balancer port if a certificate is on the primary listener
	if admin != 0 && cluster.Spec.API != nil && cluster.Spec.API.LoadBalancer != nil && cluster.Spec.API.LoadBalancer.SSLCertificate != "" && cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork {
		server = server + ":8443"
	}

	b.Context = clusterName
	b.Server = server

	// add the CA Cert to the kubeconfig only if we didn't specify a SSL cert for the LB or are targeting the internal DNS name
	if cluster.Spec.API == nil || cluster.Spec.API.LoadBalancer == nil || cluster.Spec.API.LoadBalancer.SSLCertificate == "" || internal {
	// add the CA Cert to the kubeconfig only if we didn't specify a certificate for the LB
	// or if we're using admin credentials and the secondary port
	if cluster.Spec.API == nil || cluster.Spec.API.LoadBalancer == nil || cluster.Spec.API.LoadBalancer.SSLCertificate == "" || cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork || internal {
		cert, _, _, err := keyStore.FindKeypair(fi.CertificateIDCA)
		if err != nil {
			return nil, fmt.Errorf("error fetching CA keypair: %v", err)
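The two conditions above decide the kubeconfig endpoint and CA handling: admin credentials against an NLB carrying an ACM certificate are pointed at the secondary listener on 8443 (where client certificates still work), and the cluster CA stays embedded unless the certificate is served by a non-NLB load balancer. A hypothetical helper restating that logic; the function, its name, and the time.Duration admin parameter are illustrative assumptions, not part of the PR:

package kubecfgsketch // illustrative package, not from the PR

import (
	"time"

	"k8s.io/kops/pkg/apis/kops" // assumed import path
)

// apiEndpoint mirrors the two conditions above: which host:port the kubeconfig
// should target and whether the cluster CA certificate should be embedded.
func apiEndpoint(server string, admin time.Duration, lb *kops.LoadBalancerAccessSpec, internal bool) (string, bool) {
	if admin != 0 && lb != nil && lb.SSLCertificate != "" && lb.Class == kops.LoadBalancerClassNetwork {
		// Client certificates are only usable on the secondary, non-ACM listener.
		server = server + ":8443"
	}
	includeCA := lb == nil || lb.SSLCertificate == "" || lb.Class == kops.LoadBalancerClassNetwork || internal
	return server, includeCA
}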
@ -71,10 +71,20 @@ func (f fakeKeyStore) MirrorTo(basedir vfs.Path) error {
}

// build a generic minimal cluster
func buildMinimalCluster(clusterName string, masterPublicName string) *kops.Cluster {
func buildMinimalCluster(clusterName string, masterPublicName string, lbCert bool, nlb bool) *kops.Cluster {
	cluster := testutils.BuildMinimalCluster(clusterName)
	cluster.Spec.MasterPublicName = masterPublicName
	cluster.Spec.MasterInternalName = fmt.Sprintf("internal.%v", masterPublicName)
	cluster.Spec.KubernetesVersion = "1.19.3"
	cluster.Spec.API = &kops.AccessSpec{
		LoadBalancer: &kops.LoadBalancerAccessSpec{},
	}
	if lbCert {
		cluster.Spec.API.LoadBalancer.SSLCertificate = "cert-arn"
	}
	if nlb {
		cluster.Spec.API.LoadBalancer.Class = kops.LoadBalancerClassNetwork
	}
	return cluster
}
@ -107,9 +117,12 @@ func TestBuildKubecfg(t *testing.T) {
		useKopsAuthenticationPlugin bool
	}

	publiccluster := buildMinimalCluster("testcluster", "testcluster.test.com")
	emptyMasterPublicNameCluster := buildMinimalCluster("emptyMasterPublicNameCluster", "")
	gossipCluster := buildMinimalCluster("testgossipcluster.k8s.local", "")
	publicCluster := buildMinimalCluster("testcluster", "testcluster.test.com", false, false)
	emptyMasterPublicNameCluster := buildMinimalCluster("emptyMasterPublicNameCluster", "", false, false)
	gossipCluster := buildMinimalCluster("testgossipcluster.k8s.local", "", false, false)
	certCluster := buildMinimalCluster("testcluster", "testcluster.test.com", true, false)
	certNLBCluster := buildMinimalCluster("testcluster", "testcluster.test.com", true, true)
	certGossipNLBCluster := buildMinimalCluster("testgossipcluster.k8s.local", "", true, true)

	tests := []struct {
		name string
@ -121,7 +134,7 @@ func TestBuildKubecfg(t *testing.T) {
		{
			name: "Test Kube Config Data For Public DNS with admin",
			args: args{
				cluster: publiccluster,
				cluster: publicCluster,
				status: fakeStatusStore{},
				admin: DefaultKubecfgAdminLifetime,
				user: "",
@ -134,10 +147,55 @@ func TestBuildKubecfg(t *testing.T) {
			},
			wantClientCert: true,
		},
		{
			name: "Test Kube Config Data For Public DNS with admin and secondary NLB port",
			args: args{
				cluster: certNLBCluster,
				status: fakeStatusStore{},
				admin: DefaultKubecfgAdminLifetime,
			},
			want: &KubeconfigBuilder{
				Context: "testcluster",
				Server: "https://testcluster.test.com:8443",
				CACert: []byte(certData),
				User: "testcluster",
			},
			wantClientCert: true,
		},
		{
			name: "Test Kube Config Data For Public DNS with admin and CLB ACM Certificate",
			args: args{
				cluster: certCluster,
				status: fakeStatusStore{},
				admin: DefaultKubecfgAdminLifetime,
			},
			want: &KubeconfigBuilder{
				Context: "testcluster",
				Server: "https://testcluster.test.com",
				CACert: nil,
				User: "testcluster",
			},
			wantClientCert: true,
		},
		{
			name: "Test Kube Config Data For Public DNS without admin and with ACM certificate",
			args: args{
				cluster: certNLBCluster,
				status: fakeStatusStore{},
				admin: 0,
			},
			want: &KubeconfigBuilder{
				Context: "testcluster",
				Server: "https://testcluster.test.com",
				CACert: []byte(certData),
				User: "testcluster",
			},
			wantClientCert: false,
		},
		{
			name: "Test Kube Config Data For Public DNS without admin",
			args: args{
				cluster: publiccluster,
				cluster: publicCluster,
				status: fakeStatusStore{},
				admin: 0,
				user: "myuser",
@ -191,7 +249,7 @@ func TestBuildKubecfg(t *testing.T) {
		{
			name: "Public DNS with kops auth plugin",
			args: args{
				cluster: publiccluster,
				cluster: publicCluster,
				status: fakeStatusStore{},
				admin: 0,
				useKopsAuthenticationPlugin: true,
@ -214,7 +272,7 @@ func TestBuildKubecfg(t *testing.T) {
		{
			name: "Test Kube Config Data For internal DNS name with admin",
			args: args{
				cluster: publiccluster,
				cluster: publicCluster,
				status: fakeStatusStore{},
				admin: DefaultKubecfgAdminLifetime,
				internal: true,
@ -227,6 +285,29 @@ func TestBuildKubecfg(t *testing.T) {
			},
			wantClientCert: true,
		},
		{
			name: "Test Kube Config Data For Gossip cluster with admin and secondary NLB port",
			args: args{
				cluster: certGossipNLBCluster,
				status: fakeStatusStore{
					GetApiIngressStatusFn: func(cluster *kops.Cluster) ([]kops.ApiIngressStatus, error) {
						return []kops.ApiIngressStatus{
							{
								Hostname: "nlbHostName",
							},
						}, nil
					},
				},
				admin: DefaultKubecfgAdminLifetime,
			},
			want: &KubeconfigBuilder{
				Context: "testgossipcluster.k8s.local",
				Server: "https://nlbHostName:8443",
				CACert: []byte(certData),
				User: "testgossipcluster.k8s.local",
			},
			wantClientCert: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
@ -109,14 +109,21 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
		"443": {InstancePort: 443},
	}

	nlbListenerPort := "443"
	nlbListeners := map[string]*awstasks.NetworkLoadBalancerListener{
		nlbListenerPort: {Port: 443},
	nlbListeners := []*awstasks.NetworkLoadBalancerListener{
		{
			Port: 443,
			TargetGroupName: b.NLBTargetGroupName("tcp"),
		},
	}

	if lbSpec.SSLCertificate != "" {
		listeners["443"].SSLCertificateID = lbSpec.SSLCertificate
		nlbListeners["443"].SSLCertificateID = lbSpec.SSLCertificate
		nlbListeners[0].Port = 8443
		nlbListeners = append(nlbListeners, &awstasks.NetworkLoadBalancerListener{
			Port: 443,
			TargetGroupName: b.NLBTargetGroupName("tls"),
			SSLCertificateID: lbSpec.SSLCertificate,
		})
	}

	if lbSpec.SecurityGroupOverride != nil {
@ -138,6 +145,7 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
			LoadBalancerName: fi.String(loadBalancerName),
			Subnets: elbSubnets,
			Listeners: nlbListeners,
			TargetGroups: make([]*awstasks.TargetGroup, 0),

			Tags: tags,
			VPC: b.LinkToVPC(),
@ -196,19 +204,18 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
		c.AddTask(clb)
	} else if b.APILoadBalancerClass() == kops.LoadBalancerClassNetwork {

		targetGroupPort := fi.Int64(443)
		targetGroupName := b.NLBTargetGroupName("api")
		tags := b.CloudTags(targetGroupName, false)
		tcpGroupName := b.NLBTargetGroupName("tcp")
		tcpGroupTags := b.CloudTags(tcpGroupName, false)

		// Override the returned name to be the expected NLB TG name
		tags["Name"] = targetGroupName
		tcpGroupTags["Name"] = tcpGroupName

		tg := &awstasks.TargetGroup{
			Name: fi.String(targetGroupName),
			Name: fi.String(tcpGroupName),
			VPC: b.LinkToVPC(),
			Tags: tags,
			Tags: tcpGroupTags,
			Protocol: fi.String("TCP"),
			Port: targetGroupPort,
			Port: fi.Int64(443),
			HealthyThreshold: fi.Int64(2),
			UnhealthyThreshold: fi.Int64(2),
			Shared: fi.Bool(false),
@ -216,8 +223,28 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
		c.AddTask(tg)

		nlb.TargetGroup = tg
		nlb.TargetGroups = append(nlb.TargetGroups, tg)

		if lbSpec.SSLCertificate != "" {
			tlsGroupName := b.NLBTargetGroupName("tls")
			tlsGroupTags := b.CloudTags(tlsGroupName, false)

			// Override the returned name to be the expected NLB TG name
			tlsGroupTags["Name"] = tlsGroupName
			secondaryTG := &awstasks.TargetGroup{
				Name: fi.String(tlsGroupName),
				VPC: b.LinkToVPC(),
				Tags: tlsGroupTags,
				Protocol: fi.String("TLS"),
				Port: fi.Int64(443),
				HealthyThreshold: fi.Int64(2),
				UnhealthyThreshold: fi.Int64(2),
				Shared: fi.Bool(false),
			}
			c.AddTask(secondaryTG)
			nlb.TargetGroups = append(nlb.TargetGroups, secondaryTG)
		}
		sort.Stable(awstasks.OrderTargetGroupsByName(nlb.TargetGroups))
		c.AddTask(nlb)
	}
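With an ACM certificate configured, the builder therefore ends up with two listeners and two target groups: TLS on 443 carrying the certificate, plain TCP on 8443 for client-certificate (admin) traffic, and tcp-/tls-prefixed target groups that both forward to instance port 443. A hypothetical, self-contained summary of that layout; the names are placeholders in the NLBTargetGroupName style, not literal values:

package main

import "fmt"

// listener and targetGroup are local illustration types, not the awstasks types.
type listener struct {
	Port     int
	Protocol string
}

type targetGroup struct {
	Name, Protocol string
	Port           int
}

func main() {
	listeners := []listener{
		{Port: 443, Protocol: "TLS"},  // carries the ACM certificate
		{Port: 8443, Protocol: "TCP"}, // plain passthrough so admin client certificates still work
	}
	targetGroups := []targetGroup{
		{Name: "tcp-<cluster>-<hash>", Protocol: "TCP", Port: 443},
		{Name: "tls-<cluster>-<hash>", Protocol: "TLS", Port: 443},
	}
	fmt.Println(listeners, targetGroups)
}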
@ -291,7 +318,6 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
		for _, masterGroup := range masterGroups {
			t := &awstasks.SecurityGroupRule{
				// TODO: figure out how to only add this to one SG (GetSecurityGroups above returns multiple)
				Name: fi.String(fmt.Sprintf("https-api-elb-%s", cidr)),
				Lifecycle: b.SecurityLifecycle,
				CIDR: fi.String(cidr),
@ -304,7 +330,6 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
			// Allow ICMP traffic required for PMTU discovery
			c.AddTask(&awstasks.SecurityGroupRule{
				// TODO: figure out how to only add this to one SG (GetSecurityGroups above returns multiple)
				Name: fi.String("icmp-pmtu-api-elb-" + cidr),
				Lifecycle: b.SecurityLifecycle,
				CIDR: fi.String(cidr),
@ -313,6 +338,19 @@ func (b *APILoadBalancerBuilder) Build(c *fi.ModelBuilderContext) error {
				SecurityGroup: masterGroup.Task,
				ToPort: fi.Int64(4),
			})

			if b.Cluster.Spec.API != nil && b.Cluster.Spec.API.LoadBalancer != nil && b.Cluster.Spec.API.LoadBalancer.SSLCertificate != "" {
				// Allow access to masters on secondary port through NLB
				c.AddTask(&awstasks.SecurityGroupRule{
					Name: fi.String(fmt.Sprintf("tcp-api-%s", cidr)),
					Lifecycle: b.SecurityLifecycle,
					CIDR: fi.String(cidr),
					FromPort: fi.Int64(8443),
					Protocol: fi.String("tcp"),
					SecurityGroup: masterGroup.Task,
					ToPort: fi.Int64(8443),
				})
			}
		}
	}
}
@ -366,13 +366,18 @@ func (b *AutoscalingGroupModelBuilder) buildAutoScalingGroupTask(c *fi.ModelBuil
	t.InstanceProtection = ig.Spec.InstanceProtection

	t.TargetGroups = []*awstasks.TargetGroup{}

	// When Spotinst Elastigroups are used, there is no need to create
	// a separate task for the attachment of the load balancer since this
	// is already done as part of the Elastigroup's creation, if needed.
	if !featureflag.Spotinst.Enabled() {
		if b.UseLoadBalancerForAPI() && ig.Spec.Role == kops.InstanceGroupRoleMaster {
			if b.UseNetworkLoadBalancer() {
				t.TargetGroups = append(t.TargetGroups, b.LinkToTargetGroup("api"))
				t.TargetGroups = append(t.TargetGroups, b.LinkToTargetGroup("tcp"))
				if b.Cluster.Spec.API.LoadBalancer.SSLCertificate != "" {
					t.TargetGroups = append(t.TargetGroups, b.LinkToTargetGroup("tls"))
				}
			} else {
				t.LoadBalancers = append(t.LoadBalancers, b.LinkToCLB("api"))
			}
@ -383,7 +388,7 @@ func (b *AutoscalingGroupModelBuilder) buildAutoScalingGroupTask(c *fi.ModelBuil
		}
	}

	for _, extLB := range ig.Spec.ExternalLoadBalancers {
	for i, extLB := range ig.Spec.ExternalLoadBalancers {
		if extLB.LoadBalancerName != nil {
			lb := &awstasks.ClassicLoadBalancer{
				Name: extLB.LoadBalancerName,
@ -396,7 +401,7 @@ func (b *AutoscalingGroupModelBuilder) buildAutoScalingGroupTask(c *fi.ModelBuil
		if extLB.TargetGroupARN != nil {
			tg := &awstasks.TargetGroup{
				Name: extLB.TargetGroupARN,
				Name: fi.String(fmt.Sprintf("external-tg-%d", i)),
				ARN: extLB.TargetGroupARN,
				Shared: fi.Bool(true),
			}
@ -300,6 +300,7 @@ func (b *KopsModelContext) GetSecurityGroups(role kops.InstanceGroupRole) ([]Sec
			"port=4002", // etcd events
			"port=4789", // VXLAN
			"port=179",  // Calico
			"port=8443", // k8s api secondary listener

			// TODO: UDP vs TCP
			// TODO: Protocol 4 for calico
@ -105,7 +105,7 @@ func (b *KopsModelContext) LinkToNLB(prefix string) *awstasks.NetworkLoadBalance
}

func (b *KopsModelContext) LinkToTargetGroup(prefix string) *awstasks.TargetGroup {
	name := b.NLBTargetGroupName(prefix) // TODO: this will need to change for the ACM cert bugfix since we'll have multiple TGs
	name := b.NLBTargetGroupName(prefix)
	return &awstasks.TargetGroup{Name: &name}
}
@ -86,7 +86,10 @@
        ],
        "TargetGroupARNs": [
          {
            "Ref": "AWSElasticLoadBalancingV2TargetGroupapicomplexexamplecomvd3t5n"
            "Ref": "AWSElasticLoadBalancingV2TargetGrouptcpcomplexexamplecomvpjolq"
          },
          {
            "Ref": "AWSElasticLoadBalancingV2TargetGrouptlscomplexexamplecom5nursn"
          }
        ]
      }
@ -850,6 +853,30 @@
          "CidrIpv6": "2001:0:85a3::/48"
        }
      },
      "AWSEC2SecurityGroupIngresstcpapi111024": {
        "Type": "AWS::EC2::SecurityGroupIngress",
        "Properties": {
          "GroupId": {
            "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom"
          },
          "FromPort": 8443,
          "ToPort": 8443,
          "IpProtocol": "tcp",
          "CidrIp": "1.1.1.0/24"
        }
      },
      "AWSEC2SecurityGroupIngresstcpapi20010850040": {
        "Type": "AWS::EC2::SecurityGroupIngress",
        "Properties": {
          "GroupId": {
            "Ref": "AWSEC2SecurityGroupmasterscomplexexamplecom"
          },
          "FromPort": 8443,
          "ToPort": 8443,
          "IpProtocol": "tcp",
          "CidrIpv6": "2001:0:8500::/40"
        }
      },
      "AWSEC2SecurityGroupapielbcomplexexamplecom": {
        "Type": "AWS::EC2::SecurityGroup",
        "Properties": {
@ -1148,11 +1175,16 @@
      "AWSElasticLoadBalancingV2Listenerapicomplexexamplecom443": {
        "Type": "AWS::ElasticLoadBalancingV2::Listener",
        "Properties": {
          "Certificates": [
            {
              "CertificateArn": "arn:aws:acm:us-test-1:000000000000:certificate/123456789012-1234-1234-1234-12345678"
            }
          ],
          "DefaultActions": [
            {
              "Type": "forward",
              "TargetGroupArn": {
                "Ref": "AWSElasticLoadBalancingV2TargetGroupapicomplexexamplecomvd3t5n"
                "Ref": "AWSElasticLoadBalancingV2TargetGrouptcpcomplexexamplecomvpjolq"
              }
            }
          ],
@ -1160,6 +1192,24 @@
            "Ref": "AWSElasticLoadBalancingV2LoadBalancerapicomplexexamplecom"
          },
          "Port": 443,
          "Protocol": "TLS"
        }
      },
      "AWSElasticLoadBalancingV2Listenerapicomplexexamplecom8443": {
        "Type": "AWS::ElasticLoadBalancingV2::Listener",
        "Properties": {
          "DefaultActions": [
            {
              "Type": "forward",
              "TargetGroupArn": {
                "Ref": "AWSElasticLoadBalancingV2TargetGrouptlscomplexexamplecom5nursn"
              }
            }
          ],
          "LoadBalancerArn": {
            "Ref": "AWSElasticLoadBalancingV2LoadBalancerapicomplexexamplecom"
          },
          "Port": 8443,
          "Protocol": "TCP"
        }
      },
@ -1198,10 +1248,10 @@
          ]
        }
      },
      "AWSElasticLoadBalancingV2TargetGroupapicomplexexamplecomvd3t5n": {
      "AWSElasticLoadBalancingV2TargetGrouptcpcomplexexamplecomvpjolq": {
        "Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
        "Properties": {
          "Name": "api-complex-example-com-vd3t5n",
          "Name": "tcp-complex-example-com-vpjolq",
          "Port": 443,
          "Protocol": "TCP",
          "VpcId": {
@ -1214,7 +1264,7 @@
          },
          {
            "Key": "Name",
            "Value": "api-complex-example-com-vd3t5n"
            "Value": "tcp-complex-example-com-vpjolq"
          },
          {
            "Key": "Owner",
@ -1234,6 +1284,42 @@
          "UnhealthyThresholdCount": 2
        }
      },
      "AWSElasticLoadBalancingV2TargetGrouptlscomplexexamplecom5nursn": {
        "Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
        "Properties": {
          "Name": "tls-complex-example-com-5nursn",
          "Port": 443,
          "Protocol": "TLS",
          "VpcId": {
            "Ref": "AWSEC2VPCcomplexexamplecom"
          },
          "Tags": [
            {
              "Key": "KubernetesCluster",
              "Value": "complex.example.com"
            },
            {
              "Key": "Name",
              "Value": "tls-complex-example-com-5nursn"
            },
            {
              "Key": "Owner",
              "Value": "John Doe"
            },
            {
              "Key": "foo/bar",
              "Value": "fib+baz"
            },
            {
              "Key": "kubernetes.io/cluster/complex.example.com",
              "Value": "owned"
            }
          ],
          "HealthCheckProtocol": "TLS",
          "HealthyThresholdCount": 2,
          "UnhealthyThresholdCount": 2
        }
      },
      "AWSIAMInstanceProfilemasterscomplexexamplecom": {
        "Type": "AWS::IAM::InstanceProfile",
        "Properties": {
@ -12,6 +12,7 @@ spec:
    - sg-exampleid4
    crossZoneLoadBalancing: true
    class: Network
    sslCertificate: arn:aws:acm:us-test-1:000000000000:certificate/123456789012-1234-1234-1234-12345678
  kubernetesApiAccess:
  - 1.1.1.0/24
  - 2001:0:8500::/40
@ -12,6 +12,7 @@ spec:
    - sg-exampleid4
    crossZoneLoadBalancing: true
    class: Network
    sslCertificate: arn:aws:acm:us-test-1:000000000000:certificate/123456789012-1234-1234-1234-12345678
  kubernetesApiAccess:
  - 1.1.1.0/24
  - 2001:0:8500::/40
@ -135,7 +135,7 @@ resource "aws_autoscaling_group" "master-us-test-1a-masters-complex-example-com"
    propagate_at_launch = true
    value = "owned"
  }
  target_group_arns = [aws_lb_target_group.api-complex-example-com-vd3t5n.id]
  target_group_arns = [aws_lb_target_group.tcp-complex-example-com-vpjolq.id, aws_lb_target_group.tls-complex-example-com-5nursn.id]
  vpc_zone_identifier = [aws_subnet.us-test-1a-complex-example-com.id]
}
@ -423,27 +423,57 @@ resource "aws_launch_template" "nodes-complex-example-com" {
}

resource "aws_lb_listener" "api-complex-example-com-443" {
  certificate_arn = "arn:aws:acm:us-test-1:000000000000:certificate/123456789012-1234-1234-1234-12345678"
  default_action {
    target_group_arn = aws_lb_target_group.api-complex-example-com-vd3t5n.id
    target_group_arn = aws_lb_target_group.tcp-complex-example-com-vpjolq.id
    type = "forward"
  }
  load_balancer_arn = aws_lb.api-complex-example-com.id
  port = 443
  protocol = "TLS"
}

resource "aws_lb_listener" "api-complex-example-com-8443" {
  default_action {
    target_group_arn = aws_lb_target_group.tls-complex-example-com-5nursn.id
    type = "forward"
  }
  load_balancer_arn = aws_lb.api-complex-example-com.id
  port = 8443
  protocol = "TCP"
}

resource "aws_lb_target_group" "api-complex-example-com-vd3t5n" {
resource "aws_lb_target_group" "tcp-complex-example-com-vpjolq" {
  health_check {
    healthy_threshold = 2
    protocol = "TCP"
    unhealthy_threshold = 2
  }
  name = "api-complex-example-com-vd3t5n"
  name = "tcp-complex-example-com-vpjolq"
  port = 443
  protocol = "TCP"
  tags = {
    "KubernetesCluster" = "complex.example.com"
    "Name" = "api-complex-example-com-vd3t5n"
    "Name" = "tcp-complex-example-com-vpjolq"
    "Owner" = "John Doe"
    "foo/bar" = "fib+baz"
    "kubernetes.io/cluster/complex.example.com" = "owned"
  }
  vpc_id = aws_vpc.complex-example-com.id
}

resource "aws_lb_target_group" "tls-complex-example-com-5nursn" {
  health_check {
    healthy_threshold = 2
    protocol = "TCP"
    unhealthy_threshold = 2
  }
  name = "tls-complex-example-com-5nursn"
  port = 443
  protocol = "TLS"
  tags = {
    "KubernetesCluster" = "complex.example.com"
    "Name" = "tls-complex-example-com-5nursn"
    "Owner" = "John Doe"
    "foo/bar" = "fib+baz"
    "kubernetes.io/cluster/complex.example.com" = "owned"
|
@ -716,6 +746,24 @@ resource "aws_security_group_rule" "ssh-external-to-node-2001_0_85a3__--48" {
|
|||
type = "ingress"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "tcp-api-1-1-1-0--24" {
|
||||
cidr_blocks = ["1.1.1.0/24"]
|
||||
from_port = 8443
|
||||
protocol = "tcp"
|
||||
security_group_id = aws_security_group.masters-complex-example-com.id
|
||||
to_port = 8443
|
||||
type = "ingress"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "tcp-api-2001_0_8500__--40" {
|
||||
cidr_blocks = ["2001:0:8500::/40"]
|
||||
from_port = 8443
|
||||
protocol = "tcp"
|
||||
security_group_id = aws_security_group.masters-complex-example-com.id
|
||||
to_port = 8443
|
||||
type = "ingress"
|
||||
}
|
||||
|
||||
resource "aws_security_group" "api-elb-complex-example-com" {
|
||||
description = "Security group for api ELB"
|
||||
name = "api-elb.complex.example.com"
|
||||
|
|
|
@ -162,13 +162,13 @@ func (e *AutoscalingGroup) Find(c *fi.Context) (*AutoscalingGroup, error) {
		}
	}

	actual.TargetGroups = []*TargetGroup{}
	if len(g.TargetGroupARNs) > 0 {
		targetGroups := make([]*TargetGroup, 0)
		actualTGs := make([]*TargetGroup, 0)
		for _, tg := range g.TargetGroupARNs {
			targetGroups = append(actual.TargetGroups, &TargetGroup{ARN: aws.String(*tg)})
			actualTGs = append(actualTGs, &TargetGroup{ARN: aws.String(*tg)})
		}

		targetGroups, err := ReconcileTargetGroups(c.Cloud.(awsup.AWSCloud), targetGroups, e.TargetGroups)
		targetGroups, err := ReconcileTargetGroups(c.Cloud.(awsup.AWSCloud), actualTGs, e.TargetGroups)
		if err != nil {
			return nil, err
		}
@ -645,7 +645,7 @@ func (v *AutoscalingGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Autos
	}
	if detachTGRequest != nil {
		if _, err := t.Cloud.Autoscaling().DetachLoadBalancerTargetGroups(detachTGRequest); err != nil {
			return fmt.Errorf("error attaching TargetGroups: %v", err)
			return fmt.Errorf("error detaching TargetGroups: %v", err)
		}
	}
	if attachTGRequest != nil {
@ -55,7 +55,7 @@ type NetworkLoadBalancer struct {
	Subnets []*Subnet

	Listeners map[string]*NetworkLoadBalancerListener
	Listeners []*NetworkLoadBalancerListener

	Scheme *string
@ -66,8 +66,8 @@ type NetworkLoadBalancer struct {
	Type *string

	VPC *VPC
	TargetGroup *TargetGroup
	VPC *VPC
	TargetGroups []*TargetGroup
}

var _ fi.CompareWithID = &NetworkLoadBalancer{}
@ -78,15 +78,25 @@ func (e *NetworkLoadBalancer) CompareWithID() *string {
type NetworkLoadBalancerListener struct {
	Port int
	TargetGroupName string
	SSLCertificateID string
}

func (e *NetworkLoadBalancerListener) mapToAWS(targetGroupArn string, loadBalancerArn string) *elbv2.CreateListenerInput {
func (e *NetworkLoadBalancerListener) mapToAWS(targetGroups []*TargetGroup, loadBalancerArn string) (*elbv2.CreateListenerInput, error) {
	var tgARN string
	for _, tg := range targetGroups {
		if fi.StringValue(tg.Name) == e.TargetGroupName {
			tgARN = fi.StringValue(tg.ARN)
		}
	}
	if tgARN == "" {
		return nil, fmt.Errorf("target group not found for NLB listener %+v", e)
	}

	l := &elbv2.CreateListenerInput{
		DefaultActions: []*elbv2.Action{
			{
				TargetGroupArn: aws.String(targetGroupArn),
				TargetGroupArn: aws.String(tgARN),
				Type: aws.String(elbv2.ActionTypeEnumForward),
			},
		},
@ -104,7 +114,7 @@ func (e *NetworkLoadBalancerListener) mapToAWS(targetGroupArn string, loadBalanc
		l.Protocol = aws.String(elbv2.ProtocolEnumTcp)
	}

	return l
	return l, nil
}

var _ fi.HasDependencies = &NetworkLoadBalancerListener{}
@ -113,6 +123,15 @@ func (e *NetworkLoadBalancerListener) GetDependencies(tasks map[string]fi.Task)
	return nil
}

// OrderListenersByPort implements sort.Interface for []OrderListenersByPort, based on port number
type OrderListenersByPort []*NetworkLoadBalancerListener

func (a OrderListenersByPort) Len() int      { return len(a) }
func (a OrderListenersByPort) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a OrderListenersByPort) Less(i, j int) bool {
	return a[i].Port < a[j].Port
}

//The load balancer name 'api.renamenlbcluster.k8s.local' can only contain characters that are alphanumeric characters and hyphens(-)\n\tstatus code: 400,
func findNetworkLoadBalancerByLoadBalancerName(cloud awsup.AWSCloud, loadBalancerName string) (*elbv2.LoadBalancer, error) {
	request := &elbv2.DescribeLoadBalancersInput{
@ -343,36 +362,44 @@ func (e *NetworkLoadBalancer) Find(c *fi.Context) (*NetworkLoadBalancer, error)
		return nil, fmt.Errorf("error querying for NLB listeners :%v", err)
	}

	actual.Listeners = make(map[string]*NetworkLoadBalancerListener)
	actual.Listeners = []*NetworkLoadBalancerListener{}
	actual.TargetGroups = []*TargetGroup{}
	for _, l := range response.Listeners {
		loadBalancerPort := strconv.FormatInt(aws.Int64Value(l.Port), 10)

		actualListener := &NetworkLoadBalancerListener{}
		actualListener.Port = int(aws.Int64Value(l.Port))
		if len(l.Certificates) != 0 {
			actualListener.SSLCertificateID = aws.StringValue(l.Certificates[0].CertificateArn) // What if there is more then one certificate, can we just grab the default certificate? we don't set it as default, we only set the one.
		}
		actual.Listeners[loadBalancerPort] = actualListener

		// This will need to be rearranged when we recognized multiple listeners and target groups per NLB
		if len(l.DefaultActions) > 0 {
			targetGroupARN := l.DefaultActions[0].TargetGroupArn
			if targetGroupARN != nil {
				actual.TargetGroup = &TargetGroup{ARN: targetGroupARN}
				actual.TargetGroups = append(actual.TargetGroups, &TargetGroup{ARN: targetGroupARN})

				cloud := c.Cloud.(awsup.AWSCloud)
				descResp, err := cloud.ELBV2().DescribeTargetGroups(&elbv2.DescribeTargetGroupsInput{
					TargetGroupArns: []*string{targetGroupARN},
				})
				if err != nil {
					return nil, fmt.Errorf("error querying for NLB listener target groups: %v", err)
				}
				if len(descResp.TargetGroups) != 1 {
					return nil, fmt.Errorf("unexpected DescribeTargetGroups response: %v", descResp)
				}
				actualListener.TargetGroupName = aws.StringValue(descResp.TargetGroups[0].TargetGroupName)
			}
		}
		actual.Listeners = append(actual.Listeners, actualListener)
	}
	// This will be cleaned up once the NLB task supports multiple target groups
	if actual.TargetGroup != nil {
		actualTGs := []*TargetGroup{actual.TargetGroup}
		expectedTGs := []*TargetGroup{e.TargetGroup}
		targetGroups, err := ReconcileTargetGroups(c.Cloud.(awsup.AWSCloud), actualTGs, expectedTGs)
	if len(actual.TargetGroups) > 0 {
		targetGroups, err := ReconcileTargetGroups(c.Cloud.(awsup.AWSCloud), actual.TargetGroups, e.TargetGroups)
		if err != nil {
			return nil, err
		}
		actual.TargetGroup = targetGroups[0]
		actual.TargetGroups = targetGroups
	}
	sort.Stable(OrderTargetGroupsByName(actual.TargetGroups))

	}
@ -465,6 +492,8 @@ func (e *NetworkLoadBalancer) Run(c *fi.Context) error {
func (e *NetworkLoadBalancer) Normalize() {
	// We need to sort our arrays consistently, so we don't get spurious changes
	sort.Stable(OrderSubnetsById(e.Subnets))
	sort.Stable(OrderListenersByPort(e.Listeners))
	sort.Stable(OrderTargetGroupsByName(e.TargetGroups))
}

func (s *NetworkLoadBalancer) CheckChanges(a, e, changes *NetworkLoadBalancer) error {
@ -493,12 +522,18 @@ func (_ *NetworkLoadBalancer) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Ne
	var loadBalancerName string
	var loadBalancerArn string

	if len(e.Listeners) != len(e.TargetGroups) {
		return fmt.Errorf("nlb listeners and target groups do not match: %v listeners vs %v target groups", len(e.Listeners), len(e.TargetGroups))
	}

	if a == nil {
		if e.LoadBalancerName == nil {
			return fi.RequiredField("LoadBalancerName")
		}
		if e.TargetGroup.ARN == nil {
			return fmt.Errorf("missing required target group ARN for NLB creation %v", e.TargetGroup)
		for _, tg := range e.TargetGroups {
			if tg.ARN == nil {
				return fmt.Errorf("missing required target group ARN for NLB creation %v", tg)
			}
		}

		loadBalancerName = *e.LoadBalancerName
@ -533,10 +568,13 @@ func (_ *NetworkLoadBalancer) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Ne
		{
			for _, listener := range e.Listeners {
				createListenerInput := listener.mapToAWS(*e.TargetGroup.ARN, loadBalancerArn)
				createListenerInput, err := listener.mapToAWS(e.TargetGroups, loadBalancerArn)
				if err != nil {
					return err
				}

				klog.V(2).Infof("Creating Listener for NLB")
				_, err := t.Cloud.ELBV2().CreateListener(createListenerInput)
				klog.V(2).Infof("Creating Listener for NLB with port %v", listener.Port)
				_, err = t.Cloud.ELBV2().CreateListener(createListenerInput)
				if err != nil {
					return fmt.Errorf("error creating listener for NLB: %v", err)
				}
@ -606,10 +644,14 @@ func (_ *NetworkLoadBalancer) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Ne
		}

		for _, listener := range changes.Listeners {
			awsListener := listener.mapToAWS(*e.TargetGroup.ARN, loadBalancerArn)

			klog.V(2).Infof("Creating Listener for NLB")
			_, err := t.Cloud.ELBV2().CreateListener(awsListener)
			awsListener, err := listener.mapToAWS(e.TargetGroups, loadBalancerArn)
			if err != nil {
				return err
			}

			klog.V(2).Infof("Creating Listener for NLB with port %v", listener.Port)
			_, err = t.Cloud.ELBV2().CreateListener(awsListener)
			if err != nil {
				return fmt.Errorf("error creating NLB listener: %v", err)
			}
@ -674,18 +716,14 @@ func (_ *NetworkLoadBalancer) RenderTerraform(t *terraform.TerraformTarget, a, e
		return err
	}

	for portStr, listener := range e.Listeners {
		port, err := strconv.Atoi(portStr)
		if err != nil {
			return err
		}
	for i, listener := range e.Listeners {
		listenerTF := &terraformNetworkLoadBalancerListener{
			LoadBalancer: e.TerraformLink(),
			Port: int64(port),
			Port: int64(listener.Port),
			DefaultAction: []terraformNetworkLoadBalancerListenerAction{
				{
					Type: elbv2.ActionTypeEnumForward,
					TargetGroupARN: e.TargetGroup.TerraformLink(),
					TargetGroupARN: e.TargetGroups[i].TerraformLink(),
				},
			},
		}
@ -696,7 +734,7 @@ func (_ *NetworkLoadBalancer) RenderTerraform(t *terraform.TerraformTarget, a, e
			listenerTF.Protocol = elbv2.ProtocolEnumTcp
		}

		err = t.RenderResource("aws_lb_listener", fmt.Sprintf("%v-%v", *e.Name, portStr), listenerTF)
		err = t.RenderResource("aws_lb_listener", fmt.Sprintf("%v-%v", *e.Name, listener.Port), listenerTF)
		if err != nil {
			return err
		}
@ -721,11 +759,15 @@ type cloudformationNetworkLoadBalancer struct {
}

type cloudformationNetworkLoadBalancerListener struct {
	Certificates []string `json:"Certificates,omitempty"`
	DefaultActions []cloudformationNetworkLoadBalancerListenerAction `json:"DefaultActions"`
	LoadBalancerARN *cloudformation.Literal `json:"LoadBalancerArn"`
	Port int64 `json:"Port"`
	Protocol string `json:"Protocol"`
	Certificates []cloudformationNetworkLoadBalancerListenerCertificate `json:"Certificates,omitempty"`
	DefaultActions []cloudformationNetworkLoadBalancerListenerAction `json:"DefaultActions"`
	LoadBalancerARN *cloudformation.Literal `json:"LoadBalancerArn"`
	Port int64 `json:"Port"`
	Protocol string `json:"Protocol"`
}

type cloudformationNetworkLoadBalancerListenerCertificate struct {
	CertificateArn string `json:"CertificateArn"`
}

type cloudformationNetworkLoadBalancerListenerAction struct {
@ -753,29 +795,27 @@ func (_ *NetworkLoadBalancer) RenderCloudformation(t *cloudformation.Cloudformat
		return err
	}

	for portStr, listener := range e.Listeners {
		port, err := strconv.Atoi(portStr)
		if err != nil {
			return err
		}
	for i, listener := range e.Listeners {
		listenerCF := &cloudformationNetworkLoadBalancerListener{
			LoadBalancerARN: e.CloudformationLink(),
			Port: int64(port),
			Port: int64(listener.Port),
			DefaultActions: []cloudformationNetworkLoadBalancerListenerAction{
				{
					Type: elbv2.ActionTypeEnumForward,
					TargetGroupARN: e.TargetGroup.CloudformationLink(),
					TargetGroupARN: e.TargetGroups[i].CloudformationLink(),
				},
			},
		}
		if listener.SSLCertificateID != "" {
			listenerCF.Certificates = []string{listener.SSLCertificateID}
			listenerCF.Certificates = []cloudformationNetworkLoadBalancerListenerCertificate{
				{CertificateArn: listener.SSLCertificateID},
			}
			listenerCF.Protocol = elbv2.ProtocolEnumTls
		} else {
			listenerCF.Protocol = elbv2.ProtocolEnumTcp
		}

		err = t.RenderResource("AWS::ElasticLoadBalancingV2::Listener", fmt.Sprintf("%v-%v", *e.Name, portStr), listenerCF)
		err = t.RenderResource("AWS::ElasticLoadBalancingV2::Listener", fmt.Sprintf("%v-%v", *e.Name, listener.Port), listenerCF)
		if err != nil {
			return err
		}
@ -794,5 +834,4 @@ func (e *NetworkLoadBalancer) CloudformationAttrCanonicalHostedZoneNameID() *clo

func (e *NetworkLoadBalancer) CloudformationAttrDNSName() *cloudformation.Literal {
	return cloudformation.GetAtt("AWS::ElasticLoadBalancingV2::LoadBalancer", *e.Name, "DNSName")

}
@ -60,8 +60,7 @@ func (e *TargetGroup) Find(c *fi.Context) (*TargetGroup, error) {
	request := &elbv2.DescribeTargetGroupsInput{}
	if e.ARN != nil {
		request.TargetGroupArns = []*string{e.ARN}
	}
	if e.Name != nil {
	} else if e.Name != nil {
		request.Names = []*string{e.Name}
	}
@ -128,6 +127,9 @@ func FindTargetGroupByName(cloud awsup.AWSCloud, findName string) (*elbv2.Target
	resp, err := cloud.ELBV2().DescribeTargetGroups(request)

	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == elbv2.ErrCodeTargetGroupNotFoundException {
			return nil, nil
		}
		return nil, fmt.Errorf("error describing TargetGroups: %v", err)
	}
	if len(resp.TargetGroups) == 0 {
@ -192,6 +194,15 @@ func (_ *TargetGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *TargetGrou
	return nil
}

// OrderTargetGroupsByName implements sort.Interface for []*TargetGroup, based on name
type OrderTargetGroupsByName []*TargetGroup

func (a OrderTargetGroupsByName) Len() int      { return len(a) }
func (a OrderTargetGroupsByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a OrderTargetGroupsByName) Less(i, j int) bool {
	return fi.StringValue(a[i].Name) < fi.StringValue(a[j].Name)
}

// pkg/model/awsmodel doesn't know the ARN of the API TargetGroup tasks that it passes to the master ASGs,
// it only knows the ARN of external target groups passed through the InstanceGroupSpec.
// We lookup the ARN for TargetGroup tasks that don't have it set in order to attach the LB to the ASG.
@ -206,12 +217,12 @@ func (_ *TargetGroup) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *TargetGrou
// Because we don't know whether any given ARN attached to an ASG is an API TargetGroup task or not,
// we have to find the API TargetGroup task, lookup its ARN, then compare that to the list of attached TargetGroups.
func ReconcileTargetGroups(cloud awsup.AWSCloud, actual []*TargetGroup, expected []*TargetGroup) ([]*TargetGroup, error) {
	var apiTGTask *TargetGroup
	apiTGTasks := make([]*TargetGroup, 0)
	for _, tg := range expected {
		// All external TargetGroups have their Shared field set to true. The API TargetGroups do not.
		// Note that Shared is set by the kops model rather than AWS tags.
		if !fi.BoolValue(tg.Shared) {
			apiTGTask = tg
			apiTGTasks = append(apiTGTasks, tg)
		}
	}
@ -221,18 +232,19 @@ func ReconcileTargetGroups(cloud awsup.AWSCloud, actual []*TargetGroup, expected
		return reconciled, nil
	}

	var apiTG *elbv2.TargetGroup
	var err error
	if apiTGTask != nil {
		apiTG, err = FindTargetGroupByName(cloud, fi.StringValue(apiTGTask.Name))
	apiTGs := make(map[string]*TargetGroup)
	for _, task := range apiTGTasks {
		apiTG, err := FindTargetGroupByName(cloud, fi.StringValue(task.Name))
		if err != nil {
			return nil, err
		}
		if apiTG != nil {
			apiTGs[aws.StringValue(apiTG.TargetGroupArn)] = task
		}
	}
	for i := 0; i < len(actual); i++ {
		tg := actual[i]
		if apiTG != nil && aws.StringValue(apiTG.TargetGroupArn) == aws.StringValue(tg.ARN) {
			reconciled = append(reconciled, apiTGTask)
	for _, tg := range actual {
		if apiTask, ok := apiTGs[aws.StringValue(tg.ARN)]; ok {
			reconciled = append(reconciled, apiTask)
		} else {
			reconciled = append(reconciled, tg)
		}
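ReconcileTargetGroups now collects every non-shared (API) target-group task, resolves each task's ARN by name, and swaps matching attached ARNs for the task objects while leaving shared/external target groups untouched. A hypothetical, self-contained walk-through of that rule with placeholder names and ARNs (local types, not the awstasks ones):

package main

import "fmt"

type tg struct {
	Name, ARN string
	Shared    bool
}

func main() {
	// What the ASG is actually attached to (ARNs only, as returned by AWS).
	actual := []tg{
		{ARN: "arn:tcp-example"},
		{ARN: "arn:tls-example"},
		{ARN: "arn:external-1"},
	}
	// The ARN->task mapping the real code builds via FindTargetGroupByName.
	apiByARN := map[string]tg{
		"arn:tcp-example": {Name: "tcp-example", ARN: "arn:tcp-example"},
		"arn:tls-example": {Name: "tls-example", ARN: "arn:tls-example"},
	}
	var reconciled []tg
	for _, a := range actual {
		if task, ok := apiByARN[a.ARN]; ok {
			reconciled = append(reconciled, task) // API target group: use the kops task
		} else {
			reconciled = append(reconciled, a) // external target group: keep the attached ARN
		}
	}
	fmt.Println(reconciled)
}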
@ -275,7 +287,7 @@ func (_ *TargetGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, change
		HealthCheck: terraformTargetGroupHealthCheck{
			HealthyThreshold: *e.HealthyThreshold,
			UnhealthyThreshold: *e.UnhealthyThreshold,
			Protocol: *e.Protocol,
			Protocol: elbv2.ProtocolEnumTcp,
		},
	}