Fix behaviour for `kops export kubeconfig --internal`

Ciprian Hacman 2023-03-17 06:51:26 +02:00
parent 49fb1fabd3
commit 8f703f5509
2 changed files with 55 additions and 23 deletions

pkg/kubeconfig/create_kubecfg.go

@@ -45,7 +45,6 @@ func BuildKubecfg(ctx context.Context, cluster *kops.Cluster, keyStore fi.Keysto
 		} else {
 			server = "https://api." + clusterName
 		}
-	}
 
 	// If a load balancer exists we use it, except for when an SSL certificate is set.
 	// This should avoid a lot of pain with DNS pre-creation.
@@ -75,6 +74,7 @@ func BuildKubecfg(ctx context.Context, cluster *kops.Cluster, keyStore fi.Keysto
 				server = "https://" + targets[0]
 			}
 		}
+	}
 
 	b := NewKubeconfigBuilder()
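
The moved brace is the whole fix: before this change, the load-balancer override ran unconditionally, so a cluster fronted by a load balancer would get the external endpoint even when `--internal` was requested. Moving the closing `}` down scopes the override to the non-internal branch. A minimal sketch of the patched decision follows, with simplified names (`apiServerURL` and `lbTarget` are illustrative, not the real kOps code, and the SSL-certificate exception mentioned in the comment above is omitted):

package main

import "fmt"

// apiServerURL distills the patched control flow; it is not the real
// BuildKubecfg. lbTarget stands in for the load-balancer lookup, and the
// SSL-certificate special case from the real code is left out.
func apiServerURL(clusterName, lbTarget string, internal bool) string {
	if internal {
		// --internal always targets the internal API DNS name.
		return "https://api.internal." + clusterName
	}
	server := "https://api." + clusterName
	// The load-balancer override now lives inside the non-internal branch;
	// before the patch it ran unconditionally and clobbered the internal
	// address chosen above.
	if lbTarget != "" {
		server = "https://" + lbTarget
	}
	return server
}

func main() {
	fmt.Println(apiServerURL("testcluster", "lb.example.com", true))  // https://api.internal.testcluster
	fmt.Println(apiServerURL("testcluster", "lb.example.com", false)) // https://lb.example.com
}

With internal=true the sketch always returns https://api.internal.testcluster, which is exactly what the new test cases below expect.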

pkg/kubeconfig/create_kubecfg_test.go

@@ -343,6 +343,38 @@ func TestBuildKubecfg(t *testing.T) {
 			},
 			wantClientCert: true,
 		},
+		{
+			name: "Test Kube Config Data for Public cluster with admin and internal option",
+			args: args{
+				cluster:  publicCluster,
+				status:   fakeStatus,
+				admin:    DefaultKubecfgAdminLifetime,
+				internal: true,
+			},
+			want: &KubeconfigBuilder{
+				Context: "testcluster",
+				Server:  "https://api.internal.testcluster",
+				CACerts: []byte(nextCertificate + certData),
+				User:    "testcluster",
+			},
+			wantClientCert: true,
+		},
+		{
+			name: "Test Kube Config Data for Public cluster without admin and with internal option",
+			args: args{
+				cluster:  publicCluster,
+				status:   fakeStatus,
+				admin:    0,
+				internal: true,
+			},
+			want: &KubeconfigBuilder{
+				Context: "testcluster",
+				Server:  "https://api.internal.testcluster",
+				CACerts: []byte(nextCertificate + certData),
+				User:    "testcluster",
+			},
+			wantClientCert: false,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
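
Taken together, the two new table entries pin down the intended behaviour of `kops export kubeconfig --internal`: the server address is always `https://api.internal.<clusterName>`, regardless of any load balancer in front of the API, while `--admin` only determines whether a client certificate is issued (`wantClientCert` is true with an admin lifetime and false without one).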