mirror of https://github.com/kubernetes/kops.git
Merge pull request #15646 from johngmyers/prune-dead
Remove dead code for non-kops-controller bootstrap
commit bb4dbdce90
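The cleanup in the hunks below is mechanical: UseKopsControllerForNodeBootstrap had been reduced to an unconditional "return true", so every branch that only runs when it returns false is unreachable. The helper is deleted and its call sites collapse. A minimal, self-contained Go sketch of that pattern (illustrative names only, not the real kops types or signatures):

package main

import "fmt"

// useKopsControllerForNodeBootstrap mirrors the helper deleted by this commit:
// it returned true for every supported cloud provider, so any branch taken only
// when it is false is dead code.
func useKopsControllerForNodeBootstrap() bool {
    return true
}

// buildBootstrapClient sketches the call-site collapse seen below, e.g. in
// BootstrapClientBuilder.Build:
//
//  before: if isControlPlane || !useKopsControllerForNodeBootstrap() { return "skip" }
//  after:  if isControlPlane { return "skip" }
func buildBootstrapClient(isControlPlane bool) string {
    if isControlPlane {
        return "skip"
    }
    return "request client certificates from kops-controller"
}

func main() {
    fmt.Println(buildBootstrapClient(true))  // skip
    fmt.Println(buildBootstrapClient(false)) // request client certificates from kops-controller
}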
@@ -35,7 +35,6 @@ import (
 	"golang.org/x/crypto/ssh"

 	"k8s.io/kops/cmd/kops/util"
-	"k8s.io/kops/pkg/apis/kops/model"
 	"k8s.io/kops/pkg/diff"
 	"k8s.io/kops/pkg/featureflag"
 	"k8s.io/kops/pkg/model/iam"
@@ -1503,20 +1502,6 @@ func (i *integrationTest) setupCluster(t *testing.T, ctx context.Context, inputY
 			secondaryCertificate: "-----BEGIN CERTIFICATE-----\nMIIBfDCCASagAwIBAgIMFo+b23acX0hZEkbkMA0GCSqGSIb3DQEBCwUAMB8xHTAb\nBgNVBAMTFGV0Y2QtcGVlcnMtY2EtY2lsaXVtMB4XDTIxMDcwNTIwMjIzN1oXDTMx\nMDcwNTIwMjIzN1owHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1jaWxpdW0wXDAN\nBgkqhkiG9w0BAQEFAANLADBIAkEAw3T2pyEOgBPBKwofuILLokPxAFplVzdu540f\noREJ4iVqiroUlsz1G90mEwmqR+B7/0kt70ve9i5Z6E7Qz2nQaQIDAQABo0IwQDAO\nBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU0hyEvGir\n2ucsJrojyZaDBIb8JLAwDQYJKoZIhvcNAQELBQADQQA9vQylgkvgROIMspzOlbZr\nZwsTAzp9J2ZxZL06AQ9iWzpvIw/H3oClV63q6zN2aHtpBTkhUOSX3Q4L/X/0MOkj\n-----END CERTIFICATE-----",
 		})
 	}
-	if !model.UseKopsControllerForNodeBootstrap(cluster.Spec.GetCloudProvider()) {
-		storeKeyset(t, ctx, keyStore, "kubelet", &testingKeyset{
-			primaryKey: "-----BEGIN RSA PRIVATE KEY-----\nMIIBOgIBAAJBAM6BUO6Gjjskn8s87GdJB8QPpNTx949t5Z/GgQpLVCapj741c1//\nvyH6JPsyqFUVy+lsBXQHSdCz2awMhKd9x5kCAwEAAQJARozbj4Ic2Yvbo92+jlLe\n+la146J/B1tuVbXFpDS0HTi3W94fVfu6R7FR9um1te1hzBAr6I4RqXxBAvipzG9P\n4QIhAPUg1AV/uyzKxELhVNKysAqvz1oLx2NeAh3DewRQn2MNAiEA16n2q69vFDvd\nnoCi2jwfR9/VyuMjloJElRyG1hoqg70CIQDkH/QRVgkcq2uxDkFBgLgiifF/zJx3\n1mJDzsuqfVmH9QIgEP/2z8W+bcviRlJBhA5lMNc2FQ4eigiuu0pKXqolW8kCIBy/\n27C5grBlEqjw1taSKqoSnylUW6SL8N8UR0MJU5up\n-----END RSA PRIVATE KEY-----",
-			primaryCertificate: "-----BEGIN CERTIFICATE-----\nMIIBkzCCAT2gAwIBAgIMFpL6CzllQiBcgTbiMA0GCSqGSIb3DQEBCwUAMBgxFjAU\nBgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzE2MTk0MjIxWhcNMzEwNzE2MTk0\nMjIxWjApMRUwEwYDVQQKEwxzeXN0ZW06bm9kZXMxEDAOBgNVBAMTB2t1YmVsZXQw\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAzoFQ7oaOOySfyzzsZ0kHxA+k1PH3j23l\nn8aBCktUJqmPvjVzX/+/Ifok+zKoVRXL6WwFdAdJ0LPZrAyEp33HmQIDAQABo1Yw\nVDAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0TAQH/\nBAIwADAfBgNVHSMEGDAWgBTRt81Y03C5ScA7CePyvQ1eyqIVADANBgkqhkiG9w0B\nAQsFAANBAGOPYAM8wEDpRs4Sa+UxSRNM5xt2a0ctNqLxYbN0gsoTXY3vEFb06qLH\npgBJgBLXG8siOEhyEhsFiXSw4klQ/y8=\n-----END CERTIFICATE-----",
-			secondaryKey: "",
-			secondaryCertificate: "",
-		})
-		storeKeyset(t, ctx, keyStore, "kube-proxy", &testingKeyset{
-			primaryKey: "-----BEGIN RSA PRIVATE KEY-----\nMIIBOgIBAAJBAM7f0Zt5vDchamMg9TABxyAWGRVhWVmLqmfKr1rGvohWB/eVJmxZ\nCSNg6ShIDnDT2qJx5Aw05jjfDRJsrlCcAkMCAwEAAQJAeeRo5boBy14WCFiH/4Rc\npqw+lVlpwxhHDKbhUZRe+YbfobR7M35GoKJ5Zjtvh5V1eC1irGzSvUQg96snVCIv\nqQIhAPWGxfFedkYvddBHpp6pg/55AshVp8NPeYfV1olKc10FAiEA17Lzn7yyekzY\nr8tgm5zt6Hf9DfOPS+iCUwTpJzkhRKcCIAJUiyBlUx4LaUTWyUAMP9J0d5BLL9Js\nuKyPXP/kkv+5AiEApTYO/jmU5rH3gmafP3Gqk9VbwRTdnAGh2J65Sm6quZ8CIC4v\nqwjRQtwPYB4PPym2gTL4hjgWTj7bQEspm3A9eEs5\n-----END RSA PRIVATE KEY-----",
-			primaryCertificate: "-----BEGIN CERTIFICATE-----\nMIIBhjCCATCgAwIBAgIMFpL6CzlkDYhRlgqCMA0GCSqGSIb3DQEBCwUAMBgxFjAU\nBgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzE2MTk0MjIxWhcNMzEwNzE2MTk0\nMjIxWjAcMRowGAYDVQQDExFzeXN0ZW06a3ViZS1wcm94eTBcMA0GCSqGSIb3DQEB\nAQUAA0sAMEgCQQDO39Gbebw3IWpjIPUwAccgFhkVYVlZi6pnyq9axr6IVgf3lSZs\nWQkjYOkoSA5w09qiceQMNOY43w0SbK5QnAJDAgMBAAGjVjBUMA4GA1UdDwEB/wQE\nAwIHgDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB8GA1UdIwQY\nMBaAFNG3zVjTcLlJwDsJ4/K9DV7KohUAMA0GCSqGSIb3DQEBCwUAA0EANRng3dTL\nZYQLfeRolSiKFHrsDxfNL5sXbsNcJNkP9VNmxTGs3RyvNlzsaVQkXaBnlHYx0+nk\nGWXMq4Kke2ukxQ==\n-----END CERTIFICATE-----",
-			secondaryKey: "",
-			secondaryCertificate: "",
-		})
-	}

 	return factory
 }
@@ -45,7 +45,7 @@ type BootstrapClientBuilder struct {
 }

 func (b BootstrapClientBuilder) Build(c *fi.NodeupModelBuilderContext) error {
-	if b.IsMaster || !b.UseKopsControllerForNodeBootstrap() {
+	if b.IsMaster {
 		return nil
 	}

@@ -284,82 +284,27 @@ func (c *NodeupModelContext) GetBootstrapCert(name string, signer string) (cert,

 // BuildBootstrapKubeconfig generates a kubeconfig with a client certificate from either kops-controller or the state store.
 func (c *NodeupModelContext) BuildBootstrapKubeconfig(name string, ctx *fi.NodeupModelBuilderContext) (fi.Resource, error) {
-	if c.UseKopsControllerForNodeBootstrap() {
-		cert, key, err := c.GetBootstrapCert(name, fi.CertificateIDCA)
-		if err != nil {
-			return nil, err
-		}
-
-		kubeConfig := &nodetasks.KubeConfig{
-			Name: name,
-			Cert: cert,
-			Key: key,
-			CA: fi.NewStringResource(c.NodeupConfig.CAs[fi.CertificateIDCA]),
-		}
-		if c.HasAPIServer {
-			// @note: use https even for local connections, so we can turn off the insecure port
-			kubeConfig.ServerURL = "https://127.0.0.1"
-		} else {
-			kubeConfig.ServerURL = "https://" + c.APIInternalName()
-		}
-
-		ctx.EnsureTask(kubeConfig)
-
-		return kubeConfig.GetConfig(), nil
-	} else {
-		keyset, err := c.KeyStore.FindKeyset(ctx.Context(), name)
-		if err != nil {
-			return nil, fmt.Errorf("error fetching keyset %q from keystore: %w", name, err)
-		}
-		if keyset == nil {
-			return nil, fmt.Errorf("keyset %q not found", name)
-		}
-
-		keypairID := c.NodeupConfig.KeypairIDs[name]
-		if keypairID == "" {
-			return nil, fmt.Errorf("keypairID for %s missing from NodeupConfig", name)
-		}
-		item := keyset.Items[keypairID]
-		if item == nil {
-			return nil, fmt.Errorf("keypairID %s missing from %s keyset", keypairID, name)
-		}
-
-		cert, err := item.Certificate.AsBytes()
-		if err != nil {
-			return nil, err
-		}
-
-		key, err := item.PrivateKey.AsBytes()
-		if err != nil {
-			return nil, err
-		}
-
-		kubeConfig := &nodetasks.KubeConfig{
-			Name: name,
-			Cert: fi.NewBytesResource(cert),
-			Key: fi.NewBytesResource(key),
-			CA: fi.NewStringResource(c.NodeupConfig.CAs[fi.CertificateIDCA]),
-		}
-		if c.HasAPIServer {
-			// @note: use https even for local connections, so we can turn off the insecure port
-			// This code path is used for the kubelet cert in Kubernetes 1.18 and earlier.
-			kubeConfig.ServerURL = "https://127.0.0.1"
-		} else {
-			kubeConfig.ServerURL = "https://" + c.APIInternalName()
-		}
-
-		err = kubeConfig.Run(nil)
-		if err != nil {
-			return nil, err
-		}
-
-		config, err := fi.ResourceAsBytes(kubeConfig.GetConfig())
-		if err != nil {
-			return nil, err
-		}
-
-		return fi.NewBytesResource(config), nil
-	}
+	cert, key, err := c.GetBootstrapCert(name, fi.CertificateIDCA)
+	if err != nil {
+		return nil, err
+	}
+
+	kubeConfig := &nodetasks.KubeConfig{
+		Name: name,
+		Cert: cert,
+		Key: key,
+		CA: fi.NewStringResource(c.NodeupConfig.CAs[fi.CertificateIDCA]),
+	}
+	if c.HasAPIServer {
+		// @note: use https even for local connections, so we can turn off the insecure port
+		kubeConfig.ServerURL = "https://127.0.0.1"
+	} else {
+		kubeConfig.ServerURL = "https://" + c.APIInternalName()
+	}
+
+	ctx.EnsureTask(kubeConfig)
+
+	return kubeConfig.GetConfig(), nil
 }

 // RemapImage applies any needed remapping to an image reference.
@@ -392,11 +337,6 @@ func (c *NodeupModelContext) UseVolumeMounts() bool {
 	return len(c.NodeupConfig.VolumeMounts) > 0
 }

-// UseKopsControllerForNodeBootstrap checks if nodeup should use kops-controller to bootstrap.
-func (c *NodeupModelContext) UseKopsControllerForNodeBootstrap() bool {
-	return model.UseKopsControllerForNodeBootstrap(c.CloudProvider())
-}
-
 // UseChallengeCallback is true if we should use a callback challenge during node provisioning with kops-controller.
 func (c *NodeupModelContext) UseChallengeCallback(cloudProvider kops.CloudProviderID) bool {
 	return model.UseChallengeCallback(cloudProvider)
@@ -51,12 +51,10 @@ func (b *EtcHostsBuilder) Build(c *fi.NodeupModelBuilderContext) error {
 			Hostname: b.APIInternalName(),
 			Addresses: b.BootConfig.APIServerIPs,
 		})
-		if b.UseKopsControllerForNodeBootstrap() {
-			task.Records = append(task.Records, nodetasks.HostRecord{
-				Hostname: "kops-controller.internal." + b.NodeupConfig.ClusterName,
-				Addresses: b.BootConfig.APIServerIPs,
-			})
-		}
+		task.Records = append(task.Records, nodetasks.HostRecord{
+			Hostname: "kops-controller.internal." + b.NodeupConfig.ClusterName,
+			Addresses: b.BootConfig.APIServerIPs,
+		})
 	}

 	if len(task.Records) != 0 {
@@ -336,10 +336,8 @@ func (b *KubeletBuilder) buildSystemdEnvironmentFile(kubeletConfig *kops.Kubelet
 		}
 	}

-	if b.UseKopsControllerForNodeBootstrap() {
-		flags += " --tls-cert-file=" + b.PathSrvKubernetes() + "/kubelet-server.crt"
-		flags += " --tls-private-key-file=" + b.PathSrvKubernetes() + "/kubelet-server.key"
-	}
+	flags += " --tls-cert-file=" + b.PathSrvKubernetes() + "/kubelet-server.crt"
+	flags += " --tls-private-key-file=" + b.PathSrvKubernetes() + "/kubelet-server.key"

 	if b.IsIPv6Only() {
 		flags += " --node-ip=::"
@@ -688,51 +686,49 @@ func (b *KubeletBuilder) buildControlPlaneKubeletKubeconfig(c *fi.NodeupModelBui
 }

 func (b *KubeletBuilder) buildKubeletServingCertificate(c *fi.NodeupModelBuilderContext) error {
-	if b.UseKopsControllerForNodeBootstrap() {
-		name := "kubelet-server"
-		dir := b.PathSrvKubernetes()
-
-		names, err := b.kubeletNames()
-		if err != nil {
-			return err
-		}
-
-		if !b.HasAPIServer {
-			cert, key, err := b.GetBootstrapCert(name, fi.CertificateIDCA)
-			if err != nil {
-				return err
-			}
-
-			c.AddTask(&nodetasks.File{
-				Path: filepath.Join(dir, name+".crt"),
-				Contents: cert,
-				Type: nodetasks.FileType_File,
-				Mode: fi.PtrTo("0644"),
-				BeforeServices: []string{"kubelet.service"},
-			})
-
-			c.AddTask(&nodetasks.File{
-				Path: filepath.Join(dir, name+".key"),
-				Contents: key,
-				Type: nodetasks.FileType_File,
-				Mode: fi.PtrTo("0400"),
-				BeforeServices: []string{"kubelet.service"},
-			})
-
-		} else {
-			issueCert := &nodetasks.IssueCert{
-				Name: name,
-				Signer: fi.CertificateIDCA,
-				KeypairID: b.NodeupConfig.KeypairIDs[fi.CertificateIDCA],
-				Type: "server",
-				Subject: nodetasks.PKIXName{
-					CommonName: names[0],
-				},
-				AlternateNames: names,
-			}
-			c.AddTask(issueCert)
-			return issueCert.AddFileTasks(c, dir, name, "", nil)
-		}
-	}
+	name := "kubelet-server"
+	dir := b.PathSrvKubernetes()
+
+	names, err := b.kubeletNames()
+	if err != nil {
+		return err
+	}
+
+	if !b.HasAPIServer {
+		cert, key, err := b.GetBootstrapCert(name, fi.CertificateIDCA)
+		if err != nil {
+			return err
+		}
+
+		c.AddTask(&nodetasks.File{
+			Path: filepath.Join(dir, name+".crt"),
+			Contents: cert,
+			Type: nodetasks.FileType_File,
+			Mode: fi.PtrTo("0644"),
+			BeforeServices: []string{"kubelet.service"},
+		})
+
+		c.AddTask(&nodetasks.File{
+			Path: filepath.Join(dir, name+".key"),
+			Contents: key,
+			Type: nodetasks.FileType_File,
+			Mode: fi.PtrTo("0400"),
+			BeforeServices: []string{"kubelet.service"},
+		})
+
+	} else {
+		issueCert := &nodetasks.IssueCert{
+			Name: name,
+			Signer: fi.CertificateIDCA,
+			KeypairID: b.NodeupConfig.KeypairIDs[fi.CertificateIDCA],
+			Type: "server",
+			Subject: nodetasks.PKIXName{
+				CommonName: names[0],
+			},
+			AlternateNames: names,
+		}
+		c.AddTask(issueCert)
+		return issueCert.AddFileTasks(c, dir, name, "", nil)
+	}
 	return nil
 }
@@ -168,31 +168,27 @@ func (b *CiliumBuilder) buildCiliumEtcdSecrets(c *fi.NodeupModelBuilderContext)
 		c.AddTask(issueCert)
 		return issueCert.AddFileTasks(c, dir, name, "", nil)
 	} else {
-		if b.UseKopsControllerForNodeBootstrap() {
-			cert, key, err := b.GetBootstrapCert(name, signer)
-			if err != nil {
-				return err
-			}
-
-			c.AddTask(&nodetasks.File{
-				Path: filepath.Join(dir, name+".crt"),
-				Contents: cert,
-				Type: nodetasks.FileType_File,
-				Mode: fi.PtrTo("0644"),
-				BeforeServices: []string{"kubelet.service"},
-			})
-
-			c.AddTask(&nodetasks.File{
-				Path: filepath.Join(dir, name+".key"),
-				Contents: key,
-				Type: nodetasks.FileType_File,
-				Mode: fi.PtrTo("0400"),
-				BeforeServices: []string{"kubelet.service"},
-			})
-
-			return nil
-		} else {
-			return b.BuildCertificatePairTask(c, name, dir, name, nil, []string{"kubelet.service"})
-		}
+		cert, key, err := b.GetBootstrapCert(name, signer)
+		if err != nil {
+			return err
+		}
+
+		c.AddTask(&nodetasks.File{
+			Path: filepath.Join(dir, name+".crt"),
+			Contents: cert,
+			Type: nodetasks.FileType_File,
+			Mode: fi.PtrTo("0644"),
+			BeforeServices: []string{"kubelet.service"},
+		})
+
+		c.AddTask(&nodetasks.File{
+			Path: filepath.Join(dir, name+".key"),
+			Contents: key,
+			Type: nodetasks.FileType_File,
+			Mode: fi.PtrTo("0400"),
+			BeforeServices: []string{"kubelet.service"},
+		})
+
+		return nil
 	}
 }
@@ -22,11 +22,6 @@ import (
 	"k8s.io/kops/pkg/apis/kops/util"
 )

-// UseKopsControllerForNodeBootstrap is true if nodeup should use kops-controller for bootstrapping.
-func UseKopsControllerForNodeBootstrap(cloudProvider kops.CloudProviderID) bool {
-	return true
-}
-
 // UseChallengeCallback is true if we should use a callback challenge during node provisioning with kops-controller.
 func UseChallengeCallback(cloudProvider kops.CloudProviderID) bool {
 	switch cloudProvider {
@@ -54,7 +49,7 @@ func UseKopsControllerForNodeConfig(cluster *kops.Cluster) bool {
 			return false
 		}
 	}
-	return UseKopsControllerForNodeBootstrap(cluster.Spec.GetCloudProvider())
+	return true
 }

 // UseCiliumEtcd is true if we are using the Cilium etcd cluster.
@@ -27,7 +27,6 @@ import (
 	"strings"

 	"k8s.io/klog/v2"
-	"k8s.io/kops/pkg/apis/kops/model"
 	"k8s.io/kops/upup/pkg/fi/cloudup/scaleway"
 	"k8s.io/kops/upup/pkg/fi/utils"
 	"sigs.k8s.io/yaml"
@@ -134,12 +133,7 @@ func (b *BootstrapScript) buildEnvironmentVariables() (map[string]string, error)
 	}

 	if os.Getenv("S3_ENDPOINT") != "" {
-		passEnvs := false
-		if b.ig.IsControlPlane() || !b.builder.UseKopsControllerForNodeBootstrap() {
-			passEnvs = true
-		}
-
-		if passEnvs {
+		if b.ig.IsControlPlane() {
 			env["S3_ENDPOINT"] = os.Getenv("S3_ENDPOINT")
 			env["S3_REGION"] = os.Getenv("S3_REGION")
 			env["S3_ACCESS_KEY_ID"] = os.Getenv("S3_ACCESS_KEY_ID")
@@ -190,12 +184,7 @@ func (b *BootstrapScript) buildEnvironmentVariables() (map[string]string, error)
 	}

 	if cluster.Spec.GetCloudProvider() == kops.CloudProviderDO {
-		passEnvs := false
-		if b.ig.IsControlPlane() || !b.builder.UseKopsControllerForNodeBootstrap() {
-			passEnvs = true
-		}
-
-		if passEnvs {
+		if b.ig.IsControlPlane() {
 			doToken := os.Getenv("DIGITALOCEAN_ACCESS_TOKEN")
 			if doToken != "" {
 				env["DIGITALOCEAN_ACCESS_TOKEN"] = doToken
@@ -255,16 +244,8 @@ func (b *BootstrapScriptBuilder) ResourceNodeUp(c *fi.CloudupModelBuilderContext
 		}
 	}

-	if model.UseCiliumEtcd(b.Cluster) && !model.UseKopsControllerForNodeBootstrap(b.Cluster.Spec.GetCloudProvider()) {
-		keypairs = append(keypairs, "etcd-client-cilium")
-	}
 	if ig.HasAPIServer() {
 		keypairs = append(keypairs, "apiserver-aggregator-ca", "service-account", "etcd-clients-ca")
-	} else if !model.UseKopsControllerForNodeBootstrap(b.Cluster.Spec.GetCloudProvider()) {
-		keypairs = append(keypairs, "kubelet", "kube-proxy")
-		if b.Cluster.Spec.Networking.KubeRouter != nil {
-			keypairs = append(keypairs, "kube-router")
-		}
-	}
+	}

 	if ig.IsBastion() {
@@ -145,16 +145,6 @@ func (b *EtcdManagerBuilder) Build(c *fi.CloudupModelBuilderContext) error {
 				Type: "ca",
 			}
 			c.AddTask(clientsCaCilium)
-
-			if !b.UseKopsControllerForNodeBootstrap() {
-				c.AddTask(&fitasks.Keypair{
-					Name: fi.PtrTo("etcd-client-cilium"),
-					Lifecycle: b.Lifecycle,
-					Subject: "cn=cilium",
-					Type: "client",
-					Signer: clientsCaCilium,
-				})
-			}
 		}
 	}

@@ -251,20 +251,6 @@ func (b *KopsModelContext) CloudTags(name string, shared bool) map[string]string
 	return tags
 }

-// UseKopsControllerForNodeBootstrap checks if nodeup should use kops-controller to bootstrap.
-func (b *KopsModelContext) UseKopsControllerForNodeBootstrap() bool {
-	return model.UseKopsControllerForNodeBootstrap(b.Cluster.Spec.GetCloudProvider())
-}
-
-// UseBootstrapTokens checks if bootstrap tokens are enabled
-func (b *KopsModelContext) UseBootstrapTokens() bool {
-	if b.Cluster.Spec.KubeAPIServer == nil || b.UseKopsControllerForNodeBootstrap() {
-		return false
-	}
-
-	return fi.ValueOf(b.Cluster.Spec.KubeAPIServer.EnableBootstrapAuthToken)
-}
-
 // UsesBastionDns checks if we should use a specific name for the bastion dns
 func (b *KopsModelContext) UsesBastionDns() bool {
 	if b.Cluster.Spec.Networking.Topology.Bastion != nil && b.Cluster.Spec.Networking.Topology.Bastion.PublicName != "" {
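With the bootstrap helper pinned to true, KopsModelContext.UseBootstrapTokens (removed in the hunk above) could only ever return false, which is why the bootstrap-token and node-authorizer paths elsewhere in this diff are dead as well. A small stand-alone sketch of that constant-folding, with plain booleans standing in for the cluster spec fields (not the real kops types):

package main

import "fmt"

// useBootstrapTokens mirrors the shape of the removed method: once
// useKopsControllerForNodeBootstrap is a constant true, the early return always
// fires and EnableBootstrapAuthToken can never be reached.
func useBootstrapTokens(kubeAPIServerConfigured, enableBootstrapAuthToken bool) bool {
    const useKopsControllerForNodeBootstrap = true
    if !kubeAPIServerConfigured || useKopsControllerForNodeBootstrap {
        return false
    }
    return enableBootstrapAuthToken
}

func main() {
    for _, configured := range []bool{true, false} {
        for _, enabled := range []bool{true, false} {
            fmt.Println(useBootstrapTokens(configured, enabled)) // always false
        }
    }
}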
@@ -53,6 +53,11 @@ func (b *LoadBalancerModelBuilder) Build(c *fi.CloudupModelBuilderContext) error
 				ListenerPort: fi.PtrTo(wellknownports.KubeAPIServer),
 				DestinationPort: fi.PtrTo(wellknownports.KubeAPIServer),
 			},
+			{
+				Protocol: string(hcloud.LoadBalancerServiceProtocolTCP),
+				ListenerPort: fi.PtrTo(wellknownports.KopsControllerPort),
+				DestinationPort: fi.PtrTo(wellknownports.KopsControllerPort),
+			},
 		},
 		Target: strings.Join(controlPlaneLabelSelector, ","),
 		Labels: map[string]string{
@@ -60,14 +65,6 @@ func (b *LoadBalancerModelBuilder) Build(c *fi.CloudupModelBuilderContext) error
 		},
 	}

-	if b.Cluster.UsesNoneDNS() || b.UseKopsControllerForNodeBootstrap() {
-		loadbalancer.Services = append(loadbalancer.Services, &hetznertasks.LoadBalancerService{
-			Protocol: string(hcloud.LoadBalancerServiceProtocolTCP),
-			ListenerPort: fi.PtrTo(wellknownports.KopsControllerPort),
-			DestinationPort: fi.PtrTo(wellknownports.KopsControllerPort),
-		})
-	}
-
 	c.AddTask(&loadbalancer)

 	return nil
@@ -702,31 +702,6 @@ func ReadableStatePaths(cluster *kops.Cluster, role Subject) ([]string, error) {
 			"/igconfig/node/*",
 		)
 		}
-		if !model.UseKopsControllerForNodeBootstrap(cluster.Spec.GetCloudProvider()) {
-			paths = append(paths,
-				"/secrets/dockerconfig",
-				"/pki/private/kube-proxy/*",
-			)
-
-			if useBootstrapTokens(cluster) {
-				paths = append(paths, "/pki/private/node-authorizer-client/*")
-			} else {
-				paths = append(paths, "/pki/private/kubelet/*")
-			}
-
-			networkingSpec := &cluster.Spec.Networking
-
-			// @check if kuberoute is enabled and permit access to the private key
-			if networkingSpec.KubeRouter != nil {
-				paths = append(paths, "/pki/private/kube-router/*")
-			}
-
-			// @check if cilium is enabled as the CNI provider and permit access to the cilium etc client TLS certificate by default
-			// As long as the Cilium Etcd cluster exists, we should do this
-			if networkingSpec.Cilium != nil && model.UseCiliumEtcd(cluster) {
-				paths = append(paths, "/pki/private/etcd-client-cilium/*")
-			}
-		}
 	}
 	return paths, nil
 }
@@ -780,16 +755,6 @@ func (b *PolicyResource) Open() (io.Reader, error) {
 	return bytes.NewReader([]byte(j)), nil
 }

-// useBootstrapTokens check if we are using bootstrap tokens - @TODO, i don't like this we should probably pass in
-// the kops model into the builder rather than duplicating the code. I'll leave for another PR
-func useBootstrapTokens(cluster *kops.Cluster) bool {
-	if cluster.Spec.KubeAPIServer == nil {
-		return false
-	}
-
-	return fi.ValueOf(cluster.Spec.KubeAPIServer.EnableBootstrapAuthToken)
-}
-
 func addECRPermissions(p *Policy) {
 	// TODO - I think we can just have GetAuthorizationToken here, as we are not
 	// TODO - making any API calls except for GetAuthorizationToken.
@@ -17,9 +17,6 @@ limitations under the License.
 package model

 import (
-	"strings"
-
-	"k8s.io/kops/pkg/rbac"
 	"k8s.io/kops/pkg/tokens"
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/fitasks"
@@ -45,42 +42,6 @@ func (b *PKIModelBuilder) Build(c *fi.CloudupModelBuilderContext) error {
 	}
 	c.AddTask(defaultCA)

-	{
-		// @check if kops-controller bootstrap or bootstrap tokens are enabled. If so, disable the creation of the kubelet certificate - we also
-		// block at the IAM level for AWS cluster for pre-existing clusters.
-		if !b.UseKopsControllerForNodeBootstrap() && !b.UseBootstrapTokens() {
-			c.AddTask(&fitasks.Keypair{
-				Name: fi.PtrTo("kubelet"),
-				Lifecycle: b.Lifecycle,
-				Subject: "o=" + rbac.NodesGroup + ",cn=kubelet",
-				Type: "client",
-				Signer: defaultCA,
-			})
-		}
-	}
-
-	if !b.UseKopsControllerForNodeBootstrap() {
-		t := &fitasks.Keypair{
-			Name: fi.PtrTo("kube-proxy"),
-			Lifecycle: b.Lifecycle,
-			Subject: "cn=" + rbac.KubeProxy,
-			Type: "client",
-			Signer: defaultCA,
-		}
-		c.AddTask(t)
-	}
-
-	if b.KopsModelContext.Cluster.Spec.Networking.KubeRouter != nil && !b.UseKopsControllerForNodeBootstrap() {
-		t := &fitasks.Keypair{
-			Name: fi.PtrTo("kube-router"),
-			Lifecycle: b.Lifecycle,
-			Subject: "cn=" + rbac.KubeRouter,
-			Type: "client",
-			Signer: defaultCA,
-		}
-		c.AddTask(t)
-	}
-
 	{
 		aggregatorCA := &fitasks.Keypair{
 			Name: fi.PtrTo("apiserver-aggregator-ca"),
@@ -102,42 +63,6 @@ func (b *PKIModelBuilder) Build(c *fi.CloudupModelBuilderContext) error {
 		c.AddTask(serviceAccount)
 	}

-	// @TODO this is VERY presumptuous, i'm going on the basis we can make it configurable in the future.
-	// But I'm conscious not to do too much work on bootstrap tokens as it might overlay further down the
-	// line with the machines api
-	if b.UseBootstrapTokens() {
-		serviceName := "node-authorizer-internal"
-
-		alternateNames := []string{
-			"127.0.0.1",
-			"localhost",
-			serviceName,
-			strings.Join([]string{serviceName, b.Cluster.Name}, "."),
-		}
-		if b.Cluster.Spec.DNSZone != "" {
-			alternateNames = append(alternateNames, strings.Join([]string{serviceName, b.Cluster.Spec.DNSZone}, "."))
-		}
-
-		// @note: the certificate used by the node authorizers
-		c.AddTask(&fitasks.Keypair{
-			Name: fi.PtrTo("node-authorizer"),
-			Lifecycle: b.Lifecycle,
-			Subject: "cn=node-authorizaer",
-			Type: "server",
-			AlternateNames: alternateNames,
-			Signer: defaultCA,
-		})
-
-		// @note: we use this for mutual tls between node and authorizer
-		c.AddTask(&fitasks.Keypair{
-			Name: fi.PtrTo("node-authorizer-client"),
-			Lifecycle: b.Lifecycle,
-			Subject: "cn=node-authorizer-client",
-			Type: "client",
-			Signer: defaultCA,
-		})
-	}
-
 	// Create auth tokens (though this is deprecated)
 	for _, x := range tokens.GetKubernetesAuthTokens_Deprecated() {
 		c.AddTask(&fitasks.Secret{Name: fi.PtrTo(x), Lifecycle: b.Lifecycle})
@@ -91,13 +91,11 @@ func (b *APILoadBalancerModelBuilder) Build(c *fi.CloudupModelBuilderContext) er
 		// if we're not going to use an alias for it
 		loadBalancer.ForAPIServer = true

-		if b.Cluster.UsesNoneDNS() || b.UseKopsControllerForNodeBootstrap() {
-			lbBackendKopsController, lbFrontendKopsController := createLbBackendAndFrontend("kops-controller", wellknownports.KopsControllerPort, zone, loadBalancer)
-			lbBackendKopsController.Lifecycle = b.Lifecycle
-			c.AddTask(lbBackendKopsController)
-			lbFrontendKopsController.Lifecycle = b.Lifecycle
-			c.AddTask(lbFrontendKopsController)
-		}
+		lbBackendKopsController, lbFrontendKopsController := createLbBackendAndFrontend("kops-controller", wellknownports.KopsControllerPort, zone, loadBalancer)
+		lbBackendKopsController.Lifecycle = b.Lifecycle
+		c.AddTask(lbBackendKopsController)
+		lbFrontendKopsController.Lifecycle = b.Lifecycle
+		c.AddTask(lbFrontendKopsController)
 	}

 	return nil
@@ -32,10 +32,8 @@ spec:
         k8s-addon: kops-controller.addons.k8s.io
         k8s-app: kops-controller
         version: v{{ KopsVersion }}
-{{ if UseKopsControllerForNodeBootstrap }}
       annotations:
         dns.alpha.kubernetes.io/internal: kops-controller.internal.{{ ClusterName }}
-{{ end }}
     spec:
       affinity:
         nodeAffinity:
@@ -139,7 +139,7 @@ spec:
         {{ else }}
        - --cert-dir=/tmp
         {{ end }}
-        {{ if or (not UseKopsControllerForNodeBootstrap) (WithDefaultBool .MetricsServer.Insecure true) }}
+        {{ if WithDefaultBool .MetricsServer.Insecure true }}
        - --kubelet-insecure-tls
         {{ end }}
        image: {{ or .MetricsServer.Image "registry.k8s.io/metrics-server/metrics-server:v0.6.2" }}
@@ -1,21 +0,0 @@
-# Source: https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/rbac/kubelet-binding.yaml
-# The GKE environments don't have kubelets with certificates that
-# identify the system:nodes group. They use the kubelet identity
-# TODO: remove this once new nodes are granted individual identities and the
-# NodeAuthorizer is enabled.
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: kubelet-cluster-admin
-  labels:
-    k8s-addon: rbac.addons.k8s.io
-    kubernetes.io/cluster-service: "true"
-    addonmanager.kubernetes.io/mode: Reconcile
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: system:node
-subjects:
-- apiGroup: rbac.authorization.k8s.io
-  kind: User
-  name: kubelet
@@ -1355,7 +1355,7 @@ func (n *nodeUpConfigBuilder) BuildConfig(ig *kops.InstanceGroup, apiserverAddit
 		return nil, nil, err
 	}
 	if keysets["etcd-clients-ca-cilium"] != nil {
-		if err := loadCertificates(keysets, "etcd-clients-ca-cilium", config, hasAPIServer || apiModel.UseKopsControllerForNodeBootstrap(n.cluster.Spec.GetCloudProvider())); err != nil {
+		if err := loadCertificates(keysets, "etcd-clients-ca-cilium", config, true); err != nil {
 			return nil, nil, err
 		}
 	}
@@ -405,37 +405,6 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.CloudupModelBuilderContext)
 		}
 	}

-	// @check if bootstrap tokens are enabled an if so we can forgo applying
-	// this manifest. For clusters whom are upgrading from RBAC to Node,RBAC the clusterrolebinding
-	// will remain and have to be deleted manually once all the nodes have been upgraded.
-	enableRBACAddon := true
-	if b.UseKopsControllerForNodeBootstrap() {
-		enableRBACAddon = false
-	}
-	if b.Cluster.Spec.KubeAPIServer != nil {
-		if b.Cluster.Spec.KubeAPIServer.EnableBootstrapAuthToken != nil && *b.Cluster.Spec.KubeAPIServer.EnableBootstrapAuthToken {
-			enableRBACAddon = false
-		}
-	}
-
-	if enableRBACAddon {
-		{
-			key := "rbac.addons.k8s.io"
-
-			{
-				location := key + "/k8s-1.8.yaml"
-				id := "k8s-1.8"
-
-				addons.Add(&channelsapi.AddonSpec{
-					Name: fi.PtrTo(key),
-					Selector: map[string]string{"k8s-addon": key},
-					Manifest: fi.PtrTo(location),
-					Id: id,
-				})
-			}
-		}
-	}
-
 	{
 		// Adding the kubelet-api-admin binding: this is required when switching to webhook authorization on the kubelet
 		// docs: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#other-component-roles
@@ -28,7 +28,6 @@ import (
 	"k8s.io/kops/dnsprovider/pkg/dnsprovider"
 	"k8s.io/kops/dnsprovider/pkg/dnsprovider/rrstype"
 	"k8s.io/kops/pkg/apis/kops"
-	apimodel "k8s.io/kops/pkg/apis/kops/model"
 	"k8s.io/kops/upup/pkg/fi"
 )

@@ -276,13 +275,11 @@ func buildPrecreateDNSHostnames(cluster *kops.Cluster) []recordKey {
 		})
 	}

-	if apimodel.UseKopsControllerForNodeBootstrap(cluster.Spec.GetCloudProvider()) {
-		name := "kops-controller.internal." + cluster.ObjectMeta.Name
-		recordKeys = append(recordKeys, recordKey{
-			hostname: name,
-			rrsType: internalType,
-		})
-	}
+	name := "kops-controller.internal." + cluster.ObjectMeta.Name
+	recordKeys = append(recordKeys, recordKey{
+		hostname: name,
+		rrsType: internalType,
+	})

 	return recordKeys
 }
@@ -91,7 +91,6 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS
 	dest["KubeObjectToApplyYAML"] = kubemanifest.KubeObjectToApplyYAML

 	dest["SharedVPC"] = tf.SharedVPC
-	dest["UseBootstrapTokens"] = tf.UseBootstrapTokens
 	// Remember that we may be on a different arch from the target. Hard-code for now.
 	dest["replace"] = func(s, find, replace string) string {
 		return strings.Replace(s, find, replace, -1)
@@ -171,9 +170,6 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS
 	dest["ProxyEnv"] = tf.ProxyEnv

 	dest["KopsSystemEnv"] = tf.KopsSystemEnv
-	dest["UseKopsControllerForNodeBootstrap"] = func() bool {
-		return tf.UseKopsControllerForNodeBootstrap()
-	}

 	dest["DO_TOKEN"] = func() string {
 		return os.Getenv("DIGITALOCEAN_ACCESS_TOKEN")
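Dropping UseKopsControllerForNodeBootstrap (and UseBootstrapTokens) from the template FuncMap above only works because the addon templates that referenced them are updated in the same commit: with Go's text/template, a template that calls an unregistered function fails at parse time. A sketch of that failure mode (not kops code; names and version values are placeholders):

package main

import (
    "fmt"
    "os"
    "text/template"
)

func main() {
    // Only helpers that survive this commit are registered.
    funcs := template.FuncMap{
        "KopsVersion": func() string { return "1.28.0" }, // placeholder value
    }

    // A stale template still guarding on the removed helper fails to parse.
    _, err := template.New("stale").Funcs(funcs).Parse(
        "{{ if UseKopsControllerForNodeBootstrap }}annotations: ...{{ end }}")
    fmt.Println("stale template:", err)

    // Surviving template lines (e.g. "version: v{{ KopsVersion }}" in the
    // kops-controller addon hunk above) still render fine.
    ok := template.Must(template.New("addon").Funcs(funcs).Parse("version: v{{ KopsVersion }}\n"))
    _ = ok.Execute(os.Stdout, nil)
}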
@@ -658,7 +654,7 @@ func (tf *TemplateFunctions) KopsControllerConfig() (string, error) {
 		config.CacheNodeidentityInfo = true
 	}

-	if tf.UseKopsControllerForNodeBootstrap() {
+	{
 		certNames := []string{"kubelet", "kubelet-server"}
 		signingCAs := []string{fi.CertificateIDCA}
 		if apiModel.UseCiliumEtcd(cluster) {