Update nodes with an APIServer when APIServer spec changes

Author: John Gardiner Myers
Date:   2021-06-26 12:27:20 -07:00
parent 22c11c10f1
commit 1312163edd
17 changed files with 149 additions and 56 deletions
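In short: the kube-apiserver configuration a node uses is now a copy embedded in its nodeup config (the new Config.APIServerConfig field) rather than read directly from the cluster spec, and KubeAPIServerBuilder consumes that copy. Because the per-node nodeup config is part of what kOps compares when deciding whether a node is up to date, a change to the APIServer spec now changes the config of every node running an APIServer and flags those nodes for update, as the commit title says. The test helpers change to match: fixtures are loaded via testutils.LoadModel, the cluster spec is fully populated (cloudup.BuildCloud, cloudup.PerformAssignments, a mocked PopulateClusterSpec) before nodeup.NewConfig runs, and the Init() call moves out of BuildNodeupModelContext into the individual tests.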

View File

@@ -128,9 +128,20 @@ func TestContainerdBuilder_BuildFlags(t *testing.T) {
 }

 func runContainerdBuilderTest(t *testing.T, key string, distro distributions.Distribution) {
+	h := testutils.NewIntegrationTestHarness(t)
+	defer h.Close()
+	h.MockKopsVersion("1.18.0")
+	h.SetupMockAWS()
+
 	basedir := path.Join("tests/containerdbuilder/", key)

-	nodeUpModelContext, err := BuildNodeupModelContext(basedir)
+	model, err := testutils.LoadModel(basedir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	nodeUpModelContext, err := BuildNodeupModelContext(model)
 	if err != nil {
 		t.Fatalf("error parsing cluster yaml %q: %v", basedir, err)
 		return
@@ -148,6 +159,10 @@ func runContainerdBuilderTest(t *testing.T, key string, distro distributions.Distribution) {
 	nodeUpModelContext.Assets.AddForTest("ctr", "usr/local/bin/ctr", "testing containerd content")
 	nodeUpModelContext.Assets.AddForTest("runc", "usr/local/sbin/runc", "testing containerd content")
+	if err := nodeUpModelContext.Init(); err != nil {
+		t.Fatalf("error from nodeupModelContext.Init(): %v", err)
+		return
+	}
 	context := &fi.ModelBuilderContext{
 		Tasks: make(map[string]fi.Task),
 	}

View File

@@ -388,7 +388,7 @@ func (c *NodeupModelContext) UsesSecondaryIP() bool {
 // UseBootstrapTokens checks if we are using bootstrap tokens
 func (c *NodeupModelContext) UseBootstrapTokens() bool {
 	if c.HasAPIServer {
-		return fi.BoolValue(c.Cluster.Spec.KubeAPIServer.EnableBootstrapAuthToken)
+		return fi.BoolValue(c.NodeupConfig.APIServerConfig.KubeAPIServer.EnableBootstrapAuthToken)
 	}

 	return c.Cluster.Spec.Kubelet != nil && c.Cluster.Spec.Kubelet.BootstrapKubeconfig != ""
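
Note the unguarded pointer chain in the new line: c.NodeupConfig.APIServerConfig is only populated by NewConfig (last file in this diff) for the master and apiserver roles, which appears to be exactly when HasAPIServer is true, so the dereference should be safe. A minimal runnable sketch of that guard pattern, using hypothetical stand-in types (fi.BoolValue replaced by an inline nil check):

```go
package main

import "fmt"

// Stand-ins for the kops/nodeup types used in the hunk above.
type KubeAPIServerConfig struct{ EnableBootstrapAuthToken *bool }
type APIServerConfig struct{ KubeAPIServer *KubeAPIServerConfig }
type Config struct{ APIServerConfig *APIServerConfig }

type NodeupModelContext struct {
	HasAPIServer bool
	NodeupConfig *Config
}

// UseBootstrapTokens mirrors the new logic: the HasAPIServer guard is what
// keeps the APIServerConfig dereference from panicking on nil.
func (c *NodeupModelContext) UseBootstrapTokens() bool {
	if c.HasAPIServer {
		v := c.NodeupConfig.APIServerConfig.KubeAPIServer.EnableBootstrapAuthToken
		return v != nil && *v
	}
	return false // the kubelet BootstrapKubeconfig branch is elided here
}

func main() {
	enabled := true
	c := &NodeupModelContext{
		HasAPIServer: true,
		NodeupConfig: &Config{APIServerConfig: &APIServerConfig{
			KubeAPIServer: &KubeAPIServerConfig{EnableBootstrapAuthToken: &enabled},
		}},
	}
	fmt.Println(c.UseBootstrapTokens()) // true
}
```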

View File

@@ -120,9 +120,20 @@ func TestDockerBuilder_BuildFlags(t *testing.T) {
 }

 func runDockerBuilderTest(t *testing.T, key string) {
+	h := testutils.NewIntegrationTestHarness(t)
+	defer h.Close()
+	h.MockKopsVersion("1.18.0")
+	h.SetupMockAWS()
+
 	basedir := path.Join("tests/dockerbuilder/", key)

-	nodeUpModelContext, err := BuildNodeupModelContext(basedir)
+	model, err := testutils.LoadModel(basedir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	nodeUpModelContext, err := BuildNodeupModelContext(model)
 	if err != nil {
 		t.Fatalf("error parsing cluster yaml %q: %v", basedir, err)
 		return
@@ -163,6 +174,9 @@ func runDockerBuilderTest(t *testing.T, key string) {
 		}
 	}
+	if err := nodeUpModelContext.Init(); err != nil {
+		t.Fatalf("error from nodeUpModelContext.Init(): %v", err)
+	}
 	context := &fi.ModelBuilderContext{
 		Tasks: make(map[string]fi.Task),
 	}

View File

@@ -55,7 +55,12 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
 		return nil
 	}

-	if err := b.writeAuthenticationConfig(c); err != nil {
+	var kubeAPIServer kops.KubeAPIServerConfig
+	if b.NodeupConfig.APIServerConfig.KubeAPIServer != nil {
+		kubeAPIServer = *b.NodeupConfig.APIServerConfig.KubeAPIServer
+	}
+
+	if err := b.writeAuthenticationConfig(c, &kubeAPIServer); err != nil {
 		return err
 	}
@@ -63,7 +68,7 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
 	if *b.Cluster.Spec.EncryptionConfig {
 		encryptionConfigPath := fi.String(filepath.Join(b.PathSrvKubernetes(), "encryptionconfig.yaml"))
-		b.Cluster.Spec.KubeAPIServer.EncryptionProviderConfig = encryptionConfigPath
+		kubeAPIServer.EncryptionProviderConfig = encryptionConfigPath

 		key := "encryptionconfig"
 		encryptioncfg, err := b.SecretStore.Secret(key)
@@ -104,7 +109,7 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
 		})
 	}

 	{
-		pod, err := b.buildPod()
+		pod, err := b.buildPod(&kubeAPIServer)
 		if err != nil {
 			return fmt.Errorf("error building kube-apiserver manifest: %v", err)
 		}
@@ -151,7 +156,7 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
 	return nil
 }

-func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderContext) error {
+func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderContext, kubeAPIServer *kops.KubeAPIServerConfig) error {
 	if b.Cluster.Spec.Authentication == nil || b.Cluster.Spec.Authentication.IsEmpty() {
 		return nil
 	}
@@ -198,7 +203,7 @@ func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderContext) error {
 	if b.Cluster.Spec.Authentication.Aws != nil {
 		id := "aws-iam-authenticator"
-		b.Cluster.Spec.KubeAPIServer.AuthenticationTokenWebhookConfigFile = fi.String(PathAuthnConfig)
+		kubeAPIServer.AuthenticationTokenWebhookConfigFile = fi.String(PathAuthnConfig)

 		{
 			caCertificate, _, err := b.NodeupModelContext.KeyStore.FindPrimaryKeypair(fi.CertificateIDCA)
@@ -302,9 +307,7 @@ func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderContext) error {
 }

 // buildPod is responsible for generating the kube-apiserver pod and thus manifest file
-func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
-	kubeAPIServer := b.Cluster.Spec.KubeAPIServer
-
+func (b *KubeAPIServerBuilder) buildPod(kubeAPIServer *kops.KubeAPIServerConfig) (*v1.Pod, error) {
 	kubeAPIServer.ServiceAccountKeyFile = append(kubeAPIServer.ServiceAccountKeyFile, filepath.Join(b.PathSrvKubernetes(), "service-account.pub"))

 	// Set the signing key if we're using Service Account Token VolumeProjection
@@ -393,15 +396,14 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
 		// @note: not sure if this is the best place to put it, I could place into the validation.go which has the benefit of
 		// fixing up the manifests itself, but that feels VERY hacky
 		// @note: it's fine to use AdmissionControl here and it's not populated by the model, thus the only data could have come from the cluster spec
-		c := b.Cluster.Spec.KubeAPIServer
-		if len(c.AdmissionControl) > 0 {
-			c.EnableAdmissionPlugins = append([]string(nil), c.AdmissionControl...)
-			c.AdmissionControl = []string{}
+		if len(kubeAPIServer.AdmissionControl) > 0 {
+			kubeAPIServer.EnableAdmissionPlugins = append([]string(nil), kubeAPIServer.AdmissionControl...)
+			kubeAPIServer.AdmissionControl = []string{}
 		}
 	}

 	// build the kube-apiserver flags for the service
-	flags, err := flagbuilder.BuildFlagsList(b.Cluster.Spec.KubeAPIServer)
+	flags, err := flagbuilder.BuildFlagsList(kubeAPIServer)
 	if err != nil {
 		return nil, fmt.Errorf("error building kube-apiserver flags: %v", err)
 	}
@@ -501,8 +503,8 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
 		Ports: []v1.ContainerPort{
 			{
 				Name:          "https",
-				ContainerPort: b.Cluster.Spec.KubeAPIServer.SecurePort,
-				HostPort:      b.Cluster.Spec.KubeAPIServer.SecurePort,
+				ContainerPort: kubeAPIServer.SecurePort,
+				HostPort:      kubeAPIServer.SecurePort,
 			},
 		},
 		Resources: v1.ResourceRequirements{
@@ -563,7 +565,7 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
 		addHostPathMapping(pod, container, "srvsshproxy", pathSrvSshproxy)
 	}

-	auditLogPath := b.Cluster.Spec.KubeAPIServer.AuditLogPath
+	auditLogPath := kubeAPIServer.AuditLogPath
 	// Don't mount a volume if the mount path is set to '-' for stdout logging
 	// See https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-backends
 	if auditLogPath != nil && *auditLogPath != "-" {
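
The structural change in this file: Build takes a value copy of the embedded config and threads it explicitly through writeAuthenticationConfig and buildPod, where the old code mutated b.Cluster.Spec.KubeAPIServer through a shared pointer. A self-contained sketch of that copy-then-mutate pattern with a hypothetical three-field config (the copy is shallow, which is fine for assigning pointer and slice fields as done here, though appending in place to a shared slice would still be visible through the original):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// Hypothetical stand-in for kops.KubeAPIServerConfig.
type KubeAPIServerConfig struct {
	EncryptionProviderConfig *string
	AdmissionControl         []string
	EnableAdmissionPlugins   []string
}

func main() {
	spec := &KubeAPIServerConfig{AdmissionControl: []string{"NamespaceLifecycle"}}

	// Copy-then-mutate, as Build now does with
	// *b.NodeupConfig.APIServerConfig.KubeAPIServer:
	kubeAPIServer := *spec

	p := filepath.Join("/srv/kubernetes", "encryptionconfig.yaml")
	kubeAPIServer.EncryptionProviderConfig = &p

	// The AdmissionControl -> EnableAdmissionPlugins migration from buildPod,
	// now applied to the copy rather than the cluster spec:
	if len(kubeAPIServer.AdmissionControl) > 0 {
		kubeAPIServer.EnableAdmissionPlugins = append([]string(nil), kubeAPIServer.AdmissionControl...)
		kubeAPIServer.AdmissionControl = []string{}
	}

	fmt.Println(spec.EncryptionProviderConfig == nil) // true: the spec is untouched
	fmt.Println(spec.AdmissionControl)                // [NamespaceLifecycle]: untouched
	fmt.Println(kubeAPIServer.EnableAdmissionPlugins) // [NamespaceLifecycle]
}
```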

View File

@@ -149,12 +149,24 @@ func stringSlicesEqual(exp, other []string) bool {
 }

 func Test_RunKubeletBuilder(t *testing.T) {
+	h := testutils.NewIntegrationTestHarness(t)
+	defer h.Close()
+	h.MockKopsVersion("1.18.0")
+	h.SetupMockAWS()
+
 	basedir := "tests/kubelet/featuregates"

 	context := &fi.ModelBuilderContext{
 		Tasks: make(map[string]fi.Task),
 	}
-	nodeUpModelContext, err := BuildNodeupModelContext(basedir)
+	model, err := testutils.LoadModel(basedir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	nodeUpModelContext, err := BuildNodeupModelContext(model)
 	if err != nil {
 		t.Fatalf("error loading model %q: %v", basedir, err)
 		return
@@ -166,12 +178,24 @@ func Test_RunKubeletBuilder(t *testing.T) {
 }

 func Test_RunKubeletBuilderWarmPool(t *testing.T) {
+	h := testutils.NewIntegrationTestHarness(t)
+	defer h.Close()
+	h.MockKopsVersion("1.18.0")
+	h.SetupMockAWS()
+
 	basedir := "tests/kubelet/warmpool"

 	context := &fi.ModelBuilderContext{
 		Tasks: make(map[string]fi.Task),
 	}
-	nodeUpModelContext, err := BuildNodeupModelContext(basedir)
+	model, err := testutils.LoadModel(basedir)
+	if err != nil {
+		t.Fatal(err)
+	}
+	nodeUpModelContext, err := BuildNodeupModelContext(model)
 	if err != nil {
 		t.Fatalf("error loading model %q: %v", basedir, err)
 		return
@@ -186,6 +210,10 @@ func Test_RunKubeletBuilderWarmPool(t *testing.T) {
 }

 func runKubeletBuilder(t *testing.T, context *fi.ModelBuilderContext, nodeupModelContext *NodeupModelContext) {
+	if err := nodeupModelContext.Init(); err != nil {
+		t.Fatalf("error from nodeupModelContext.Init(): %v", err)
+	}
+
 	builder := KubeletBuilder{NodeupModelContext: nodeupModelContext}
 	kubeletConfig, err := builder.buildKubeletConfig()
@@ -221,18 +249,12 @@ func runKubeletBuilder(t *testing.T, context *fi.ModelBuilderContext, nodeupModelContext *NodeupModelContext) {
 }

-func BuildNodeupModelContext(basedir string) (*NodeupModelContext, error) {
-	model, err := testutils.LoadModel(basedir)
-	if err != nil {
-		return nil, err
-	}
-
+func BuildNodeupModelContext(model *testutils.Model) (*NodeupModelContext, error) {
 	if model.Cluster == nil {
-		return nil, fmt.Errorf("no cluster found in %s", basedir)
+		return nil, fmt.Errorf("no cluster found in model")
 	}

-	nodeUpModelContext := &NodeupModelContext{
-		Cluster:      model.Cluster,
+	nodeupModelContext := &NodeupModelContext{
 		Architecture: "amd64",
 		BootConfig:   &nodeup.BootConfig{},
 		NodeupConfig: &nodeup.Config{
@@ -241,22 +263,34 @@ func BuildNodeupModelContext(basedir string) (*NodeupModelContext, error) {
 		},
 	}

+	// Populate the cluster
+	cloud, err := cloudup.BuildCloud(model.Cluster)
+	if err != nil {
+		return nil, fmt.Errorf("error from BuildCloud: %v", err)
+	}
+
+	err = cloudup.PerformAssignments(model.Cluster, cloud)
+	if err != nil {
+		return nil, fmt.Errorf("error from PerformAssignments: %v", err)
+	}
+
+	nodeupModelContext.Cluster, err = mockedPopulateClusterSpec(model.Cluster, cloud)
+	if err != nil {
+		return nil, fmt.Errorf("unexpected error from mockedPopulateClusterSpec: %v", err)
+	}
+
 	if len(model.InstanceGroups) == 0 {
 		// We tolerate this - not all tests need an instance group
 	} else if len(model.InstanceGroups) == 1 {
-		nodeUpModelContext.NodeupConfig, nodeUpModelContext.BootConfig = nodeup.NewConfig(model.Cluster, model.InstanceGroups[0])
+		nodeupModelContext.NodeupConfig, nodeupModelContext.BootConfig = nodeup.NewConfig(nodeupModelContext.Cluster, model.InstanceGroups[0])
 	} else {
-		return nil, fmt.Errorf("unexpected number of instance groups in %s, found %d", basedir, len(model.InstanceGroups))
+		return nil, fmt.Errorf("unexpected number of instance groups: found %d", len(model.InstanceGroups))
 	}

-	nodeUpModelContext.NodeupConfig.CAs["ca"] = dummyCertificate + nextCertificate
-	nodeUpModelContext.NodeupConfig.KeypairIDs["ca"] = "3"
+	nodeupModelContext.NodeupConfig.CAs["ca"] = dummyCertificate + nextCertificate
+	nodeupModelContext.NodeupConfig.KeypairIDs["ca"] = "3"

-	if err := nodeUpModelContext.Init(); err != nil {
-		return nil, err
-	}
-	return nodeUpModelContext, nil
+	return nodeupModelContext, nil
 }

 func mockedPopulateClusterSpec(c *kops.Cluster, cloud fi.Cloud) (*kops.Cluster, error) {
@@ -339,9 +373,10 @@ func RunGoldenTest(t *testing.T, basedir string, key string, builder func(*NodeupModelContext, *fi.ModelBuilderContext) error) {
 	context := &fi.ModelBuilderContext{
 		Tasks: make(map[string]fi.Task),
 	}
-	nodeupModelContext, err := BuildNodeupModelContext(basedir)
+	model, err := testutils.LoadModel(basedir)
 	if err != nil {
-		t.Fatalf("error loading model %q: %v", basedir, err)
+		t.Fatal(err)
 	}

 	keystore := &fakeCAStore{}
@@ -362,25 +397,15 @@ func RunGoldenTest(t *testing.T, basedir string, key string, builder func(*NodeupModelContext, *fi.ModelBuilderContext) error) {
 		"kube-scheduler": mustParseCertificate(dummyCertificate),
 	}
-	nodeupModelContext.KeyStore = keystore
-
-	// Populate the cluster
-	cloud, err := cloudup.BuildCloud(nodeupModelContext.Cluster)
+	nodeupModelContext, err := BuildNodeupModelContext(model)
 	if err != nil {
-		t.Fatalf("error from BuildCloud: %v", err)
+		t.Fatalf("error loading model %q: %v", basedir, err)
 	}
-	{
-		err := cloudup.PerformAssignments(nodeupModelContext.Cluster, cloud)
-		if err != nil {
-			t.Fatalf("error from PerformAssignments: %v", err)
-		}
+	nodeupModelContext.KeyStore = keystore

-		full, err := mockedPopulateClusterSpec(nodeupModelContext.Cluster, cloud)
-		if err != nil {
-			t.Fatalf("unexpected error from mockedPopulateClusterSpec: %v", err)
-		}
-		nodeupModelContext.Cluster = full
+	if err := nodeupModelContext.Init(); err != nil {
+		t.Fatalf("error from nodeupModelContext.Init(): %v", err)
 	}

 	if err := builder(nodeupModelContext, context); err != nil {

View File

@@ -20,6 +20,8 @@ spec:
     - instanceGroup: master-us-test-1a
       name: master-us-test-1a
     name: events
+  iam:
+    legacy: false
   kubernetesVersion: v1.19.0
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com
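
This fixture and the ten that follow gain the same two-line iam block; only those lines are additions, the rest is context. The likely reason: BuildNodeupModelContext now pushes each fixture cluster through cloud building and (mocked) cluster-spec population, so the minimal specs have to carry the fields that path expects.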

View File

@@ -20,6 +20,8 @@ spec:
     - instanceGroup: master-us-test-1a
      name: master-us-test-1a
     name: events
+  iam:
+    legacy: false
   kubernetesVersion: v1.19.0
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com

View File

@@ -20,6 +20,8 @@ spec:
     - instanceGroup: master-us-test-1a
       name: master-us-test-1a
     name: events
+  iam:
+    legacy: false
   kubernetesVersion: v1.19.0
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com

View File

@@ -20,6 +20,8 @@ spec:
     - instanceGroup: master-us-test-1a
       name: master-us-test-1a
     name: events
+  iam:
+    legacy: false
   kubernetesVersion: v1.19.0
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com

View File

@@ -20,6 +20,8 @@ spec:
     - instanceGroup: master-us-test-1a
       name: master-us-test-1a
     name: events
+  iam:
+    legacy: false
   kubernetesVersion: v1.16.3
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com

View File

@@ -20,6 +20,8 @@ spec:
     - instanceGroup: master-us-test-1a
       name: master-us-test-1a
     name: events
+  iam:
+    legacy: false
   kubernetesVersion: v1.13.6
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com

View File

@@ -21,6 +21,8 @@ spec:
     - instanceGroup: master-us-test-1a
       name: master-us-test-1a
     name: events
+  iam:
+    legacy: false
   kubernetesVersion: v1.19.0
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com

View File

@@ -21,6 +21,8 @@ spec:
     - instanceGroup: master-us-test-1a
       name: master-us-test-1a
     name: events
+  iam:
+    legacy: false
   kubernetesVersion: v1.4.6
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com

View File

@@ -20,6 +20,8 @@ spec:
     - instanceGroup: master-us-test-1a
       name: master-us-test-1a
     name: events
+  iam:
+    legacy: false
   kubernetesVersion: v1.17.0
   masterInternalName: api.internal.logflags.example.com
   masterPublicName: api.logflags.example.com

View File

@@ -20,6 +20,8 @@ spec:
     - instanceGroup: master-us-test-1a
       name: master-us-test-1a
     name: events
+  iam:
+    legacy: false
   kubernetesVersion: v1.14.6
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com

View File

@@ -20,6 +20,8 @@ spec:
     - instanceGroup: master-us-test-1a
       name: master-us-test-1a
     name: events
+  iam:
+    legacy: false
   kubernetesVersion: v1.14.6
   masterInternalName: api.internal.minimal.example.com
   masterPublicName: api.minimal.example.com

View File

@@ -73,6 +73,9 @@ type Config struct {
 	Hooks [][]kops.HookSpec
 	// ContainerdConfig config holds the configuration for containerd
 	ContainerdConfig string `json:"containerdConfig,omitempty"`
+
+	// APIServerConfig is additional configuration for nodes running an APIServer.
+	APIServerConfig *APIServerConfig `json:",omitempty"`
 }

 // BootConfig is the configuration for the nodeup binary that might be too big to fit in userdata.
@@ -116,6 +119,12 @@ type StaticManifest struct {
 	Path string `json:"path,omitempty"`
 }
+
+// APIServerConfig is additional configuration for nodes running an APIServer.
+type APIServerConfig struct {
+	// KubeAPIServer is a copy of the KubeAPIServerConfig from the cluster spec.
+	KubeAPIServer *kops.KubeAPIServerConfig
+}

 func NewConfig(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) (*Config, *BootConfig) {
 	role := instanceGroup.Spec.Role
 	isMaster := role == kops.InstanceGroupRoleMaster
@@ -155,6 +164,12 @@ func NewConfig(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) (*Config, *BootConfig) {
 		reflectutils.JSONMergeStruct(&config.KubeletConfig, cluster.Spec.Kubelet)
 	}
+	if isMaster || role == kops.InstanceGroupRoleAPIServer {
+		config.APIServerConfig = &APIServerConfig{
+			KubeAPIServer: cluster.Spec.KubeAPIServer,
+		}
+	}
+
 	if instanceGroup.Spec.Kubelet != nil {
 		useSecureKubelet := config.KubeletConfig.AnonymousAuth != nil && !*config.KubeletConfig.AnonymousAuth
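
Taken together: NewConfig snapshots cluster.Spec.KubeAPIServer into the per-node config for master and apiserver roles. A sketch of why embedding the copy is enough to trigger node updates, under the assumption that the provisioner compares some fingerprint of the serialized nodeup config (the digest helper below is hypothetical; the real comparison lives outside this diff):

```go
package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// Hypothetical stand-ins for nodeup.Config and kops.KubeAPIServerConfig.
type KubeAPIServerConfig struct {
	SecurePort int32 `json:"securePort,omitempty"`
}

type APIServerConfig struct {
	KubeAPIServer *KubeAPIServerConfig `json:",omitempty"`
}

type Config struct {
	APIServerConfig *APIServerConfig `json:",omitempty"`
}

// digest stands in for however the provisioner fingerprints the
// rendered per-node config.
func digest(c *Config) string {
	b, _ := json.Marshal(c)
	return fmt.Sprintf("%x", sha256.Sum256(b))
}

func main() {
	before := &Config{APIServerConfig: &APIServerConfig{
		KubeAPIServer: &KubeAPIServerConfig{SecurePort: 443}}}
	after := &Config{APIServerConfig: &APIServerConfig{
		KubeAPIServer: &KubeAPIServerConfig{SecurePort: 8443}}}

	// Editing the APIServer spec changes the embedded copy, hence the
	// per-node config digest, flagging APIServer nodes for update.
	fmt.Println(digest(before) != digest(after)) // true
}
```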