diff --git a/nodeup/pkg/model/kube_proxy.go b/nodeup/pkg/model/kube_proxy.go index aca6efd03f..53069d9435 100644 --- a/nodeup/pkg/model/kube_proxy.go +++ b/nodeup/pkg/model/kube_proxy.go @@ -61,6 +61,14 @@ func (b *KubeProxyBuilder) Build(c *fi.ModelBuilderContext) error { } } + if b.ConfigurationMode == "Warming" { + pullTask := &nodetasks.PullImageTask{ + Name: kubeProxyImage(b.NodeupModelContext), + Runtime: b.Cluster.Spec.ContainerRuntime, + } + c.AddTask(pullTask) + } + { pod, err := b.buildPod() if err != nil { @@ -185,11 +193,7 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) { flags = append(flags, `--resource-container=""`) } - image := c.Image - if b.Architecture != architectures.ArchitectureAmd64 { - image = strings.Replace(image, "-amd64", "-"+string(b.Architecture), 1) - } - + image := kubeProxyImage(b.NodeupModelContext) container := &v1.Container{ Name: "kube-proxy", Image: image, @@ -312,3 +316,11 @@ func tolerateMasterTaints() []v1.Toleration { return tolerations } + +func kubeProxyImage(b *NodeupModelContext) string { + image := b.Cluster.Spec.KubeProxy.Image + if b.Architecture != architectures.ArchitectureAmd64 { + image = strings.Replace(image, "-amd64", "-"+string(b.Architecture), 1) + } + return image +} diff --git a/nodeup/pkg/model/kube_proxy_test.go b/nodeup/pkg/model/kube_proxy_test.go index 05118e5545..1b235b2780 100644 --- a/nodeup/pkg/model/kube_proxy_test.go +++ b/nodeup/pkg/model/kube_proxy_test.go @@ -177,3 +177,11 @@ func TestKubeProxyBuilderARM64(t *testing.T) { return builder.Build(target) }) } +func TestKubeProxyBuilderWarmPool(t *testing.T) { + RunGoldenTest(t, "tests/golden/minimal", "warmpool", func(nodeupModelContext *NodeupModelContext, target *fi.ModelBuilderContext) error { + nodeupModelContext.ConfigurationMode = "Warming" + builder := KubeProxyBuilder{NodeupModelContext: nodeupModelContext} + builder.Architecture = architectures.ArchitectureArm64 + return builder.Build(target) + }) +} diff --git 
a/nodeup/pkg/model/networking/cilium.go b/nodeup/pkg/model/networking/cilium.go index 6289b5d4e6..53d4218d36 100644 --- a/nodeup/pkg/model/networking/cilium.go +++ b/nodeup/pkg/model/networking/cilium.go @@ -36,7 +36,7 @@ var _ fi.ModelBuilder = &CiliumBuilder{} // Build is responsible for configuring the network cni func (b *CiliumBuilder) Build(c *fi.ModelBuilderContext) error { - networking := b.Cluster.Spec.Networking + cilium := b.Cluster.Spec.Networking.Cilium // As long as the Cilium Etcd cluster exists, we should do this if apiModel.UseCiliumEtcd(b.Cluster) { @@ -45,7 +45,7 @@ func (b *CiliumBuilder) Build(c *fi.ModelBuilderContext) error { } } - if networking.Cilium == nil { + if cilium == nil { return nil } @@ -53,6 +53,14 @@ func (b *CiliumBuilder) Build(c *fi.ModelBuilderContext) error { return err } + if b.ConfigurationMode == "Warming" { + image := &nodetasks.PullImageTask{ + Name: "docker.io/cilium/cilium:" + cilium.Version, + Runtime: b.Cluster.Spec.ContainerRuntime, + } + c.AddTask(image) + } + return nil } diff --git a/nodeup/pkg/model/tests/golden/minimal/tasks-warmpool.yaml b/nodeup/pkg/model/tests/golden/minimal/tasks-warmpool.yaml new file mode 100644 index 0000000000..d0c2f4f266 --- /dev/null +++ b/nodeup/pkg/model/tests/golden/minimal/tasks-warmpool.yaml @@ -0,0 +1,143 @@ +contents: | + apiVersion: v1 + kind: Pod + metadata: + annotations: + scheduler.alpha.kubernetes.io/critical-pod: "" + creationTimestamp: null + labels: + k8s-app: kube-proxy + tier: node + name: kube-proxy + namespace: kube-system + spec: + containers: + - args: + - --cluster-cidr=100.96.0.0/11 + - --conntrack-max-per-core=131072 + - --hostname-override=@aws + - --kubeconfig=/var/lib/kube-proxy/kubeconfig + - --master=https://127.0.0.1 + - --oom-score-adj=-998 + - --v=2 + - --logtostderr=false + - --alsologtostderr + - --log-file=/var/log/kube-proxy.log + command: + - /usr/local/bin/kube-proxy + image: k8s.gcr.io/kube-proxy:v1.18.0 + name: kube-proxy + resources: + 
requests: + cpu: 100m + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/log/kube-proxy.log + name: logfile + - mountPath: /var/lib/kube-proxy/kubeconfig + name: kubeconfig + readOnly: true + - mountPath: /lib/modules + name: modules + readOnly: true + - mountPath: /etc/ssl/certs + name: ssl-certs-hosts + readOnly: true + - mountPath: /run/xtables.lock + name: iptableslock + hostNetwork: true + priorityClassName: system-node-critical + tolerations: + - key: CriticalAddonsOnly + operator: Exists + volumes: + - hostPath: + path: /var/log/kube-proxy.log + name: logfile + - hostPath: + path: /var/lib/kube-proxy/kubeconfig + name: kubeconfig + - hostPath: + path: /lib/modules + name: modules + - hostPath: + path: /usr/share/ca-certificates + name: ssl-certs-hosts + - hostPath: + path: /run/xtables.lock + type: FileOrCreate + name: iptableslock + status: {} +path: /etc/kubernetes/manifests/kube-proxy.manifest +type: file +--- +beforeServices: +- kubelet.service +contents: + task: + CA: + task: + Name: kube-proxy + signer: ca + subject: + CommonName: system:kube-proxy + type: client + Cert: + task: + Name: kube-proxy + signer: ca + subject: + CommonName: system:kube-proxy + type: client + Key: + task: + Name: kube-proxy + signer: ca + subject: + CommonName: system:kube-proxy + type: client + Name: kube-proxy + ServerURL: https://127.0.0.1 +mode: "0400" +path: /var/lib/kube-proxy/kubeconfig +type: file +--- +contents: "" +ifNotExists: true +mode: "0400" +path: /var/log/kube-proxy.log +type: file +--- +Name: kube-proxy +signer: ca +subject: + CommonName: system:kube-proxy +type: client +--- +CA: + task: + Name: kube-proxy + signer: ca + subject: + CommonName: system:kube-proxy + type: client +Cert: + task: + Name: kube-proxy + signer: ca + subject: + CommonName: system:kube-proxy + type: client +Key: + task: + Name: kube-proxy + signer: ca + subject: + CommonName: system:kube-proxy + type: client +Name: kube-proxy +ServerURL: https://127.0.0.1 +--- 
+Name: k8s.gcr.io/kube-proxy:v1.18.0 +Runtime: docker diff --git a/upup/pkg/fi/nodeup/nodetasks/BUILD.bazel b/upup/pkg/fi/nodeup/nodetasks/BUILD.bazel index f21435d268..1daa662d71 100644 --- a/upup/pkg/fi/nodeup/nodetasks/BUILD.bazel +++ b/upup/pkg/fi/nodeup/nodetasks/BUILD.bazel @@ -14,6 +14,7 @@ go_library( "kubeconfig.go", "load_image.go", "package.go", + "pull_image.go", "service.go", "update_packages.go", "user.go", diff --git a/upup/pkg/fi/nodeup/nodetasks/pull_image.go b/upup/pkg/fi/nodeup/nodetasks/pull_image.go new file mode 100644 index 0000000000..a9f5569380 --- /dev/null +++ b/upup/pkg/fi/nodeup/nodetasks/pull_image.go @@ -0,0 +1,100 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nodetasks + +import ( + "fmt" + "os/exec" + "strings" + + "k8s.io/klog/v2" + "k8s.io/kops/upup/pkg/fi" + "k8s.io/kops/upup/pkg/fi/nodeup/local" +) + +// PullImageTask is responsible for pulling a container image +type PullImageTask struct { + Name string + Runtime string +} + +var _ fi.Task = &PullImageTask{} + var _ fi.HasDependencies = &PullImageTask{} + +func (t *PullImageTask) GetDependencies(tasks map[string]fi.Task) []fi.Task { + // PullImageTask depends on the container runtime service to + // ensure we pull images only after the runtime is completely + // updated and configured.
+ var deps []fi.Task + for _, v := range tasks { + if svc, ok := v.(*Service); ok && svc.Name == containerdService { + deps = append(deps, v) + } + if svc, ok := v.(*Service); ok && svc.Name == dockerService { + deps = append(deps, v) + } + } + return deps +} + +func (e *PullImageTask) Find(c *fi.Context) (*PullImageTask, error) { + klog.Warningf("PullImageTask checking if image present not yet implemented") + return nil, nil +} + +func (e *PullImageTask) Run(c *fi.Context) error { + return fi.DefaultDeltaRunMethod(e, c) +} + +func (t *PullImageTask) GetName() *string { + if t.Name == "" { + return nil + } + return &t.Name +} + +func (*PullImageTask) CheckChanges(a, e, changes *PullImageTask) error { + return nil +} + +func (*PullImageTask) RenderLocal(t *local.LocalTarget, a, e, changes *PullImageTask) error { + runtime := e.Runtime + if runtime != "docker" && runtime != "containerd" { + return fmt.Errorf("no runtime specified") + } + + // Pull the container image + var args []string + switch runtime { + case "docker": + args = []string{"docker", "pull", e.Name} + case "containerd": + args = []string{"ctr", "--namespace", "k8s.io", "images", "pull", e.Name} + default: + return fmt.Errorf("unknown container runtime: %s", runtime) + } + human := strings.Join(args, " ") + + klog.Infof("running command %s", human) + cmd := exec.Command(args[0], args[1:]...)
+ output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("error pulling image with '%s': %v: %s", human, err, string(output)) + } + + return nil +} diff --git a/upup/pkg/fi/nodeup/nodetasks/service.go b/upup/pkg/fi/nodeup/nodetasks/service.go index 32337780c1..f02770bb4e 100644 --- a/upup/pkg/fi/nodeup/nodetasks/service.go +++ b/upup/pkg/fi/nodeup/nodetasks/service.go @@ -75,7 +75,7 @@ func (p *Service) GetDependencies(tasks map[string]fi.Task) []fi.Task { switch v := v.(type) { case *Package, *UpdatePackages, *UserTask, *GroupTask, *Chattr, *BindMount, *Archive: deps = append(deps, v) - case *Service, *LoadImageTask, *IssueCert, *BootstrapClientTask, *KubeConfig: + case *Service, *LoadImageTask, *PullImageTask, *IssueCert, *BootstrapClientTask, *KubeConfig: // ignore case *File: if len(v.BeforeServices) > 0 {