Merge pull request #11258 from olemarkus/prewarm-cilium

Pre-pull cilium and kube-proxy in warming mode
This commit is contained in:
Kubernetes Prow Robot 2021-04-18 23:48:36 -07:00 committed by GitHub
commit 7dc29de781
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 266 additions and 8 deletions

View File

@ -644,3 +644,13 @@ func (c *NodeupModelContext) CNIBinDir() string {
// CNIConfDir returns the directory in which CNI network configuration files live.
func (c *NodeupModelContext) CNIConfDir() string {
	const confDir = "/etc/cni/net.d/"
	return confDir
}
// WarmPullImage registers a PullImageTask for imageName when nodeup is running
// in warm-pool ("Warming") configuration mode; in any other mode it is a no-op.
// The task uses the cluster's configured container runtime to perform the pull.
func (c *NodeupModelContext) WarmPullImage(ctx *fi.ModelBuilderContext, imageName string) {
	if c.ConfigurationMode != "Warming" {
		return
	}
	ctx.AddTask(&nodetasks.PullImageTask{
		Name:    imageName,
		Runtime: c.Cluster.Spec.ContainerRuntime,
	})
}

View File

@ -61,6 +61,8 @@ func (b *KubeProxyBuilder) Build(c *fi.ModelBuilderContext) error {
}
}
b.WarmPullImage(c, kubeProxyImage(b.NodeupModelContext))
{
pod, err := b.buildPod()
if err != nil {
@ -185,11 +187,7 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
flags = append(flags, `--resource-container=""`)
}
image := c.Image
if b.Architecture != architectures.ArchitectureAmd64 {
image = strings.Replace(image, "-amd64", "-"+string(b.Architecture), 1)
}
image := kubeProxyImage(b.NodeupModelContext)
container := &v1.Container{
Name: "kube-proxy",
Image: image,
@ -312,3 +310,11 @@ func tolerateMasterTaints() []v1.Toleration {
return tolerations
}
// kubeProxyImage returns the kube-proxy container image for this node,
// rewriting the "-amd64" suffix in the configured image name to match the
// node's architecture when it is not amd64.
func kubeProxyImage(b *NodeupModelContext) string {
	image := b.Cluster.Spec.KubeProxy.Image
	if b.Architecture == architectures.ArchitectureAmd64 {
		return image
	}
	return strings.Replace(image, "-amd64", "-"+string(b.Architecture), 1)
}

View File

@ -177,3 +177,11 @@ func TestKubeProxyBuilderARM64(t *testing.T) {
return builder.Build(target)
})
}
// TestKubeProxyBuilderWarmPool checks the golden output produced when
// kube-proxy is built in warm-pool ("Warming") mode on an arm64 node.
func TestKubeProxyBuilderWarmPool(t *testing.T) {
	RunGoldenTest(t, "tests/golden/minimal", "warmpool", func(nmc *NodeupModelContext, target *fi.ModelBuilderContext) error {
		nmc.ConfigurationMode = "Warming"
		b := KubeProxyBuilder{NodeupModelContext: nmc}
		b.Architecture = architectures.ArchitectureArm64
		return b.Build(target)
	})
}

View File

@ -36,7 +36,7 @@ var _ fi.ModelBuilder = &CiliumBuilder{}
// Build is responsible for configuring the network cni
func (b *CiliumBuilder) Build(c *fi.ModelBuilderContext) error {
networking := b.Cluster.Spec.Networking
cilium := b.Cluster.Spec.Networking.Cilium
// As long as the Cilium Etcd cluster exists, we should do this
if apiModel.UseCiliumEtcd(b.Cluster) {
@ -45,7 +45,7 @@ func (b *CiliumBuilder) Build(c *fi.ModelBuilderContext) error {
}
}
if networking.Cilium == nil {
if cilium == nil {
return nil
}
@ -53,6 +53,10 @@ func (b *CiliumBuilder) Build(c *fi.ModelBuilderContext) error {
return err
}
image := "docker.io/cilium/cilium:" + cilium.Version
b.WarmPullImage(c, image)
return nil
}

View File

@ -0,0 +1,143 @@
contents: |
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: kube-proxy
tier: node
name: kube-proxy
namespace: kube-system
spec:
containers:
- args:
- --cluster-cidr=100.96.0.0/11
- --conntrack-max-per-core=131072
- --hostname-override=@aws
- --kubeconfig=/var/lib/kube-proxy/kubeconfig
- --master=https://127.0.0.1
- --oom-score-adj=-998
- --v=2
- --logtostderr=false
- --alsologtostderr
- --log-file=/var/log/kube-proxy.log
command:
- /usr/local/bin/kube-proxy
image: k8s.gcr.io/kube-proxy:v1.18.0
name: kube-proxy
resources:
requests:
cpu: 100m
securityContext:
privileged: true
volumeMounts:
- mountPath: /var/log/kube-proxy.log
name: logfile
- mountPath: /var/lib/kube-proxy/kubeconfig
name: kubeconfig
readOnly: true
- mountPath: /lib/modules
name: modules
readOnly: true
- mountPath: /etc/ssl/certs
name: ssl-certs-hosts
readOnly: true
- mountPath: /run/xtables.lock
name: iptableslock
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /var/log/kube-proxy.log
name: logfile
- hostPath:
path: /var/lib/kube-proxy/kubeconfig
name: kubeconfig
- hostPath:
path: /lib/modules
name: modules
- hostPath:
path: /usr/share/ca-certificates
name: ssl-certs-hosts
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: iptableslock
status: {}
path: /etc/kubernetes/manifests/kube-proxy.manifest
type: file
---
beforeServices:
- kubelet.service
contents:
task:
CA:
task:
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
Cert:
task:
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
Key:
task:
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
Name: kube-proxy
ServerURL: https://127.0.0.1
mode: "0400"
path: /var/lib/kube-proxy/kubeconfig
type: file
---
contents: ""
ifNotExists: true
mode: "0400"
path: /var/log/kube-proxy.log
type: file
---
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
---
CA:
task:
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
Cert:
task:
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
Key:
task:
Name: kube-proxy
signer: ca
subject:
CommonName: system:kube-proxy
type: client
Name: kube-proxy
ServerURL: https://127.0.0.1
---
Name: k8s.gcr.io/kube-proxy:v1.18.0
Runtime: docker

View File

@ -14,6 +14,7 @@ go_library(
"kubeconfig.go",
"load_image.go",
"package.go",
"pull_image.go",
"service.go",
"update_packages.go",
"user.go",

View File

@ -0,0 +1,86 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodetasks
import (
"fmt"
"os/exec"
"strings"
"k8s.io/klog/v2"
"k8s.io/kops/upup/pkg/fi"
)
// PullImageTask is responsible for pulling a docker image
type PullImageTask struct {
	// Name is the image reference to pull (e.g. "k8s.gcr.io/kube-proxy:v1.18.0").
	// It also serves as the task name.
	Name string
	// Runtime selects the CLI used to pull: "docker" or "containerd".
	Runtime string
}

// Compile-time checks that PullImageTask satisfies the task interfaces.
var _ fi.Task = &PullImageTask{}
var _ fi.HasDependencies = &PullImageTask{}
// GetDependencies returns the container-runtime service tasks (containerd or
// docker), so that PullImageTask pulls images only after the container
// runtime has been completely installed and configured.
func (t *PullImageTask) GetDependencies(tasks map[string]fi.Task) []fi.Task {
	var deps []fi.Task
	for _, task := range tasks {
		svc, ok := task.(*Service)
		if !ok {
			continue
		}
		if svc.Name == containerdService || svc.Name == dockerService {
			deps = append(deps, task)
		}
	}
	return deps
}
// GetName returns the task name (the image reference), or nil when no name is set.
func (t *PullImageTask) GetName() *string {
	if t.Name != "" {
		return &t.Name
	}
	return nil
}
// Run pulls the container image named by t.Name using the configured
// container runtime CLI ("docker" or "containerd"). It returns an error if
// no runtime is set, the runtime is unknown, or the pull command fails.
func (t *PullImageTask) Run(c *fi.Context) error {
	runtime := t.Runtime
	// Only reject an empty runtime here; an unknown non-empty runtime is
	// reported by the switch default below (the previous combined check made
	// that branch unreachable and mislabeled unknown runtimes as unspecified).
	if runtime == "" {
		return fmt.Errorf("no runtime specified")
	}

	// Build the pull command for the selected runtime.
	var args []string
	switch runtime {
	case "docker":
		args = []string{"docker", "pull", t.Name}
	case "containerd":
		// Pull into the k8s.io namespace so the image is visible to the kubelet.
		args = []string{"ctr", "--namespace", "k8s.io", "images", "pull", t.Name}
	default:
		return fmt.Errorf("unknown container runtime: %s", runtime)
	}
	human := strings.Join(args, " ")

	klog.Infof("running command %s", human)
	cmd := exec.Command(args[0], args[1:]...)
	output, err := cmd.CombinedOutput()
	if err != nil {
		// Say "container image", not "docker image": this path also covers containerd.
		return fmt.Errorf("error pulling container image with '%s': %v: %s", human, err, string(output))
	}
	return nil
}

View File

@ -75,7 +75,7 @@ func (p *Service) GetDependencies(tasks map[string]fi.Task) []fi.Task {
switch v := v.(type) {
case *Package, *UpdatePackages, *UserTask, *GroupTask, *Chattr, *BindMount, *Archive:
deps = append(deps, v)
case *Service, *LoadImageTask, *IssueCert, *BootstrapClientTask, *KubeConfig:
case *Service, *LoadImageTask, *PullImageTask, *IssueCert, *BootstrapClientTask, *KubeConfig:
// ignore
case *File:
if len(v.BeforeServices) > 0 {