mirror of https://github.com/kubernetes/kops.git
Use NodeupConfig for ContainerRuntime
parent 991ab04201
commit 25a897b691
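This change routes the container runtime selection through the per-node nodeup config: NewConfig copies cluster.Spec.ContainerRuntime into nodeup.Config, and the node-side builders read NodeupConfig.ContainerRuntime instead of reaching into the full cluster spec. Below is a minimal, self-contained sketch of that flow; the trimmed structs and the helper names buildNodeConfig and configureRuntime are illustrative stand-ins, not kops APIs.

package main

import "fmt"

// ClusterSpec is a trimmed stand-in for the cluster-level spec;
// only the container-runtime field is shown.
type ClusterSpec struct {
    ContainerRuntime string // e.g. "containerd" or "docker"
}

// Config is a trimmed stand-in for nodeup.Config, carrying the
// field this commit adds.
type Config struct {
    // ContainerRuntime is the container runtime to use for Kubernetes.
    ContainerRuntime string
}

// buildNodeConfig plays the role of NewConfig in the diff below:
// the cluster-wide choice is copied into the per-node config once,
// when the node configuration is generated.
func buildNodeConfig(spec ClusterSpec) *Config {
    return &Config{ContainerRuntime: spec.ContainerRuntime}
}

// configureRuntime plays the role of the node-side builders, which
// now branch on the nodeup config rather than on the cluster spec.
func configureRuntime(cfg *Config) {
    switch cfg.ContainerRuntime {
    case "containerd":
        fmt.Println("configuring containerd")
    case "docker":
        fmt.Println("configuring docker")
    default:
        fmt.Printf("unknown container runtime %q\n", cfg.ContainerRuntime)
    }
}

func main() {
    cfg := buildNodeConfig(ClusterSpec{ContainerRuntime: "containerd"})
    configureRuntime(cfg)
}

In the actual diff the builders hold the config as b.NodeupConfig (or nodeupConfig in the nodeup command), so the checks become b.NodeupConfig.ContainerRuntime == "containerd" and so on, as shown in the hunks below.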
@@ -57,7 +57,7 @@ func (b *ContainerdBuilder) Build(c *fi.NodeupModelBuilderContext) error {
     case distributions.DistributionFlatcar:
         klog.Infof("Detected Flatcar; won't install containerd")
         installContainerd = false
-        if b.Cluster.Spec.ContainerRuntime == "containerd" {
+        if b.NodeupConfig.ContainerRuntime == "containerd" {
             b.buildSystemdServiceOverrideFlatcar(c)
         }
     case distributions.DistributionContainerOS:
@@ -66,7 +66,7 @@ func (b *ContainerdBuilder) Build(c *fi.NodeupModelBuilderContext) error {
         b.buildSystemdServiceOverrideContainerOS(c)
     }

-    if b.Cluster.Spec.ContainerRuntime == "containerd" {
+    if b.NodeupConfig.ContainerRuntime == "containerd" {
         // Using containerd with Kubenet requires special configuration.
         // This is a temporary backwards-compatible solution for kubenet users and will be deprecated when Kubenet is deprecated:
         // https://github.com/containerd/containerd/blob/master/docs/cri/config.md#cni-config-template
@@ -106,7 +106,7 @@ func (b *ContainerdBuilder) installContainerd(c *fi.NodeupModelBuilderContext) e
     }

     // Add binaries from assets
-    if b.Cluster.Spec.ContainerRuntime == "containerd" {
+    if b.NodeupConfig.ContainerRuntime == "containerd" {
         // Add containerd binaries from containerd release package
         f := b.Assets.FindMatches(regexp.MustCompile(`^bin/(containerd|ctr)`))
         if len(f) == 0 {
@@ -161,7 +161,7 @@ func (b *ContainerdBuilder) installContainerd(c *fi.NodeupModelBuilderContext) e
     }

     var containerRuntimeVersion string
-    if b.Cluster.Spec.ContainerRuntime == "containerd" {
+    if b.NodeupConfig.ContainerRuntime == "containerd" {
         if b.Cluster.Spec.Containerd != nil {
             containerRuntimeVersion = fi.ValueOf(b.NodeupConfig.ContainerdConfig.Version)
         } else {
@@ -207,7 +207,7 @@ func (b *ContainerdBuilder) buildSystemdService(sv semver.Version) *nodetasks.Se
     manifest.Set("Service", "ExecStart", "/usr/bin/containerd -c "+b.containerdConfigFilePath()+" \"$CONTAINERD_OPTS\"")

     // notify the daemon's readiness to systemd
-    if (b.Cluster.Spec.ContainerRuntime == "containerd" && sv.GTE(semver.MustParse("1.3.4"))) || sv.GTE(semver.MustParse("19.3.13")) {
+    if (b.NodeupConfig.ContainerRuntime == "containerd" && sv.GTE(semver.MustParse("1.3.4"))) || sv.GTE(semver.MustParse("19.3.13")) {
         manifest.Set("Service", "Type", "notify")
     }

@@ -476,7 +476,7 @@ func (b *ContainerdBuilder) buildCNIConfigTemplateFile(c *fi.NodeupModelBuilderC
 }

 func (b *ContainerdBuilder) buildContainerdConfig() (string, error) {
-    if b.Cluster.Spec.ContainerRuntime != "containerd" {
+    if b.NodeupConfig.ContainerRuntime != "containerd" {
         return "", nil
     }

@@ -197,6 +197,7 @@ func TestContainerdConfig(t *testing.T) {
         NodeupModelContext: &NodeupModelContext{
             Cluster: cluster,
             NodeupConfig: &nodeup.Config{
+                ContainerRuntime: "containerd",
                 ContainerdConfig: &kops.ContainerdConfig{},
             },
         },

@@ -56,7 +56,7 @@ func (b *DockerBuilder) dockerVersion() (string, error) {

 // Build is responsible for configuring the docker daemon
 func (b *DockerBuilder) Build(c *fi.NodeupModelBuilderContext) error {
-    if b.Cluster.Spec.ContainerRuntime != "docker" {
+    if b.NodeupConfig.ContainerRuntime != "docker" {
         return nil
     }
     if b.skipInstall() {

@@ -124,7 +124,7 @@ func (h *HookBuilder) buildSystemdService(name string, hook *kops.HookSpec) (*no
     case nil:
         unit.SetSection("Service", hook.Manifest)
     default:
-        switch h.Cluster.Spec.ContainerRuntime {
+        switch h.NodeupConfig.ContainerRuntime {
         case "containerd":
             if err := h.buildContainerdService(unit, hook, name); err != nil {
                 return nil, err
@@ -134,7 +134,7 @@ func (h *HookBuilder) buildSystemdService(name string, hook *kops.HookSpec) (*no
                 return nil, err
             }
         default:
-            return nil, fmt.Errorf("unknown container runtime %q", h.Cluster.Spec.ContainerRuntime)
+            return nil, fmt.Errorf("unknown container runtime %q", h.NodeupConfig.ContainerRuntime)
         }
     }
     definition = s(unit.Render())

@@ -161,7 +161,7 @@ func (b *KubeletBuilder) Build(c *fi.NodeupModelBuilderContext) error {
         return err
     }

-    if kubeletConfig.CgroupDriver == "systemd" && b.Cluster.Spec.ContainerRuntime == "containerd" {
+    if kubeletConfig.CgroupDriver == "systemd" && b.NodeupConfig.ContainerRuntime == "containerd" {

         {
             cgroup := kubeletConfig.KubeletCgroups
@@ -304,7 +304,7 @@ func (b *KubeletBuilder) buildSystemdEnvironmentFile(kubeletConfig *kops.Kubelet
     }

     // Add container runtime spcific flags
-    switch b.Cluster.Spec.ContainerRuntime {
+    switch b.NodeupConfig.ContainerRuntime {
     case "docker":
         if b.IsKubernetesLT("1.24") {
             flags += " --container-runtime=docker"
@@ -354,13 +354,13 @@ func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {
     manifest := &systemd.Manifest{}
     manifest.Set("Unit", "Description", "Kubernetes Kubelet Server")
     manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kubernetes")
-    switch b.Cluster.Spec.ContainerRuntime {
+    switch b.NodeupConfig.ContainerRuntime {
     case "docker":
         manifest.Set("Unit", "After", "docker.service")
     case "containerd":
         manifest.Set("Unit", "After", "containerd.service")
     default:
-        klog.Warningf("unknown container runtime %q", b.Cluster.Spec.ContainerRuntime)
+        klog.Warningf("unknown container runtime %q", b.NodeupConfig.ContainerRuntime)
     }

     manifest.Set("Service", "EnvironmentFile", "/etc/sysconfig/kubelet")
@@ -382,7 +382,7 @@ func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {

     manifest.Set("Install", "WantedBy", "multi-user.target")

-    if b.NodeupConfig.KubeletConfig.CgroupDriver == "systemd" && b.Cluster.Spec.ContainerRuntime == "containerd" {
+    if b.NodeupConfig.KubeletConfig.CgroupDriver == "systemd" && b.NodeupConfig.ContainerRuntime == "containerd" {
         cgroup := b.NodeupConfig.KubeletConfig.KubeletCgroups
         if cgroup != "" {
             manifest.Set("Service", "Slice", strings.Trim(cgroup, "/")+".slice")

@@ -39,7 +39,7 @@ func (b *WarmPoolBuilder) Build(c *fi.NodeupModelBuilderContext) error {
         for _, image := range b.NodeupConfig.WarmPoolImages {
             c.AddTask(&nodetasks.PullImageTask{
                 Name: image,
-                Runtime: b.Cluster.Spec.ContainerRuntime,
+                Runtime: b.NodeupConfig.ContainerRuntime,
             })
         }
     }

@@ -69,6 +69,8 @@ type Config struct {
     FileAssets []kops.FileAssetSpec `json:",omitempty"`
     // Hooks are for custom actions, for example on first installation.
     Hooks [][]kops.HookSpec
+    // ContainerRuntime is the container runtime to use for Kubernetes.
+    ContainerRuntime string
     // ContainerdConfig config holds the configuration for containerd
     ContainerdConfig *kops.ContainerdConfig `json:"containerdConfig,omitempty"`
@@ -168,6 +170,7 @@ func NewConfig(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) (*Confi
         VolumeMounts: instanceGroup.Spec.VolumeMounts,
         FileAssets: append(filterFileAssets(instanceGroup.Spec.FileAssets, role), filterFileAssets(cluster.Spec.FileAssets, role)...),
         Hooks: [][]kops.HookSpec{igHooks, clusterHooks},
+        ContainerRuntime: cluster.Spec.ContainerRuntime,
     }

     bootConfig := BootConfig{

@@ -378,7 +378,6 @@ func (b *BootstrapScript) Run(c *fi.CloudupContext) error {

     spec := make(map[string]interface{})
     spec["cloudConfig"] = cs.CloudConfig
-    spec["containerRuntime"] = cs.ContainerRuntime
     spec["containerd"] = cs.Containerd
     spec["docker"] = cs.Docker
     spec["kubeProxy"] = cs.KubeProxy

@@ -354,7 +354,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
         taskMap["LoadImage."+strconv.Itoa(i)] = &nodetasks.LoadImageTask{
             Sources: image.Sources,
             Hash: image.Hash,
-            Runtime: c.cluster.Spec.ContainerRuntime,
+            Runtime: nodeupConfig.ContainerRuntime,
         }
     }
     // Protokube load image task is in ProtokubeBuilder
@@ -466,7 +466,7 @@ func evaluateSpec(c *NodeUpCommand, nodeupConfig *nodeup.Config, cloudProvider a
         return err
     }

-    if c.cluster.Spec.ContainerRuntime == "docker" {
+    if nodeupConfig.ContainerRuntime == "docker" {
         err = evaluateDockerSpecStorage(c.cluster.Spec.Docker)
         if err != nil {
             return err