Support overlay2 in docker
We also have to stop passing the flag on ContainerOS, because it is already set in /etc/docker/default.json and it is now an error to pass the flag twice. That in turn means moving those options into code; they were the last of the legacy config options. (We still have a few tasks defined declaratively, though.)
parent 36feaeb8fb
commit ef5936d8b5
@@ -86,7 +86,7 @@ kops create cluster [flags]
       --master-tenancy string      The tenancy of the master group on AWS. Can either be default or dedicated.
       --master-volume-size int32   Set instance volume size (in GB) for masters
       --master-zones strings       Zones in which to run masters (must be an odd number)
-      --model string               Models to apply (separate multiple models with commas) (default "config,proto,cloudup")
+      --model string               Models to apply (separate multiple models with commas) (default "proto,cloudup")
       --network-cidr string        Set to override the default network CIDR
       --networking string          Networking mode to use.  kubenet (default), classic, external, kopeio-vxlan (or kopeio), weave, flannel-vxlan (or flannel), flannel-udp, calico, canal, kube-router, romana, amazon-vpc-routed-eni, cilium. (default "kubenet")
       --node-count int32           Set the number of nodes

@@ -28,7 +28,7 @@ kops update cluster [flags]
       --create-kube-config            Will control automatically creating the kube config file on your local filesystem (default true)
   -h, --help                          help for cluster
       --lifecycle-overrides strings   comma separated list of phase overrides, example: SecurityGroups=Ignore,InternetGateway=ExistsAndWarnIfChanges
-      --model string                  Models to apply (separate multiple models with commas) (default "config,proto,cloudup")
+      --model string                  Models to apply (separate multiple models with commas) (default "proto,cloudup")
       --out string                    Path to write any local output
       --phase string                  Subset of tasks to run: assets, cluster, network, security
       --ssh-public-key string         SSH public key to use (deprecated: use kops create secret instead)

@@ -23,6 +23,7 @@ import (
 
+	"k8s.io/kops/nodeup/pkg/distros"
 	"k8s.io/kops/nodeup/pkg/model/resources"
 	"k8s.io/kops/pkg/apis/kops"
 	"k8s.io/kops/pkg/flagbuilder"
 	"k8s.io/kops/pkg/systemd"
 	"k8s.io/kops/upup/pkg/fi"

@@ -764,7 +765,17 @@ func (b *DockerBuilder) buildContainerOSConfigurationDropIn(c *fi.ModelBuilderContext) error {
 
 // buildSysconfig is responsible for extracting the docker configuration and writing the sysconfig file
 func (b *DockerBuilder) buildSysconfig(c *fi.ModelBuilderContext) error {
-	flagsString, err := flagbuilder.BuildFlags(b.Cluster.Spec.Docker)
+	var docker kops.DockerConfig
+	if b.Cluster.Spec.Docker != nil {
+		docker = *b.Cluster.Spec.Docker
+	}
+
+	// ContainerOS now sets the storage flag in /etc/docker/daemon.json, and it is an error to set it twice
+	if b.Distribution == distros.DistributionContainerOS {
+		docker.Storage = nil
+	}
+
+	flagsString, err := flagbuilder.BuildFlags(&docker)
 	if err != nil {
 		return fmt.Errorf("error building docker flags: %v", err)
 	}

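As an aside on the pattern above: the config is copied by value before Storage is cleared, so the ContainerOS special case never mutates the cluster spec itself. A minimal, self-contained sketch of that behavior — the DockerConfig subset and the main wrapper are illustrative stand-ins, not kops code:

package main

import "fmt"

// DockerConfig is an illustrative stand-in for the kops.DockerConfig subset used here.
type DockerConfig struct {
	Storage  *string
	LogLevel *string
}

func main() {
	storage, logLevel := "overlay2,overlay,aufs", "warn"
	spec := &DockerConfig{Storage: &storage, LogLevel: &logLevel}

	// Copy by value: clearing Storage on the copy leaves the spec untouched.
	docker := *spec
	isContainerOS := true // stands in for b.Distribution == distros.DistributionContainerOS
	if isContainerOS {
		// ContainerOS already sets the storage driver in /etc/docker/daemon.json,
		// so emitting --storage-driver again would be rejected by the daemon.
		docker.Storage = nil
	}

	fmt.Println(docker.Storage == nil) // true: flag suppressed on ContainerOS
	fmt.Println(*spec.Storage)         // overlay2,overlay,aufs: spec unchanged
}
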
@@ -66,6 +66,19 @@ func TestDockerBuilder_BuildFlags(t *testing.T) {
 		},
 		"--log-driver=json-file --log-opt=max-file=5 --log-opt=max-size=10m",
 	},
+	// nil bridge & empty bridge are the same
+	{
+		kops.DockerConfig{Bridge: nil},
+		"",
+	},
+	{
+		kops.DockerConfig{Bridge: fi.String("")},
+		"",
+	},
+	{
+		kops.DockerConfig{Bridge: fi.String("br0")},
+		"--bridge=br0",
+	},
 }
 
 for _, g := range grid {

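The new test cases lean on kops' fi.String pointer helper to distinguish an unset bridge (nil) from an explicitly empty one. A rough re-implementation of the two helpers involved, for readers without the kops source at hand (illustrative, not the actual fi package):

package main

import "fmt"

// String mimics fi.String: lift a literal to *string, so optional config
// fields can distinguish "unset" (nil) from "set to empty" ("").
func String(s string) *string { return &s }

// StringValue mimics fi.StringValue: a nil-safe dereference.
func StringValue(s *string) string {
	if s == nil {
		return ""
	}
	return *s
}

func main() {
	var unset *string
	empty := String("")
	// Both yield the same flag output (none), which is what the new cases assert.
	fmt.Println(StringValue(unset) == StringValue(empty)) // true
}
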
@@ -19,6 +19,7 @@ package components
 import (
 	"fmt"
 
+	"github.com/golang/glog"
 	"k8s.io/kops/pkg/apis/kops"
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/loader"

@@ -26,7 +27,7 @@ import (
 
 // DockerOptionsBuilder adds options for docker to the model
 type DockerOptionsBuilder struct {
-	Context *OptionsContext
+	*OptionsContext
 }
 
 var _ loader.OptionsBuilder = &DockerOptionsBuilder{}

@@ -44,6 +45,8 @@ func (b *DockerOptionsBuilder) BuildOptions(o interface{}) error {
 		clusterSpec.Docker = &kops.DockerConfig{}
 	}
 
+	docker := clusterSpec.Docker
+
 	if fi.StringValue(clusterSpec.Docker.Version) == "" {
 		if clusterSpec.KubernetesVersion == "" {
 			return fmt.Errorf("KubernetesVersion is required")

@@ -78,5 +81,22 @@ func (b *DockerOptionsBuilder) BuildOptions(o interface{}) error {
 		}
 	}
 
+	docker.LogLevel = fi.String("warn")
+	docker.IPTables = fi.Bool(false)
+	docker.IPMasq = fi.Bool(false)
+
+	// Note the alternative syntax... with a comma nodeup will try each of the filesystems in turn
+	if b.IsKubernetesGTE("1.10") {
+		docker.Storage = fi.String("overlay2,overlay,aufs")
+	} else {
+		docker.Storage = fi.String("overlay,aufs")
+	}
+
+	networking := clusterSpec.Networking
+	if networking == nil || networking.Classic != nil {
+		glog.Warningf("using deprecated (classic) networking")
+		docker.Bridge = fi.String("cbr0")
+	}
+
 	return nil
 }

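The comment about the comma syntax is worth unpacking: nodeup treats the storage value as an ordered preference list and uses the first driver the node actually supports. A hypothetical sketch of that selection logic (the function and the supported predicate are illustrative, not nodeup's implementation):

package main

import (
	"fmt"
	"strings"
)

// pickStorageDriver is a hypothetical illustration of the comma fallback:
// try each listed driver in order and keep the first supported one.
func pickStorageDriver(storage string, supported func(string) bool) (string, bool) {
	for _, driver := range strings.Split(storage, ",") {
		if supported(driver) {
			return driver, true
		}
	}
	return "", false
}

func main() {
	// Pretend this kernel supports overlay and aufs, but not overlay2.
	supported := func(d string) bool { return d == "overlay" || d == "aufs" }

	driver, ok := pickStorageDriver("overlay2,overlay,aufs", supported)
	fmt.Println(driver, ok) // overlay true
}
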
@@ -141,7 +141,6 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1bmastersadditionalcidrex
 cat > cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
 cloudConfig: null
 docker:
-  bridge: ""
   ipMasq: false
   ipTables: false
   logLevel: warn

@@ -418,7 +417,6 @@ Resources.AWSAutoScalingLaunchConfigurationnodesadditionalcidrexamplecom.Propert
 cat > cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
 cloudConfig: null
 docker:
-  bridge: ""
   ipMasq: false
   ipTables: false
   logLevel: warn

@@ -150,7 +150,6 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersadditionaluserda
 cat > cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
 cloudConfig: null
 docker:
-  bridge: ""
   ipMasq: false
   ipTables: false
   logLevel: warn

@@ -448,7 +447,6 @@ Resources.AWSAutoScalingLaunchConfigurationnodesadditionaluserdataexamplecom.Pro
 cat > cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
 cloudConfig: null
 docker:
-  bridge: ""
   ipMasq: false
   ipTables: false
   logLevel: warn

@@ -141,7 +141,6 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersminimalexampleco
 cat > cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
 cloudConfig: null
 docker:
-  bridge: ""
   ipMasq: false
   ipTables: false
   logLevel: warn

@@ -418,7 +417,6 @@ Resources.AWSAutoScalingLaunchConfigurationnodesminimalexamplecom.Properties.Use
 cat > cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
 cloudConfig: null
 docker:
-  bridge: ""
   ipMasq: false
   ipTables: false
   logLevel: warn

@@ -1,2 +0,0 @@
-Docker:
-  Storage: devicemapper

@@ -1,2 +0,0 @@
-Docker:
-  Bridge: ""

@@ -1,2 +0,0 @@
-Docker:
-  Bridge: ""

@@ -1,7 +0,0 @@
-Docker:
-  Bridge: cbr0
-  LogLevel: warn
-  IPTables: false
-  IPMasq: false
-  # Note the alternative syntax... with a comma we will try each of the filesystems in turn
-  Storage: overlay,aufs

@@ -87,7 +87,7 @@ var (
 	// AlphaAllowALI is a feature flag that gates aliyun support while it is alpha
 	AlphaAllowALI = featureflag.New("AlphaAllowALI", featureflag.Bool(false))
 	// CloudupModels a list of supported models
-	CloudupModels = []string{"config", "proto", "cloudup"}
+	CloudupModels = []string{"proto", "cloudup"}
 )
 
 type ApplyClusterCmd struct {

@@ -48,11 +48,6 @@ type populateClusterSpec struct {
 	// We build it up into a complete config, but we write the values as input
 	InputCluster *api.Cluster
 
-	// ModelStore is the location where models are found
-	ModelStore vfs.Path
-	// Models is a list of cloudup models to apply
-	Models []string
-
 	// fullCluster holds the built completed cluster spec
 	fullCluster *api.Cluster
 

@@ -68,18 +63,11 @@ func findModelStore() (vfs.Path, error) {
 // PopulateClusterSpec takes a user-specified cluster spec, and computes the full specification that should be set on the cluster.
 // We do this so that we don't need any real "brains" on the node side.
 func PopulateClusterSpec(clientset simple.Clientset, cluster *api.Cluster, assetBuilder *assets.AssetBuilder) (*api.Cluster, error) {
-	modelStore, err := findModelStore()
-	if err != nil {
-		return nil, err
-	}
-
 	c := &populateClusterSpec{
 		InputCluster: cluster,
-		ModelStore:   modelStore,
-		Models:       []string{"config"},
 		assetBuilder: assetBuilder,
 	}
-	err = c.run(clientset)
+	err := c.run(clientset)
 	if err != nil {
 		return nil, err
 	}

@@ -296,27 +284,21 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
 		AssetBuilder: c.assetBuilder,
 	}
 
-	var fileModels []string
 	var codeModels []loader.OptionsBuilder
-	for _, m := range c.Models {
-		switch m {
-		case "config":
-			{
+	{
+		{
 			// Note: DefaultOptionsBuilder comes first
 			codeModels = append(codeModels, &components.DefaultsOptionsBuilder{Context: optionsContext})
 			codeModels = append(codeModels, &components.EtcdOptionsBuilder{Context: optionsContext})
 			codeModels = append(codeModels, &etcdmanager.EtcdManagerOptionsBuilder{Context: optionsContext})
 			codeModels = append(codeModels, &components.KubeAPIServerOptionsBuilder{OptionsContext: optionsContext})
-			codeModels = append(codeModels, &components.DockerOptionsBuilder{Context: optionsContext})
+			codeModels = append(codeModels, &components.DockerOptionsBuilder{OptionsContext: optionsContext})
 			codeModels = append(codeModels, &components.NetworkingOptionsBuilder{Context: optionsContext})
 			codeModels = append(codeModels, &components.KubeDnsOptionsBuilder{Context: optionsContext})
 			codeModels = append(codeModels, &components.KubeletOptionsBuilder{Context: optionsContext})
 			codeModels = append(codeModels, &components.KubeControllerManagerOptionsBuilder{Context: optionsContext})
 			codeModels = append(codeModels, &components.KubeSchedulerOptionsBuilder{OptionsContext: optionsContext})
 			codeModels = append(codeModels, &components.KubeProxyOptionsBuilder{Context: optionsContext})
-			fileModels = append(fileModels, m)
-
-		default:
-			fileModels = append(fileModels, m)
 		}
 	}
 

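With the "config" file model gone, every default flows through the loader.OptionsBuilder chain shown above. A minimal sketch of that pattern under illustrative types — the Spec struct and dockerDefaults builder are hypothetical stand-ins; only the BuildOptions(o interface{}) error shape mirrors the real interface:

package main

import "fmt"

// OptionsBuilder mirrors the shape of kops' loader.OptionsBuilder interface.
type OptionsBuilder interface {
	BuildOptions(o interface{}) error
}

// Spec is a hypothetical stand-in for the cluster spec being completed.
type Spec struct {
	DockerLogLevel string
}

// dockerDefaults is a hypothetical builder that fills defaults in code,
// replacing what the declarative "config" model files used to do.
type dockerDefaults struct{}

func (dockerDefaults) BuildOptions(o interface{}) error {
	spec := o.(*Spec)
	if spec.DockerLogLevel == "" {
		spec.DockerLogLevel = "warn"
	}
	return nil
}

func main() {
	codeModels := []OptionsBuilder{dockerDefaults{}}
	spec := &Spec{}
	for _, b := range codeModels {
		if err := b.BuildOptions(spec); err != nil {
			panic(err)
		}
	}
	fmt.Println(spec.DockerLogLevel) // warn
}
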
@@ -325,7 +307,7 @@ func (c *populateClusterSpec) run(clientset simple.Clientset) error {
 		Tags: tags,
 	}
 
-	completed, err := specBuilder.BuildCompleteSpec(&cluster.Spec, c.ModelStore, fileModels)
+	completed, err := specBuilder.BuildCompleteSpec(&cluster.Spec)
 	if err != nil {
 		return fmt.Errorf("error building complete spec: %v", err)
 	}

@@ -23,7 +23,6 @@ import (
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/loader"
 	"k8s.io/kops/upup/pkg/fi/utils"
-	"k8s.io/kops/util/pkg/vfs"
 )
 
 type SpecBuilder struct {

@@ -32,25 +31,7 @@ type SpecBuilder struct {
 	Tags sets.String
 }
 
-func (l *SpecBuilder) BuildCompleteSpec(clusterSpec *api.ClusterSpec, modelStore vfs.Path, models []string) (*api.ClusterSpec, error) {
-	// First pass over models: load options
-	tw := &loader.TreeWalker{
-		DefaultHandler: ignoreHandler,
-		Contexts: map[string]loader.Handler{
-			"resources": ignoreHandler,
-		},
-		Extensions: map[string]loader.Handler{
-			".options": l.OptionsLoader.HandleOptions,
-		},
-		Tags: l.Tags,
-	}
-	for _, model := range models {
-		modelDir := modelStore.Join(model)
-		err := tw.Walk(modelDir)
-		if err != nil {
-			return nil, err
-		}
-	}
+func (l *SpecBuilder) BuildCompleteSpec(clusterSpec *api.ClusterSpec) (*api.ClusterSpec, error) {
 
 	loaded, err := l.OptionsLoader.Build(clusterSpec)
 	if err != nil {

@@ -36,26 +36,6 @@ import (
 func buildCloudupTags(cluster *api.Cluster) (sets.String, error) {
 	tags := sets.NewString()
 
-	networking := cluster.Spec.Networking
-
-	if networking == nil || networking.Classic != nil {
-		tags.Insert("_networking_classic")
-	} else if networking.Kubenet != nil {
-		tags.Insert("_networking_kubenet")
-	} else if networking.External != nil {
-		// external is based on kubenet
-		tags.Insert("_networking_kubenet", "_networking_external")
-	} else if networking.CNI != nil || networking.Weave != nil || networking.Flannel != nil || networking.Calico != nil || networking.Canal != nil || networking.Kuberouter != nil || networking.Romana != nil || networking.AmazonVPC != nil || networking.Cilium != nil {
-		tags.Insert("_networking_cni")
-	} else if networking.Kopeio != nil {
-		// Kopeio is based on kubenet / external
-		// TODO combine with External
-		tags.Insert("_networking_kubenet", "_networking_external")
-	} else {
-		return nil, fmt.Errorf("no networking mode set")
-	}
-
 	switch api.CloudProviderID(cluster.Spec.CloudProvider) {
 	case api.CloudProviderGCE:
 		{

@@ -116,17 +96,6 @@ func buildCloudupTags(cluster *api.Cluster) (sets.String, error) {
 func buildNodeupTags(role api.InstanceGroupRole, cluster *api.Cluster, clusterTags sets.String) (sets.String, error) {
 	tags := sets.NewString()
 
-	networking := cluster.Spec.Networking
-
-	if networking == nil {
-		return nil, fmt.Errorf("Networking is not set, and should not be nil here")
-	}
-
-	if networking.CNI != nil || networking.Weave != nil || networking.Flannel != nil || networking.Calico != nil || networking.Canal != nil || networking.Kuberouter != nil || networking.Romana != nil || networking.AmazonVPC != nil || networking.Cilium != nil {
-		// external is based on cni, weave, flannel, calico, etc
-		tags.Insert("_networking_cni")
-	}
-
 	switch role {
 	case api.InstanceGroupRoleNode:
 		// No tags

@@ -79,14 +79,6 @@ func TestBuildTags_CloudProvider_AWS_Weave(t *testing.T) {
 		t.Fatal("tag _aws not found")
 	}
 
-	if !tags.Has("_networking_cni") {
-		t.Fatal("tag _networking_cni not found")
-	}
-
-	if tags.Has("_networking_kubenet") {
-		t.Fatal("tag _networking_kubenet found")
-	}
-
 	nodeUpTags, err := buildNodeupTags(api.InstanceGroupRoleNode, c, tags)
 	if err != nil {
 		t.Fatalf("buildNodeupTags error: %v", err)

@@ -113,14 +105,6 @@ func TestBuildTags_CloudProvider_AWS_Flannel(t *testing.T) {
 		t.Fatal("tag _aws not found")
 	}
 
-	if !tags.Has("_networking_cni") {
-		t.Fatal("tag _networking_cni not found")
-	}
-
-	if tags.Has("_networking_kubenet") {
-		t.Fatal("tag _networking_kubenet found")
-	}
-
 	nodeUpTags, err := buildNodeupTags(api.InstanceGroupRoleNode, c, tags)
 	if err != nil {
 		t.Fatalf("buildNodeupTags error: %v", err)

@@ -147,14 +131,6 @@ func TestBuildTags_CloudProvider_AWS_Calico(t *testing.T) {
 		t.Fatal("tag _aws not found")
 	}
 
-	if !tags.Has("_networking_cni") {
-		t.Fatal("tag _networking_cni not found")
-	}
-
-	if tags.Has("_networking_kubenet") {
-		t.Fatal("tag _networking_kubenet found")
-	}
-
 	nodeUpTags, err := buildNodeupTags(api.InstanceGroupRoleNode, c, tags)
 	if err != nil {
 		t.Fatalf("buildNodeupTags error: %v", err)

@@ -181,14 +157,6 @@ func TestBuildTags_CloudProvider_AWS_Canal(t *testing.T) {
 		t.Fatal("tag _aws not found")
 	}
 
-	if !tags.Has("_networking_cni") {
-		t.Fatal("tag _networking_cni not found")
-	}
-
-	if tags.Has("_networking_kubenet") {
-		t.Fatal("tag _networking_kubenet found")
-	}
-
 	nodeUpTags, err := buildNodeupTags(api.InstanceGroupRoleNode, c, tags)
 	if err != nil {
 		t.Fatalf("buildNodeupTags error: %v", err)

@@ -215,14 +183,6 @@ func TestBuildTags_CloudProvider_AWS_Romana(t *testing.T) {
 		t.Fatal("tag _aws not found")
 	}
 
-	if !tags.Has("_networking_cni") {
-		t.Fatal("tag _networking_cni not found")
-	}
-
-	if tags.Has("_networking_kubenet") {
-		t.Fatal("tag _networking_kubenet found")
-	}
-
 	nodeUpTags, err := buildNodeupTags(api.InstanceGroupRoleNode, c, tags)
 	if err != nil {
 		t.Fatalf("buildNodeupTags error: %v", err)

@@ -246,10 +206,6 @@ func TestBuildTags_CloudProvider_AWS(t *testing.T) {
 		t.Fatal("tag _aws not found")
 	}
 
-	if !tags.Has("_networking_cni") {
-		t.Fatal("tag _networking_cni not found")
-	}
-
 	nodeUpTags, err := buildNodeupTags(api.InstanceGroupRoleNode, c, tags)
 	if err != nil {
 		t.Fatalf("buildNodeupTags error: %v", err)

@@ -333,14 +289,6 @@ func TestBuildTags_CloudProvider_AWS_Cilium(t *testing.T) {
 		t.Fatal("tag _aws not found")
 	}
 
-	if !tags.Has("_networking_cni") {
-		t.Fatal("tag _networking_cni not found")
-	}
-
-	if tags.Has("_networking_kubenet") {
-		t.Fatal("tag _networking_kubenet found")
-	}
-
 	nodeUpTags, err := buildNodeupTags(api.InstanceGroupRoleNode, c, tags)
 	if err != nil {
 		t.Fatalf("buildNodeupTags error: %v", err)