Merge branch 'master' into critical-pod

Conflicts:
	upup/pkg/fi/cloudup/bootstrapchannelbuilder.go
	upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/manifest.yaml
	upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml
	upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/manifest.yaml
	upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/manifest.yaml
John Gardiner Myers 2020-01-29 09:15:56 -08:00
commit 9f3e31c73b
95 changed files with 2506 additions and 3897 deletions


@ -366,21 +366,21 @@ push: crossbuild-nodeup
.PHONY: push-gce-dry
push-gce-dry: push
ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=metadata://gce/config --dryrun --v=8
ssh ${TARGET} sudo /tmp/nodeup --conf=metadata://gce/config --dryrun --v=8
.PHONY: push-gce-dry
push-aws-dry: push
ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --dryrun --v=8
ssh ${TARGET} sudo /tmp/nodeup --conf=/opt/kops/conf/kube_env.yaml --dryrun --v=8
.PHONY: push-gce-run
push-gce-run: push
ssh ${TARGET} sudo cp /tmp/nodeup /var/lib/toolbox/kubernetes-install/nodeup
ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /var/lib/toolbox/kubernetes-install/nodeup --conf=/var/lib/toolbox/kubernetes-install/kube_env.yaml --v=8
ssh ${TARGET} sudo /var/lib/toolbox/kubernetes-install/nodeup --conf=/var/lib/toolbox/kubernetes-install/kube_env.yaml --v=8
# -t is for CentOS http://unix.stackexchange.com/questions/122616/why-do-i-need-a-tty-to-run-sudo-if-i-can-sudo-without-a-password
.PHONY: push-aws-run
push-aws-run: push
ssh -t ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --v=8
ssh -t ${TARGET} sudo /tmp/nodeup --conf=/opt/kops/conf/kube_env.yaml --v=8
.PHONY: ${PROTOKUBE}
${PROTOKUBE}:
@ -585,10 +585,11 @@ ${CHANNELS}:
.PHONY: release-tag
release-tag:
git tag ${KOPS_RELEASE_VERSION}
git tag v${KOPS_RELEASE_VERSION}
.PHONY: release-github
release-github:
shipbot -tag ${KOPS_RELEASE_VERSION} -config .shipbot.yaml
shipbot -tag v${KOPS_RELEASE_VERSION} -config .shipbot.yaml
# --------------------------------------------------
# API / embedding examples
@ -692,7 +693,7 @@ bazel-push-gce-run: bazel-push
.PHONY: bazel-push-aws-run
bazel-push-aws-run: bazel-push
ssh ${TARGET} chmod +x /tmp/nodeup
ssh -t ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --v=8
ssh -t ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=/opt/kops/conf/kube_env.yaml --v=8
.PHONY: gazelle
gazelle:


@ -56,13 +56,13 @@ spec:
kubenet: {}
kubernetesVersions:
- range: ">=1.17.0"
recommendedVersion: 1.17.0
recommendedVersion: 1.17.2
requiredVersion: 1.17.0
- range: ">=1.16.0"
recommendedVersion: 1.16.4
recommendedVersion: 1.16.6
requiredVersion: 1.16.0
- range: ">=1.15.0"
recommendedVersion: 1.15.7
recommendedVersion: 1.15.9
requiredVersion: 1.15.0
- range: ">=1.14.0"
recommendedVersion: 1.14.10
@ -83,15 +83,15 @@ spec:
- range: ">=1.17.0-alpha.1"
#recommendedVersion: "1.17.0"
#requiredVersion: 1.17.0
kubernetesVersion: 1.17.0
kubernetesVersion: 1.17.2
- range: ">=1.16.0-alpha.1"
#recommendedVersion: "1.16.0"
#requiredVersion: 1.16.0
kubernetesVersion: 1.16.4
kubernetesVersion: 1.16.6
- range: ">=1.15.0-alpha.1"
#recommendedVersion: "1.15.0"
#requiredVersion: 1.15.0
kubernetesVersion: 1.15.7
kubernetesVersion: 1.15.9
- range: ">=1.14.0-alpha.1"
#recommendedVersion: "1.14.0"
#requiredVersion: 1.14.0


@ -49,13 +49,13 @@ spec:
recommendedVersion: 1.17.0
requiredVersion: 1.17.0
- range: ">=1.16.0"
recommendedVersion: 1.16.3
recommendedVersion: 1.16.4
requiredVersion: 1.16.0
- range: ">=1.15.0"
recommendedVersion: 1.15.6
recommendedVersion: 1.15.7
requiredVersion: 1.15.0
- range: ">=1.14.0"
recommendedVersion: 1.14.9
recommendedVersion: 1.14.10
requiredVersion: 1.14.0
- range: ">=1.13.0"
recommendedVersion: 1.13.12
@ -77,15 +77,15 @@ spec:
- range: ">=1.16.0-alpha.1"
#recommendedVersion: "1.16.0"
#requiredVersion: 1.16.0
kubernetesVersion: 1.16.3
kubernetesVersion: 1.16.4
- range: ">=1.15.0-alpha.1"
#recommendedVersion: "1.15.0"
#requiredVersion: 1.15.0
kubernetesVersion: 1.15.6
kubernetesVersion: 1.15.7
- range: ">=1.14.0-alpha.1"
#recommendedVersion: "1.14.0"
#requiredVersion: 1.14.0
kubernetesVersion: 1.14.9
kubernetesVersion: 1.14.10
- range: ">=1.13.0-alpha.1"
#recommendedVersion: "1.13.0"
#requiredVersion: 1.13.0


@ -52,20 +52,20 @@ const updateClusterTestBase = "../../tests/integration/update_cluster/"
// TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
func TestMinimal(t *testing.T) {
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha0", false, 1, true, false, nil, true)
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha1", false, 1, true, false, nil, true)
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha2", false, 1, true, false, nil, true)
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha0", false, 1, true, false, nil, true, false)
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha1", false, 1, true, false, nil, true, false)
runTestAWS(t, "minimal.example.com", "minimal", "v1alpha2", false, 1, true, false, nil, true, false)
}
// TestRestrictAccess runs the test on a simple SG configuration, similar to kops create cluster minimal.example.com --ssh-access=$(IPS) --admin-access=$(IPS) --master-count=3
func TestRestrictAccess(t *testing.T) {
runTestAWS(t, "restrictaccess.example.com", "restrict_access", "v1alpha2", false, 1, true, false, nil, true)
runTestAWS(t, "restrictaccess.example.com", "restrict_access", "v1alpha2", false, 1, true, false, nil, true, false)
}
// TestHA runs the test on a simple HA configuration, similar to kops create cluster minimal.example.com --zones us-west-1a,us-west-1b,us-west-1c --master-count=3
func TestHA(t *testing.T) {
runTestAWS(t, "ha.example.com", "ha", "v1alpha1", false, 3, true, false, nil, true)
runTestAWS(t, "ha.example.com", "ha", "v1alpha2", false, 3, true, false, nil, true)
runTestAWS(t, "ha.example.com", "ha", "v1alpha1", false, 3, true, false, nil, true, false)
runTestAWS(t, "ha.example.com", "ha", "v1alpha2", false, 3, true, false, nil, true, false)
}
// TestHighAvailabilityGCE runs the test on a simple HA GCE configuration, similar to kops create cluster ha-gce.example.com
@ -76,14 +76,14 @@ func TestHighAvailabilityGCE(t *testing.T) {
// TestComplex runs the test on a more complex configuration, intended to hit more of the edge cases
func TestComplex(t *testing.T) {
runTestAWS(t, "complex.example.com", "complex", "v1alpha2", false, 1, true, false, nil, true)
runTestAWS(t, "complex.example.com", "complex", "legacy-v1alpha2", false, 1, true, false, nil, true)
runTestAWS(t, "complex.example.com", "complex", "v1alpha2", false, 1, true, false, nil, true, false)
runTestAWS(t, "complex.example.com", "complex", "legacy-v1alpha2", false, 1, true, false, nil, true, false)
runTestCloudformation(t, "complex.example.com", "complex", "v1alpha2", false, nil, true)
}
func TestNoSSHKey(t *testing.T) {
runTestAWS(t, "nosshkey.example.com", "nosshkey", "v1alpha2", false, 1, true, false, nil, false)
runTestAWS(t, "nosshkey.example.com", "nosshkey", "v1alpha2", false, 1, true, false, nil, false, false)
}
func TestNoSSHKeyCloudformation(t *testing.T) {
@ -92,7 +92,7 @@ func TestNoSSHKeyCloudformation(t *testing.T) {
// TestCrossZone tests that the cross zone setting on the API ELB is set properly
func TestCrossZone(t *testing.T) {
runTestAWS(t, "crosszone.example.com", "api_elb_cross_zone", "v1alpha2", false, 1, true, false, nil, true)
runTestAWS(t, "crosszone.example.com", "api_elb_cross_zone", "v1alpha2", false, 1, true, false, nil, true, false)
}
// TestMinimalCloudformation runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
@ -108,7 +108,7 @@ func TestExistingIAMCloudformation(t *testing.T) {
// TestExistingSG runs the test with existing Security Group, similar to kops create cluster minimal.example.com --zones us-west-1a
func TestExistingSG(t *testing.T) {
runTestAWS(t, "existingsg.example.com", "existing_sg", "v1alpha2", false, 3, true, false, nil, true)
runTestAWS(t, "existingsg.example.com", "existing_sg", "v1alpha2", false, 3, true, false, nil, true, false)
}
// TestAdditionalUserData runs the test on passing additional user-data to an instance at bootstrap.
@ -118,83 +118,93 @@ func TestAdditionalUserData(t *testing.T) {
// TestBastionAdditionalUserData runs the test on passing additional user-data to a bastion instance group
func TestBastionAdditionalUserData(t *testing.T) {
runTestAWS(t, "bastionuserdata.example.com", "bastionadditional_user-data", "v1alpha2", true, 1, true, false, nil, true)
runTestAWS(t, "bastionuserdata.example.com", "bastionadditional_user-data", "v1alpha2", true, 1, true, false, nil, true, false)
}
// TestMinimal_JSON runs the test on a minimal data set and outputs JSON
func TestMinimal_json(t *testing.T) {
featureflag.ParseFlags("+TerraformJSON")
unsetFeaureFlag := func() {
featureflag.ParseFlags("-TerraformJSON")
}
defer unsetFeaureFlag()
runTestAWS(t, "minimal-json.example.com", "minimal-json", "v1alpha0", false, 1, true, false, nil, true, true)
}
// TestMinimal_141 runs the test on a configuration from 1.4.1 release
func TestMinimal_141(t *testing.T) {
runTestAWS(t, "minimal-141.example.com", "minimal-141", "v1alpha0", false, 1, true, false, nil, true)
runTestAWS(t, "minimal-141.example.com", "minimal-141", "v1alpha0", false, 1, true, false, nil, true, false)
}
// TestPrivateWeave runs the test on a configuration with private topology, weave networking
func TestPrivateWeave(t *testing.T) {
runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha1", true, 1, true, false, nil, true)
runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha2", true, 1, true, false, nil, true)
runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha1", true, 1, true, false, nil, true, false)
runTestAWS(t, "privateweave.example.com", "privateweave", "v1alpha2", true, 1, true, false, nil, true, false)
}
// TestPrivateFlannel runs the test on a configuration with private topology, flannel networking
func TestPrivateFlannel(t *testing.T) {
runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha1", true, 1, true, false, nil, true)
runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha2", true, 1, true, false, nil, true)
runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha1", true, 1, true, false, nil, true, false)
runTestAWS(t, "privateflannel.example.com", "privateflannel", "v1alpha2", true, 1, true, false, nil, true, false)
}
// TestPrivateCalico runs the test on a configuration with private topology, calico networking
func TestPrivateCalico(t *testing.T) {
runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha1", true, 1, true, false, nil, true)
runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha2", true, 1, true, false, nil, true)
runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha1", true, 1, true, false, nil, true, false)
runTestAWS(t, "privatecalico.example.com", "privatecalico", "v1alpha2", true, 1, true, false, nil, true, false)
runTestCloudformation(t, "privatecalico.example.com", "privatecalico", "v1alpha2", true, nil, true)
}
// TestPrivateCanal runs the test on a configuration with private topology, canal networking
func TestPrivateCanal(t *testing.T) {
runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha1", true, 1, true, false, nil, true)
runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha2", true, 1, true, false, nil, true)
runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha1", true, 1, true, false, nil, true, false)
runTestAWS(t, "privatecanal.example.com", "privatecanal", "v1alpha2", true, 1, true, false, nil, true, false)
}
// TestPrivateKopeio runs the test on a configuration with private topology, kopeio networking
func TestPrivateKopeio(t *testing.T) {
runTestAWS(t, "privatekopeio.example.com", "privatekopeio", "v1alpha2", true, 1, true, false, nil, true)
runTestAWS(t, "privatekopeio.example.com", "privatekopeio", "v1alpha2", true, 1, true, false, nil, true, false)
}
// TestUnmanaged is a test where all the subnets opt-out of route management
func TestUnmanaged(t *testing.T) {
runTestAWS(t, "unmanaged.example.com", "unmanaged", "v1alpha2", true, 1, true, false, nil, true)
runTestAWS(t, "unmanaged.example.com", "unmanaged", "v1alpha2", true, 1, true, false, nil, true, false)
}
// TestPrivateSharedSubnet runs the test on a configuration with private topology & shared subnets
func TestPrivateSharedSubnet(t *testing.T) {
runTestAWS(t, "private-shared-subnet.example.com", "private-shared-subnet", "v1alpha2", true, 1, true, false, nil, true)
runTestAWS(t, "private-shared-subnet.example.com", "private-shared-subnet", "v1alpha2", true, 1, true, false, nil, true, false)
}
// TestPrivateDns1 runs the test on a configuration with private topology, private dns
func TestPrivateDns1(t *testing.T) {
runTestAWS(t, "privatedns1.example.com", "privatedns1", "v1alpha2", true, 1, true, false, nil, true)
runTestAWS(t, "privatedns1.example.com", "privatedns1", "v1alpha2", true, 1, true, false, nil, true, false)
}
// TestPrivateDns2 runs the test on a configuration with private topology, private dns, extant vpc
func TestPrivateDns2(t *testing.T) {
runTestAWS(t, "privatedns2.example.com", "privatedns2", "v1alpha2", true, 1, true, false, nil, true)
runTestAWS(t, "privatedns2.example.com", "privatedns2", "v1alpha2", true, 1, true, false, nil, true, false)
}
// TestSharedSubnet runs the test on a configuration with a shared subnet (and VPC)
func TestSharedSubnet(t *testing.T) {
runTestAWS(t, "sharedsubnet.example.com", "shared_subnet", "v1alpha2", false, 1, true, false, nil, true)
runTestAWS(t, "sharedsubnet.example.com", "shared_subnet", "v1alpha2", false, 1, true, false, nil, true, false)
}
// TestSharedVPC runs the test on a configuration with a shared VPC
func TestSharedVPC(t *testing.T) {
runTestAWS(t, "sharedvpc.example.com", "shared_vpc", "v1alpha2", false, 1, true, false, nil, true)
runTestAWS(t, "sharedvpc.example.com", "shared_vpc", "v1alpha2", false, 1, true, false, nil, true, false)
}
// TestExistingIAM runs the test on a configuration with existing IAM instance profiles
func TestExistingIAM(t *testing.T) {
lifecycleOverrides := []string{"IAMRole=ExistsAndWarnIfChanges", "IAMRolePolicy=ExistsAndWarnIfChanges", "IAMInstanceProfileRole=ExistsAndWarnIfChanges"}
runTestAWS(t, "existing-iam.example.com", "existing_iam", "v1alpha2", false, 3, false, false, lifecycleOverrides, true)
runTestAWS(t, "existing-iam.example.com", "existing_iam", "v1alpha2", false, 3, false, false, lifecycleOverrides, true, false)
}
// TestAdditionalCIDR runs the test on a configuration with a shared VPC
func TestAdditionalCIDR(t *testing.T) {
runTestAWS(t, "additionalcidr.example.com", "additional_cidr", "v1alpha3", false, 3, true, false, nil, true)
runTestAWS(t, "additionalcidr.example.com", "additional_cidr", "v1alpha3", false, 3, true, false, nil, true, false)
runTestCloudformation(t, "additionalcidr.example.com", "additional_cidr", "v1alpha2", false, nil, true)
}
@ -204,7 +214,7 @@ func TestPhaseNetwork(t *testing.T) {
}
func TestExternalLoadBalancer(t *testing.T) {
runTestAWS(t, "externallb.example.com", "externallb", "v1alpha2", false, 1, true, false, nil, true)
runTestAWS(t, "externallb.example.com", "externallb", "v1alpha2", false, 1, true, false, nil, true, false)
runTestCloudformation(t, "externallb.example.com", "externallb", "v1alpha2", false, nil, true)
}
@ -223,13 +233,13 @@ func TestPhaseCluster(t *testing.T) {
// TestMixedInstancesASG tests ASGs using a mixed instance policy
func TestMixedInstancesASG(t *testing.T) {
runTestAWS(t, "mixedinstances.example.com", "mixed_instances", "v1alpha2", false, 3, true, true, nil, true)
runTestAWS(t, "mixedinstances.example.com", "mixed_instances", "v1alpha2", false, 3, true, true, nil, true, false)
runTestCloudformation(t, "mixedinstances.example.com", "mixed_instances", "v1alpha2", false, nil, true)
}
// TestMixedInstancesSpotASG tests ASGs using a mixed instance policy and spot instances
func TestMixedInstancesSpotASG(t *testing.T) {
runTestAWS(t, "mixedinstances.example.com", "mixed_instances_spot", "v1alpha2", false, 3, true, true, nil, true)
runTestAWS(t, "mixedinstances.example.com", "mixed_instances_spot", "v1alpha2", false, 3, true, true, nil, true, false)
runTestCloudformation(t, "mixedinstances.example.com", "mixed_instances_spot", "v1alpha2", false, nil, true)
}
@ -238,7 +248,7 @@ func TestContainerdCloudformation(t *testing.T) {
runTestCloudformation(t, "containerd.example.com", "containerd-cloudformation", "v1alpha2", false, nil, true)
}
func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName string, srcDir string, version string, private bool, zones int, expectedDataFilenames []string, tfFileName string, phase *cloudup.Phase, lifecycleOverrides []string, sshKey bool) {
func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName string, srcDir string, version string, private bool, zones int, expectedDataFilenames []string, tfFileName string, expectedTfFileName string, phase *cloudup.Phase, lifecycleOverrides []string, sshKey bool) {
var stdout bytes.Buffer
srcDir = updateClusterTestBase + srcDir
@ -250,6 +260,10 @@ func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName stri
testDataTFPath = tfFileName
}
if expectedTfFileName != "" {
actualTFPath = expectedTfFileName
}
factoryOptions := &util.FactoryOptions{}
factoryOptions.RegistryPath = "memfs://tests"
@ -312,10 +326,10 @@ func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName stri
sort.Strings(fileNames)
actualFilenames := strings.Join(fileNames, ",")
expectedFilenames := "kubernetes.tf"
expectedFilenames := actualTFPath
if len(expectedDataFilenames) > 0 {
expectedFilenames = "data,kubernetes.tf"
expectedFilenames = "data," + actualTFPath
}
if actualFilenames != expectedFilenames {
@ -392,10 +406,15 @@ func runTest(t *testing.T, h *testutils.IntegrationTestHarness, clusterName stri
}
}
func runTestAWS(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, expectPolicies bool, launchTemplate bool, lifecycleOverrides []string, sshKey bool) {
func runTestAWS(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, expectPolicies bool, launchTemplate bool, lifecycleOverrides []string, sshKey bool, jsonOutput bool) {
tfFileName := ""
h := testutils.NewIntegrationTestHarness(t)
defer h.Close()
if jsonOutput {
tfFileName = "kubernetes.tf.json"
}
h.MockKopsVersion("1.15.0")
h.SetupMockAWS()
@ -431,7 +450,7 @@ func runTestAWS(t *testing.T, clusterName string, srcDir string, version string,
}...)
}
}
runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, "", nil, lifecycleOverrides, sshKey)
runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, tfFileName, tfFileName, nil, lifecycleOverrides, sshKey)
}
func runTestPhase(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, phase cloudup.Phase, sshKey bool) {
@ -475,7 +494,7 @@ func runTestPhase(t *testing.T, clusterName string, srcDir string, version strin
}
}
runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, tfFileName, &phase, nil, sshKey)
runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, tfFileName, "", &phase, nil, sshKey)
}
func runTestGCE(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int, sshKey bool) {
@ -504,7 +523,7 @@ func runTestGCE(t *testing.T, clusterName string, srcDir string, version string,
expectedFilenames = append(expectedFilenames, prefix+"kops-k8s-io-instance-group-name")
}
runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, "", nil, nil, sshKey)
runTest(t, h, clusterName, srcDir, version, private, zones, expectedFilenames, "", "", nil, nil, sshKey)
}
func runTestCloudformation(t *testing.T, clusterName string, srcDir string, version string, private bool, lifecycleOverrides []string, sshKey bool) {


@ -111,7 +111,14 @@ func runToolBoxTemplate(f *util.Factory, out io.Writer, options *toolboxTemplate
if err != nil {
return err
}
context["clusterName"] = options.clusterName
// @step: set clusterName from template's values or cli flag
value, ok := context["clusterName"].(string)
if ok {
options.clusterName = value
} else {
context["clusterName"] = options.clusterName
}
// @check if we are just rendering the config value
if options.configValue != "" {


@ -21,3 +21,4 @@ The following experimental features are currently available:
* `+Spotinst` - Enables the use of the Spotinst cloud provider
* `+SpotinstOcean` - Enables the use of Spotinst Ocean instance groups
* `+SkipEtcdVersionCheck` - Bypasses the check that etcd-manager is using a supported etcd version
* `+TerraformJSON` - Produce a kubernetes.tf.json file instead of writing HCL v1 syntax. Can be consumed by terraform 0.12


@ -39,6 +39,10 @@ the notes prior to the release).
PodPriority: "true"
```
# Deprecations
* The `kops/v1alpha1` API is deprecated and will be removed in kops 1.18. Users of `kops replace` will need to supply v1alpha2 resources.
# Full change list since 1.15.0 release
## 1.15.0-alpha.1 to 1.16.0-alpha.1


@ -55,6 +55,10 @@ the notes prior to the release).
a kops-controller Deployment may have been created that should get deleted because it has been replaced with a DaemonSet.
Run `kubectl -n kube-system delete deployment kops-controller` after upgrading to Kops 1.17.0-alpha.2 or later.
# Deprecations
* The `kops/v1alpha1` API is deprecated and will be removed in kops 1.18. Users of `kops replace` will need to supply v1alpha2 resources.
# Full change list since 1.16.0 release
## 1.16.0-alpha.1 to 1.17.0-alpha.1
@ -112,3 +116,41 @@ the notes prior to the release).
* DOCS: fix simple typo in readme [@lpmi-13](https://github.com/lpmi-13) [#8005](https://github.com/kubernetes/kops/pull/8005)
* Spotinst: Upgrade the Spotinst SDK to version 1.36 [@liranp](https://github.com/liranp) [#8003](https://github.com/kubernetes/kops/pull/8003)
* Release 1.17.0-alpha.1 [@justinsb](https://github.com/justinsb) [#7985](https://github.com/kubernetes/kops/pull/7985)
## 1.17.0-alpha.1 to 1.17.0-alpha.2
* Fix mounting Calico "flexvol-driver-host" in CoreOS [@hakman](https://github.com/hakman) [#8062](https://github.com/kubernetes/kops/pull/8062)
* Cherry-pick #8074 to release-1.17 [@johngmyers](https://github.com/johngmyers) [#8084](https://github.com/kubernetes/kops/pull/8084)
* Bump cilium version to 1.6.4 [@olemarkus](https://github.com/olemarkus) [#8022](https://github.com/kubernetes/kops/pull/8022)
* Complete support for Flatcar [@mazzy89](https://github.com/mazzy89) [#7545](https://github.com/kubernetes/kops/pull/7545)
* Canal v3.10 manifest for k8s v1.15+ [@KashifSaadat](https://github.com/KashifSaadat),[@hakman](https://github.com/hakman) [#7917](https://github.com/kubernetes/kops/pull/7917)
* Cherry pick #8095 [@zetaab](https://github.com/zetaab) [#8096](https://github.com/kubernetes/kops/pull/8096)
* test validateCluster twice to make sure it does not flap [@zetaab](https://github.com/zetaab),[@johngmyers](https://github.com/johngmyers) [#8088](https://github.com/kubernetes/kops/pull/8088)
* Add inf1 instances [@mikesplain](https://github.com/mikesplain) [#8128](https://github.com/kubernetes/kops/pull/8128)
* Add CapacityOptimized to list of supported spot allocation strategies [@gjtempleton](https://github.com/gjtempleton) [#7406](https://github.com/kubernetes/kops/pull/7406)
* Update Calico to v3.10.2 [@hakman](https://github.com/hakman) [#8104](https://github.com/kubernetes/kops/pull/8104)
* Openstack: Fix cluster floating ips [@mitch000001](https://github.com/mitch000001) [#8115](https://github.com/kubernetes/kops/pull/8115)
* cilium: don't try to mount sys/fs/bpf if already mounted [@justinsb](https://github.com/justinsb) [#7832](https://github.com/kubernetes/kops/pull/7832)
* Update copyrights for 2020 [@johngmyers](https://github.com/johngmyers) [#8241](https://github.com/kubernetes/kops/pull/8241)
* Fix protokube osx build [@mikesplain](https://github.com/mikesplain) [#8263](https://github.com/kubernetes/kops/pull/8263)
* Set CLUSTER_NAME env var on amazon-vpc-cni pods [@rifelpet](https://github.com/rifelpet) [#8274](https://github.com/kubernetes/kops/pull/8274)
* Add deprecation warning for older k8s versions [@rifelpet](https://github.com/rifelpet) [#8176](https://github.com/kubernetes/kops/pull/8176)
* Remove kops-controller deployment [@rifelpet](https://github.com/rifelpet) [#8273](https://github.com/kubernetes/kops/pull/8273)
* Don't output empty sections in the manifests [@justinsb](https://github.com/justinsb) [#8317](https://github.com/kubernetes/kops/pull/8317)
* Cloud controller template function [@DavidSie](https://github.com/DavidSie) [#7992](https://github.com/kubernetes/kops/pull/7992)
* Configuration to specify no SSH key [@austinmoore-](https://github.com/austinmoore-) [#7096](https://github.com/kubernetes/kops/pull/7096)
* tests: increase timeout in rolling update tests [@justinsb](https://github.com/justinsb) [#8139](https://github.com/kubernetes/kops/pull/8139)
* Fix crossbuild-nodeup-in-docker [@johngmyers](https://github.com/johngmyers) [#8343](https://github.com/kubernetes/kops/pull/8343)
* update gophercloud dependency [@zetaab](https://github.com/zetaab) [#8347](https://github.com/kubernetes/kops/pull/8347)
* Update Terraform resource names to be 0.12 compatible. [@rifelpet](https://github.com/rifelpet) [#7957](https://github.com/kubernetes/kops/pull/7957)
* Allow local filesystem state stores (to aid CI pull-request workflows) [@ari-becker](https://github.com/ari-becker),[@rifelpet](https://github.com/rifelpet) [#6465](https://github.com/kubernetes/kops/pull/6465)
* Fix issues with older versions of k8s for basic clusters [@hakman](https://github.com/hakman) [#8248](https://github.com/kubernetes/kops/pull/8248)
* Use IAMPrefix() for hostedzone [@lazzarello](https://github.com/lazzarello) [#8366](https://github.com/kubernetes/kops/pull/8366)
* Fix scheduler policy configmap args [@vvbogdanov87](https://github.com/vvbogdanov87) [#8386](https://github.com/kubernetes/kops/pull/8386)
* Add Cilium.EnablePolicy back into templates [@olemarkus](https://github.com/olemarkus) [#8379](https://github.com/kubernetes/kops/pull/8379)
* Bump etcd-manager to 3.0.20200116 (#8310) [@mmerrill3](https://github.com/mmerrill3) [#8399](https://github.com/kubernetes/kops/pull/8399)
* CoreDNS default image bump to 1.6.6 to resolve CVE [@gjtempleton](https://github.com/gjtempleton) [#8333](https://github.com/kubernetes/kops/pull/8333)
* Don't load nonexistent calico-client cert when CNI is Cilium [@johngmyers](https://github.com/johngmyers) [#8338](https://github.com/kubernetes/kops/pull/8338)
* Kops releases - prefix git tags with v [@rifelpet](https://github.com/rifelpet) [#8373](https://github.com/kubernetes/kops/pull/8373)
* EBS Root Volume Termination [@tioxy](https://github.com/tioxy) [#7865](https://github.com/kubernetes/kops/pull/7865)
* Alicloud: etcd-manager support [@bittopaz](https://github.com/bittopaz) [#8016](https://github.com/kubernetes/kops/pull/8016)


@ -483,11 +483,11 @@ None known at this time
* Add go 1.10 testing to travis CI [@tvi](https://github.com/tvi) [#4926](https://github.com/kubernetes/kops/pull/4926)
* digitalocean: use pagination for all list requests [@andrewsykim](https://github.com/andrewsykim) [#4923](https://github.com/kubernetes/kops/pull/4923)
* Fix spelling [@inthecloud247](https://github.com/inthecloud247) [#4939](https://github.com/kubernetes/kops/pull/4939)
* Fix grammar mistake [@mycapatin](https://github.com/mycapatin) [#4936](https://github.com/kubernetes/kops/pull/4936)
* Fix grammar mistake [@mahuihuang](https://github.com/mahuihuang) [#4936](https://github.com/kubernetes/kops/pull/4936)
* Update the recommended Ubuntu Image [@ofersadgat](https://github.com/ofersadgat) [#4934](https://github.com/kubernetes/kops/pull/4934)
* Typo fix dont'->don't [@AdamDang](https://github.com/AdamDang) [#4929](https://github.com/kubernetes/kops/pull/4929)
* Update rules go and use more recent debian snapshot [@mikesplain](https://github.com/mikesplain) [#4948](https://github.com/kubernetes/kops/pull/4948)
* fix typo [@mycapatin](https://github.com/mycapatin) [#4943](https://github.com/kubernetes/kops/pull/4943)
* fix typo [@mahuihuang](https://github.com/mahuihuang) [#4943](https://github.com/kubernetes/kops/pull/4943)
* digitalocean: external cloud controller manager avoid circular dependencies [@andrewsykim](https://github.com/andrewsykim) [#4947](https://github.com/kubernetes/kops/pull/4947)
* implement subnet task for OpenStack platform [@zengchen1024](https://github.com/zengchen1024) [#4945](https://github.com/kubernetes/kops/pull/4945)
* Add warning about google cloud repository versions [@tombull](https://github.com/tombull) [#4944](https://github.com/kubernetes/kops/pull/4944)


@ -159,3 +159,16 @@ $ terraform apply
```
You should still run `kops delete cluster ${CLUSTER_NAME}` to remove the kops cluster specification and any dynamically created Kubernetes resources (ELBs or volumes); under this workaround it also removes the primary ELB volumes from the `proto` phase.
#### Terraform JSON output
With terraform 0.12, JSON is now officially supported as a configuration language. To enable JSON output instead of HCLv1 output, enable it through a feature flag.
```
export KOPS_FEATURE_FLAGS=TerraformJSON
kops update cluster .....
```
This is an alternative to using terraform's own configuration syntax, HCL. Be sure to delete the existing kubernetes.tf file; Terraform will otherwise use both and then complain.
Kops will require terraform 0.12 for JSON configuration. Unofficially (and only partially) it was also supported with terraform 0.11, so you can try removing the `required_version` in `kubernetes.tf.json`.
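For example, switching an existing cluster's output from HCL to JSON might look like the following sketch (the cluster name and output directory are illustrative):
```
# Enable the feature flag and regenerate the Terraform config as JSON
export KOPS_FEATURE_FLAGS=TerraformJSON
rm kubernetes.tf            # remove the old HCL output so terraform does not read both files
kops update cluster ${CLUSTER_NAME} --target=terraform --out=.
terraform init && terraform plan   # requires terraform 0.12
```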


@ -82,6 +82,7 @@ k8s.io/kops/pkg/client/simple/api
k8s.io/kops/pkg/client/simple/vfsclientset
k8s.io/kops/pkg/cloudinstances
k8s.io/kops/pkg/commands
k8s.io/kops/pkg/configbuilder
k8s.io/kops/pkg/diff
k8s.io/kops/pkg/dns
k8s.io/kops/pkg/drain


@ -1609,6 +1609,11 @@ spec:
kubeScheduler:
description: KubeSchedulerConfig is the configuration for the kube-scheduler
properties:
burst:
description: Burst sets the maximum qps to send to apiserver after
the burst quota is exhausted
format: int32
type: integer
featureGates:
additionalProperties:
type: string
@ -1677,6 +1682,10 @@ spec:
and the cloud provider as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/'
format: int32
type: integer
qps:
description: Qps sets the maximum qps to send to apiserver after
the burst quota is exhausted
type: string
usePolicyConfigMap:
description: UsePolicyConfigMap enable setting the scheduler policy
from a configmap
@ -2876,6 +2885,25 @@ spec:
description: Project is the cloud project we should use, required on
GCE
type: string
rollingUpdate:
description: RollingUpdate defines the default rolling-update settings
for instance groups
properties:
maxUnavailable:
anyOf:
- type: string
- type: integer
description: 'MaxUnavailable is the maximum number of nodes that
can be unavailable during the update. The value can be an absolute
number (for example 5) or a percentage of desired nodes (for example
10%). The absolute number is calculated from a percentage by rounding
down. A value of 0 disables rolling updates. Defaults to 1. Example:
when this is set to 30%, the InstanceGroup can be scaled down
to 70% of desired nodes immediately when the rolling update starts.
Once new nodes are ready, more old nodes can be drained, ensuring
that the total number of nodes available at all times during the
update is at least 70% of desired nodes.'
type: object
secretStore:
description: SecretStore is the VFS path to where secrets are stored
type: string


@ -624,6 +624,24 @@ spec:
description: 'Type determines the role of instances in this group: masters
or nodes'
type: string
rollingUpdate:
description: RollingUpdate defines the rolling-update behavior
properties:
maxUnavailable:
anyOf:
- type: string
- type: integer
description: 'MaxUnavailable is the maximum number of nodes that
can be unavailable during the update. The value can be an absolute
number (for example 5) or a percentage of desired nodes (for example
10%). The absolute number is calculated from a percentage by rounding
down. A value of 0 disables rolling updates. Defaults to 1. Example:
when this is set to 30%, the InstanceGroup can be scaled down
to 70% of desired nodes immediately when the rolling update starts.
Once new nodes are ready, more old nodes can be drained, ensuring
that the total number of nodes available at all times during the
update is at least 70% of desired nodes.'
type: object
rootVolumeDeleteOnTermination:
description: 'RootVolumeDeleteOnTermination configures root volume retention
policy upon instance termination. The root volume is deleted by default.


@ -24,18 +24,19 @@ import (
type Distribution string
var (
DistributionJessie Distribution = "jessie"
DistributionDebian9 Distribution = "debian9"
DistributionDebian10 Distribution = "buster"
DistributionXenial Distribution = "xenial"
DistributionBionic Distribution = "bionic"
DistributionRhel7 Distribution = "rhel7"
DistributionCentos7 Distribution = "centos7"
DistributionRhel8 Distribution = "rhel8"
DistributionCentos8 Distribution = "centos8"
DistributionCoreOS Distribution = "coreos"
DistributionFlatcar Distribution = "flatcar"
DistributionContainerOS Distribution = "containeros"
DistributionJessie Distribution = "jessie"
DistributionDebian9 Distribution = "debian9"
DistributionDebian10 Distribution = "buster"
DistributionXenial Distribution = "xenial"
DistributionBionic Distribution = "bionic"
DistributionAmazonLinux2 Distribution = "amazonlinux2"
DistributionRhel7 Distribution = "rhel7"
DistributionCentos7 Distribution = "centos7"
DistributionRhel8 Distribution = "rhel8"
DistributionCentos8 Distribution = "centos8"
DistributionCoreOS Distribution = "coreos"
DistributionFlatcar Distribution = "flatcar"
DistributionContainerOS Distribution = "containeros"
)
func (d Distribution) BuildTags() []string {
@ -50,6 +51,8 @@ func (d Distribution) BuildTags() []string {
t = []string{"_xenial"}
case DistributionBionic:
t = []string{"_bionic"}
case DistributionAmazonLinux2:
t = []string{"_amazonlinux2"}
case DistributionCentos7:
t = []string{"_centos7"}
case DistributionRhel7:
@ -88,7 +91,7 @@ func (d Distribution) IsDebianFamily() bool {
return true
case DistributionXenial, DistributionBionic:
return true
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8:
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8, DistributionAmazonLinux2:
return false
case DistributionCoreOS, DistributionFlatcar, DistributionContainerOS:
return false
@ -104,7 +107,7 @@ func (d Distribution) IsUbuntu() bool {
return false
case DistributionXenial, DistributionBionic:
return true
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8:
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8, DistributionAmazonLinux2:
return false
case DistributionCoreOS, DistributionFlatcar, DistributionContainerOS:
return false
@ -116,7 +119,7 @@ func (d Distribution) IsUbuntu() bool {
func (d Distribution) IsRHELFamily() bool {
switch d {
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8:
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8, DistributionAmazonLinux2:
return true
case DistributionJessie, DistributionXenial, DistributionBionic, DistributionDebian9, DistributionDebian10:
return false
@ -132,7 +135,7 @@ func (d Distribution) IsSystemd() bool {
switch d {
case DistributionJessie, DistributionXenial, DistributionBionic, DistributionDebian9, DistributionDebian10:
return true
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8:
case DistributionCentos7, DistributionRhel7, DistributionCentos8, DistributionRhel8, DistributionAmazonLinux2:
return true
case DistributionCoreOS, DistributionFlatcar:
return true


@ -113,7 +113,7 @@ func FindDistribution(rootfs string) (Distribution, error) {
return DistributionContainerOS, nil
}
if strings.HasPrefix(line, "PRETTY_NAME=\"Amazon Linux 2") {
return DistributionCentos7, nil
return DistributionAmazonLinux2, nil
}
}
klog.Warningf("unhandled /etc/os-release info %q", string(osRelease))


@ -46,6 +46,7 @@ go_library(
"//pkg/apis/kops/util:go_default_library",
"//pkg/apis/nodeup:go_default_library",
"//pkg/assets:go_default_library",
"//pkg/configbuilder:go_default_library",
"//pkg/dns:go_default_library",
"//pkg/flagbuilder:go_default_library",
"//pkg/k8scodecs:go_default_library",
@ -88,6 +89,7 @@ go_test(
"docker_test.go",
"kube_apiserver_test.go",
"kube_proxy_test.go",
"kube_scheduler_test.go",
"kubelet_test.go",
"protokube_test.go",
],
@ -97,6 +99,7 @@ go_test(
"//nodeup/pkg/distros:go_default_library",
"//pkg/apis/kops:go_default_library",
"//pkg/apis/nodeup:go_default_library",
"//pkg/configbuilder:go_default_library",
"//pkg/flagbuilder:go_default_library",
"//pkg/testutils:go_default_library",
"//upup/pkg/fi:go_default_library",


@ -47,7 +47,6 @@ var containerdVersions = []packageVersion{
Version: "1.2.4-1",
Source: "https://download.docker.com/linux/debian/dists/stretch/pool/stable/amd64/containerd.io_1.2.4-1_amd64.deb",
Hash: "48c6ab0c908316af9a183de5aad64703bc516bdf",
Dependencies: []string{"libseccomp2", "pigz"},
},
// 1.2.10 - Debian Stretch
@ -59,7 +58,6 @@ var containerdVersions = []packageVersion{
Version: "1.2.10-3",
Source: "https://download.docker.com/linux/debian/dists/stretch/pool/stable/amd64/containerd.io_1.2.10-3_amd64.deb",
Hash: "186f2f2c570f37b363102e6b879073db6dec671d",
Dependencies: []string{"libseccomp2", "pigz"},
},
// 1.2.10 - Debian Buster
@ -71,7 +69,6 @@ var containerdVersions = []packageVersion{
Version: "1.2.10-3",
Source: "https://download.docker.com/linux/debian/dists/buster/pool/stable/amd64/containerd.io_1.2.10-3_amd64.deb",
Hash: "365e4a7541ce2cf3c3036ea2a9bf6b40a50893a8",
Dependencies: []string{"libseccomp2", "pigz"},
},
// 1.2.10 - Ubuntu Xenial
@ -83,7 +80,6 @@ var containerdVersions = []packageVersion{
Version: "1.2.10-3",
Source: "https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/containerd.io_1.2.10-3_amd64.deb",
Hash: "b64e7170d9176bc38967b2e12147c69b65bdd0fc",
Dependencies: []string{"libseccomp2", "pigz"},
},
// 1.2.10 - Ubuntu Bionic
@ -95,26 +91,17 @@ var containerdVersions = []packageVersion{
Version: "1.2.10-3",
Source: "https://download.docker.com/linux/ubuntu/dists/bionic/pool/stable/amd64/containerd.io_1.2.10-3_amd64.deb",
Hash: "f4c941807310e3fa470dddfb068d599174a3daec",
Dependencies: []string{"libseccomp2", "pigz"},
},
// 1.2.10 - CentOS / Rhel 7
{
PackageVersion: "1.2.10",
Name: "containerd.io",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.2.10",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.10-3.2.el7.x86_64.rpm",
Hash: "f6447e84479df3a58ce04a3da87ccc384663493b",
ExtraPackages: map[string]packageInfo{
"container-selinux": {
Version: "2.107",
Source: "http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.107-1.el7_6.noarch.rpm",
Hash: "7de4211fa0dfd240d8827b93763e1eb5f0d56411",
},
},
Dependencies: []string{"libseccomp", "policycoreutils-python"},
},
// 1.2.10 - CentOS / Rhel 8
@ -126,7 +113,26 @@ var containerdVersions = []packageVersion{
Version: "1.2.10",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.10-3.2.el7.x86_64.rpm",
Hash: "f6447e84479df3a58ce04a3da87ccc384663493b",
Dependencies: []string{"container-selinux", "libseccomp", "pigz"},
},
// 1.2.11 - Linux Generic
{
PackageVersion: "1.2.11",
PlainBinary: true,
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.2.11",
Source: "https://storage.googleapis.com/cri-containerd-release/cri-containerd-1.2.11.linux-amd64.tar.gz",
Hash: "c98c9fdfd0984557e5b1a1f209213d2d8ad8471c",
},
// 1.3.2 - Linux Generic
{
PackageVersion: "1.3.2",
PlainBinary: true,
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.3.2",
Source: "https://storage.googleapis.com/cri-containerd-release/cri-containerd-1.3.2.linux-amd64.tar.gz",
Hash: "f451d46280104588f236bee277bca1da8babc0e8",
},
// TIP: When adding the next version, copy the previous version, string replace the version and run:
@ -220,11 +226,14 @@ func (b *ContainerdBuilder) Build(c *fi.ModelBuilderContext) error {
var packageTask fi.Task
if dv.PlainBinary {
packageTask = &nodetasks.Archive{
Name: "containerd",
Source: dv.Source,
Hash: dv.Hash,
TargetDir: "/usr/bin/",
StripComponents: 1,
Name: "containerd.io",
Source: dv.Source,
Hash: dv.Hash,
TargetDir: "/",
MapFiles: map[string]string{
"./usr/local/bin": "/usr",
"./usr/local/sbin": "/usr",
},
}
c.AddTask(packageTask)
} else {
@ -283,6 +292,8 @@ func (b *ContainerdBuilder) Build(c *fi.ModelBuilderContext) error {
}
func (b *ContainerdBuilder) buildSystemdService() *nodetasks.Service {
// Based on https://github.com/containerd/cri/blob/master/contrib/systemd-units/containerd.service
manifest := &systemd.Manifest{}
manifest.Set("Unit", "Description", "containerd container runtime")
manifest.Set("Unit", "Documentation", "https://containerd.io")
@ -293,21 +304,21 @@ func (b *ContainerdBuilder) buildSystemdService() *nodetasks.Service {
manifest.Set("Service", "ExecStartPre", "-/sbin/modprobe overlay")
manifest.Set("Service", "ExecStart", "/usr/bin/containerd -c /etc/containerd/config-kops.toml \"$CONTAINERD_OPTS\"")
// kill only the containerd process, not all processes in the cgroup
manifest.Set("Service", "KillMode", "process")
manifest.Set("Service", "Restart", "always")
manifest.Set("Service", "RestartSec", "5")
// set delegate yes so that systemd does not reset the cgroups of containerd containers
manifest.Set("Service", "Delegate", "yes")
// kill only the containerd process, not all processes in the cgroup
manifest.Set("Service", "KillMode", "process")
// make killing of processes of this unit under memory pressure very unlikely
manifest.Set("Service", "OOMScoreAdjust", "-999")
manifest.Set("Service", "LimitNOFILE", "1048576")
manifest.Set("Service", "LimitNPROC", "infinity")
manifest.Set("Service", "LimitCORE", "infinity")
manifest.Set("Service", "TasksMax", "infinity")
manifest.Set("Service", "Restart", "always")
manifest.Set("Service", "RestartSec", "2s")
manifest.Set("Service", "StartLimitInterval", "0")
manifest.Set("Service", "TimeoutStartSec", "0")
manifest.Set("Install", "WantedBy", "multi-user.target")
manifestString := manifest.Render()


@ -158,10 +158,15 @@ func (d *packageVersion) matches(arch Architecture, packageVersion string, distr
return false
}
foundDistro := false
for _, d := range d.Distros {
if d == distro {
foundDistro = true
if len(d.Distros) > 0 {
for _, d := range d.Distros {
if d == distro {
foundDistro = true
}
}
} else {
// Distro list is empty, assuming ANY
foundDistro = true
}
if !foundDistro {
return false


@ -74,7 +74,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "1.11.2",
Name: "docker-engine",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.11.2",
Source: "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.11.2-1.el7.centos.x86_64.rpm",
@ -117,7 +117,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "1.12.1",
Name: "docker-engine",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.12.1",
Source: "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.1-1.el7.centos.x86_64.rpm",
@ -176,7 +176,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "1.12.3",
Name: "docker-engine",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.12.3",
Source: "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.3-1.el7.centos.x86_64.rpm",
@ -188,7 +188,7 @@ var dockerVersions = []packageVersion{
Hash: "a6b0243af348140236ed96f2e902b259c590eefa",
},
},
Dependencies: []string{"libtool-ltdl", "libseccomp"},
Dependencies: []string{"libtool-ltdl"},
},
// 1.12.6 - k8s 1.6
@ -216,7 +216,7 @@ var dockerVersions = []packageVersion{
Version: "1.12.6-0~debian-stretch",
Source: "http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.12.6-0~debian-stretch_amd64.deb",
Hash: "18bb7d024658f27a1221eae4de78d792bf00611b",
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl", "libseccomp2"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
//Depends: iptables, init-system-helpers (>= 1.18~), libapparmor1 (>= 2.6~devel), libc6 (>= 2.17), libdevmapper1.02.1 (>= 2:1.02.97), libltdl7 (>= 2.4.6), libseccomp2 (>= 2.1.0), libsystemd0
//Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils
},
@ -242,7 +242,7 @@ var dockerVersions = []packageVersion{
Version: "1.12.6-0~ubuntu-xenial",
Source: "http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.12.6-0~ubuntu-xenial_amd64.deb",
Hash: "fffc22da4ad5b20715bbb6c485b2d2bb7e84fd33",
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
// Depends: iptables, init-system-helpers (>= 1.18~), lsb-base (>= 4.1+Debian11ubuntu7), libapparmor1 (>= 2.6~devel), libc6 (>= 2.17), libdevmapper1.02.1 (>= 2:1.02.97), libltdl7 (>= 2.4.6), libseccomp2 (>= 2.1.0), libsystemd0
},
@ -250,7 +250,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "1.12.6",
Name: "docker-engine",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.12.6",
Source: "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.6-1.el7.centos.x86_64.rpm",
@ -262,7 +262,7 @@ var dockerVersions = []packageVersion{
Hash: "9a6ee0d631ca911b6927450a3c396e9a5be75047",
},
},
Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python"},
Dependencies: []string{"libtool-ltdl", "libcgroup"},
},
// 1.13.1 - k8s 1.8
@ -316,7 +316,7 @@ var dockerVersions = []packageVersion{
Version: "1.13.1-0~ubuntu-xenial",
Source: "http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.13.1-0~ubuntu-xenial_amd64.deb",
Hash: "d12cbd686f44536c679a03cf0137df163f0bba5f",
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
// Depends: iptables, init-system-helpers (>= 1.18~), lsb-base (>= 4.1+Debian11ubuntu7), libapparmor1 (>= 2.6~devel), libc6 (>= 2.17), libdevmapper1.02.1 (>= 2:1.02.97), libltdl7 (>= 2.4.6), libseccomp2 (>= 2.1.0), libsystemd0
},
@ -324,7 +324,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "1.13.1",
Name: "docker-engine",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "1.13.1",
Source: "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.13.1-1.el7.centos.x86_64.rpm",
@ -336,7 +336,7 @@ var dockerVersions = []packageVersion{
Hash: "948c518a610af631fa98aa32d9bcd43e9ddd5ebc",
},
},
Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python", "selinux-policy-base", "selinux-policy-targeted"},
Dependencies: []string{"libtool-ltdl", "libcgroup", "selinux-policy-base", "selinux-policy-targeted"},
},
// 17.03.2 - k8s 1.8
@ -389,7 +389,7 @@ var dockerVersions = []packageVersion{
Version: "17.03.2~ce-0~ubuntu-xenial",
Source: "http://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_17.03.2~ce-0~ubuntu-xenial_amd64.deb",
Hash: "4dcee1a05ec592e8a76e53e5b464ea43085a2849",
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
MarkImmutable: []string{"/usr/bin/docker-runc"},
},
@ -401,7 +401,7 @@ var dockerVersions = []packageVersion{
Architectures: []Architecture{ArchitectureAmd64},
Source: "http://download.docker.com/linux/static/stable/x86_64/docker-17.03.2-ce.tgz",
Hash: "141716ae046016a1792ce232a0f4c8eed7fe37d1",
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
MarkImmutable: []string{"/usr/bin/docker-runc"},
},
@ -409,7 +409,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "17.03.2",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "17.03.2.ce",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-17.03.2.ce-1.el7.centos.x86_64.rpm",
@ -421,7 +421,7 @@ var dockerVersions = []packageVersion{
Hash: "4659c937b66519c88ef2a82a906bb156db29d191",
},
},
Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python"},
Dependencies: []string{"libtool-ltdl", "libcgroup"},
MarkImmutable: []string{"/usr/bin/docker-runc"},
},
// 17.09.0 - k8s 1.8
@ -471,7 +471,7 @@ var dockerVersions = []packageVersion{
Version: "17.09.0~ce-0~ubuntu",
Source: "http://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_17.09.0~ce-0~ubuntu_amd64.deb",
Hash: "94f6e89be6d45d9988269a237eb27c7d6a844d7f",
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
//Depends: iptables, init-system-helpers, lsb-base, libapparmor1, libc6, libdevmapper1.02.1, libltdl7, libeseccomp2, libsystemd0
//Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils, apparmor
},
@ -485,7 +485,7 @@ var dockerVersions = []packageVersion{
Version: "18.06.2~ce~3-0~ubuntu",
Source: "https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_18.06.2~ce~3-0~ubuntu_amd64.deb",
Hash: "03e5eaae9c84b144e1140d9b418e43fce0311892",
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
//Depends: iptables, init-system-helpers, lsb-base, libapparmor1, libc6, libdevmapper1.02.1, libltdl7, libeseccomp2, libsystemd0
//Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils, apparmor
},
@ -499,7 +499,7 @@ var dockerVersions = []packageVersion{
Version: "18.06.3~ce~3-0~ubuntu",
Source: "https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_18.06.3~ce~3-0~ubuntu_amd64.deb",
Hash: "c06eda4e934cce6a7941a6af6602d4315b500a22",
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
//Depends: iptables, init-system-helpers, lsb-base, libc6, libdevmapper1.02.1, libltdl7, libseccomp2, libsystemd0
//Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, pigz, xz-utils, apparmor
},
@ -508,19 +508,12 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "17.09.0",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "17.09.0.ce",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-17.09.0.ce-1.el7.centos.x86_64.rpm",
Hash: "b4ce72e80ff02926de943082821bbbe73958f87a",
ExtraPackages: map[string]packageInfo{
"container-selinux": {
Version: "2.68",
Source: "http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.68-1.el7.noarch.rpm",
Hash: "d9f87f7f4f2e8e611f556d873a17b8c0c580fec0",
},
},
Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python"},
Dependencies: []string{"libtool-ltdl", "libcgroup"},
},
// 18.03.1 - Bionic
@ -532,7 +525,7 @@ var dockerVersions = []packageVersion{
Version: "18.03.1~ce~3-0~ubuntu",
Source: "https://download.docker.com/linux/ubuntu/dists/bionic/pool/stable/amd64/docker-ce_18.03.1~ce~3-0~ubuntu_amd64.deb",
Hash: "b55b32bd0e9176dd32b1e6128ad9fda10a65cc8b",
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
//Depends: iptables, init-system-helpers, lsb-base, libapparmor1, libc6, libdevmapper1.02.1, libltdl7, libeseccomp2, libsystemd0
//Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils, apparmor
},
@ -546,7 +539,7 @@ var dockerVersions = []packageVersion{
Version: "18.06.2~ce~3-0~ubuntu",
Source: "https://download.docker.com/linux/ubuntu/dists/bionic/pool/stable/amd64/docker-ce_18.06.2~ce~3-0~ubuntu_amd64.deb",
Hash: "9607c67644e3e1ad9661267c99499004f2e84e05",
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
//Depends: iptables, init-system-helpers, lsb-base, libapparmor1, libc6, libdevmapper1.02.1, libltdl7, libeseccomp2, libsystemd0
//Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils, apparmor
},
@ -605,19 +598,12 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "18.06.1",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "18.06.1.ce",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.1.ce-3.el7.x86_64.rpm",
Hash: "0a1325e570c5e54111a79623c9fd0c0c714d3a11",
ExtraPackages: map[string]packageInfo{
"container-selinux": {
Version: "2.68",
Source: "http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.68-1.el7.noarch.rpm",
Hash: "d9f87f7f4f2e8e611f556d873a17b8c0c580fec0",
},
},
Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python"},
Dependencies: []string{"libtool-ltdl", "libcgroup"},
},
// 18.09.3 - Debian Stretch
@ -639,25 +625,15 @@ var dockerVersions = []packageVersion{
},
// 18.06.2 - CentOS / Rhel7 (two packages)
{
PackageVersion: "18.06.2",
Name: "container-selinux",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Architectures: []Architecture{ArchitectureAmd64},
Version: "2.68",
Source: "http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.68-1.el7.noarch.rpm",
Hash: "d9f87f7f4f2e8e611f556d873a17b8c0c580fec0",
Dependencies: []string{"policycoreutils-python"},
},
{
PackageVersion: "18.06.2",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "18.06.2.ce",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.2.ce-3.el7.x86_64.rpm",
Hash: "456eb7c5bfb37fac342e9ade21b602c076c5b367",
Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup"},
Dependencies: []string{"libtool-ltdl", "libcgroup"},
},
// 18.06.3 (contains fix for CVE-2019-5736)
@ -671,7 +647,7 @@ var dockerVersions = []packageVersion{
Version: "18.06.3~ce~3-0~ubuntu",
Source: "https://download.docker.com/linux/ubuntu/dists/bionic/pool/stable/amd64/docker-ce_18.06.3~ce~3-0~ubuntu_amd64.deb",
Hash: "b396678a8b70f0503a7b944fa6e3297ab27b345b",
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
//Depends: iptables, init-system-helpers, lsb-base, libapparmor1, libc6, libdevmapper1.02.1, libltdl7, libeseccomp2, libsystemd0
//Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils, apparmor
},
@ -705,19 +681,12 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "18.06.3",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "18.06.3.ce",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.3.ce-3.el7.x86_64.rpm",
Hash: "5369602f88406d4fb9159dc1d3fd44e76fb4cab8",
ExtraPackages: map[string]packageInfo{
"container-selinux": {
Version: "2.68",
Source: "http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.68-1.el7.noarch.rpm",
Hash: "d9f87f7f4f2e8e611f556d873a17b8c0c580fec0",
},
},
Dependencies: []string{"libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python"},
Dependencies: []string{"libtool-ltdl", "libcgroup"},
},
// 18.06.3 - CentOS / Rhel8 (two packages)
{
@ -728,7 +697,7 @@ var dockerVersions = []packageVersion{
Version: "18.06.3.ce",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.06.3.ce-3.el7.x86_64.rpm",
Hash: "5369602f88406d4fb9159dc1d3fd44e76fb4cab8",
Dependencies: []string{"container-selinux", "libtool-ltdl", "libseccomp", "libcgroup", "policycoreutils-python-utils", "python3-policycoreutils"},
Dependencies: []string{"libtool-ltdl", "libcgroup", "policycoreutils-python-utils", "python3-policycoreutils"},
},
// 18.09.9 - k8s 1.14 - https://github.com/kubernetes/kubernetes/pull/72823
@ -749,7 +718,7 @@ var dockerVersions = []packageVersion{
Hash: "88f8f3103d2e5011e2f1a73b9e6dbf03d6e6698a",
},
},
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
},
// 18.09.9 - Debian Buster
@ -768,7 +737,7 @@ var dockerVersions = []packageVersion{
Hash: "510eee5b6884867be0d2b360f8ff8cf7f0c0d11a",
},
},
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
},
// 18.09.9 - Xenial
@ -813,7 +782,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "18.09.9",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "18.09.9",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-18.09.9-3.el7.x86_64.rpm",
@ -825,7 +794,7 @@ var dockerVersions = []packageVersion{
Hash: "0c51b1339a95bd732ca305f07b7bcc95f132b9c8",
},
},
Dependencies: []string{"libtool-ltdl", "iptables"},
Dependencies: []string{"libtool-ltdl"},
},
// 18.09.9 - CentOS / Rhel8
@ -844,7 +813,7 @@ var dockerVersions = []packageVersion{
Hash: "0c51b1339a95bd732ca305f07b7bcc95f132b9c8",
},
},
Dependencies: []string{"libtool-ltdl", "iptables"},
Dependencies: []string{"libtool-ltdl"},
},
// 19.03.4 - k8s 1.17 - https://github.com/kubernetes/kubernetes/pull/84476
@ -865,7 +834,7 @@ var dockerVersions = []packageVersion{
Hash: "57f71ee764abb19a0b4c580ff14b1eb3de3a9e08",
},
},
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
},
// 19.03.4 - Debian Buster
@ -884,7 +853,7 @@ var dockerVersions = []packageVersion{
Hash: "2549a364f0e5ce489c79b292b78e349751385dd5",
},
},
Dependencies: []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
Dependencies: []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
},
// 19.03.4 - Xenial
@ -929,7 +898,7 @@ var dockerVersions = []packageVersion{
{
PackageVersion: "19.03.4",
Name: "docker-ce",
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
Distros: []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7, distros.DistributionAmazonLinux2},
Architectures: []Architecture{ArchitectureAmd64},
Version: "19.03.4",
Source: "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-19.03.4-3.el7.x86_64.rpm",
@ -941,7 +910,7 @@ var dockerVersions = []packageVersion{
Hash: "1fffcc716e74a59f753f8898ba96693a00e79e26",
},
},
Dependencies: []string{"libtool-ltdl", "iptables"},
Dependencies: []string{"libtool-ltdl"},
},
// 19.03.4 - CentOS / Rhel8
@ -960,7 +929,7 @@ var dockerVersions = []packageVersion{
Hash: "1fffcc716e74a59f753f8898ba96693a00e79e26",
},
},
Dependencies: []string{"libtool-ltdl", "iptables"},
Dependencies: []string{"libtool-ltdl"},
},
// TIP: When adding the next version, copy the previous version, string replace the version and run:

View File

@ -20,6 +20,7 @@ import (
"fmt"
"strconv"
"k8s.io/kops/pkg/configbuilder"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/pkg/k8scodecs"
"k8s.io/kops/pkg/kubemanifest"
@ -34,6 +35,20 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
)
// ClientConnectionConfig is used by kube-scheduler to talk to the api server
type ClientConnectionConfig struct {
Burst int32 `yaml:"burst,omitempty"`
Kubeconfig string `yaml:"kubeconfig"`
QPS *float64 `yaml:"qps,omitempty"`
}
// SchedulerConfig is used to generate the config file
type SchedulerConfig struct {
APIVersion string `yaml:"apiVersion"`
Kind string `yaml:"kind"`
ClientConnection ClientConnectionConfig `yaml:"clientConnection,omitempty"`
}
// KubeSchedulerBuilder install kube-scheduler
type KubeSchedulerBuilder struct {
*NodeupModelContext
@ -41,14 +56,16 @@ type KubeSchedulerBuilder struct {
var _ fi.ModelBuilder = &KubeSchedulerBuilder{}
const defaultKubeConfig = "/var/lib/kube-scheduler/kubeconfig"
// Build is responsible for building the manifest for the kube-scheduler
func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error {
if !b.IsMaster {
return nil
}
useConfigFile := b.IsKubernetesGTE("1.12")
{
pod, err := b.buildPod()
pod, err := b.buildPod(useConfigFile)
if err != nil {
return fmt.Errorf("error building kube-scheduler pod: %v", err)
}
@ -78,6 +95,19 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error {
Mode: s("0400"),
})
}
if useConfigFile {
config, err := configbuilder.BuildConfigYaml(b.Cluster.Spec.KubeScheduler, NewSchedulerConfig())
if err != nil {
return err
}
c.AddTask(&nodetasks.File{
Path: "/var/lib/kube-scheduler/config.yaml",
Contents: fi.NewBytesResource(config),
Type: nodetasks.FileType_File,
Mode: s("0400"),
})
}
{
c.AddTask(&nodetasks.File{
@ -92,16 +122,30 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error {
return nil
}
// NewSchedulerConfig initializes a new kube-scheduler config file
func NewSchedulerConfig() *SchedulerConfig {
schedConfig := new(SchedulerConfig)
schedConfig.APIVersion = "kubescheduler.config.k8s.io/v1alpha1"
schedConfig.Kind = "KubeSchedulerConfiguration"
schedConfig.ClientConnection = ClientConnectionConfig{}
schedConfig.ClientConnection.Kubeconfig = defaultKubeConfig
return schedConfig
}
// buildPod is responsible for constructing the pod specification
func (b *KubeSchedulerBuilder) buildPod() (*v1.Pod, error) {
func (b *KubeSchedulerBuilder) buildPod(useConfigFile bool) (*v1.Pod, error) {
c := b.Cluster.Spec.KubeScheduler
flags, err := flagbuilder.BuildFlagsList(c)
if err != nil {
return nil, fmt.Errorf("error building kube-scheduler flags: %v", err)
}
// Add kubeconfig flag
flags = append(flags, "--kubeconfig="+"/var/lib/kube-scheduler/kubeconfig")
if useConfigFile {
flags = append(flags, "--config="+"/var/lib/kube-scheduler/config.yaml")
} else {
// Add kubeconfig flag
flags = append(flags, "--kubeconfig="+defaultKubeConfig)
}
if c.UsePolicyConfigMap != nil {
flags = append(flags, "--policy-configmap=scheduler-policy", "--policy-configmap-namespace=kube-system")

View File

@ -0,0 +1,69 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"bytes"
"testing"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/configbuilder"
)
func TestParseDefault(t *testing.T) {
expect := []byte(
`apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
clientConnection:
kubeconfig: /var/lib/kube-scheduler/kubeconfig
`)
s := &kops.KubeSchedulerConfig{}
yaml, err := configbuilder.BuildConfigYaml(s, NewSchedulerConfig())
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if !bytes.Equal(yaml, expect) {
t.Errorf("unexpected result: \n%s, expected: \n%s", yaml, expect)
}
}
func TestParse(t *testing.T) {
expect := []byte(
`apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
clientConnection:
burst: 100
kubeconfig: /var/lib/kube-scheduler/kubeconfig
qps: 3.1
`)
qps, _ := resource.ParseQuantity("3.1")
s := &kops.KubeSchedulerConfig{Qps: &qps, Burst: 100}
yaml, err := configbuilder.BuildConfigYaml(s, NewSchedulerConfig())
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if !bytes.Equal(yaml, expect) {
t.Errorf("unexpected result: \n%s, expected: \n%s", yaml, expect)
}
}

View File

@ -50,7 +50,6 @@ func (b *MiscUtilsBuilder) Build(c *fi.ModelBuilderContext) error {
var packages []string
if b.Distribution.IsDebianFamily() {
packages = append(packages, "socat")
packages = append(packages, "curl")
packages = append(packages, "wget")
packages = append(packages, "nfs-common")

View File

@ -17,6 +17,7 @@ limitations under the License.
package model
import (
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
@ -37,17 +38,42 @@ func (b *PackagesBuilder) Build(c *fi.ModelBuilderContext) error {
// ebtables - kops #1711
// ethtool - kops #1830
if b.Distribution.IsDebianFamily() {
// From containerd: https://github.com/containerd/cri/blob/master/contrib/ansible/tasks/bootstrap_ubuntu.yaml
c.AddTask(&nodetasks.Package{Name: "conntrack"})
c.AddTask(&nodetasks.Package{Name: "ebtables"})
c.AddTask(&nodetasks.Package{Name: "ethtool"})
c.AddTask(&nodetasks.Package{Name: "iptables"})
c.AddTask(&nodetasks.Package{Name: "libseccomp2"})
c.AddTask(&nodetasks.Package{Name: "pigz"})
c.AddTask(&nodetasks.Package{Name: "socat"})
c.AddTask(&nodetasks.Package{Name: "util-linux"})
} else if b.Distribution.IsRHELFamily() {
// From containerd: https://github.com/containerd/cri/blob/master/contrib/ansible/tasks/bootstrap_centos.yaml
c.AddTask(&nodetasks.Package{Name: "conntrack-tools"})
c.AddTask(&nodetasks.Package{Name: "ebtables"})
c.AddTask(&nodetasks.Package{Name: "ethtool"})
c.AddTask(&nodetasks.Package{Name: "iptables"})
c.AddTask(&nodetasks.Package{Name: "libseccomp"})
c.AddTask(&nodetasks.Package{Name: "socat"})
c.AddTask(&nodetasks.Package{Name: "util-linux"})
// Handle some packages differently for each distro
switch b.Distribution {
case distros.DistributionRhel7:
// Easier to install container-selinux from the CentOS vault than from the RHEL extras channel
c.AddTask(&nodetasks.Package{
Name: "container-selinux",
Source: s("http://vault.centos.org/7.6.1810/extras/x86_64/Packages/container-selinux-2.107-1.el7_6.noarch.rpm"),
Hash: s("7de4211fa0dfd240d8827b93763e1eb5f0d56411"),
})
case distros.DistributionAmazonLinux2:
// Amazon Linux 2 doesn't have SELinux enabled by default
default:
c.AddTask(&nodetasks.Package{Name: "container-selinux"})
c.AddTask(&nodetasks.Package{Name: "pigz"})
}
} else {
// Hopefully it's already installed
klog.Infof("ebtables package not known for distro %q", b.Distribution)
// Hopefully they are already installed
klog.Warningf("unknown distribution, skipping required packages install: %v", b.Distribution)
}
return nil

View File

@ -208,10 +208,6 @@ preventStart: true
source: https://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/containerd.io_1.2.10-3_amd64.deb
version: 1.2.10-3
---
Name: libseccomp2
---
Name: pigz
---
Name: containerd.service
definition: |
[Unit]
@ -224,16 +220,15 @@ definition: |
EnvironmentFile=/etc/environment
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd -c /etc/containerd/config-kops.toml "$CONTAINERD_OPTS"
KillMode=process
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Restart=always
RestartSec=2s
StartLimitInterval=0
TimeoutStartSec=0
[Install]
WantedBy=multi-user.target

View File

@ -32,6 +32,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -21,6 +21,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kops/pkg/apis/kops/util"
)
@ -186,6 +187,8 @@ type ClusterSpec struct {
// specified, each parameter must follow the form variable=value, the way
// it would appear in sysctl.conf.
SysctlParameters []string `json:"sysctlParameters,omitempty"`
// RollingUpdate defines the default rolling-update settings for instance groups
RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"`
}
// NodeAuthorizationSpec is used to configure node authorization
@ -654,3 +657,19 @@ type DNSControllerGossipConfig struct {
Secondary *DNSControllerGossipConfig `json:"secondary,omitempty"`
Seed *string `json:"seed,omitempty"`
}
type RollingUpdate struct {
// MaxUnavailable is the maximum number of nodes that can be unavailable during the update.
// The value can be an absolute number (for example 5) or a percentage of desired
// nodes (for example 10%).
// The absolute number is calculated from a percentage by rounding down.
// A value of 0 disables rolling updates.
// Defaults to 1.
// Example: when this is set to 30%, the InstanceGroup can be scaled
// down to 70% of desired nodes immediately when the rolling update
// starts. Once new nodes are ready, more old nodes can be drained,
// ensuring that the total number of nodes available at all times
// during the update is at least 70% of desired nodes.
// +optional
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
}
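Illustration only (not part of this commit): the 30% example from the MaxUnavailable comment above, built programmatically. The helper name and package are hypothetical; the import paths are the ones this API and its tests already use.

package example

import (
	"k8s.io/apimachinery/pkg/util/intstr"

	"k8s.io/kops/pkg/apis/kops"
)

// thirtyPercentRollingUpdate allows at most 30% of an InstanceGroup's desired
// nodes to be unavailable at any point during a rolling update.
func thirtyPercentRollingUpdate() *kops.RollingUpdate {
	maxUnavailable := intstr.FromString("30%")
	return &kops.RollingUpdate{MaxUnavailable: &maxUnavailable}
}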

View File

@ -609,6 +609,10 @@ type KubeSchedulerConfig struct {
// which has been supported as far back as Kubernetes 1.7. The default depends on the version and the cloud provider
// as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/
MaxPersistentVolumes *int32 `json:"maxPersistentVolumes,omitempty"`
// Qps sets the maximum qps to send to apiserver after the burst quota is exhausted
Qps *resource.Quantity `json:"qps,omitempty" configfile:"ClientConnection.QPS"`
// Burst sets the maximum allowed burst of queries to the apiserver before throttling back to the qps limit
Burst int32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"`
}
// LeaderElectionConfiguration defines the configuration of leader election

View File

@ -157,6 +157,8 @@ type InstanceGroupSpec struct {
// specified, each parameter must follow the form variable=value, the way
// it would appear in sysctl.conf.
SysctlParameters []string `json:"sysctlParameters,omitempty"`
// RollingUpdate defines the rolling-update behavior
RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"`
}
const (

View File

@ -29,6 +29,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -19,6 +19,7 @@ package v1alpha1
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
// +genclient
@ -184,6 +185,8 @@ type ClusterSpec struct {
// specified, each parameter must follow the form variable=value, the way
// it would appear in sysctl.conf.
SysctlParameters []string `json:"sysctlParameters,omitempty"`
// RollingUpdate defines the default rolling-update settings for instance groups
RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"`
}
// NodeAuthorizationSpec is used to configure node authorization
@ -538,3 +541,19 @@ type DNSControllerGossipConfig struct {
Secondary *DNSControllerGossipConfig `json:"secondary,omitempty"`
Seed *string `json:"seed,omitempty"`
}
type RollingUpdate struct {
// MaxUnavailable is the maximum number of nodes that can be unavailable during the update.
// The value can be an absolute number (for example 5) or a percentage of desired
// nodes (for example 10%).
// The absolute number is calculated from a percentage by rounding down.
// A value of 0 disables rolling updates.
// Defaults to 1.
// Example: when this is set to 30%, the InstanceGroup can be scaled
// down to 70% of desired nodes immediately when the rolling update
// starts. Once new nodes are ready, more old nodes can be drained,
// ensuring that the total number of nodes available at all times
// during the update is at least 70% of desired nodes.
// +optional
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
}

View File

@ -609,6 +609,10 @@ type KubeSchedulerConfig struct {
// which has been supported as far back as Kubernetes 1.7. The default depends on the version and the cloud provider
// as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/
MaxPersistentVolumes *int32 `json:"maxPersistentVolumes,omitempty"`
// Qps sets the maximum qps to send to apiserver after the burst quota is exhausted
Qps *resource.Quantity `json:"qps,omitempty" configfile:"ClientConnection.QPS"`
// Burst sets the maximum allowed burst of queries to the apiserver before throttling back to the qps limit
Burst int32 `json:"burst,omitempty" configfile:"ClientConnection.Burst"`
}
// LeaderElectionConfiguration defines the configuration of leader election

View File

@ -146,6 +146,8 @@ type InstanceGroupSpec struct {
// specified, each parameter must follow the form variable=value, the way
// it would appear in sysctl.conf.
SysctlParameters []string `json:"sysctlParameters,omitempty"`
// RollingUpdate defines the rolling-update behavior
RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"`
}
const (

View File

@ -693,6 +693,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*RollingUpdate)(nil), (*kops.RollingUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_RollingUpdate_To_kops_RollingUpdate(a.(*RollingUpdate), b.(*kops.RollingUpdate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kops.RollingUpdate)(nil), (*RollingUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kops_RollingUpdate_To_v1alpha1_RollingUpdate(a.(*kops.RollingUpdate), b.(*RollingUpdate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*RomanaNetworkingSpec)(nil), (*kops.RomanaNetworkingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_RomanaNetworkingSpec_To_kops_RomanaNetworkingSpec(a.(*RomanaNetworkingSpec), b.(*kops.RomanaNetworkingSpec), scope)
}); err != nil {
@ -1878,6 +1888,15 @@ func autoConvert_v1alpha1_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
}
out.UseHostCertificates = in.UseHostCertificates
out.SysctlParameters = in.SysctlParameters
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(kops.RollingUpdate)
if err := Convert_v1alpha1_RollingUpdate_To_kops_RollingUpdate(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
@ -2176,6 +2195,15 @@ func autoConvert_kops_ClusterSpec_To_v1alpha1_ClusterSpec(in *kops.ClusterSpec,
}
out.UseHostCertificates = in.UseHostCertificates
out.SysctlParameters = in.SysctlParameters
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdate)
if err := Convert_kops_RollingUpdate_To_v1alpha1_RollingUpdate(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
@ -3058,6 +3086,15 @@ func autoConvert_v1alpha1_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan
out.SecurityGroupOverride = in.SecurityGroupOverride
out.InstanceProtection = in.InstanceProtection
out.SysctlParameters = in.SysctlParameters
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(kops.RollingUpdate)
if err := Convert_v1alpha1_RollingUpdate_To_kops_RollingUpdate(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
@ -3181,6 +3218,15 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha1_InstanceGroupSpec(in *kops.I
out.SecurityGroupOverride = in.SecurityGroupOverride
out.InstanceProtection = in.InstanceProtection
out.SysctlParameters = in.SysctlParameters
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdate)
if err := Convert_kops_RollingUpdate_To_v1alpha1_RollingUpdate(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
@ -3656,6 +3702,8 @@ func autoConvert_v1alpha1_KubeSchedulerConfig_To_kops_KubeSchedulerConfig(in *Ku
out.UsePolicyConfigMap = in.UsePolicyConfigMap
out.FeatureGates = in.FeatureGates
out.MaxPersistentVolumes = in.MaxPersistentVolumes
out.Qps = in.Qps
out.Burst = in.Burst
return nil
}
@ -3680,6 +3728,8 @@ func autoConvert_kops_KubeSchedulerConfig_To_v1alpha1_KubeSchedulerConfig(in *ko
out.UsePolicyConfigMap = in.UsePolicyConfigMap
out.FeatureGates = in.FeatureGates
out.MaxPersistentVolumes = in.MaxPersistentVolumes
out.Qps = in.Qps
out.Burst = in.Burst
return nil
}
@ -4602,6 +4652,26 @@ func Convert_kops_RBACAuthorizationSpec_To_v1alpha1_RBACAuthorizationSpec(in *ko
return autoConvert_kops_RBACAuthorizationSpec_To_v1alpha1_RBACAuthorizationSpec(in, out, s)
}
func autoConvert_v1alpha1_RollingUpdate_To_kops_RollingUpdate(in *RollingUpdate, out *kops.RollingUpdate, s conversion.Scope) error {
out.MaxUnavailable = in.MaxUnavailable
return nil
}
// Convert_v1alpha1_RollingUpdate_To_kops_RollingUpdate is an autogenerated conversion function.
func Convert_v1alpha1_RollingUpdate_To_kops_RollingUpdate(in *RollingUpdate, out *kops.RollingUpdate, s conversion.Scope) error {
return autoConvert_v1alpha1_RollingUpdate_To_kops_RollingUpdate(in, out, s)
}
func autoConvert_kops_RollingUpdate_To_v1alpha1_RollingUpdate(in *kops.RollingUpdate, out *RollingUpdate, s conversion.Scope) error {
out.MaxUnavailable = in.MaxUnavailable
return nil
}
// Convert_kops_RollingUpdate_To_v1alpha1_RollingUpdate is an autogenerated conversion function.
func Convert_kops_RollingUpdate_To_v1alpha1_RollingUpdate(in *kops.RollingUpdate, out *RollingUpdate, s conversion.Scope) error {
return autoConvert_kops_RollingUpdate_To_v1alpha1_RollingUpdate(in, out, s)
}
func autoConvert_v1alpha1_RomanaNetworkingSpec_To_kops_RomanaNetworkingSpec(in *RomanaNetworkingSpec, out *kops.RomanaNetworkingSpec, s conversion.Scope) error {
out.DaemonServiceIP = in.DaemonServiceIP
out.EtcdServiceIP = in.EtcdServiceIP

View File

@ -23,6 +23,7 @@ package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -818,6 +819,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdate)
(*in).DeepCopyInto(*out)
}
return
}
@ -1738,6 +1744,11 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdate)
(*in).DeepCopyInto(*out)
}
return
}
@ -2415,6 +2426,11 @@ func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) {
*out = new(int32)
**out = **in
}
if in.Qps != nil {
in, out := &in.Qps, &out.Qps
x := (*in).DeepCopy()
*out = &x
}
return
}
@ -3234,6 +3250,27 @@ func (in *RBACAuthorizationSpec) DeepCopy() *RBACAuthorizationSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) {
*out = *in
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate.
func (in *RollingUpdate) DeepCopy() *RollingUpdate {
if in == nil {
return nil
}
out := new(RollingUpdate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RomanaNetworkingSpec) DeepCopyInto(out *RomanaNetworkingSpec) {
*out = *in

View File

@ -29,6 +29,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

View File

@ -19,6 +19,7 @@ package v1alpha2
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
// +genclient
@ -184,6 +185,8 @@ type ClusterSpec struct {
// specified, each parameter must follow the form variable=value, the way
// it would appear in sysctl.conf.
SysctlParameters []string `json:"sysctlParameters,omitempty"`
// RollingUpdate defines the default rolling-update settings for instance groups
RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"`
}
// NodeAuthorizationSpec is used to configure node authorization
@ -551,3 +554,19 @@ type DNSControllerGossipConfig struct {
Secondary *DNSControllerGossipConfig `json:"secondary,omitempty"`
Seed *string `json:"seed,omitempty"`
}
type RollingUpdate struct {
// MaxUnavailable is the maximum number of nodes that can be unavailable during the update.
// The value can be an absolute number (for example 5) or a percentage of desired
// nodes (for example 10%).
// The absolute number is calculated from a percentage by rounding down.
// A value of 0 disables rolling updates.
// Defaults to 1.
// Example: when this is set to 30%, the InstanceGroup can be scaled
// down to 70% of desired nodes immediately when the rolling update
// starts. Once new nodes are ready, more old nodes can be drained,
// ensuring that the total number of nodes available at all times
// during the update is at least 70% of desired nodes.
// +optional
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
}

View File

@ -610,6 +610,10 @@ type KubeSchedulerConfig struct {
// which has been supported as far back as Kubernetes 1.7. The default depends on the version and the cloud provider
// as outlined: https://kubernetes.io/docs/concepts/storage/storage-limits/
MaxPersistentVolumes *int32 `json:"maxPersistentVolumes,omitempty"`
// Qps sets the maximum qps to send to apiserver after the burst quota is exhausted
Qps *resource.Quantity `json:"qps,omitempty"`
// Burst sets the maximum allowed burst of queries to the apiserver before throttling back to the qps limit
Burst int32 `json:"burst,omitempty"`
}
// LeaderElectionConfiguration defines the configuration of leader election

View File

@ -153,6 +153,8 @@ type InstanceGroupSpec struct {
// specified, each parameter must follow the form variable=value, the way
// it would appear in sysctl.conf.
SysctlParameters []string `json:"sysctlParameters,omitempty"`
// RollingUpdate defines the rolling-update behavior
RollingUpdate *RollingUpdate `json:"rollingUpdate,omitempty"`
}
const (

View File

@ -753,6 +753,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*RollingUpdate)(nil), (*kops.RollingUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_RollingUpdate_To_kops_RollingUpdate(a.(*RollingUpdate), b.(*kops.RollingUpdate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kops.RollingUpdate)(nil), (*RollingUpdate)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kops_RollingUpdate_To_v1alpha2_RollingUpdate(a.(*kops.RollingUpdate), b.(*RollingUpdate), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*RomanaNetworkingSpec)(nil), (*kops.RomanaNetworkingSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_RomanaNetworkingSpec_To_kops_RomanaNetworkingSpec(a.(*RomanaNetworkingSpec), b.(*kops.RomanaNetworkingSpec), scope)
}); err != nil {
@ -1931,6 +1941,15 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
}
out.UseHostCertificates = in.UseHostCertificates
out.SysctlParameters = in.SysctlParameters
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(kops.RollingUpdate)
if err := Convert_v1alpha2_RollingUpdate_To_kops_RollingUpdate(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
@ -2244,6 +2263,15 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec,
}
out.UseHostCertificates = in.UseHostCertificates
out.SysctlParameters = in.SysctlParameters
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdate)
if err := Convert_kops_RollingUpdate_To_v1alpha2_RollingUpdate(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
@ -3176,6 +3204,15 @@ func autoConvert_v1alpha2_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan
out.SecurityGroupOverride = in.SecurityGroupOverride
out.InstanceProtection = in.InstanceProtection
out.SysctlParameters = in.SysctlParameters
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(kops.RollingUpdate)
if err := Convert_v1alpha2_RollingUpdate_To_kops_RollingUpdate(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
@ -3304,6 +3341,15 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha2_InstanceGroupSpec(in *kops.I
out.SecurityGroupOverride = in.SecurityGroupOverride
out.InstanceProtection = in.InstanceProtection
out.SysctlParameters = in.SysctlParameters
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdate)
if err := Convert_kops_RollingUpdate_To_v1alpha2_RollingUpdate(*in, *out, s); err != nil {
return err
}
} else {
out.RollingUpdate = nil
}
return nil
}
@ -3926,6 +3972,8 @@ func autoConvert_v1alpha2_KubeSchedulerConfig_To_kops_KubeSchedulerConfig(in *Ku
out.UsePolicyConfigMap = in.UsePolicyConfigMap
out.FeatureGates = in.FeatureGates
out.MaxPersistentVolumes = in.MaxPersistentVolumes
out.Qps = in.Qps
out.Burst = in.Burst
return nil
}
@ -3950,6 +3998,8 @@ func autoConvert_kops_KubeSchedulerConfig_To_v1alpha2_KubeSchedulerConfig(in *ko
out.UsePolicyConfigMap = in.UsePolicyConfigMap
out.FeatureGates = in.FeatureGates
out.MaxPersistentVolumes = in.MaxPersistentVolumes
out.Qps = in.Qps
out.Burst = in.Burst
return nil
}
@ -4872,6 +4922,26 @@ func Convert_kops_RBACAuthorizationSpec_To_v1alpha2_RBACAuthorizationSpec(in *ko
return autoConvert_kops_RBACAuthorizationSpec_To_v1alpha2_RBACAuthorizationSpec(in, out, s)
}
func autoConvert_v1alpha2_RollingUpdate_To_kops_RollingUpdate(in *RollingUpdate, out *kops.RollingUpdate, s conversion.Scope) error {
out.MaxUnavailable = in.MaxUnavailable
return nil
}
// Convert_v1alpha2_RollingUpdate_To_kops_RollingUpdate is an autogenerated conversion function.
func Convert_v1alpha2_RollingUpdate_To_kops_RollingUpdate(in *RollingUpdate, out *kops.RollingUpdate, s conversion.Scope) error {
return autoConvert_v1alpha2_RollingUpdate_To_kops_RollingUpdate(in, out, s)
}
func autoConvert_kops_RollingUpdate_To_v1alpha2_RollingUpdate(in *kops.RollingUpdate, out *RollingUpdate, s conversion.Scope) error {
out.MaxUnavailable = in.MaxUnavailable
return nil
}
// Convert_kops_RollingUpdate_To_v1alpha2_RollingUpdate is an autogenerated conversion function.
func Convert_kops_RollingUpdate_To_v1alpha2_RollingUpdate(in *kops.RollingUpdate, out *RollingUpdate, s conversion.Scope) error {
return autoConvert_kops_RollingUpdate_To_v1alpha2_RollingUpdate(in, out, s)
}
func autoConvert_v1alpha2_RomanaNetworkingSpec_To_kops_RomanaNetworkingSpec(in *RomanaNetworkingSpec, out *kops.RomanaNetworkingSpec, s conversion.Scope) error {
out.DaemonServiceIP = in.DaemonServiceIP
out.EtcdServiceIP = in.EtcdServiceIP

View File

@ -23,6 +23,7 @@ package v1alpha2
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -791,6 +792,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdate)
(*in).DeepCopyInto(*out)
}
return
}
@ -1700,6 +1706,11 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdate)
(*in).DeepCopyInto(*out)
}
return
}
@ -2486,6 +2497,11 @@ func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) {
*out = new(int32)
**out = **in
}
if in.Qps != nil {
in, out := &in.Qps, &out.Qps
x := (*in).DeepCopy()
*out = &x
}
return
}
@ -3305,6 +3321,27 @@ func (in *RBACAuthorizationSpec) DeepCopy() *RBACAuthorizationSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) {
*out = *in
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate.
func (in *RollingUpdate) DeepCopy() *RollingUpdate {
if in == nil {
return nil
}
out := new(RollingUpdate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RomanaNetworkingSpec) DeepCopyInto(out *RomanaNetworkingSpec) {
*out = *in

View File

@ -26,6 +26,7 @@ go_library(
"//vendor/github.com/aws/aws-sdk-go/aws/arn:go_default_library",
"//vendor/github.com/blang/semver:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
@ -46,6 +47,7 @@ go_test(
"//pkg/apis/kops:go_default_library",
"//upup/pkg/fi:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",

View File

@ -132,6 +132,12 @@ func ValidateInstanceGroup(g *kops.InstanceGroup) error {
return err
}
if g.Spec.RollingUpdate != nil {
if errs := validateRollingUpdate(g.Spec.RollingUpdate, field.NewPath("rollingUpdate")); len(errs) > 0 {
return errs.ToAggregate()
}
}
return nil
}

View File

@ -22,6 +22,7 @@ import (
"strings"
"github.com/blang/semver"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/api/validation"
utilnet "k8s.io/apimachinery/pkg/util/net"
@ -121,6 +122,10 @@ func validateClusterSpec(spec *kops.ClusterSpec, fieldPath *field.Path) field.Er
allErrs = append(allErrs, validateContainerRuntime(&spec.ContainerRuntime, fieldPath.Child("containerRuntime"))...)
}
if spec.RollingUpdate != nil {
allErrs = append(allErrs, validateRollingUpdate(spec.RollingUpdate, fieldPath.Child("rollingUpdate"))...)
}
return allErrs
}
@ -440,3 +445,19 @@ func validateContainerRuntime(runtime *string, fldPath *field.Path) field.ErrorL
return allErrs
}
func validateRollingUpdate(rollingUpdate *kops.RollingUpdate, fldpath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if rollingUpdate.MaxUnavailable != nil {
unavailable, err := intstr.GetValueFromIntOrPercent(rollingUpdate.MaxUnavailable, 1, false)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldpath.Child("MaxUnavailable"), rollingUpdate.MaxUnavailable,
fmt.Sprintf("Unable to parse: %v", err)))
}
if unavailable < 0 {
allErrs = append(allErrs, field.Invalid(fldpath.Child("MaxUnavailable"), rollingUpdate.MaxUnavailable, "Cannot be negative"))
}
}
return allErrs
}

View File

@ -19,6 +19,7 @@ package validation
import (
"testing"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
@ -400,3 +401,50 @@ func Test_Validate_Calico(t *testing.T) {
testErrors(t, g.Input, errs, g.ExpectedErrors)
}
}
func Test_Validate_RollingUpdate(t *testing.T) {
grid := []struct {
Input kops.RollingUpdate
ExpectedErrors []string
}{
{
Input: kops.RollingUpdate{},
},
{
Input: kops.RollingUpdate{
MaxUnavailable: intStr(intstr.FromInt(0)),
},
},
{
Input: kops.RollingUpdate{
MaxUnavailable: intStr(intstr.FromString("0%")),
},
},
{
Input: kops.RollingUpdate{
MaxUnavailable: intStr(intstr.FromString("nope")),
},
ExpectedErrors: []string{"Invalid value::TestField.MaxUnavailable"},
},
{
Input: kops.RollingUpdate{
MaxUnavailable: intStr(intstr.FromInt(-1)),
},
ExpectedErrors: []string{"Invalid value::TestField.MaxUnavailable"},
},
{
Input: kops.RollingUpdate{
MaxUnavailable: intStr(intstr.FromString("-1%")),
},
ExpectedErrors: []string{"Invalid value::TestField.MaxUnavailable"},
},
}
for _, g := range grid {
errs := validateRollingUpdate(&g.Input, field.NewPath("TestField"))
testErrors(t, g.Input, errs, g.ExpectedErrors)
}
}
func intStr(i intstr.IntOrString) *intstr.IntOrString {
return &i
}

View File

@ -23,6 +23,7 @@ package kops
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@ -891,6 +892,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdate)
(*in).DeepCopyInto(*out)
}
return
}
@ -1866,6 +1872,11 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.RollingUpdate != nil {
in, out := &in.RollingUpdate, &out.RollingUpdate
*out = new(RollingUpdate)
(*in).DeepCopyInto(*out)
}
return
}
@ -2668,6 +2679,11 @@ func (in *KubeSchedulerConfig) DeepCopyInto(out *KubeSchedulerConfig) {
*out = new(int32)
**out = **in
}
if in.Qps != nil {
in, out := &in.Qps, &out.Qps
x := (*in).DeepCopy()
*out = &x
}
return
}
@ -3519,6 +3535,27 @@ func (in *RBACAuthorizationSpec) DeepCopy() *RBACAuthorizationSpec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RollingUpdate) DeepCopyInto(out *RollingUpdate) {
*out = *in
if in.MaxUnavailable != nil {
in, out := &in.MaxUnavailable, &out.MaxUnavailable
*out = new(intstr.IntOrString)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdate.
func (in *RollingUpdate) DeepCopy() *RollingUpdate {
if in == nil {
return nil
}
out := new(RollingUpdate)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RomanaNetworkingSpec) DeepCopyInto(out *RomanaNetworkingSpec) {
*out = *in

View File

@ -0,0 +1,21 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["buildconfigfile.go"],
importpath = "k8s.io/kops/pkg/configbuilder",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/kops:go_default_library",
"//util/pkg/reflectutils:go_default_library",
"//vendor/gopkg.in/yaml.v2:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["buildconfigfile_test.go"],
embed = [":go_default_library"],
)

View File

@ -0,0 +1,111 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package configbuilder
import (
"fmt"
"reflect"
"strconv"
"strings"
"gopkg.in/yaml.v2"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/util/pkg/reflectutils"
)
// BuildConfigYaml reflects the options interface and extracts the parameters for the config file
func BuildConfigYaml(options *kops.KubeSchedulerConfig, target interface{}) ([]byte, error) {
walker := func(path string, field *reflect.StructField, val reflect.Value) error {
if field == nil {
klog.V(8).Infof("ignoring non-field: %s", path)
return nil
}
tag := field.Tag.Get("configfile")
if tag == "" {
klog.V(4).Infof("not writing field with no configfile tag: %s", path)
// We want to descend - it could be a structure containing flags
return nil
}
if tag == "-" {
klog.V(4).Infof("skipping field with %q configfile tag: %s", tag, path)
return reflectutils.SkipReflection
}
tokens := strings.Split(tag, ",")
flagName := tokens[0]
targetValue, err := getValueFromStruct(flagName, target)
if err != nil {
return fmt.Errorf("conversion error for field %s: %s", flagName, err)
}
// We do have to do this, even though the recursive walk will do it for us
// because when we descend we won't have `field` set
if val.Kind() == reflect.Ptr {
if val.IsNil() {
return nil
}
}
switch v := val.Interface().(type) {
case *resource.Quantity:
floatVal, err := strconv.ParseFloat(v.AsDec().String(), 64)
if err != nil {
return fmt.Errorf("unable to convert from Quantity %v to float", v)
}
targetValue.Set(reflect.ValueOf(&floatVal))
default:
targetValue.Set(val)
}
return reflectutils.SkipReflection
}
err := reflectutils.ReflectRecursive(reflect.ValueOf(options), walker)
if err != nil {
return nil, fmt.Errorf("BuildFlagsList to reflect value: %s", err)
}
configFile, err := yaml.Marshal(target)
if err != nil {
return nil, err
}
return configFile, nil
}
func getValueFromStruct(keyWithDots string, object interface{}) (*reflect.Value, error) {
keySlice := strings.Split(keyWithDots, ".")
v := reflect.ValueOf(object)
// iterate through field names, ignoring the first name as it might be the current instance name
// this could also be made recursive if you want to support types like slices and maps along with structs
for _, key := range keySlice {
for v.Kind() == reflect.Ptr {
v = v.Elem()
}
// we only accept structs
if v.Kind() != reflect.Struct {
return nil, fmt.Errorf("only accepts structs; got %T", v)
}
v = v.FieldByName(key)
}
return &v, nil
}
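A standalone sketch (hypothetical example, not part of this commit) of the Quantity-to-float conversion the walker above applies to *resource.Quantity fields, so that the generated config file carries a plain number such as qps: 3.1.

package main

import (
	"fmt"
	"strconv"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Mirror the walker: render the Quantity as a decimal string, then parse it
	// into the float64 that is written to the scheduler config file.
	q := resource.MustParse("3.1")
	f, err := strconv.ParseFloat(q.AsDec().String(), 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(f) // 3.1
}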

View File

@ -0,0 +1,67 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package configbuilder
import (
"testing"
)
// DummyNestedStruct mimics the nested clientConnection section of a config file
type DummyNestedStruct struct {
Name *string `yaml:"name,omitempty"`
QPS *float64 `yaml:"qps,omitempty"`
}
// DummyStruct is a minimal target struct for exercising the config reflection
type DummyStruct struct {
ClientConnection *DummyNestedStruct `yaml:"clientConnection,omitempty"`
}
func TestGetStructVal(t *testing.T) {
str := "test"
s := &DummyStruct{
ClientConnection: &DummyNestedStruct{
Name: &str,
},
}
v, err := getValueFromStruct("ClientConnection.Name", s)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
inStruct := v.Elem().String()
if inStruct != str {
t.Errorf("unexpected value: %s, %s, expected: %s", inStruct, err, str)
}
}
func TestWrongStructField(t *testing.T) {
str := "test"
s := &DummyStruct{
ClientConnection: &DummyNestedStruct{
Name: &str,
},
}
v, err := getValueFromStruct("ClientConnection.NotExistent", s)
if err != nil {
t.Errorf("unexpected error: %s", err)
}
if v.IsValid() {
t.Errorf("unexpected Valid value from non-existent field lookup")
}
}

View File

@ -82,6 +82,8 @@ var (
VSphereCloudProvider = New("VSphereCloudProvider", Bool(false))
// SkipEtcdVersionCheck will bypass the check that etcd-manager is using a supported etcd version
SkipEtcdVersionCheck = New("SkipEtcdVersionCheck", Bool(false))
// TerraformJSON enables Terraform JSON output instead of HCL output. JSON output can also be parsed by Terraform 0.12
TerraformJSON = New("TerraformJSON", Bool(false))
)
// FeatureFlag defines a feature flag
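A hypothetical usage sketch (only the flag declaration is part of this hunk; the wiring into the Terraform target is assumed here): callers would consult the flag the same way the other feature flags in this package are consulted.

package example

import "k8s.io/kops/pkg/featureflag"

// useTerraformJSON reports whether the Terraform target should emit JSON
// instead of HCL, based on the feature flag declared above.
func useTerraformJSON() bool {
	return featureflag.TerraformJSON.Enabled()
}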

View File

@ -6,6 +6,7 @@ go_library(
"delete.go",
"instancegroups.go",
"rollingupdate.go",
"settings.go",
],
importpath = "k8s.io/kops/pkg/instancegroups",
visibility = ["//visibility:public"],
@ -20,6 +21,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
@ -29,7 +31,10 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["rollingupdate_test.go"],
srcs = [
"rollingupdate_test.go",
"settings_test.go",
],
embed = [":go_default_library"],
deps = [
"//cloudmock/aws/mockautoscaling:go_default_library",
@ -39,9 +44,11 @@ go_test(
"//upup/pkg/fi/cloudup/awsup:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/autoscaling:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",

View File

@ -103,7 +103,6 @@ func promptInteractive(upgradedHostId, upgradedHostName string) (stopPrompting b
// TODO: Temporarily increase size of ASG?
// TODO: Remove from ASG first so status is immediately updated?
// TODO: Batch termination, like a rolling-update
// RollingUpdate performs a rolling update on a list of ec2 instances.
func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpdateCluster, cluster *api.Cluster, isBastion bool, sleepAfterTerminate time.Duration, validationTimeout time.Duration) (err error) {
@ -118,6 +117,8 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd
return fmt.Errorf("rollingUpdate is missing a k8s client")
}
noneReady := len(r.CloudGroup.Ready) == 0
numInstances := len(r.CloudGroup.Ready) + len(r.CloudGroup.NeedUpdate)
update := r.CloudGroup.NeedUpdate
if rollingUpdateData.Force {
update = append(update, r.CloudGroup.Ready...)
@ -148,76 +149,51 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd
}
}
for _, u := range update {
instanceId := u.ID
settings := resolveSettings(cluster, r.CloudGroup.InstanceGroup, numInstances)
nodeName := ""
if u.Node != nil {
nodeName = u.Node.Name
runningDrains := 0
maxConcurrency := settings.MaxUnavailable.IntValue()
if maxConcurrency == 0 {
klog.Infof("Rolling updates for InstanceGroup %s are disabled", r.CloudGroup.InstanceGroup.Name)
return nil
}
if rollingUpdateData.Interactive {
maxConcurrency = 1
}
terminateChan := make(chan error, maxConcurrency)
for uIdx, u := range update {
go func(m *cloudinstances.CloudInstanceGroupMember) {
terminateChan <- r.drainTerminateAndWait(m, rollingUpdateData, isBastion, sleepAfterTerminate)
}(u)
runningDrains++
// Wait until after one node is deleted and its replacement validates before the concurrent draining
// in case the current spec does not result in usable nodes.
if runningDrains < maxConcurrency && (!noneReady || uIdx > 0) {
continue
}
if isBastion {
// We don't want to validate for bastions - they aren't part of the cluster
} else if rollingUpdateData.CloudOnly {
klog.Warning("Not draining cluster nodes as 'cloudonly' flag is set.")
} else {
if u.Node != nil {
klog.Infof("Draining the node: %q.", nodeName)
if err = r.DrainNode(u, rollingUpdateData); err != nil {
if rollingUpdateData.FailOnDrainError {
return fmt.Errorf("failed to drain node %q: %v", nodeName, err)
}
klog.Infof("Ignoring error draining node %q: %v", nodeName, err)
}
} else {
klog.Warningf("Skipping drain of instance %q, because it is not registered in kubernetes", instanceId)
}
err = <-terminateChan
runningDrains--
if err != nil {
return waitForPendingBeforeReturningError(runningDrains, terminateChan, err)
}
// We unregister the node before deleting it; if the replacement comes up with the same name it would otherwise still be cordoned
// (It often seems like GCE tries to re-use names)
if !isBastion && !rollingUpdateData.CloudOnly {
if u.Node == nil {
klog.Warningf("no kubernetes Node associated with %s, skipping node deletion", instanceId)
} else {
klog.Infof("deleting node %q from kubernetes", nodeName)
if err := r.deleteNode(u.Node, rollingUpdateData); err != nil {
return fmt.Errorf("error deleting node %q: %v", nodeName, err)
}
}
}
if err = r.DeleteInstance(u); err != nil {
klog.Errorf("error deleting instance %q, node %q: %v", instanceId, nodeName, err)
return err
}
// Wait for the minimum interval
klog.Infof("waiting for %v after terminating instance", sleepAfterTerminate)
time.Sleep(sleepAfterTerminate)
if rollingUpdateData.CloudOnly {
klog.Warningf("Not validating cluster as cloudonly flag is set.")
} else {
klog.Info("Validating the cluster.")
if err = r.validateClusterWithDuration(rollingUpdateData, validationTimeout); err != nil {
if rollingUpdateData.FailOnValidate {
klog.Errorf("Cluster did not validate within %s", validationTimeout)
return fmt.Errorf("error validating cluster after removing a node: %v", err)
}
klog.Warningf("Cluster validation failed after removing instance, proceeding since fail-on-validate is set to false: %v", err)
}
err = r.maybeValidate(rollingUpdateData, validationTimeout)
if err != nil {
return waitForPendingBeforeReturningError(runningDrains, terminateChan, err)
}
if rollingUpdateData.Interactive {
nodeName := ""
if u.Node != nil {
nodeName = u.Node.Name
}
stopPrompting, err := promptInteractive(u.ID, nodeName)
if err != nil {
return err
@ -227,11 +203,49 @@ func (r *RollingUpdateInstanceGroup) RollingUpdate(rollingUpdateData *RollingUpd
rollingUpdateData.Interactive = false
}
}
// Validation tends to return failures from the start of drain until the replacement is
// fully ready, so sweep up as many completions as we can before starting the next drain.
sweep:
for runningDrains > 0 {
select {
case err = <-terminateChan:
runningDrains--
if err != nil {
return waitForPendingBeforeReturningError(runningDrains, terminateChan, err)
}
default:
break sweep
}
}
}
if runningDrains > 0 {
for runningDrains > 0 {
err = <-terminateChan
runningDrains--
if err != nil {
return waitForPendingBeforeReturningError(runningDrains, terminateChan, err)
}
}
err = r.maybeValidate(rollingUpdateData, validationTimeout)
if err != nil {
return err
}
}
return nil
}
func waitForPendingBeforeReturningError(runningDrains int, terminateChan chan error, err error) error {
for runningDrains > 0 {
<-terminateChan
runningDrains--
}
return err
}
func (r *RollingUpdateInstanceGroup) taintAllNeedUpdate(update []*cloudinstances.CloudInstanceGroupMember, rollingUpdateData *RollingUpdateCluster) error {
var toTaint []*corev1.Node
for _, u := range update {
@ -290,6 +304,81 @@ func (r *RollingUpdateInstanceGroup) patchTaint(rollingUpdateData *RollingUpdate
return err
}
func (r *RollingUpdateInstanceGroup) drainTerminateAndWait(u *cloudinstances.CloudInstanceGroupMember, rollingUpdateData *RollingUpdateCluster, isBastion bool, sleepAfterTerminate time.Duration) error {
instanceId := u.ID
nodeName := ""
if u.Node != nil {
nodeName = u.Node.Name
}
if isBastion {
// We don't want to validate for bastions - they aren't part of the cluster
} else if rollingUpdateData.CloudOnly {
klog.Warning("Not draining cluster nodes as 'cloudonly' flag is set.")
} else {
if u.Node != nil {
klog.Infof("Draining the node: %q.", nodeName)
if err := r.DrainNode(u, rollingUpdateData); err != nil {
if rollingUpdateData.FailOnDrainError {
return fmt.Errorf("failed to drain node %q: %v", nodeName, err)
}
klog.Infof("Ignoring error draining node %q: %v", nodeName, err)
}
} else {
klog.Warningf("Skipping drain of instance %q, because it is not registered in kubernetes", instanceId)
}
}
// We unregister the node before deleting it; if the replacement comes up with the same name it would otherwise still be cordoned
// (It often seems like GCE tries to re-use names)
if !isBastion && !rollingUpdateData.CloudOnly {
if u.Node == nil {
klog.Warningf("no kubernetes Node associated with %s, skipping node deletion", instanceId)
} else {
klog.Infof("deleting node %q from kubernetes", nodeName)
if err := r.deleteNode(u.Node, rollingUpdateData); err != nil {
return fmt.Errorf("error deleting node %q: %v", nodeName, err)
}
}
}
if err := r.DeleteInstance(u); err != nil {
klog.Errorf("error deleting instance %q, node %q: %v", instanceId, nodeName, err)
return err
}
// Wait for the minimum interval
klog.Infof("waiting for %v after terminating instance", sleepAfterTerminate)
time.Sleep(sleepAfterTerminate)
return nil
}
func (r *RollingUpdateInstanceGroup) maybeValidate(rollingUpdateData *RollingUpdateCluster, validationTimeout time.Duration) error {
if rollingUpdateData.CloudOnly {
klog.Warningf("Not validating cluster as cloudonly flag is set.")
} else {
klog.Info("Validating the cluster.")
if err := r.validateClusterWithDuration(rollingUpdateData, validationTimeout); err != nil {
if rollingUpdateData.FailOnValidate {
klog.Errorf("Cluster did not validate within %s", validationTimeout)
return fmt.Errorf("error validating cluster after removing a node: %v", err)
}
klog.Warningf("Cluster validation failed after removing instance, proceeding since fail-on-validate is set to false: %v", err)
}
}
return nil
}
// validateClusterWithDuration runs validation.ValidateCluster until either we get positive result or the timeout expires
func (r *RollingUpdateInstanceGroup) validateClusterWithDuration(rollingUpdateData *RollingUpdateCluster, duration time.Duration) error {
// Try to validate cluster at least once, this will handle durations that are lower
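The remainder of validateClusterWithDuration is truncated in this diff. As a rough, hypothetical sketch of the retry-until-timeout behaviour the comment describes (validate at least once, then keep retrying until the deadline passes), not the kops implementation:

func validateUntil(validate func() error, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		// Always validate at least once, even if timeout < interval.
		err := validate()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return err
		}
		time.Sleep(interval)
	}
}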

View File

@ -19,14 +19,17 @@ package instancegroups
import (
"errors"
"strings"
"sync"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
v1meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
testingclient "k8s.io/client-go/testing"
@ -42,7 +45,7 @@ const (
taintPatch = "{\"spec\":{\"taints\":[{\"effect\":\"PreferNoSchedule\",\"key\":\"kops.k8s.io/scheduled-for-update\"}]}}"
)
func getTestSetup() (*RollingUpdateCluster, awsup.AWSCloud, *kopsapi.Cluster) {
func getTestSetup() (*RollingUpdateCluster, *awsup.MockAWSCloud, *kopsapi.Cluster) {
k8sClient := fake.NewSimpleClientset()
mockcloud := awsup.BuildMockAWSCloud("us-east-1", "abc")
@ -602,6 +605,251 @@ func TestRollingUpdateTaintAllButOneNeedUpdate(t *testing.T) {
assertGroupInstanceCount(t, cloud, "node-1", 1)
}
func TestRollingUpdateDisabled(t *testing.T) {
c, cloud, cluster := getTestSetup()
zero := intstr.FromInt(0)
cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
MaxUnavailable: &zero,
}
groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
err := c.RollingUpdate(groups, cluster, &kopsapi.InstanceGroupList{})
assert.NoError(t, err, "rolling update")
assertGroupInstanceCount(t, cloud, "node-1", 3)
assertGroupInstanceCount(t, cloud, "node-2", 3)
assertGroupInstanceCount(t, cloud, "master-1", 2)
assertGroupInstanceCount(t, cloud, "bastion-1", 1)
}
func TestRollingUpdateDisabledCloudonly(t *testing.T) {
c, cloud, cluster := getTestSetup()
c.CloudOnly = true
zero := intstr.FromInt(0)
cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
MaxUnavailable: &zero,
}
groups := getGroupsAllNeedUpdate(c.K8sClient, cloud)
err := c.RollingUpdate(groups, cluster, &kopsapi.InstanceGroupList{})
assert.NoError(t, err, "rolling update")
assertGroupInstanceCount(t, cloud, "node-1", 3)
assertGroupInstanceCount(t, cloud, "node-2", 3)
assertGroupInstanceCount(t, cloud, "master-1", 2)
assertGroupInstanceCount(t, cloud, "bastion-1", 1)
}
// The concurrent update tests attempt to induce the following expected update sequence:
//
// (Only for "all need update" tests, to verify the toe-dipping behavior)
// Request validate (7) -->
// <-- validated
// Request terminate 1 node (7) -->
// <-- 1 node terminated, 6 left
// (end only for "all need update" tests)
// Request validate (6) -->
// <-- validated
// Request terminate 2 nodes (6,5) -->
// <-- 1 node terminated (5), 5 left
// Request validate (4) -->
// <-- 1 node terminated (6), 4 left
// <-- validated
// Request terminate 2 nodes (4,3) -->
// <-- 1 node terminated (3), 3 left
// Request validate (2) -->
// <-- validated
// Request terminate 1 node (2) -->
// <-- 1 node terminated (2), 2 left
// Request validate (1) -->
// <-- 1 node terminated (4), 1 left
// <-- validated
// Request terminate 1 node (1) -->
// <-- 1 node terminated, 0 left
// Request validate (0) -->
// <-- validated
type concurrentTest struct {
autoscalingiface.AutoScalingAPI
t *testing.T
mutex sync.Mutex
terminationRequestsLeft int
previousValidation int
validationChan chan bool
terminationChan chan bool
}
func (c *concurrentTest) Validate() (*validation.ValidationCluster, error) {
c.mutex.Lock()
defer c.mutex.Unlock()
terminationRequestsLeft := c.terminationRequestsLeft
switch terminationRequestsLeft {
case 7, 6, 0:
assert.Equal(c.t, terminationRequestsLeft+1, c.previousValidation, "previous validation")
case 5, 3:
c.t.Errorf("unexpected call to Validate with %d termination requests left", terminationRequestsLeft)
case 4:
assert.Equal(c.t, 6, c.previousValidation, "previous validation")
select {
case c.terminationChan <- true:
default:
c.t.Error("terminationChan is full")
}
c.mutex.Unlock()
select {
case <-c.validationChan:
case <-time.After(1 * time.Second):
c.t.Error("timed out reading from validationChan")
}
c.mutex.Lock()
case 2:
assert.Equal(c.t, 4, c.previousValidation, "previous validation")
case 1:
assert.Equal(c.t, 2, c.previousValidation, "previous validation")
select {
case c.terminationChan <- true:
default:
c.t.Error("terminationChan is full")
}
c.mutex.Unlock()
select {
case <-c.validationChan:
case <-time.After(1 * time.Second):
c.t.Error("timed out reading from validationChan")
}
c.mutex.Lock()
}
c.previousValidation = terminationRequestsLeft
return &validation.ValidationCluster{}, nil
}
func (c *concurrentTest) TerminateInstanceInAutoScalingGroup(input *autoscaling.TerminateInstanceInAutoScalingGroupInput) (*autoscaling.TerminateInstanceInAutoScalingGroupOutput, error) {
c.mutex.Lock()
defer c.mutex.Unlock()
terminationRequestsLeft := c.terminationRequestsLeft
c.terminationRequestsLeft--
switch terminationRequestsLeft {
case 7, 2, 1:
assert.Equal(c.t, terminationRequestsLeft, c.previousValidation, "previous validation")
case 6, 4:
assert.Equal(c.t, terminationRequestsLeft, c.previousValidation, "previous validation")
c.mutex.Unlock()
select {
case <-c.terminationChan:
case <-time.After(1 * time.Second):
c.t.Error("timed out reading from terminationChan")
}
c.mutex.Lock()
go c.delayThenWakeValidation()
case 5, 3:
assert.Equal(c.t, terminationRequestsLeft+1, c.previousValidation, "previous validation")
}
return c.AutoScalingAPI.TerminateInstanceInAutoScalingGroup(input)
}
func (c *concurrentTest) delayThenWakeValidation() {
time.Sleep(20 * time.Millisecond) // NodeInterval plus some
select {
case c.validationChan <- true:
default:
c.t.Error("validationChan is full")
}
}
func (c *concurrentTest) AssertComplete() {
c.mutex.Lock()
defer c.mutex.Unlock()
assert.Equal(c.t, 0, c.previousValidation, "last validation")
}
func newConcurrentTest(t *testing.T, cloud *awsup.MockAWSCloud, allNeedUpdate bool) *concurrentTest {
test := concurrentTest{
AutoScalingAPI: cloud.MockAutoscaling,
t: t,
terminationRequestsLeft: 6,
validationChan: make(chan bool),
terminationChan: make(chan bool),
}
if allNeedUpdate {
test.terminationRequestsLeft = 7
}
test.previousValidation = test.terminationRequestsLeft + 1
return &test
}
func TestRollingUpdateMaxUnavailableAllNeedUpdate(t *testing.T) {
c, cloud, cluster := getTestSetup()
concurrentTest := newConcurrentTest(t, cloud, true)
c.ValidateSuccessDuration = 0
c.ClusterValidator = concurrentTest
cloud.MockAutoscaling = concurrentTest
two := intstr.FromInt(2)
cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
MaxUnavailable: &two,
}
groups := make(map[string]*cloudinstances.CloudInstanceGroup)
makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 7, 7)
err := c.RollingUpdate(groups, cluster, &kopsapi.InstanceGroupList{})
assert.NoError(t, err, "rolling update")
assertGroupInstanceCount(t, cloud, "node-1", 0)
concurrentTest.AssertComplete()
}
func TestRollingUpdateMaxUnavailableAllButOneNeedUpdate(t *testing.T) {
c, cloud, cluster := getTestSetup()
concurrentTest := newConcurrentTest(t, cloud, false)
c.ValidateSuccessDuration = 0
c.ClusterValidator = concurrentTest
cloud.MockAutoscaling = concurrentTest
two := intstr.FromInt(2)
cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
MaxUnavailable: &two,
}
groups := make(map[string]*cloudinstances.CloudInstanceGroup)
makeGroup(groups, c.K8sClient, cloud, "node-1", kopsapi.InstanceGroupRoleNode, 7, 6)
err := c.RollingUpdate(groups, cluster, &kopsapi.InstanceGroupList{})
assert.NoError(t, err, "rolling update")
assertGroupInstanceCount(t, cloud, "node-1", 1)
concurrentTest.AssertComplete()
}
func TestRollingUpdateMaxUnavailableAllNeedUpdateMaster(t *testing.T) {
c, cloud, cluster := getTestSetup()
concurrentTest := newConcurrentTest(t, cloud, true)
c.ValidateSuccessDuration = 0
c.ClusterValidator = concurrentTest
cloud.MockAutoscaling = concurrentTest
two := intstr.FromInt(2)
cluster.Spec.RollingUpdate = &kopsapi.RollingUpdate{
MaxUnavailable: &two,
}
groups := make(map[string]*cloudinstances.CloudInstanceGroup)
makeGroup(groups, c.K8sClient, cloud, "master-1", kopsapi.InstanceGroupRoleMaster, 7, 7)
err := c.RollingUpdate(groups, cluster, &kopsapi.InstanceGroupList{})
assert.NoError(t, err, "rolling update")
assertGroupInstanceCount(t, cloud, "master-1", 0)
concurrentTest.AssertComplete()
}
func assertCordon(t *testing.T, action testingclient.PatchAction) {
assert.Equal(t, "nodes", action.GetResource().Resource)
assert.Equal(t, cordonPatch, string(action.GetPatch()))

View File

@ -0,0 +1,56 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package instancegroups
import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kops/pkg/apis/kops"
)
func resolveSettings(cluster *kops.Cluster, group *kops.InstanceGroup, numInstances int) kops.RollingUpdate {
rollingUpdate := kops.RollingUpdate{}
if group.Spec.RollingUpdate != nil {
rollingUpdate = *group.Spec.RollingUpdate
}
if def := cluster.Spec.RollingUpdate; def != nil {
if rollingUpdate.MaxUnavailable == nil {
rollingUpdate.MaxUnavailable = def.MaxUnavailable
}
}
if rollingUpdate.MaxUnavailable == nil || rollingUpdate.MaxUnavailable.IntVal < 0 {
one := intstr.FromInt(1)
rollingUpdate.MaxUnavailable = &one
}
if rollingUpdate.MaxUnavailable.Type == intstr.String {
unavailable, err := intstr.GetValueFromIntOrPercent(rollingUpdate.MaxUnavailable, numInstances, false)
if err != nil {
// If unparseable use the default value
unavailable = 1
}
if unavailable <= 0 {
// While we round down, percentages should resolve to a minimum of 1
unavailable = 1
}
unavailableInt := intstr.FromInt(unavailable)
rollingUpdate.MaxUnavailable = &unavailableInt
}
return rollingUpdate
}
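For illustration, a hedged usage sketch of resolveSettings with a percentage default (hypothetical values): a cluster-wide MaxUnavailable of "25%" on a 10-instance group resolves to 2 (rounded down), while "0%" or an unparseable value still resolves to the minimum of 1.

pct := intstr.FromString("25%")
cluster := &kops.Cluster{
	Spec: kops.ClusterSpec{
		RollingUpdate: &kops.RollingUpdate{MaxUnavailable: &pct},
	},
}
resolved := resolveSettings(cluster, &kops.InstanceGroup{}, 10)
// resolved.MaxUnavailable is intstr.FromInt(2): 25% of 10, rounded down.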

View File

@ -0,0 +1,167 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package instancegroups
import (
"fmt"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kops/pkg/apis/kops"
)
func TestSettings(t *testing.T) {
for _, tc := range []struct {
name string
defaultValue interface{}
nonDefaultValue interface{}
}{
{
name: "MaxUnavailable",
defaultValue: intstr.FromInt(1),
nonDefaultValue: intstr.FromInt(2),
},
} {
t.Run(tc.name, func(t *testing.T) {
defaultCluster := &kops.RollingUpdate{}
setFieldValue(defaultCluster, tc.name, tc.defaultValue)
nonDefaultCluster := &kops.RollingUpdate{}
setFieldValue(nonDefaultCluster, tc.name, tc.nonDefaultValue)
defaultGroup := &kops.RollingUpdate{}
setFieldValue(defaultGroup, tc.name, tc.defaultValue)
nonDefaultGroup := &kops.RollingUpdate{}
setFieldValue(nonDefaultGroup, tc.name, tc.nonDefaultValue)
assertResolvesValue(t, tc.name, tc.defaultValue, nil, nil, "nil nil")
assertResolvesValue(t, tc.name, tc.defaultValue, &kops.RollingUpdate{}, nil, "{nil} nil")
assertResolvesValue(t, tc.name, tc.defaultValue, defaultCluster, nil, "{default} nil")
assertResolvesValue(t, tc.name, tc.nonDefaultValue, nonDefaultCluster, nil, "{nonDefault} nil")
assertResolvesValue(t, tc.name, tc.defaultValue, nil, &kops.RollingUpdate{}, "nil {nil}")
assertResolvesValue(t, tc.name, tc.defaultValue, &kops.RollingUpdate{}, &kops.RollingUpdate{}, "{nil} {nil}")
assertResolvesValue(t, tc.name, tc.defaultValue, defaultCluster, &kops.RollingUpdate{}, "{default} {nil}")
assertResolvesValue(t, tc.name, tc.nonDefaultValue, nonDefaultCluster, &kops.RollingUpdate{}, "{nonDefault} {nil}")
assertResolvesValue(t, tc.name, tc.defaultValue, nil, defaultGroup, "nil {default}")
assertResolvesValue(t, tc.name, tc.defaultValue, &kops.RollingUpdate{}, defaultGroup, "{nil} {default}")
assertResolvesValue(t, tc.name, tc.defaultValue, defaultCluster, defaultGroup, "{default} {default}")
assertResolvesValue(t, tc.name, tc.defaultValue, nonDefaultCluster, defaultGroup, "{nonDefault} {default}")
assertResolvesValue(t, tc.name, tc.nonDefaultValue, nil, nonDefaultGroup, "nil {nonDefault}")
assertResolvesValue(t, tc.name, tc.nonDefaultValue, &kops.RollingUpdate{}, nonDefaultGroup, "{nil} {nonDefault}")
assertResolvesValue(t, tc.name, tc.nonDefaultValue, defaultCluster, nonDefaultGroup, "{default} {nonDefault}")
assertResolvesValue(t, tc.name, tc.nonDefaultValue, nonDefaultCluster, nonDefaultGroup, "{nonDefault} {nonDefault}")
})
}
}
func setFieldValue(aStruct interface{}, fieldName string, fieldValue interface{}) {
field := reflect.ValueOf(aStruct).Elem().FieldByName(fieldName)
value := reflect.New(field.Type().Elem())
value.Elem().Set(reflect.ValueOf(fieldValue))
field.Set(value)
}
func assertResolvesValue(t *testing.T, name string, expected interface{}, rollingUpdateDefault *kops.RollingUpdate, rollingUpdate *kops.RollingUpdate, msg interface{}) bool {
cluster := kops.Cluster{
Spec: kops.ClusterSpec{
RollingUpdate: rollingUpdateDefault,
},
}
instanceGroup := kops.InstanceGroup{
Spec: kops.InstanceGroupSpec{
RollingUpdate: rollingUpdate,
},
}
rollingUpdateDefaultCopy := rollingUpdateDefault.DeepCopy()
rollingUpdateCopy := rollingUpdate.DeepCopy()
resolved := resolveSettings(&cluster, &instanceGroup, 1)
value := reflect.ValueOf(resolved).FieldByName(name)
assert.Equal(t, rollingUpdateDefault, cluster.Spec.RollingUpdate, "cluster not modified")
assert.True(t, reflect.DeepEqual(rollingUpdateDefault, rollingUpdateDefaultCopy), "RollingUpdate not modified")
assert.Equal(t, rollingUpdate, instanceGroup.Spec.RollingUpdate, "instancegroup not modified")
assert.True(t, reflect.DeepEqual(rollingUpdate, rollingUpdateCopy), "RollingUpdate not modified")
return assert.NotNil(t, value.Interface(), msg) &&
assert.Equal(t, expected, value.Elem().Interface(), msg)
}
func TestMaxUnavailable(t *testing.T) {
for _, tc := range []struct {
numInstances int
value string
expected int32
}{
{
numInstances: 1,
value: "0",
expected: 0,
},
{
numInstances: 1,
value: "0%",
expected: 1,
},
{
numInstances: 10,
value: "39%",
expected: 3,
},
{
numInstances: 10,
value: "100%",
expected: 10,
},
{
numInstances: 5,
value: "fnord",
expected: 1,
},
{
numInstances: 5,
value: "-3",
expected: 1,
},
{
numInstances: 5,
value: "-3%",
expected: 1,
},
} {
t.Run(fmt.Sprintf("%s %d", tc.value, tc.numInstances), func(t *testing.T) {
value := intstr.Parse(tc.value)
rollingUpdate := kops.RollingUpdate{
MaxUnavailable: &value,
}
instanceGroup := kops.InstanceGroup{
Spec: kops.InstanceGroupSpec{
RollingUpdate: &rollingUpdate,
},
}
resolved := resolveSettings(&kops.Cluster{}, &instanceGroup, tc.numInstances)
assert.Equal(t, intstr.Int, resolved.MaxUnavailable.Type)
assert.Equal(t, tc.expected, resolved.MaxUnavailable.IntVal)
})
}
}

View File

@ -54,7 +54,7 @@ func (b *ContainerdOptionsBuilder) BuildOptions(o interface{}) error {
// Set containerd based on Kubernetes version
if fi.StringValue(containerd.Version) == "" {
if b.IsKubernetesGTE("1.17") {
containerd.Version = fi.String("1.2.10")
containerd.Version = fi.String("1.3.2")
} else if b.IsKubernetesGTE("1.11") {
return fmt.Errorf("containerd version is required")
}

View File

@ -24,7 +24,7 @@ import (
"k8s.io/kops/upup/pkg/fi/loader"
)
const DefaultBackupImage = "kopeio/etcd-backup:3.0.20191025"
const DefaultBackupImage = "kopeio/etcd-backup:3.0.20200116"
// EtcdOptionsBuilder adds options for etcd to the model
type EtcdOptionsBuilder struct {
@ -42,6 +42,8 @@ const (
DefaultEtcd3Version_1_13 = "3.2.24"
DefaultEtcd3Version_1_14 = "3.3.10"
DefaultEtcd3Version_1_17 = "3.4.3"
)
// BuildOptions is responsible for filling in the defaults for the etcd cluster model
@ -62,7 +64,9 @@ func (b *EtcdOptionsBuilder) BuildOptions(o interface{}) error {
// Ensure the version is set
if c.Version == "" && c.Provider == kops.EtcdProviderTypeLegacy {
// Even if in legacy mode, etcd version 2 is unsupported as of k8s 1.13
if b.IsKubernetesGTE("1.14") {
if b.IsKubernetesGTE("1.17") {
c.Version = DefaultEtcd3Version_1_17
} else if b.IsKubernetesGTE("1.14") {
c.Version = DefaultEtcd3Version_1_14
} else if b.IsKubernetesGTE("1.13") {
c.Version = DefaultEtcd3Version_1_13
@ -73,7 +77,9 @@ func (b *EtcdOptionsBuilder) BuildOptions(o interface{}) error {
if c.Version == "" && c.Provider == kops.EtcdProviderTypeManager {
// From 1.11, we run the k8s-recommended versions of etcd when using the manager
if b.IsKubernetesGTE("1.14") {
if b.IsKubernetesGTE("1.17") {
c.Version = DefaultEtcd3Version_1_17
} else if b.IsKubernetesGTE("1.14") {
c.Version = DefaultEtcd3Version_1_14
} else if b.IsKubernetesGTE("1.13") {
c.Version = DefaultEtcd3Version_1_13

View File

@ -21,6 +21,7 @@ go_library(
"//pkg/urls:go_default_library",
"//pkg/wellknownports:go_default_library",
"//upup/pkg/fi:go_default_library",
"//upup/pkg/fi/cloudup/aliup:go_default_library",
"//upup/pkg/fi/cloudup/awsup:go_default_library",
"//upup/pkg/fi/cloudup/do:go_default_library",
"//upup/pkg/fi/cloudup/gce:go_default_library",

View File

@ -39,6 +39,7 @@ import (
"k8s.io/kops/pkg/model"
"k8s.io/kops/pkg/wellknownports"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/aliup"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/fi/cloudup/do"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
@ -189,7 +190,7 @@ metadata:
namespace: kube-system
spec:
containers:
- image: kopeio/etcd-manager:3.0.20191025
- image: kopeio/etcd-manager:3.0.20200116
name: etcd-manager
resources:
requests:
@ -376,6 +377,16 @@ func (b *EtcdManagerBuilder) buildPod(etcdCluster *kops.EtcdClusterSpec) (*v1.Po
}
config.VolumeNameTag = awsup.TagNameEtcdClusterPrefix + etcdCluster.Name
case kops.CloudProviderALI:
config.VolumeProvider = "alicloud"
config.VolumeTag = []string{
fmt.Sprintf("kubernetes.io/cluster/%s=owned", b.Cluster.Name),
aliup.TagNameEtcdClusterPrefix + etcdCluster.Name,
aliup.TagNameRolePrefix + "master=1",
}
config.VolumeNameTag = aliup.TagNameEtcdClusterPrefix + etcdCluster.Name
case kops.CloudProviderGCE:
config.VolumeProvider = "gce"

View File

@ -79,7 +79,7 @@ func (b *EtcdManagerOptionsBuilder) BuildOptions(o interface{}) error {
return nil
}
var supportedEtcdVersions = []string{"2.2.1", "3.1.12", "3.2.18", "3.2.24", "3.3.10", "3.3.13"}
var supportedEtcdVersions = []string{"2.2.1", "3.1.12", "3.2.18", "3.2.24", "3.3.10", "3.3.13", "3.4.3"}
func etcdVersionIsSupported(version string) bool {
version = strings.TrimPrefix(version, "v")
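The body of etcdVersionIsSupported is truncated here; presumably it strips an optional "v" prefix and checks membership in supportedEtcdVersions. A hedged sketch of that check, not the actual kops body:

func isSupportedEtcdVersion(version string, supported []string) bool {
	version = strings.TrimPrefix(version, "v")
	for _, s := range supported {
		if s == version {
			return true
		}
	}
	return false
}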

View File

@ -89,7 +89,7 @@ Contents:
--v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
image: kopeio/etcd-manager:3.0.20191025
image: kopeio/etcd-manager:3.0.20200116
name: etcd-manager
resources:
requests:
@ -154,7 +154,7 @@ Contents:
--v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
image: kopeio/etcd-manager:3.0.20191025
image: kopeio/etcd-manager:3.0.20200116
name: etcd-manager
resources:
requests:

View File

@ -89,7 +89,7 @@ Contents:
--v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
image: kopeio/etcd-manager:3.0.20191025
image: kopeio/etcd-manager:3.0.20200116
name: etcd-manager
resources:
requests:
@ -160,7 +160,7 @@ Contents:
--v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
image: kopeio/etcd-manager:3.0.20191025
image: kopeio/etcd-manager:3.0.20200116
name: etcd-manager
resources:
requests:

View File

@ -98,7 +98,7 @@ Contents:
value: http://proxy.example.com
- name: no_proxy
value: noproxy.example.com
image: kopeio/etcd-manager:3.0.20191025
image: kopeio/etcd-manager:3.0.20200116
name: etcd-manager
resources:
requests:
@ -178,7 +178,7 @@ Contents:
value: http://proxy.example.com
- name: no_proxy
value: noproxy.example.com
image: kopeio/etcd-manager:3.0.20191025
image: kopeio/etcd-manager:3.0.20200116
name: etcd-manager
resources:
requests:

View File

@ -308,6 +308,8 @@ func (b *MasterVolumeBuilder) addALIVolume(c *fi.ModelBuilderContext, name strin
tags[aliup.TagNameEtcdClusterPrefix+etcd.Name] = m.Name + "/" + strings.Join(allMembers, ",")
// This says "only mount on a master"
tags[aliup.TagNameRolePrefix+"master"] = "1"
// We always add an "owned" tag (these can't be shared)
tags["kubernetes.io/cluster/"+b.Cluster.ObjectMeta.Name] = "owned"
encrypted := fi.BoolValue(m.EncryptedVolume)

View File

@ -502,16 +502,16 @@ func (s *dumpState) getImageInfo(imageID string) (*imageInfo, error) {
func guessSSHUser(image *ec2.Image) string {
owner := aws.StringValue(image.OwnerId)
switch owner {
case awsup.WellKnownAccountAmazonSystemLinux2:
case awsup.WellKnownAccountAmazonLinux2, awsup.WellKnownAccountRedhat:
return "ec2-user"
case awsup.WellKnownAccountRedhat:
return "ec2-user"
case awsup.WellKnownAccountCoreOS:
return "core"
case awsup.WellKnownAccountKopeio:
case awsup.WellKnownAccountCentOS:
return "centos"
case awsup.WellKnownAccountDebian9, awsup.WellKnownAccountDebian10, awsup.WellKnownAccountKopeio:
return "admin"
case awsup.WellKnownAccountUbuntu:
return "ubuntu"
case awsup.WellKnownAccountCoreOS, awsup.WellKnownAccountFlatcar:
return "core"
}
name := aws.StringValue(image.Name)

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -0,0 +1,76 @@
apiVersion: kops.k8s.io/v1alpha1
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: minimal-json.example.com
spec:
adminAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/minimal-json.example.com
etcdClusters:
- etcdMembers:
- name: us-test-1a
zone: us-test-1a
name: main
- etcdMembers:
- name: us-test-1a
zone: us-test-1a
name: events
kubernetesVersion: v1.14.0
masterInternalName: api.internal.minimal-json.example.com
masterPublicName: api.minimal-json.example.com
networkCIDR: 172.20.0.0/16
networking:
kubenet: {}
nonMasqueradeCIDR: 100.64.0.0/10
topology:
bastion:
idleTimeout: 120
machineType: t2.medium
masters: public
nodes: public
zones:
- cidr: 172.20.32.0/19
name: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha1
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: minimal-json.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
zones:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha1
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: minimal-json.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
zones:
- us-test-1a

View File

@ -0,0 +1,507 @@
{
"locals": {
"cluster_name": "minimal-json.example.com",
"master_autoscaling_group_ids": [
"${aws_autoscaling_group.master-us-test-1a-masters-minimal-json-example-com.id}"
],
"master_security_group_ids": [
"${aws_security_group.masters-minimal-json-example-com.id}"
],
"masters_role_arn": "${aws_iam_role.masters-minimal-json-example-com.arn}",
"masters_role_name": "${aws_iam_role.masters-minimal-json-example-com.name}",
"node_autoscaling_group_ids": [
"${aws_autoscaling_group.nodes-minimal-json-example-com.id}"
],
"node_security_group_ids": [
"${aws_security_group.nodes-minimal-json-example-com.id}"
],
"node_subnet_ids": [
"${aws_subnet.us-test-1a-minimal-json-example-com.id}"
],
"nodes_role_arn": "${aws_iam_role.nodes-minimal-json-example-com.arn}",
"nodes_role_name": "${aws_iam_role.nodes-minimal-json-example-com.name}",
"region": "us-test-1",
"route_table_public_id": "${aws_route_table.minimal-json-example-com.id}",
"subnet_us-test-1a_id": "${aws_subnet.us-test-1a-minimal-json-example-com.id}",
"vpc_cidr_block": "${aws_vpc.minimal-json-example-com.cidr_block}",
"vpc_id": "${aws_vpc.minimal-json-example-com.id}"
},
"output": {
"cluster_name": {
"value": "minimal-json.example.com"
},
"master_autoscaling_group_ids": {
"value": [
"${aws_autoscaling_group.master-us-test-1a-masters-minimal-json-example-com.id}"
]
},
"master_security_group_ids": {
"value": [
"${aws_security_group.masters-minimal-json-example-com.id}"
]
},
"masters_role_arn": {
"value": "${aws_iam_role.masters-minimal-json-example-com.arn}"
},
"masters_role_name": {
"value": "${aws_iam_role.masters-minimal-json-example-com.name}"
},
"node_autoscaling_group_ids": {
"value": [
"${aws_autoscaling_group.nodes-minimal-json-example-com.id}"
]
},
"node_security_group_ids": {
"value": [
"${aws_security_group.nodes-minimal-json-example-com.id}"
]
},
"node_subnet_ids": {
"value": [
"${aws_subnet.us-test-1a-minimal-json-example-com.id}"
]
},
"nodes_role_arn": {
"value": "${aws_iam_role.nodes-minimal-json-example-com.arn}"
},
"nodes_role_name": {
"value": "${aws_iam_role.nodes-minimal-json-example-com.name}"
},
"region": {
"value": "us-test-1"
},
"route_table_public_id": {
"value": "${aws_route_table.minimal-json-example-com.id}"
},
"subnet_us-test-1a_id": {
"value": "${aws_subnet.us-test-1a-minimal-json-example-com.id}"
},
"vpc_cidr_block": {
"value": "${aws_vpc.minimal-json-example-com.cidr_block}"
},
"vpc_id": {
"value": "${aws_vpc.minimal-json-example-com.id}"
}
},
"provider": {
"aws": {
"region": "us-test-1"
}
},
"resource": {
"aws_autoscaling_group": {
"master-us-test-1a-masters-minimal-json-example-com": {
"name": "master-us-test-1a.masters.minimal-json.example.com",
"launch_configuration": "${aws_launch_configuration.master-us-test-1a-masters-minimal-json-example-com.id}",
"max_size": 1,
"min_size": 1,
"vpc_zone_identifier": [
"${aws_subnet.us-test-1a-minimal-json-example-com.id}"
],
"tag": [
{
"key": "KubernetesCluster",
"value": "minimal-json.example.com",
"propagate_at_launch": true
},
{
"key": "Name",
"value": "master-us-test-1a.masters.minimal-json.example.com",
"propagate_at_launch": true
},
{
"key": "k8s.io/role/master",
"value": "1",
"propagate_at_launch": true
},
{
"key": "kops.k8s.io/instancegroup",
"value": "master-us-test-1a",
"propagate_at_launch": true
}
],
"metrics_granularity": "1Minute",
"enabled_metrics": [
"GroupDesiredCapacity",
"GroupInServiceInstances",
"GroupMaxSize",
"GroupMinSize",
"GroupPendingInstances",
"GroupStandbyInstances",
"GroupTerminatingInstances",
"GroupTotalInstances"
]
},
"nodes-minimal-json-example-com": {
"name": "nodes.minimal-json.example.com",
"launch_configuration": "${aws_launch_configuration.nodes-minimal-json-example-com.id}",
"max_size": 2,
"min_size": 2,
"vpc_zone_identifier": [
"${aws_subnet.us-test-1a-minimal-json-example-com.id}"
],
"tag": [
{
"key": "KubernetesCluster",
"value": "minimal-json.example.com",
"propagate_at_launch": true
},
{
"key": "Name",
"value": "nodes.minimal-json.example.com",
"propagate_at_launch": true
},
{
"key": "k8s.io/role/node",
"value": "1",
"propagate_at_launch": true
},
{
"key": "kops.k8s.io/instancegroup",
"value": "nodes",
"propagate_at_launch": true
}
],
"metrics_granularity": "1Minute",
"enabled_metrics": [
"GroupDesiredCapacity",
"GroupInServiceInstances",
"GroupMaxSize",
"GroupMinSize",
"GroupPendingInstances",
"GroupStandbyInstances",
"GroupTerminatingInstances",
"GroupTotalInstances"
]
}
},
"aws_ebs_volume": {
"us-test-1a-etcd-events-minimal-json-example-com": {
"availability_zone": "us-test-1a",
"size": 20,
"type": "gp2",
"encrypted": false,
"tags": {
"KubernetesCluster": "minimal-json.example.com",
"Name": "us-test-1a.etcd-events.minimal-json.example.com",
"k8s.io/etcd/events": "us-test-1a/us-test-1a",
"k8s.io/role/master": "1",
"kubernetes.io/cluster/minimal-json.example.com": "owned"
}
},
"us-test-1a-etcd-main-minimal-json-example-com": {
"availability_zone": "us-test-1a",
"size": 20,
"type": "gp2",
"encrypted": false,
"tags": {
"KubernetesCluster": "minimal-json.example.com",
"Name": "us-test-1a.etcd-main.minimal-json.example.com",
"k8s.io/etcd/main": "us-test-1a/us-test-1a",
"k8s.io/role/master": "1",
"kubernetes.io/cluster/minimal-json.example.com": "owned"
}
}
},
"aws_iam_instance_profile": {
"masters-minimal-json-example-com": {
"name": "masters.minimal-json.example.com",
"role": "${aws_iam_role.masters-minimal-json-example-com.name}"
},
"nodes-minimal-json-example-com": {
"name": "nodes.minimal-json.example.com",
"role": "${aws_iam_role.nodes-minimal-json-example-com.name}"
}
},
"aws_iam_role": {
"masters-minimal-json-example-com": {
"name": "masters.minimal-json.example.com",
"assume_role_policy": "${file(\"${path.module}/data/aws_iam_role_masters.minimal-json.example.com_policy\")}"
},
"nodes-minimal-json-example-com": {
"name": "nodes.minimal-json.example.com",
"assume_role_policy": "${file(\"${path.module}/data/aws_iam_role_nodes.minimal-json.example.com_policy\")}"
}
},
"aws_iam_role_policy": {
"masters-minimal-json-example-com": {
"name": "masters.minimal-json.example.com",
"role": "${aws_iam_role.masters-minimal-json-example-com.name}",
"policy": "${file(\"${path.module}/data/aws_iam_role_policy_masters.minimal-json.example.com_policy\")}"
},
"nodes-minimal-json-example-com": {
"name": "nodes.minimal-json.example.com",
"role": "${aws_iam_role.nodes-minimal-json-example-com.name}",
"policy": "${file(\"${path.module}/data/aws_iam_role_policy_nodes.minimal-json.example.com_policy\")}"
}
},
"aws_internet_gateway": {
"minimal-json-example-com": {
"vpc_id": "${aws_vpc.minimal-json-example-com.id}",
"tags": {
"KubernetesCluster": "minimal-json.example.com",
"Name": "minimal-json.example.com",
"kubernetes.io/cluster/minimal-json.example.com": "owned"
}
}
},
"aws_key_pair": {
"kubernetes-minimal-json-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157": {
"key_name": "kubernetes.minimal-json.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57",
"public_key": "${file(\"${path.module}/data/aws_key_pair_kubernetes.minimal-json.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key\")}"
}
},
"aws_launch_configuration": {
"master-us-test-1a-masters-minimal-json-example-com": {
"name_prefix": "master-us-test-1a.masters.minimal-json.example.com-",
"image_id": "ami-12345678",
"instance_type": "m3.medium",
"key_name": "${aws_key_pair.kubernetes-minimal-json-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}",
"iam_instance_profile": "${aws_iam_instance_profile.masters-minimal-json-example-com.id}",
"security_groups": [
"${aws_security_group.masters-minimal-json-example-com.id}"
],
"associate_public_ip_address": true,
"user_data": "${file(\"${path.module}/data/aws_launch_configuration_master-us-test-1a.masters.minimal-json.example.com_user_data\")}",
"root_block_device": {
"volume_type": "gp2",
"volume_size": 64,
"delete_on_termination": true
},
"ephemeral_block_device": [
{
"device_name": "/dev/sdc",
"virtual_name": "ephemeral0"
}
],
"lifecycle": {
"create_before_destroy": true
},
"enable_monitoring": false
},
"nodes-minimal-json-example-com": {
"name_prefix": "nodes.minimal-json.example.com-",
"image_id": "ami-12345678",
"instance_type": "t2.medium",
"key_name": "${aws_key_pair.kubernetes-minimal-json-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}",
"iam_instance_profile": "${aws_iam_instance_profile.nodes-minimal-json-example-com.id}",
"security_groups": [
"${aws_security_group.nodes-minimal-json-example-com.id}"
],
"associate_public_ip_address": true,
"user_data": "${file(\"${path.module}/data/aws_launch_configuration_nodes.minimal-json.example.com_user_data\")}",
"root_block_device": {
"volume_type": "gp2",
"volume_size": 128,
"delete_on_termination": true
},
"lifecycle": {
"create_before_destroy": true
},
"enable_monitoring": false
}
},
"aws_route": {
"route-0-0-0-0--0": {
"route_table_id": "${aws_route_table.minimal-json-example-com.id}",
"destination_cidr_block": "0.0.0.0/0",
"gateway_id": "${aws_internet_gateway.minimal-json-example-com.id}"
}
},
"aws_route_table": {
"minimal-json-example-com": {
"vpc_id": "${aws_vpc.minimal-json-example-com.id}",
"tags": {
"KubernetesCluster": "minimal-json.example.com",
"Name": "minimal-json.example.com",
"kubernetes.io/cluster/minimal-json.example.com": "owned",
"kubernetes.io/kops/role": "public"
}
}
},
"aws_route_table_association": {
"us-test-1a-minimal-json-example-com": {
"subnet_id": "${aws_subnet.us-test-1a-minimal-json-example-com.id}",
"route_table_id": "${aws_route_table.minimal-json-example-com.id}"
}
},
"aws_security_group": {
"masters-minimal-json-example-com": {
"name": "masters.minimal-json.example.com",
"vpc_id": "${aws_vpc.minimal-json-example-com.id}",
"description": "Security group for masters",
"tags": {
"KubernetesCluster": "minimal-json.example.com",
"Name": "masters.minimal-json.example.com",
"kubernetes.io/cluster/minimal-json.example.com": "owned"
}
},
"nodes-minimal-json-example-com": {
"name": "nodes.minimal-json.example.com",
"vpc_id": "${aws_vpc.minimal-json-example-com.id}",
"description": "Security group for nodes",
"tags": {
"KubernetesCluster": "minimal-json.example.com",
"Name": "nodes.minimal-json.example.com",
"kubernetes.io/cluster/minimal-json.example.com": "owned"
}
}
},
"aws_security_group_rule": {
"all-master-to-master": {
"type": "ingress",
"security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}",
"source_security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}",
"from_port": 0,
"to_port": 0,
"protocol": "-1"
},
"all-master-to-node": {
"type": "ingress",
"security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}",
"source_security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}",
"from_port": 0,
"to_port": 0,
"protocol": "-1"
},
"all-node-to-node": {
"type": "ingress",
"security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}",
"source_security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}",
"from_port": 0,
"to_port": 0,
"protocol": "-1"
},
"https-external-to-master-0-0-0-0--0": {
"type": "ingress",
"security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}",
"from_port": 443,
"to_port": 443,
"protocol": "tcp",
"cidr_blocks": [
"0.0.0.0/0"
]
},
"master-egress": {
"type": "egress",
"security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}",
"from_port": 0,
"to_port": 0,
"protocol": "-1",
"cidr_blocks": [
"0.0.0.0/0"
]
},
"node-egress": {
"type": "egress",
"security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}",
"from_port": 0,
"to_port": 0,
"protocol": "-1",
"cidr_blocks": [
"0.0.0.0/0"
]
},
"node-to-master-tcp-1-2379": {
"type": "ingress",
"security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}",
"source_security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}",
"from_port": 1,
"to_port": 2379,
"protocol": "tcp"
},
"node-to-master-tcp-2382-4000": {
"type": "ingress",
"security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}",
"source_security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}",
"from_port": 2382,
"to_port": 4000,
"protocol": "tcp"
},
"node-to-master-tcp-4003-65535": {
"type": "ingress",
"security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}",
"source_security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}",
"from_port": 4003,
"to_port": 65535,
"protocol": "tcp"
},
"node-to-master-udp-1-65535": {
"type": "ingress",
"security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}",
"source_security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}",
"from_port": 1,
"to_port": 65535,
"protocol": "udp"
},
"ssh-external-to-master-0-0-0-0--0": {
"type": "ingress",
"security_group_id": "${aws_security_group.masters-minimal-json-example-com.id}",
"from_port": 22,
"to_port": 22,
"protocol": "tcp",
"cidr_blocks": [
"0.0.0.0/0"
]
},
"ssh-external-to-node-0-0-0-0--0": {
"type": "ingress",
"security_group_id": "${aws_security_group.nodes-minimal-json-example-com.id}",
"from_port": 22,
"to_port": 22,
"protocol": "tcp",
"cidr_blocks": [
"0.0.0.0/0"
]
}
},
"aws_subnet": {
"us-test-1a-minimal-json-example-com": {
"vpc_id": "${aws_vpc.minimal-json-example-com.id}",
"cidr_block": "172.20.32.0/19",
"availability_zone": "us-test-1a",
"tags": {
"KubernetesCluster": "minimal-json.example.com",
"Name": "us-test-1a.minimal-json.example.com",
"SubnetType": "Public",
"kubernetes.io/cluster/minimal-json.example.com": "owned",
"kubernetes.io/role/elb": "1"
}
}
},
"aws_vpc": {
"minimal-json-example-com": {
"cidr_block": "172.20.0.0/16",
"enable_dns_hostnames": true,
"enable_dns_support": true,
"tags": {
"KubernetesCluster": "minimal-json.example.com",
"Name": "minimal-json.example.com",
"kubernetes.io/cluster/minimal-json.example.com": "owned"
}
}
},
"aws_vpc_dhcp_options": {
"minimal-json-example-com": {
"domain_name": "us-test-1.compute.internal",
"domain_name_servers": [
"AmazonProvidedDNS"
],
"tags": {
"KubernetesCluster": "minimal-json.example.com",
"Name": "minimal-json.example.com",
"kubernetes.io/cluster/minimal-json.example.com": "owned"
}
}
},
"aws_vpc_dhcp_options_association": {
"minimal-json-example-com": {
"vpc_id": "${aws_vpc.minimal-json-example-com.id}",
"dhcp_options_id": "${aws_vpc_dhcp_options.minimal-json-example-com.id}"
}
}
},
"terraform": {
"required_version": "\u003e= 0.12.0"
}
}

View File

@ -64,7 +64,9 @@ data:
{{- else }}
.:53 {
errors
health
health {
lameduck 5s
}
kubernetes {{ KubeDNS.Domain }}. in-addr.arpa ip6.arpa {
pods insecure
upstream
@ -166,7 +168,7 @@ spec:
beta.kubernetes.io/os: linux
containers:
- name: coredns
image: {{ if KubeDNS.CoreDNSImage }}{{ KubeDNS.CoreDNSImage }}{{ else }}k8s.gcr.io/coredns:1.3.1{{ end }}
image: {{ if KubeDNS.CoreDNSImage }}{{ KubeDNS.CoreDNSImage }}{{ else }}k8s.gcr.io/coredns:1.6.6{{ end }}
imagePullPolicy: IfNotPresent
resources:
limits:
@ -176,9 +178,6 @@ spec:
memory: {{ KubeDNS.MemoryRequest }}
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
# Workaround for 1.3.1 bug, can be removed after bumping to 1.4+. See: https://github.com/coredns/coredns/pull/2529
- name: tmp
mountPath: /tmp
- name: config-volume
mountPath: /etc/coredns
readOnly: true
@ -216,9 +215,6 @@ spec:
scheme: HTTP
dnsPolicy: Default
volumes:
# Workaround for 1.3.1 bug, can be removed after bumping to 1.4+. See: https://github.com/coredns/coredns/pull/2529
- name: tmp
emptyDir: {}
- name: config-volume
configMap:
name: coredns

View File

@ -64,7 +64,9 @@ data:
{{- else }}
.:53 {
errors
health
health {
lameduck 5s
}
kubernetes {{ KubeDNS.Domain }}. in-addr.arpa ip6.arpa {
pods insecure
upstream
@ -111,7 +113,7 @@ spec:
beta.kubernetes.io/os: linux
containers:
- name: coredns
image: {{ if KubeDNS.CoreDNSImage }}{{ KubeDNS.CoreDNSImage }}{{ else }}k8s.gcr.io/coredns:1.3.1{{ end }}
image: {{ if KubeDNS.CoreDNSImage }}{{ KubeDNS.CoreDNSImage }}{{ else }}k8s.gcr.io/coredns:1.6.6{{ end }}
imagePullPolicy: IfNotPresent
resources:
limits:

View File

@ -1,39 +0,0 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
name: dns-controller
namespace: kube-system
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.17.0-alpha.1
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
template:
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.17.0-alpha.1
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": "master"}]'
spec:
nodeSelector:
kubernetes.io/role: master
dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns)
hostNetwork: true
containers:
- name: dns-controller
image: kope/dns-controller:1.17.0-alpha.1
command:
{{ range $arg := DnsControllerArgv }}
- "{{ $arg }}"
{{ end }}
resources:
requests:
cpu: 50m
memory: 50Mi

View File

@ -1,39 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: external-dns
namespace: kube-system
labels:
k8s-addon: external-dns.addons.k8s.io
k8s-app: external-dns
version: v0.4.4
spec:
replicas: 1
selector:
matchLabels:
k8s-app: external-dns
template:
metadata:
labels:
k8s-addon: external-dns.addons.k8s.io
k8s-app: external-dns
version: v0.4.4
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": "master"}]'
spec:
nodeSelector:
kubernetes.io/role: master
dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns)
hostNetwork: true
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:v0.4.4
args:
{{ range $arg := ExternalDnsArgv }}
- "{{ $arg }}"
{{ end }}
resources:
requests:
cpu: 50m
memory: 50Mi

View File

@ -1,226 +0,0 @@
# Vendored from https://github.com/aws/amazon-vpc-cni-k8s/blob/v1.3.3/config/v1.3/aws-k8s-cni.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: aws-node
rules:
- apiGroups:
- crd.k8s.amazonaws.com
resources:
- "*"
- namespaces
verbs:
- "*"
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs: ["list", "watch", "get"]
- apiGroups: ["extensions"]
resources:
- daemonsets
verbs: ["list", "watch"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: aws-node
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: aws-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: aws-node
subjects:
- kind: ServiceAccount
name: aws-node
namespace: kube-system
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: aws-node
namespace: kube-system
labels:
k8s-app: aws-node
spec:
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
k8s-app: aws-node
template:
metadata:
labels:
k8s-app: aws-node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: aws-node
hostNetwork: true
tolerations:
- operator: Exists
containers:
- image: "{{- or .Networking.AmazonVPC.ImageName "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:1.3.3" }}"
ports:
- containerPort: 61678
name: metrics
name: aws-node
env:
- name: CLUSTER_NAME
value: {{ ClusterName }}
- name: AWS_VPC_K8S_CNI_LOGLEVEL
value: DEBUG
- name: MY_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: WATCH_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
- mountPath: /host/var/log
name: log-dir
- mountPath: /var/run/docker.sock
name: dockersock
volumes:
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
- name: log-dir
hostPath:
path: /var/log
- name: dockersock
hostPath:
path: /var/run/docker.sock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: eniconfigs.crd.k8s.amazonaws.com
spec:
scope: Cluster
group: crd.k8s.amazonaws.com
version: v1alpha1
names:
plural: eniconfigs
singular: eniconfig
kind: ENIConfig
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- update
- patch
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: k8s-ec2-srcdst
subjects:
- kind: ServiceAccount
name: k8s-ec2-srcdst
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: k8s-ec2-srcdst
template:
metadata:
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: k8s-ec2-srcdst
containers:
- image: ottoyiu/k8s-ec2-srcdst:v0.2.0-3-gc0c26eca
name: k8s-ec2-srcdst
resources:
requests:
cpu: 10m
memory: 64Mi
env:
- name: AWS_REGION
value: {{ Region }}
volumeMounts:
- name: ssl-certs
mountPath: "/etc/ssl/certs/ca-certificates.crt"
readOnly: true
imagePullPolicy: "Always"
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs/ca-certificates.crt"
nodeSelector:
node-role.kubernetes.io/master: ""

View File

@ -1,109 +0,0 @@
kind: ServiceAccount
apiVersion: v1
metadata:
name: flannel
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
k8s-app: flannel
role.kubernetes.io/networking: "1"
data:
cni-conf.json: |
{
"name": "cbr0",
"type": "flannel",
"delegate": {
"forceAddress": true,
"isDefaultGateway": true
}
}
net-conf.json: |
{
"Network": "{{ .NonMasqueradeCIDR }}",
"Backend": {
"Type": "{{ FlannelBackendType }}"
}
}
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
k8s-app: flannel
role.kubernetes.io/networking: "1"
spec:
template:
metadata:
labels:
tier: node
app: flannel
role.kubernetes.io/networking: "1"
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
serviceAccountName: flannel
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.11.0-amd64
command:
- "/opt/bin/flanneld"
- "--ip-masq"
- "--kube-subnet-mgr"
- "--iptables-resync={{- or .Networking.Flannel.IptablesResyncSeconds "5" }}"
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
resources:
limits:
cpu: 100m
memory: 100Mi
requests:
memory: 100Mi
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: quay.io/coreos/flannel:v0.11.0-amd64
command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
resources:
limits:
cpu: 10m
memory: 25Mi
requests:
cpu: 10m
memory: 25Mi
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg

View File

@ -1,40 +0,0 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kopeio-networking-agent
namespace: kube-system
labels:
k8s-addon: networking.kope.io
role.kubernetes.io/networking: "1"
spec:
template:
metadata:
labels:
name: kopeio-networking-agent
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
hostPID: true
hostIPC: true
hostNetwork: true
containers:
- resources:
requests:
cpu: 50m
memory: 100Mi
limits:
memory: 100Mi
securityContext:
privileged: true
image: kopeio/networking-agent:1.0.20181028
name: networking-agent
volumeMounts:
- name: lib-modules
mountPath: /lib/modules
readOnly: true
volumes:
- name: lib-modules
hostPath:
path: /lib/modules

View File

@ -1,373 +0,0 @@
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"hostname": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "{{ .NonMasqueradeCIDR }}",
"Backend": {
"Type": "vxlan"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
labels:
k8s-app: canal
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
serviceAccountName: canal
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoExecute
operator: Exists
# Allow the pod to run on all nodes. This is required
# for cluster communication
- effect: NoSchedule
operator: Exists
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.4.1
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Enable felix logging.
- name: FELIX_LOGSEVERITYSYS
value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# Disable IPV6 support in Felix.
- name: FELIX_IPV6SUPPORT
value: "false"
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kops,canal"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: WAIT_FOR_DATASTORE
value: "true"
# No IP address needed.
- name: IP
value: ""
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
# Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
- name: FELIX_CHAININSERTMODE
value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
securityContext:
privileged: true
resources:
requests:
cpu: 100m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.10.0
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.9.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used by flannel.
- name: run
hostPath:
path: /run
- name: flannel-cfg
configMap:
name: canal-config
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
---
# Calico Roles
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- pods/status
verbs:
- update
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups: ["extensions"]
resources:
- thirdpartyresources
verbs:
- create
- get
- list
- watch
- apiGroups: ["extensions"]
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups: ["projectcalico.org"]
resources:
- globalbgppeers
verbs:
- get
- list
- apiGroups: ["projectcalico.org"]
resources:
- globalconfigs
- globalbgpconfigs
verbs:
- create
- get
- list
- update
- watch
- apiGroups: ["projectcalico.org"]
resources:
- ippools
verbs:
- create
- get
- list
- update
- watch
- apiGroups: ["alpha.projectcalico.org"]
resources:
- systemnetworkpolicies
verbs:
- get
- list
- watch
---
# Flannel roles
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
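The Canal manifest above fills most Felix settings through Go template defaults of the form `{{- or .Networking.Canal.Foo "default" }}`: the cluster-spec value is rendered when it is set, and the quoted literal is used otherwise. Below is a minimal sketch of how that pattern renders with Go's text/template, using simplified stand-in structs (Canal/Networking/Context here are illustrative only, not the real kops API types):

```go
package main

import (
	"os"
	"text/template"
)

// Simplified stand-ins for the kops template context; the real kops API
// types differ. This only illustrates how the `or <field> "<default>"`
// pattern used throughout the Canal manifest renders.
type Canal struct {
	DefaultEndpointToHostAction string
	PrometheusMetricsPort       string
}
type Networking struct{ Canal Canal }
type Context struct{ Networking Networking }

const snippet = `- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
  value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
- name: FELIX_PROMETHEUSMETRICSPORT
  value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
`

func main() {
	tmpl := template.Must(template.New("canal").Parse(snippet))
	// Unset fields fall back to the quoted defaults ("ACCEPT", "9091");
	// a populated field (here the metrics port) overrides them.
	_ = tmpl.Execute(os.Stdout, Context{Networking: Networking{Canal: Canal{PrometheusMetricsPort: "9102"}}})
}
```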

View File

@ -1,456 +0,0 @@
# Canal w/ Calico Version v2.6.2
# https://docs.projectcalico.org/v2.6/releases#v2.6.2
# This manifest includes the following component versions:
# calico/node:v2.6.2
# calico/cni:v1.11.0
# coreos/flannel:v0.9.0 (bug with v0.9.1: https://github.com/kubernetes/kops/issues/4037)
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"capabilities": {"portMappings": true},
"snat": true
}
]
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "{{ .NonMasqueradeCIDR }}",
"Backend": {
"Type": "vxlan"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
labels:
k8s-app: canal
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
serviceAccountName: canal
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoExecute
operator: Exists
# Allow the pod to run on all nodes. This is required
# for cluster communication.
- effect: NoSchedule
operator: Exists
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.6.7
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Enable felix logging.
- name: FELIX_LOGSEVERITYSYS
value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}"
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kops,canal"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# Disable IPV6 support in Felix.
- name: FELIX_IPV6SUPPORT
value: "false"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# No IP address needed.
- name: IP
value: ""
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
# Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
- name: FELIX_CHAININSERTMODE
value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 50m
livenessProbe:
httpGet:
path: /liveness
port: 9099
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.11.2
command: ["/install-cni.sh"]
env:
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.9.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
resources:
limits:
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used by flannel.
- name: run
hostPath:
path: /run
- name: flannel-cfg
configMap:
name: canal-config
# Create all the CustomResourceDefinitions needed for
# Calico policy-only mode.
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalfelixconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalFelixConfig
plural: globalfelixconfigs
singular: globalfelixconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalbgpconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalBGPConfig
plural: globalbgpconfigs
singular: globalbgpconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
---
# Calico Roles
# Pulled from https://docs.projectcalico.org/v2.5/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- pods/status
verbs:
- update
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups: ["extensions"]
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- bgppeers
- globalbgpconfigs
- ippools
- globalnetworkpolicies
verbs:
- create
- get
- list
- update
- watch
---
# Flannel roles
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: canal-flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
# Bind the calico ClusterRole to the canal ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: canal-calico
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system

View File

@ -1,215 +0,0 @@
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"hostname": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "{{ .NonMasqueradeCIDR }}",
"Backend": {
"Type": "vxlan"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
labels:
k8s-app: canal
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key": "CriticalAddonsOnly", "operator": "Exists"}]
spec:
hostNetwork: true
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.4.1
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Enable felix logging.
- name: FELIX_LOGSEVERITYSYS
value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# Disable IPV6 support in Felix.
- name: FELIX_IPV6SUPPORT
value: "false"
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kops,canal"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: WAIT_FOR_DATASTORE
value: "true"
# No IP address needed.
- name: IP
value: ""
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}"
# Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom
- name: FELIX_CHAININSERTMODE
value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}"
securityContext:
privileged: true
resources:
requests:
cpu: 100m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.10.0
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.9.1
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used by flannel.
- name: run
hostPath:
path: /run
- name: flannel-cfg
configMap:
name: canal-config

View File

@ -1,523 +0,0 @@
{{- $etcd_scheme := EtcdScheme }}
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# etcd servers
etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
{{- range $j, $member := $cluster.Members -}}
{{- if $j }},{{ end -}}
{{ $etcd_scheme }}://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001
{{- end }}"
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
{{- if eq $etcd_scheme "https" }}
"etcd_ca_cert_file": "/srv/kubernetes/calico/ca.pem",
"etcd_cert_file": "/srv/kubernetes/calico/calico-client.pem",
"etcd_key_file": "/srv/kubernetes/calico/calico-client-key.pem",
"etcd_scheme": "https",
{{- end }}
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
}
}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico
subjects:
- kind: ServiceAccount
name: calico
namespace: kube-system
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
serviceAccountName: calico
tolerations:
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.6.9
resources:
requests:
cpu: 10m
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
{{- if eq $etcd_scheme "https" }}
- name: ETCD_CERT_FILE
value: /certs/calico-client.pem
- name: ETCD_KEY_FILE
value: /certs/calico-client-key.pem
- name: ETCD_CA_CERT_FILE
value: /certs/ca.pem
{{- end }}
# Enable BGP. Disable to enforce policy only.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: "{{ .KubeControllerManager.ClusterCIDR }}"
- name: CALICO_IPV4POOL_IPIP
value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}cross-subnet{{- else -}}always{{- end -}}"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kops,bgp"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set noderef for node controller.
- name: CALICO_K8S_NODE_REF
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Auto-detect the BGP IP address.
- name: IP
value: ""
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to the desired level
- name: FELIX_LOGSEVERITYSCREEN
value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}"
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# Necessary for gossip based DNS
- mountPath: /etc/hosts
name: etc-hosts
readOnly: true
{{- if eq $etcd_scheme "https" }}
- mountPath: /certs
name: calico
readOnly: true
{{- end }}
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.11.5
resources:
requests:
cpu: 10m
imagePullPolicy: Always
command: ["/install-cni.sh"]
env:
# The name of calico config file
- name: CNI_CONF_NAME
value: 10-calico.conf
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# Necessary for gossip based DNS
- mountPath: /etc/hosts
name: etc-hosts
readOnly: true
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
- name: etc-hosts
hostPath:
path: /etc/hosts
{{- if eq $etcd_scheme "https" }}
- name: calico
hostPath:
path: /srv/kubernetes/calico
{{- end }}
---
# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then
# be removed entirely once the new kube-controllers deployment below has been rolled out.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# Turn this deployment off in favor of the kube-controllers deployment above.
replicas: 0
strategy:
type: Recreate
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
hostNetwork: true
serviceAccountName: calico
containers:
- name: calico-policy-controller
# This shouldn't get updated, since this is the last version we shipped that should be used.
image: quay.io/calico/kube-policy-controller:v0.7.0
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
{{- if eq $etcd_scheme "https" }}
- name: ETCD_CERT_FILE
value: /certs/calico-client.pem
- name: ETCD_KEY_FILE
value: /certs/calico-client-key.pem
- name: ETCD_CA_CERT_FILE
value: /certs/ca.pem
{{- end }}
volumeMounts:
- mountPath: /etc/hosts
name: etc-hosts
readOnly: true
{{- if eq $etcd_scheme "https" }}
- mountPath: /certs
name: calico
readOnly: true
{{- end }}
volumes:
- name: etc-hosts
hostPath:
path: /etc/hosts
{{- if eq $etcd_scheme "https" }}
- name: calico
hostPath:
path: /srv/kubernetes/calico
{{- end }}
---
# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
spec:
# The controllers can only have a single active instance.
replicas: 1
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# The controllers must run in the host network namespace so that
# they aren't governed by policy that would prevent them from working.
hostNetwork: true
serviceAccountName: calico
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
containers:
- name: calico-kube-controllers
image: quay.io/calico/kube-controllers:v1.0.4
resources:
requests:
cpu: 10m
env:
# By default only the policy, profile, and workloadendpoint controllers are turned
# on; the node controller will decommission nodes that no longer exist.
# This and CALICO_K8S_NODE_REF in calico-node fix #3224, but invalid nodes that are
# already registered in calico need to be deleted manually; see
# https://docs.projectcalico.org/v2.6/usage/decommissioning-a-node
- name: ENABLED_CONTROLLERS
value: policy,profile,workloadendpoint,node
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
{{- if eq $etcd_scheme "https" }}
- name: ETCD_CERT_FILE
value: /certs/calico-client.pem
- name: ETCD_KEY_FILE
value: /certs/calico-client-key.pem
- name: ETCD_CA_CERT_FILE
value: /certs/ca.pem
volumeMounts:
- mountPath: /certs
name: calico
readOnly: true
{{- end }}
volumes:
- name: etc-hosts
hostPath:
path: /etc/hosts
{{- if eq $etcd_scheme "https" }}
- name: calico
hostPath:
path: /srv/kubernetes/calico
{{- end }}
{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
# This manifest installs the k8s-ec2-srcdst container, which disables
# src/dst IP checks so that BGP can function for Calico hosts within subnets.
# This only applies to AWS environments.
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- update
- patch
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
role.kubernetes.io/networking: "1"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: k8s-ec2-srcdst
labels:
role.kubernetes.io/networking: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: k8s-ec2-srcdst
subjects:
- kind: ServiceAccount
name: k8s-ec2-srcdst
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: k8s-ec2-srcdst
template:
metadata:
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: k8s-ec2-srcdst
containers:
- image: ottoyiu/k8s-ec2-srcdst:v0.2.0-3-gc0c26eca
name: k8s-ec2-srcdst
resources:
requests:
cpu: 10m
memory: 64Mi
env:
- name: AWS_REGION
value: {{ Region }}
volumeMounts:
- name: ssl-certs
mountPath: "/etc/ssl/certs/ca-certificates.crt"
readOnly: true
imagePullPolicy: "Always"
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs/ca-certificates.crt"
nodeSelector:
node-role.kubernetes.io/master: ""
{{- end -}}
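The calico-config ConfigMap near the top of the manifest above builds the `etcd_endpoints` string by ranging over the first etcd cluster's members and joining them with commas; the `{{- if $j }},{{ end -}}` guard skips the comma before the first member. A small sketch of how that range renders, again with simplified stand-in types, and with plain struct fields standing in for the kops `EtcdScheme`/`ClusterName` template helpers:

```go
package main

import (
	"os"
	"text/template"
)

// Illustrative stand-ins only; the real kops template is driven by the
// cluster spec plus the EtcdScheme and ClusterName template functions.
type Member struct{ Name string }
type EtcdCluster struct{ Members []Member }
type Context struct {
	EtcdClusters []EtcdCluster
	ClusterName  string
	EtcdScheme   string
}

const snippet = `{{- $etcd_scheme := .EtcdScheme }}etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
{{- range $j, $member := $cluster.Members -}}
{{- if $j }},{{ end -}}
{{ $etcd_scheme }}://etcd-{{ $member.Name }}.internal.{{ $.ClusterName }}:4001
{{- end }}"
`

func main() {
	tmpl := template.Must(template.New("etcd").Parse(snippet))
	ctx := Context{
		EtcdClusters: []EtcdCluster{{Members: []Member{{Name: "a"}, {Name: "b"}, {Name: "c"}}}},
		ClusterName:  "example.cluster.local",
		EtcdScheme:   "https",
	}
	// Renders a single comma-separated line:
	// etcd_endpoints: "https://etcd-a.internal.example.cluster.local:4001,https://etcd-b...,https://etcd-c...:4001"
	_ = tmpl.Execute(os.Stdout, ctx)
}
```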

View File

@ -1,272 +0,0 @@
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# The calico-etcd PetSet service IP:port
etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
{{- range $j, $member := $cluster.Members -}}
{{- if $j }},{{ end -}}
http://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001
{{- end }}"
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
hostNetwork: true
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.4.0
resources:
requests:
cpu: 10m
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "kops,bgp"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: "{{ .KubeControllerManager.ClusterCIDR }}"
- name: CALICO_IPV4POOL_IPIP
value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}cross-subnet{{- else -}}always{{- end -}}"
# Auto-detect the BGP IP address.
- name: IP
value: ""
# Set to enable the experimental Prometheus metrics server
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}"
# TCP port that the Prometheus metrics server should bind to
- name: FELIX_PROMETHEUSMETRICSPORT
value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}"
# Enable Prometheus Go runtime metrics collection
- name: FELIX_PROMETHEUSGOMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}"
# Enable Prometheus process metrics collection
- name: FELIX_PROMETHEUSPROCESSMETRICSENABLED
value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}"
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.10.0
resources:
requests:
cpu: 10m
imagePullPolicy: Always
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
role.kubernetes.io/networking: "1"
spec:
# The policy controller can only have a single active instance.
replicas: 1
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy-controller
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
containers:
- name: calico-policy-controller
image: quay.io/calico/kube-policy-controller:v0.7.0
resources:
requests:
cpu: 10m
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
---
# This manifest installs the k8s-ec2-srcdst container, which disables
# src/dst IP checks so that BGP can function for Calico hosts within subnets.
# This only applies to AWS environments.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: k8s-ec2-srcdst
namespace: kube-system
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: k8s-ec2-srcdst
template:
metadata:
labels:
k8s-app: k8s-ec2-srcdst
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
hostNetwork: true
containers:
- image: ottoyiu/k8s-ec2-srcdst:v0.2.0-3-gc0c26eca
name: k8s-ec2-srcdst
resources:
requests:
cpu: 10m
memory: 64Mi
env:
- name: AWS_REGION
value: {{ Region }}
volumeMounts:
- name: ssl-certs
mountPath: "/etc/ssl/certs/ca-certificates.crt"
readOnly: true
imagePullPolicy: "Always"
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs/ca-certificates.crt"
nodeSelector:
kubernetes.io/role: master
{{- end -}}

View File

@ -1,241 +0,0 @@
{{- if WeaveSecret }}
apiVersion: v1
kind: Secret
metadata:
name: weave-net
namespace: kube-system
stringData:
network-password: {{ WeaveSecret }}
---
{{- end }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- weave-net
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
roleRef:
kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
spec:
template:
metadata:
labels:
name: weave-net
role.kubernetes.io/networking: "1"
annotations:
prometheus.io/scrape: "true"
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: IPALLOC_RANGE
value: {{ .KubeControllerManager.ClusterCIDR }}
{{- if .Networking.Weave.MTU }}
- name: WEAVE_MTU
value: "{{ .Networking.Weave.MTU }}"
{{- end }}
{{- if .Networking.Weave.ConnLimit }}
- name: CONN_LIMIT
value: "{{ .Networking.Weave.ConnLimit }}"
{{- end }}
{{- if .Networking.Weave.NetExtraArgs }}
- name: EXTRA_ARGS
value: "{{ .Networking.Weave.NetExtraArgs }}"
{{- end }}
{{- if WeaveSecret }}
- name: WEAVE_PASSWORD
valueFrom:
secretKeyRef:
name: weave-net
key: network-password
{{- end }}
image: 'weaveworks/weave-kube:2.3.0'
ports:
- name: metrics
containerPort: 6782
readinessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
resources:
requests:
cpu: {{ or .Networking.Weave.CPURequest "50m" }}
memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }}
limits:
{{- if .Networking.Weave.CPULimit }}
cpu: {{ .Networking.Weave.CPULimit }}
{{- end }}
memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }}
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: weave-npc
args:
- '--use-legacy-netpol'
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-npc:2.3.0'
ports:
- name: metrics
containerPort: 6781
resources:
requests:
cpu: {{ or .Networking.Weave.NPCCPURequest "50m" }}
memory: {{ or .Networking.Weave.NPCMemoryRequest "200Mi" }}
limits:
{{- if .Networking.Weave.NPCCPULimit }}
cpu: {{ .Networking.Weave.NPCCPULimit }}
{{- end }}
memory: {{ or .Networking.Weave.NPCMemoryLimit "200Mi" }}
securityContext:
privileged: true
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
updateStrategy:
type: RollingUpdate

View File

@ -1,258 +0,0 @@
{{- if WeaveSecret }}
apiVersion: v1
kind: Secret
metadata:
name: weave-net
namespace: kube-system
stringData:
network-password: {{ WeaveSecret }}
---
{{- end }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- 'networking.k8s.io'
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- nodes/status
verbs:
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- weave-net
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
roleRef:
kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
namespace: kube-system
labels:
name: weave-net
role.kubernetes.io/networking: "1"
spec:
# Wait 5 seconds to let the pod connect before rolling the next pod
minReadySeconds: 5
template:
metadata:
labels:
name: weave-net
role.kubernetes.io/networking: "1"
annotations:
prometheus.io/scrape: "true"
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: IPALLOC_RANGE
value: {{ .KubeControllerManager.ClusterCIDR }}
{{- if .Networking.Weave.MTU }}
- name: WEAVE_MTU
value: "{{ .Networking.Weave.MTU }}"
{{- end }}
{{- if .Networking.Weave.ConnLimit }}
- name: CONN_LIMIT
value: "{{ .Networking.Weave.ConnLimit }}"
{{- end }}
{{- if .Networking.Weave.NetExtraArgs }}
- name: EXTRA_ARGS
value: "{{ .Networking.Weave.NetExtraArgs }}"
{{- end }}
{{- if WeaveSecret }}
- name: WEAVE_PASSWORD
valueFrom:
secretKeyRef:
name: weave-net
key: network-password
{{- end }}
image: 'weaveworks/weave-kube:2.6.0'
ports:
- name: metrics
containerPort: 6782
readinessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
resources:
requests:
cpu: {{ or .Networking.Weave.CPURequest "50m" }}
memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }}
limits:
{{- if .Networking.Weave.CPULimit }}
cpu: {{ .Networking.Weave.CPULimit }}
{{- end }}
memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }}
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: xtables-lock
mountPath: /run/xtables.lock
- name: weave-npc
args: []
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-npc:2.6.0'
ports:
- name: metrics
containerPort: 6781
resources:
requests:
cpu: {{ or .Networking.Weave.NPCCPURequest "50m" }}
memory: {{ or .Networking.Weave.NPCMemoryRequest "200Mi" }}
limits:
{{- if .Networking.Weave.NPCCPULimit }}
cpu: {{ .Networking.Weave.NPCCPULimit }}
{{- end }}
memory: {{ or .Networking.Weave.NPCMemoryLimit "200Mi" }}
securityContext:
privileged: true
volumeMounts:
- name: xtables-lock
mountPath: /run/xtables.lock
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- effect: NoSchedule
operator: Exists
- key: CriticalAddonsOnly
operator: Exists
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
updateStrategy:
type: RollingUpdate

View File

@ -1,129 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
labels:
name: weave-net
role.kubernetes.io/networking: "1"
namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
labels:
name: weave-net
role.kubernetes.io/networking: "1"
namespace: kube-system
spec:
template:
metadata:
annotations:
prometheus.io/scrape: "true"
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: >-
[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"},{"key":"CriticalAddonsOnly", "operator":"Exists"}]
labels:
name: weave-net
role.kubernetes.io/networking: "1"
spec:
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: IPALLOC_RANGE
value: {{ .KubeControllerManager.ClusterCIDR }}
{{- if .Networking.Weave.MTU }}
- name: WEAVE_MTU
value: "{{ .Networking.Weave.MTU }}"
{{- end }}
{{- if .Networking.Weave.ConnLimit }}
- name: CONN_LIMIT
value: "{{ .Networking.Weave.ConnLimit }}"
{{- end }}
image: 'weaveworks/weave-kube:2.3.0'
ports:
- name: metrics
containerPort: 6782
livenessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
initialDelaySeconds: 30
resources:
requests:
cpu: {{ or .Networking.Weave.CPURequest "50m" }}
memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }}
limits:
{{- if .Networking.Weave.CPULimit }}
cpu: {{ .Networking.Weave.CPULimit }}
{{- end }}
memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }}
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: weave-npc
args:
- '--use-legacy-netpol'
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-npc:2.3.0'
ports:
- name: metrics
containerPort: 6781
resources:
requests:
cpu: 50m
memory: 200Mi
limits:
memory: 200Mi
securityContext:
privileged: true
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules

View File

@ -1,138 +0,0 @@
# ------------------------------------------
# Config Map
# ------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
name: spotinst-kubernetes-cluster-controller-config
namespace: kube-system
data:
spotinst.token: {{ SpotinstToken }}
spotinst.account: {{ SpotinstAccount }}
spotinst.cluster-identifier: {{ ClusterName }}
---
# ------------------------------------------
# Secret
# ------------------------------------------
apiVersion: v1
kind: Secret
metadata:
name: spotinst-kubernetes-cluster-controller-certs
namespace: kube-system
type: Opaque
---
# ------------------------------------------
# Service Account
# ------------------------------------------
apiVersion: v1
kind: ServiceAccount
metadata:
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
---
# ------------------------------------------
# Cluster Role
# ------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["pods", "nodes", "replicationcontrollers", "events", "limitranges", "services", "persistentvolumes", "persistentvolumeclaims", "namespaces"]
verbs: ["get", "delete", "list", "patch", "update"]
- apiGroups: ["apps"]
resources: ["deployments"]
verbs: ["get","list","patch"]
- apiGroups: ["extensions"]
resources: ["replicasets"]
verbs: ["get","list"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterroles"]
verbs: ["patch", "update", "escalate"]
- apiGroups: ["policy"]
resources: ["poddisruptionbudgets"]
verbs: ["list"]
- apiGroups: ["metrics.k8s.io"]
resources: ["pods"]
verbs: ["list"]
- nonResourceURLs: ["/version/", "/version"]
verbs: ["get"]
---
# ------------------------------------------
# Cluster Role Binding
# ------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: spotinst-kubernetes-cluster-controller
subjects:
- kind: ServiceAccount
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
---
# ------------------------------------------
# Deployment
# ------------------------------------------
apiVersion: apps/v1beta1
kind: Deployment
metadata:
labels:
k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io
name: spotinst-kubernetes-cluster-controller
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io
template:
metadata:
labels:
k8s-addon: spotinst-kubernetes-cluster-controller.addons.k8s.io
spec:
containers:
- name: spotinst-kubernetes-cluster-controller
imagePullPolicy: Always
image: spotinst/kubernetes-cluster-controller:1.0.39
volumeMounts:
- name: spotinst-kubernetes-cluster-controller-certs
mountPath: /certs
livenessProbe:
httpGet:
path: /healthcheck
port: 4401
initialDelaySeconds: 300
periodSeconds: 30
env:
- name: SPOTINST_TOKEN
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: spotinst.token
- name: SPOTINST_ACCOUNT
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: spotinst.account
- name: CLUSTER_IDENTIFIER
valueFrom:
configMapKeyRef:
name: spotinst-kubernetes-cluster-controller-config
key: spotinst.cluster-identifier
volumes:
- name: spotinst-kubernetes-cluster-controller-certs
secret:
secretName: spotinst-kubernetes-cluster-controller-certs
serviceAccountName: spotinst-kubernetes-cluster-controller
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---

View File

@ -1,24 +0,0 @@
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: default
labels:
k8s-addon: storage-aws.addons.k8s.io
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2
---
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: gp2
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
k8s-addon: storage-aws.addons.k8s.io
provisioner: kubernetes.io/aws-ebs
parameters:
type: gp2

View File

@ -1,13 +0,0 @@
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: standard
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
kubernetes.io/cluster-service: "true"
k8s-addon: storage-gce.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
provisioner: kubernetes.io/gce-pd
parameters:
type: pd-standard

View File

@ -86,11 +86,15 @@ const TagNameKopsRole = "kubernetes.io/kops/role"
const TagNameClusterOwnershipPrefix = "kubernetes.io/cluster/"
const (
WellKnownAccountKopeio = "383156758163"
WellKnownAccountRedhat = "309956199498"
WellKnownAccountCoreOS = "595879546273"
WellKnownAccountAmazonSystemLinux2 = "137112412989"
WellKnownAccountUbuntu = "099720109477"
WellKnownAccountAmazonLinux2 = "137112412989"
WellKnownAccountCentOS = "679593333241"
WellKnownAccountCoreOS = "595879546273"
WellKnownAccountDebian9 = "379101102735"
WellKnownAccountDebian10 = "136693071363"
WellKnownAccountFlatcar = "075585003325"
WellKnownAccountKopeio = "383156758163"
WellKnownAccountRedhat = "309956199498"
WellKnownAccountUbuntu = "099720109477"
)
type AWSCloud interface {
@ -1165,14 +1169,24 @@ func resolveImage(ec2Client ec2iface.EC2API, name string) (*ec2.Image, error) {
// Check for well known owner aliases
switch owner {
case "kope.io":
owner = WellKnownAccountKopeio
case "coreos.com":
case "amazon", "amazon.com":
owner = WellKnownAccountAmazonLinux2
case "centos":
owner = WellKnownAccountCentOS
case "coreos", "coreos.com":
owner = WellKnownAccountCoreOS
case "redhat.com":
case "debian9":
owner = WellKnownAccountDebian9
case "debian10":
owner = WellKnownAccountDebian10
case "flatcar":
owner = WellKnownAccountFlatcar
case "kopeio", "kope.io":
owner = WellKnownAccountKopeio
case "redhat", "redhat.com":
owner = WellKnownAccountRedhat
case "amazon.com":
owner = WellKnownAccountAmazonSystemLinux2
case "ubuntu":
owner = WellKnownAccountUbuntu
}
request.Owners = []*string{&owner}
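The change above extends the owner-alias handling in `resolveImage`: friendly names such as `ubuntu`, `debian10`, or `flatcar` are translated to the corresponding well-known AWS account IDs before images are looked up. A minimal sketch of the same alias-to-account mapping expressed as a lookup table (the account IDs are the constants listed in the first hunk; kops itself uses the switch statement shown in the diff, and `resolveOwner` below is a hypothetical helper for illustration):

```go
package main

import "fmt"

// Alias -> well-known AWS account ID, mirroring the constants above.
var wellKnownImageOwners = map[string]string{
	"amazon":     "137112412989",
	"amazon.com": "137112412989",
	"centos":     "679593333241",
	"coreos":     "595879546273",
	"coreos.com": "595879546273",
	"debian9":    "379101102735",
	"debian10":   "136693071363",
	"flatcar":    "075585003325",
	"kopeio":     "383156758163",
	"kope.io":    "383156758163",
	"redhat":     "309956199498",
	"redhat.com": "309956199498",
	"ubuntu":     "099720109477",
}

// resolveOwner returns the account ID for a known alias, or the input
// unchanged when it is already an account ID (or an unknown alias).
func resolveOwner(owner string) string {
	if id, ok := wellKnownImageOwners[owner]; ok {
		return id
	}
	return owner
}

func main() {
	fmt.Println(resolveOwner("debian10"))     // 136693071363
	fmt.Println(resolveOwner("099720109477")) // passed through unchanged
}
```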

View File

@ -238,20 +238,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "kube-dns.addons.k8s.io"
version := "1.14.13-kops.2"
{
location := key + "/pre-k8s-1.6.yaml"
id := "pre-k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
location := key + "/k8s-1.6.yaml"
id := "k8s-1.6"
@ -261,7 +247,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -285,7 +271,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
if kubeDNS.Provider == "CoreDNS" {
{
key := "coredns.addons.k8s.io"
version := "1.3.1-kops.5"
version := "1.6.6-kops.1"
{
location := key + "/k8s-1.6.yaml"
@ -296,7 +282,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -304,7 +290,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
{
key := "coredns.addons.k8s.io"
version := "1.3.1-kops.6"
version := "1.6.6-kops.2"
{
location := key + "/k8s-1.12.yaml"
@ -345,12 +331,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
id := "k8s-1.8"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0",
Id: id,
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
Id: id,
})
}
}
@ -368,12 +353,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
id := "k8s-1.9"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.9.0",
Id: id,
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
Id: id,
})
}
}
@ -398,20 +382,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "dns-controller.addons.k8s.io"
version := "1.17.0-alpha.1"
{
location := key + "/pre-k8s-1.6.yaml"
id := "pre-k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
location := key + "/k8s-1.6.yaml"
id := "k8s-1.6"
@ -421,7 +391,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -447,20 +417,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "external-dns.addons.k8s.io"
version := "0.4.5-kops.1"
{
location := key + "/pre-k8s-1.6.yaml"
id := "pre-k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
location := key + "/k8s-1.6.yaml"
id := "k8s-1.6"
@ -470,7 +426,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -518,21 +474,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.15.0",
Id: id,
})
}
{
id := "v1.6.0"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: "<1.7.0",
KubernetesVersion: "<1.15.0",
Id: id,
})
}
@ -547,12 +489,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0",
Id: id,
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
Id: id,
})
}
}
@ -561,31 +502,16 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "storage-gce.addons.k8s.io"
version := "1.7.0"
{
id := "v1.6.0"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: "<1.7.0",
Id: id,
})
}
{
id := "v1.7.0"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0",
Id: id,
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
Id: id,
})
}
}
@ -593,33 +519,17 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
if featureflag.Spotinst.Enabled() {
key := "spotinst-kubernetes-cluster-controller.addons.k8s.io"
{
id := "v1.8.0"
location := key + "/" + id + ".yaml"
version := "1.0.39"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: "<1.9.0",
Id: id,
})
}
{
id := "v1.9.0"
location := key + "/" + id + ".yaml"
version := "1.0.39"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.9.0",
Id: id,
Name: fi.String(key),
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
Id: id,
})
}
@ -662,20 +572,6 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "networking.kope.io"
version := "1.0.20181028-kops.2"
{
location := key + "/pre-k8s-1.6.yaml"
id := "pre-k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
location := key + "/k8s-1.6.yaml"
id := "k8s-1.6"
@ -685,7 +581,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -708,53 +604,8 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
if b.cluster.Spec.Networking.Weave != nil {
key := "networking.weave"
versions := map[string]string{
"pre-k8s-1.6": "2.3.0-kops.3",
"k8s-1.6": "2.3.0-kops.3",
"k8s-1.7": "2.6.0-kops.2",
"k8s-1.8": "2.6.0-kops.2",
"k8s-1.12": "2.6.0-kops.3",
}
{
location := key + "/pre-k8s-1.6.yaml"
id := "pre-k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
location := key + "/k8s-1.6.yaml"
id := "k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.7.0",
Id: id,
})
}
{
location := key + "/k8s-1.7.yaml"
id := "k8s-1.7"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.8.0",
Id: id,
})
"k8s-1.8": "2.6.0-kops.2",
"k8s-1.12": "2.6.0-kops.3",
}
{
@ -766,7 +617,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -789,23 +640,8 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
if b.cluster.Spec.Networking.Flannel != nil {
key := "networking.flannel"
versions := map[string]string{
"pre-k8s-1.6": "0.11.0-kops.1",
"k8s-1.6": "0.11.0-kops.2",
"k8s-1.12": "0.11.0-kops.3",
}
{
location := key + "/pre-k8s-1.6.yaml"
id := "pre-k8s-1.6"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
"k8s-1.6": "0.11.0-kops.2",
"k8s-1.12": "0.11.0-kops.3",
}
{
@ -817,7 +653,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -840,12 +676,10 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
if b.cluster.Spec.Networking.Calico != nil {
key := "networking.projectcalico.org"
versions := map[string]string{
"pre-k8s-1.6": "2.4.2-kops.1",
"k8s-1.6": "2.6.9-kops.1",
"k8s-1.7": "2.6.12-kops.1",
"k8s-1.7-v3": "3.8.0-kops.2",
"k8s-1.12": "3.9.3-kops.2",
"k8s-1.16": "3.10.2-kops.1",
"k8s-1.7": "2.6.12-kops.1",
"k8s-1.7-v3": "3.8.0-kops.2",
"k8s-1.12": "3.9.3-kops.2",
"k8s-1.16": "3.10.2-kops.1",
}
{
@ -886,39 +720,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
} else {
{
id := "pre-k8s-1.6"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
id := "k8s-1.6"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.7.0",
Id: id,
})
}
{
id := "k8s-1.7"
location := key + "/" + id + ".yaml"
@ -928,7 +734,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -938,53 +744,9 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
if b.cluster.Spec.Networking.Canal != nil {
key := "networking.projectcalico.org.canal"
versions := map[string]string{
"pre-k8s-1.6": "2.4.2-kops.2",
"k8s-1.6": "2.4.2-kops.2",
"k8s-1.8": "2.6.7-kops.3",
"k8s-1.9": "3.2.3-kops.1",
"k8s-1.12": "3.7.4-kops.1",
"k8s-1.15": "3.10.2-kops.1",
}
{
id := "pre-k8s-1.6"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: "<1.6.0",
Id: id,
})
}
{
id := "k8s-1.6"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.8.0",
Id: id,
})
}
{
id := "k8s-1.8"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0 <1.9.0",
Id: id,
})
"k8s-1.9": "3.2.3-kops.1",
"k8s-1.12": "3.7.4-kops.1",
"k8s-1.15": "3.10.2-kops.1",
}
{
id := "k8s-1.9"
@ -995,7 +757,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.9.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -1040,7 +802,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -1073,7 +835,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -1097,26 +859,11 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
key := "networking.amazon-vpc-routed-eni"
versions := map[string]string{
"k8s-1.7": "1.5.0-kops.1",
"k8s-1.8": "1.5.0-kops.1",
"k8s-1.10": "1.5.0-kops.2",
"k8s-1.12": "1.5.5-kops.1",
}
{
id := "k8s-1.7"
location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.8.0",
Id: id,
})
}
{
id := "k8s-1.8"
location := key + "/" + id + ".yaml"
@ -1126,7 +873,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(versions[id]),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0 <1.10.0",
KubernetesVersion: "<1.10.0",
Id: id,
})
}
@ -1173,7 +920,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: networkingSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -1209,7 +956,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: authenticationSelector,
Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
@ -1310,7 +1057,7 @@ func (b *BootstrapChannelBuilder) buildAddons() *channelsapi.Addons {
Version: fi.String(version),
Selector: map[string]string{"k8s-addon": key},
Manifest: fi.String(location),
KubernetesVersion: ">=1.7.0 <1.12.0",
KubernetesVersion: "<1.12.0",
Id: id,
})
}
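The hunks above all follow one pattern: each addon is appended to addons.Spec.Addons as a channelsapi.AddonSpec, and this change removes the pre-k8s-1.6 variants and drops the lower bounds from the kubernetesVersion ranges, leaving only upper bounds such as "<1.12.0" (or no constraint at all for the newest manifest). A minimal, self-contained sketch of that pattern, using a stand-in addonSpec struct instead of the real channelsapi and fi packages and kube-dns as an illustrative key:

package main

import "fmt"

// addonSpec is a stand-in for channelsapi.AddonSpec, reduced to the fields used above.
type addonSpec struct {
    Name              string
    Version           string
    Selector          map[string]string
    Manifest          string
    KubernetesVersion string // empty means no version constraint
    Id                string
}

func main() {
    key := "kube-dns.addons.k8s.io"
    version := "1.14.13-kops.2"
    var addons []addonSpec

    // k8s-1.6 variant: only an upper bound remains after this change.
    addons = append(addons, addonSpec{
        Name:              key,
        Version:           version,
        Selector:          map[string]string{"k8s-addon": key},
        Manifest:          key + "/k8s-1.6.yaml",
        KubernetesVersion: "<1.12.0",
        Id:                "k8s-1.6",
    })

    // k8s-1.12 variant: no constraint once 1.12 is the oldest supported release.
    addons = append(addons, addonSpec{
        Name:     key,
        Version:  version,
        Selector: map[string]string{"k8s-addon": key},
        Manifest: key + "/k8s-1.12.yaml",
        Id:       "k8s-1.12",
    })

    for _, a := range addons {
        fmt.Printf("%s %s %q\n", a.Id, a.Manifest, a.KubernetesVersion)
    }
}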

View File

@ -28,6 +28,7 @@ import (
hcl_parser "github.com/hashicorp/hcl/json/parser"
"k8s.io/klog"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/upup/pkg/fi"
)
@ -258,7 +259,11 @@ func (t *TerraformTarget) Finish(taskMap map[string]fi.Task) error {
// See https://github.com/kubernetes/kops/pull/2424 for why we require 0.9.3
terraformConfiguration := make(map[string]interface{})
terraformConfiguration["required_version"] = ">= 0.9.3"
if featureflag.TerraformJSON.Enabled() {
terraformConfiguration["required_version"] = ">= 0.12.0"
} else {
terraformConfiguration["required_version"] = ">= 0.9.3"
}
data := make(map[string]interface{})
data["terraform"] = terraformConfiguration
@ -278,10 +283,12 @@ func (t *TerraformTarget) Finish(taskMap map[string]fi.Task) error {
return fmt.Errorf("error marshaling terraform data to json: %v", err)
}
useJson := false
if useJson {
t.files["kubernetes.tf"] = jsonBytes
if featureflag.TerraformJSON.Enabled() {
t.files["kubernetes.tf.json"] = jsonBytes
p := path.Join(t.outDir, "kubernetes.tf")
if _, err := os.Stat(p); err == nil {
return fmt.Errorf("Error generating kubernetes.tf.json: If you are upgrading from terraform 0.11 or earlier please read the release notes. Also, the kubernetes.tf file is already present. Please move the file away since it will be replaced by the kubernetes.tf.json file. ")
}
} else {
f, err := hcl_parser.Parse(jsonBytes)
if err != nil {

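The change above keys the Terraform output format off the TerraformJSON feature flag: when the flag is enabled, kops raises required_version to ">= 0.12.0", refuses to proceed if a kubernetes.tf file is already present, and writes kubernetes.tf.json; otherwise it keeps the ">= 0.9.3" floor and the HCL kubernetes.tf output. A rough, self-contained sketch of that branching (terraformJSON and finish are placeholder names standing in for featureflag.TerraformJSON.Enabled() and the target's Finish method):

package main

import (
    "fmt"
    "os"
    "path"
)

// terraformJSON stands in for featureflag.TerraformJSON.Enabled(); flip it to exercise both paths.
const terraformJSON = false

func finish(outDir string, jsonBytes []byte, files map[string][]byte) error {
    requiredVersion := ">= 0.9.3"
    if terraformJSON {
        requiredVersion = ">= 0.12.0"
        // Refuse to shadow an existing HCL file with the new JSON output.
        if _, err := os.Stat(path.Join(outDir, "kubernetes.tf")); err == nil {
            return fmt.Errorf("kubernetes.tf already exists; move it aside before switching to kubernetes.tf.json")
        }
        files["kubernetes.tf.json"] = jsonBytes
    } else {
        // The real code converts jsonBytes back to HCL before writing kubernetes.tf.
        files["kubernetes.tf"] = jsonBytes
    }
    fmt.Println("required_version =", requiredVersion)
    return nil
}

func main() {
    files := map[string][]byte{}
    if err := finish(".", []byte(`{"terraform":{}}`), files); err != nil {
        fmt.Println("error:", err)
    }
}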
View File

@ -18,32 +18,23 @@ spec:
selector:
k8s-addon: core.addons.k8s.io
version: 1.4.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: 895c961cb9365cbedb22edd20a7648182ae7ed3f
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.2
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
manifestHash: 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.2
version: 1.14.13-kops.1
- id: k8s-1.12
kubernetesVersion: '>=1.12.0'
manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 15ade04df128488a534141bd5b8593d078f4953f
manifestHash: b4dff071aa340fd71650c96f213fdf4b4f799c71
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.2
version: 1.14.13-kops.1
- id: k8s-1.8
kubernetesVersion: '>=1.8.0'
manifest: rbac.addons.k8s.io/k8s-1.8.yaml
manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914
name: rbac.addons.k8s.io
@ -51,7 +42,6 @@ spec:
k8s-addon: rbac.addons.k8s.io
version: 1.8.0
- id: k8s-1.9
kubernetesVersion: '>=1.9.0'
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745
name: kubelet-api.rbac.addons.k8s.io
@ -64,16 +54,8 @@ spec:
selector:
k8s-addon: limit-range.addons.k8s.io
version: 1.5.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: e19c5456a31381c08dd166ce1faf85ce7acc15e3
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 1.17.0-alpha.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml
manifestHash: 2d6fa6910077fecdf1c98da4303631588cfc9c01
name: dns-controller.addons.k8s.io
@ -83,7 +65,7 @@ spec:
- id: k8s-1.12
kubernetesVersion: '>=1.12.0'
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 4d19a9d30591c09243539c4022003d2f875818fc
manifestHash: a304440f4f7d2e289eb12c37adeac04253d84906
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
@ -97,31 +79,15 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.7.0
kubernetesVersion: '>=1.7.0 <1.15.0'
kubernetesVersion: <1.15.0
manifest: storage-aws.addons.k8s.io/v1.7.0.yaml
manifestHash: 62705a596142e6cc283280e8aa973e51536994c5
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.6.0
kubernetesVersion: <1.7.0
manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
manifestHash: 7de4b2eb0521d669172038759c521418711d8266
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: k8s-1.7
kubernetesVersion: '>=1.7.0 <1.8.0'
manifest: networking.amazon-vpc-routed-eni/k8s-1.7.yaml
manifestHash: 394edf46a78e6d1f6dda920b0214afcd4ce34bc3
name: networking.amazon-vpc-routed-eni
selector:
role.kubernetes.io/networking: "1"
version: 1.5.0-kops.1
- id: k8s-1.8
kubernetesVersion: '>=1.8.0 <1.10.0'
kubernetesVersion: <1.10.0
manifest: networking.amazon-vpc-routed-eni/k8s-1.8.yaml
manifestHash: 544fd24d754b32e8896dba6113f1053a4ba86694
name: networking.amazon-vpc-routed-eni
@ -131,11 +97,11 @@ spec:
- id: k8s-1.10
kubernetesVersion: '>=1.10.0 <1.12.0'
manifest: networking.amazon-vpc-routed-eni/k8s-1.10.yaml
manifestHash: eddd81f1dc347b3ae4566f5aa098106f14597f7c
manifestHash: 672f2fdfc6286512e9918014f7853728db2f6dad
name: networking.amazon-vpc-routed-eni
selector:
role.kubernetes.io/networking: "1"
version: 1.5.0-kops.2
version: 1.5.0-kops.1
- id: k8s-1.12
kubernetesVersion: '>=1.12.0'
manifest: networking.amazon-vpc-routed-eni/k8s-1.12.yaml

View File

@ -18,32 +18,23 @@ spec:
selector:
k8s-addon: core.addons.k8s.io
version: 1.4.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: 90f1e4bedea6da183eb4c6788879f7297119ff3e
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.2
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
manifestHash: c74ca65f461c764fc9682c6d9ec171b241bec335
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.2
version: 1.14.13-kops.1
- id: k8s-1.12
kubernetesVersion: '>=1.12.0'
manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 1af655a2947a56b55f67bff73a06cfe9e7b12c35
manifestHash: 4da6501bd5ad16fab9e05fbfe7c6d587441a8ec4
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.2
version: 1.14.13-kops.1
- id: k8s-1.8
kubernetesVersion: '>=1.8.0'
manifest: rbac.addons.k8s.io/k8s-1.8.yaml
manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914
name: rbac.addons.k8s.io
@ -51,7 +42,6 @@ spec:
k8s-addon: rbac.addons.k8s.io
version: 1.8.0
- id: k8s-1.9
kubernetesVersion: '>=1.9.0'
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745
name: kubelet-api.rbac.addons.k8s.io
@ -64,16 +54,8 @@ spec:
selector:
k8s-addon: limit-range.addons.k8s.io
version: 1.5.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: e19c5456a31381c08dd166ce1faf85ce7acc15e3
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 1.17.0-alpha.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml
manifestHash: 2d6fa6910077fecdf1c98da4303631588cfc9c01
name: dns-controller.addons.k8s.io
@ -83,7 +65,7 @@ spec:
- id: k8s-1.12
kubernetesVersion: '>=1.12.0'
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 4d19a9d30591c09243539c4022003d2f875818fc
manifestHash: a304440f4f7d2e289eb12c37adeac04253d84906
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
@ -97,34 +79,26 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.7.0
kubernetesVersion: '>=1.7.0 <1.15.0'
kubernetesVersion: <1.15.0
manifest: storage-aws.addons.k8s.io/v1.7.0.yaml
manifestHash: 62705a596142e6cc283280e8aa973e51536994c5
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.6.0
kubernetesVersion: <1.7.0
manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
manifestHash: 7de4b2eb0521d669172038759c521418711d8266
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: k8s-1.7
kubernetesVersion: '>=1.7.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: networking.cilium.io/k8s-1.7.yaml
manifestHash: 2d40b9ab7453b4a0a413196fae4c8bdcd62c69ce
name: networking.cilium.io
selector:
role.kubernetes.io/networking: "1"
version: 1.6.4-kops.3
version: 1.6.4-kops.2
- id: k8s-1.12
kubernetesVersion: '>=1.12.0'
manifest: networking.cilium.io/k8s-1.12.yaml
manifestHash: b36181e5522a41b1726362e138ad87df87839a68
manifestHash: 2d40b9ab7453b4a0a413196fae4c8bdcd62c69ce
name: networking.cilium.io
selector:
role.kubernetes.io/networking: "1"
version: 1.6.4-kops.3
version: 1.6.4-kops.2
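Each entry in these channel manifests pairs the manifest path with a manifestHash; the 40-hex-character values look like SHA-1 digests of the rendered manifest, which would explain why every content change in this commit (new image versions, regenerated templates) also bumps the corresponding manifestHash. Assuming that SHA-1-of-file convention, a small sketch for recomputing a hash locally:

package main

import (
    "crypto/sha1"
    "encoding/hex"
    "fmt"
    "os"
)

// manifestHash returns the hex SHA-1 of a manifest file, matching the length of the
// 40-character hashes in the addons manifests above (assumed convention).
func manifestHash(path string) (string, error) {
    data, err := os.ReadFile(path)
    if err != nil {
        return "", err
    }
    sum := sha1.Sum(data)
    return hex.EncodeToString(sum[:]), nil
}

func main() {
    path := "networking.cilium.io/k8s-1.12.yaml" // any rendered addon manifest
    if len(os.Args) > 1 {
        path = os.Args[1]
    }
    h, err := manifestHash(path)
    if err != nil {
        fmt.Println("error:", err)
        return
    }
    fmt.Println(h)
}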

View File

@ -18,32 +18,23 @@ spec:
selector:
k8s-addon: core.addons.k8s.io
version: 1.4.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: 895c961cb9365cbedb22edd20a7648182ae7ed3f
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.2
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
manifestHash: 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.2
version: 1.14.13-kops.1
- id: k8s-1.12
kubernetesVersion: '>=1.12.0'
manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 15ade04df128488a534141bd5b8593d078f4953f
manifestHash: b4dff071aa340fd71650c96f213fdf4b4f799c71
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.2
version: 1.14.13-kops.1
- id: k8s-1.8
kubernetesVersion: '>=1.8.0'
manifest: rbac.addons.k8s.io/k8s-1.8.yaml
manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914
name: rbac.addons.k8s.io
@ -51,7 +42,6 @@ spec:
k8s-addon: rbac.addons.k8s.io
version: 1.8.0
- id: k8s-1.9
kubernetesVersion: '>=1.9.0'
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745
name: kubelet-api.rbac.addons.k8s.io
@ -64,16 +54,8 @@ spec:
selector:
k8s-addon: limit-range.addons.k8s.io
version: 1.5.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: e19c5456a31381c08dd166ce1faf85ce7acc15e3
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 1.17.0-alpha.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml
manifestHash: 2d6fa6910077fecdf1c98da4303631588cfc9c01
name: dns-controller.addons.k8s.io
@ -83,7 +65,7 @@ spec:
- id: k8s-1.12
kubernetesVersion: '>=1.12.0'
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 4d19a9d30591c09243539c4022003d2f875818fc
manifestHash: a304440f4f7d2e289eb12c37adeac04253d84906
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
@ -97,18 +79,10 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.7.0
kubernetesVersion: '>=1.7.0 <1.15.0'
kubernetesVersion: <1.15.0
manifest: storage-aws.addons.k8s.io/v1.7.0.yaml
manifestHash: 62705a596142e6cc283280e8aa973e51536994c5
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.6.0
kubernetesVersion: <1.7.0
manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
manifestHash: 7de4b2eb0521d669172038759c521418711d8266
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0

View File

@ -18,32 +18,23 @@ spec:
selector:
k8s-addon: core.addons.k8s.io
version: 1.4.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: 895c961cb9365cbedb22edd20a7648182ae7ed3f
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.2
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
manifestHash: 555f952a8b955ce7a5dd0bcd06a5be9e72bd2895
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.2
version: 1.14.13-kops.1
- id: k8s-1.12
kubernetesVersion: '>=1.12.0'
manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 15ade04df128488a534141bd5b8593d078f4953f
manifestHash: b4dff071aa340fd71650c96f213fdf4b4f799c71
name: kube-dns.addons.k8s.io
selector:
k8s-addon: kube-dns.addons.k8s.io
version: 1.14.13-kops.2
version: 1.14.13-kops.1
- id: k8s-1.8
kubernetesVersion: '>=1.8.0'
manifest: rbac.addons.k8s.io/k8s-1.8.yaml
manifestHash: 5d53ce7b920cd1e8d65d2306d80a041420711914
name: rbac.addons.k8s.io
@ -51,7 +42,6 @@ spec:
k8s-addon: rbac.addons.k8s.io
version: 1.8.0
- id: k8s-1.9
kubernetesVersion: '>=1.9.0'
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: e1508d77cb4e527d7a2939babe36dc350dd83745
name: kubelet-api.rbac.addons.k8s.io
@ -64,16 +54,8 @@ spec:
selector:
k8s-addon: limit-range.addons.k8s.io
version: 1.5.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml
manifestHash: e19c5456a31381c08dd166ce1faf85ce7acc15e3
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 1.17.0-alpha.1
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml
manifestHash: 2d6fa6910077fecdf1c98da4303631588cfc9c01
name: dns-controller.addons.k8s.io
@ -83,7 +65,7 @@ spec:
- id: k8s-1.12
kubernetesVersion: '>=1.12.0'
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 4d19a9d30591c09243539c4022003d2f875818fc
manifestHash: a304440f4f7d2e289eb12c37adeac04253d84906
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
@ -97,47 +79,15 @@ spec:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.7.0
kubernetesVersion: '>=1.7.0 <1.15.0'
kubernetesVersion: <1.15.0
manifest: storage-aws.addons.k8s.io/v1.7.0.yaml
manifestHash: 62705a596142e6cc283280e8aa973e51536994c5
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: v1.6.0
kubernetesVersion: <1.7.0
manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
manifestHash: 7de4b2eb0521d669172038759c521418711d8266
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 1.15.0
- id: pre-k8s-1.6
kubernetesVersion: <1.6.0
manifest: networking.weave/pre-k8s-1.6.yaml
manifestHash: 8e7a361fff381e0ed84e0011506ff3bfdc7bc202
name: networking.weave
selector:
role.kubernetes.io/networking: "1"
version: 2.3.0-kops.3
- id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.7.0'
manifest: networking.weave/k8s-1.6.yaml
manifestHash: 3f021695840729640da3910d8da357e905d3450c
name: networking.weave
selector:
role.kubernetes.io/networking: "1"
version: 2.3.0-kops.3
- id: k8s-1.7
kubernetesVersion: '>=1.7.0 <1.8.0'
manifest: networking.weave/k8s-1.7.yaml
manifestHash: 990772f9809ffb0cff4ea9341a9ab7e9094d7587
name: networking.weave
selector:
role.kubernetes.io/networking: "1"
version: 2.6.0-kops.2
- id: k8s-1.8
kubernetesVersion: '>=1.8.0 <1.12.0'
kubernetesVersion: <1.12.0
manifest: networking.weave/k8s-1.8.yaml
manifestHash: 50a20409003956b7c31a479408ca42ec97774854
name: networking.weave

View File

@ -254,7 +254,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
} else {
loader.Builders = append(loader.Builders, &model.KubeRouterBuilder{NodeupModelContext: modelContext})
}
if c.cluster.Spec.Networking.Calico != nil || c.cluster.Spec.Networking.Cilium != nil {
if c.cluster.Spec.Networking.Calico != nil {
loader.Builders = append(loader.Builders, &model.EtcdTLSBuilder{NodeupModelContext: modelContext})
}

View File

@ -23,8 +23,10 @@ import (
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"strconv"
"strings"
"k8s.io/klog"
"k8s.io/kops/upup/pkg/fi"
@ -47,6 +49,9 @@ type Archive struct {
// StripComponents is the number of components to remove when expanding the archive
StripComponents int `json:"stripComponents,omitempty"`
// MapFiles maps files in the archive to the directories they should be extracted into
MapFiles map[string]string `json:"mapFiles,omitempty"`
}
const (
@ -155,20 +160,38 @@ func (_ *Archive) RenderLocal(t *local.LocalTarget, a, e, changes *Archive) erro
return err
}
targetDir := e.TargetDir
if err := os.MkdirAll(targetDir, 0755); err != nil {
return fmt.Errorf("error creating directories %q: %v", targetDir, err)
}
if len(e.MapFiles) == 0 {
targetDir := e.TargetDir
if err := os.MkdirAll(targetDir, 0755); err != nil {
return fmt.Errorf("error creating directories %q: %v", targetDir, err)
}
args := []string{"tar", "xf", localFile, "-C", targetDir}
if e.StripComponents != 0 {
args = append(args, "--strip-components="+strconv.Itoa(e.StripComponents))
}
args := []string{"tar", "xf", localFile, "-C", targetDir}
if e.StripComponents != 0 {
args = append(args, "--strip-components="+strconv.Itoa(e.StripComponents))
}
klog.Infof("running command %s", args)
cmd := exec.Command(args[0], args[1:]...)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("error installing archive %q: %v: %s", e.Name, err, string(output))
klog.Infof("running command %s", args)
cmd := exec.Command(args[0], args[1:]...)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("error installing archive %q: %v: %s", e.Name, err, string(output))
}
} else {
for src, dest := range e.MapFiles {
stripCount := strings.Count(src, "/")
targetDir := filepath.Join(e.TargetDir, dest)
if err := os.MkdirAll(targetDir, 0755); err != nil {
return fmt.Errorf("error creating directories %q: %v", targetDir, err)
}
args := []string{"tar", "xf", localFile, "-C", targetDir, "--strip-components=" + strconv.Itoa(stripCount), src}
klog.Infof("running command %s", args)
cmd := exec.Command(args[0], args[1:]...)
if output, err := cmd.CombinedOutput(); err != nil {
return fmt.Errorf("error installing archive %q: %v: %s", e.Name, err, string(output))
}
}
}
// We write a marker file to prevent re-execution

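The new MapFiles branch extracts individual entries from the archive into per-entry target directories, deriving the tar --strip-components value from the number of path separators in the source entry; mapping a hypothetical "docker/docker" entry to "/usr/bin", for example, gives a strip count of 1. A small sketch of just the argument construction (mapFileArgs is a made-up helper name):

package main

import (
    "fmt"
    "path/filepath"
    "strconv"
    "strings"
)

// mapFileArgs mirrors the argument construction in the MapFiles branch above:
// each source entry is extracted into targetDir/dest with its leading path
// components stripped.
func mapFileArgs(localFile, targetDir string, mapFiles map[string]string) [][]string {
    var cmds [][]string
    for src, dest := range mapFiles {
        stripCount := strings.Count(src, "/")
        dir := filepath.Join(targetDir, dest)
        cmds = append(cmds, []string{
            "tar", "xf", localFile, "-C", dir,
            "--strip-components=" + strconv.Itoa(stripCount), src,
        })
    }
    return cmds
}

func main() {
    // Hypothetical mapping: pull a single binary out of a release tarball.
    for _, args := range mapFileArgs("/tmp/docker.tgz", "/", map[string]string{"docker/docker": "/usr/bin"}) {
        fmt.Println(strings.Join(args, " "))
    }
}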
View File

@ -54,9 +54,10 @@ type Package struct {
}
const (
localPackageDir = "/var/cache/nodeup/packages/"
containerdPackageName = "containerd.io"
dockerPackageName = "docker-ce"
localPackageDir = "/var/cache/nodeup/packages/"
containerSelinuxPackageName = "container-selinux"
containerdPackageName = "containerd.io"
dockerPackageName = "docker-ce"
)
var _ fi.HasDependencies = &Package{}
@ -83,10 +84,24 @@ func (e *Package) GetDependencies(tasks map[string]fi.Task) []fi.Task {
}
}
// Docker should wait for containerd to be installed
// containerd should wait for container-selinux to be installed
if e.Name == containerdPackageName {
for _, v := range tasks {
if vp, ok := v.(*Package); ok {
if vp.Name == containerSelinuxPackageName {
deps = append(deps, v)
}
}
}
}
// Docker should wait for container-selinux and containerd to be installed
if e.Name == dockerPackageName {
for _, v := range tasks {
if vp, ok := v.(*Package); ok {
if vp.Name == containerSelinuxPackageName {
deps = append(deps, v)
}
if vp.Name == containerdPackageName {
deps = append(deps, v)
}