Remove code for unsupported features

Drops code paths that only applied to no-longer-supported configurations: the Legacy (protokube-managed) etcd provider along with its certificates, protokube flags, and image remapping; the Kubernetes pre-1.18 branches for basic auth, the v1alpha1 kube-scheduler config, and the Docker health check forced on Debian-family hosts; and the nodeup test fixtures for Docker 17.03 and 18.06.

John Gardiner Myers 2021-08-28 09:00:18 -07:00
parent f041bdafdc
commit be8933b577
49 changed files with 40 additions and 3200 deletions

View File

@ -11,7 +11,6 @@ go_library(
"convenience.go",
"directories.go",
"docker.go",
"etcd.go",
"etcd_manager_tls.go",
"file_assets.go",
"firewall.go",
@ -44,7 +43,6 @@ go_library(
"//pkg/apis/kops/model:go_default_library",
"//pkg/apis/kops/util:go_default_library",
"//pkg/apis/nodeup:go_default_library",
"//pkg/assets:go_default_library",
"//pkg/configbuilder:go_default_library",
"//pkg/dns:go_default_library",
"//pkg/flagbuilder:go_default_library",

View File

@ -357,17 +357,6 @@ func (c *NodeupModelContext) IsKubernetesLT(version string) bool {
return !c.IsKubernetesGTE(version)
}
// UseEtcdManager checks if the etcd cluster has etcd-manager enabled
func (c *NodeupModelContext) UseEtcdManager() bool {
for _, x := range c.Cluster.Spec.EtcdClusters {
if x.Provider == kops.EtcdProviderTypeManager {
return true
}
}
return false
}
// UseEtcdTLS checks if the etcd cluster has TLS enabled
func (c *NodeupModelContext) UseEtcdTLS() bool {
// @note: because we enforce that 'both' have to be enabled for TLS we only need to check one here.
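With the Legacy etcd provider gone, every etcd cluster runs under etcd-manager, so the removed helper always returned true and its callers drop the guard. A minimal self-contained sketch (simplified types, not the real kops API) of the check being deleted:

package main

import "fmt"

// Sketch of the removed UseEtcdManager check: once Legacy is unsupported,
// every cluster's Provider is "Manager", so this always returns true.
type etcdClusterSpec struct {
    Name     string
    Provider string
}

func useEtcdManager(clusters []etcdClusterSpec) bool {
    for _, c := range clusters {
        if c.Provider == "Manager" {
            return true
        }
    }
    return false
}

func main() {
    clusters := []etcdClusterSpec{
        {Name: "main", Provider: "Manager"},
        {Name: "events", Provider: "Manager"},
    }
    fmt.Println(useEtcdManager(clusters)) // always true after this commit
}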

View File

@ -19,10 +19,8 @@ package model
import (
"fmt"
"sort"
"strconv"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kops/upup/pkg/fi"
)
@ -72,8 +70,3 @@ func addHostPathMapping(pod *v1.Pod, container *v1.Container, name, path string)
return &container.VolumeMounts[len(container.VolumeMounts)-1]
}
// convEtcdSettingsToMs converts etcd settings to a string rep of int milliseconds
func convEtcdSettingsToMs(dur *metav1.Duration) string {
return strconv.FormatInt(dur.Nanoseconds()/1000000, 10)
}
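For reference, the removed helper rendered an etcd timing override as whole milliseconds for protokube's flags. A self-contained sketch using time.Duration in place of metav1.Duration (which wraps a time.Duration):

package main

import (
    "fmt"
    "strconv"
    "time"
)

// Sketch of the removed convEtcdSettingsToMs: it formatted a duration as
// integer milliseconds for the --etcd-election-timeout and
// --etcd-heartbeat-interval flags.
func convToMs(d time.Duration) string {
    return strconv.FormatInt(d.Nanoseconds()/1000000, 10)
}

func main() {
    fmt.Println(convToMs(1500 * time.Millisecond)) // "1500"
}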

View File

@ -135,7 +135,7 @@ func (b *DockerBuilder) Build(c *fi.ModelBuilderContext) error {
}
// Enable health-check
if b.healthCheck() || (b.IsKubernetesLT("1.18") && b.Distribution.IsDebianFamily()) {
if b.healthCheck() {
c.AddTask(b.buildSystemdHealthCheckScript())
c.AddTask(b.buildSystemdHealthCheckService())
c.AddTask(b.buildSystemdHealthCheckTimer())

View File

@ -30,14 +30,6 @@ import (
"k8s.io/kops/util/pkg/distributions"
)
func TestDockerBuilder_Simple(t *testing.T) {
runDockerBuilderTest(t, "simple")
}
func TestDockerBuilder_18_06_3(t *testing.T) {
runDockerBuilderTest(t, "docker_18.06.3")
}
func TestDockerBuilder_19_03_11(t *testing.T) {
runDockerBuilderTest(t, "docker_19.03.11")
}

View File

@ -1,61 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"k8s.io/kops/pkg/wellknownusers"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/kops/util/pkg/distributions"
"k8s.io/klog/v2"
)
// EtcdBuilder installs etcd
type EtcdBuilder struct {
*NodeupModelContext
}
var _ fi.ModelBuilder = &EtcdBuilder{}
// Build is responsible for creating the etcd user
func (b *EtcdBuilder) Build(c *fi.ModelBuilderContext) error {
if !b.IsMaster || b.UseEtcdManager() {
return nil
}
switch b.Distribution {
case distributions.DistributionFlatcar:
klog.Infof("Detected Flatcar; skipping etcd user installation")
return nil
case distributions.DistributionContainerOS:
klog.Infof("Detected ContainerOS; skipping etcd user installation")
return nil
}
// TODO: Do we actually use the user anywhere?
c.AddTask(&nodetasks.UserTask{
Name: "user",
UID: wellknownusers.LegacyEtcd,
Shell: "/sbin/nologin",
Home: "/var/etcd",
})
return nil
}

View File

@ -29,7 +29,7 @@ var _ fi.ModelBuilder = &EtcdManagerTLSBuilder{}
// Build is responsible for TLS configuration for etcd-manager
func (b *EtcdManagerTLSBuilder) Build(ctx *fi.ModelBuilderContext) error {
if !b.IsMaster || !b.UseEtcdManager() {
if !b.IsMaster {
return nil
}

View File

@ -108,7 +108,7 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
}
}
if b.UseEtcdManager() {
{
c.AddTask(&nodetasks.File{
Path: filepath.Join(pathSrvKAPI, "etcd-ca.crt"),
Contents: fi.NewStringResource(b.NodeupConfig.CAs["etcd-clients-ca"]),
@ -130,8 +130,6 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
if err := issueCert.AddFileTasks(c, pathSrvKAPI, issueCert.Name, "", nil); err != nil {
return err
}
} else if b.UseEtcdTLS() {
kubeAPIServer.EtcdCAFile = filepath.Join(b.PathSrvKubernetes(), "ca.crt")
}
kubeAPIServer.EtcdCertFile = filepath.Join(pathSrvKAPI, "etcd-client.crt")
kubeAPIServer.EtcdKeyFile = filepath.Join(pathSrvKAPI, "etcd-client.key")
@ -493,12 +491,7 @@ func (b *KubeAPIServerBuilder) writeStaticCredentials(c *fi.ModelBuilderContext,
// Support for basic auth was deprecated 1.16 and removed in 1.19
// https://github.com/kubernetes/kubernetes/pull/89069
if b.IsKubernetesLT("1.18") {
kubeAPIServer.TokenAuthFile = filepath.Join(pathSrvKAPI, "known_tokens.csv")
if kubeAPIServer.DisableBasicAuth == nil || !*kubeAPIServer.DisableBasicAuth {
kubeAPIServer.BasicAuthFile = filepath.Join(pathSrvKAPI, "basic_auth.csv")
}
} else if b.IsKubernetesLT("1.19") {
if b.IsKubernetesLT("1.19") {
if kubeAPIServer.DisableBasicAuth != nil && !*kubeAPIServer.DisableBasicAuth {
kubeAPIServer.BasicAuthFile = filepath.Join(pathSrvKAPI, "basic_auth.csv")
}
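The surviving gate is easy to misread because of the double negative: for Kubernetes 1.18 (the only pre-1.19 release still supported), basic_auth.csv is written only when DisableBasicAuth is explicitly set to false, whereas the removed pre-1.18 branch defaulted it to on. A self-contained sketch of the remaining behavior:

package main

import "fmt"

// Sketch of the kept basic-auth gate: the file is written only when the
// DisableBasicAuth pointer is explicitly false, never when it is nil, and
// never on Kubernetes 1.19+ where basic auth was removed upstream.
func writeBasicAuthFile(isKubernetesLT119 bool, disableBasicAuth *bool) bool {
    if !isKubernetesLT119 {
        return false
    }
    return disableBasicAuth != nil && !*disableBasicAuth
}

func main() {
    disable := false
    fmt.Println(writeBasicAuthFile(true, nil))       // false: off by default on 1.18
    fmt.Println(writeBasicAuthFile(true, &disable))  // true: explicitly re-enabled
    fmt.Println(writeBasicAuthFile(false, &disable)) // false: removed in 1.19+
}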

View File

@ -106,10 +106,8 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error {
var config *SchedulerConfig
if b.IsKubernetesGTE("1.19") {
config = NewSchedulerConfig("kubescheduler.config.k8s.io/v1beta1")
} else if b.IsKubernetesGTE("1.18") {
config = NewSchedulerConfig("kubescheduler.config.k8s.io/v1alpha2")
} else {
config = NewSchedulerConfig("kubescheduler.config.k8s.io/v1alpha1")
config = NewSchedulerConfig("kubescheduler.config.k8s.io/v1alpha2")
}
manifest, err := configbuilder.BuildConfigYaml(&kubeScheduler, config)
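With Kubernetes < 1.18 out of support, only two scheduler configuration API versions remain. A minimal sketch of the simplified selection:

package main

import "fmt"

// Sketch of the version selection above: v1beta1 for Kubernetes >= 1.19,
// otherwise v1alpha2; the v1alpha1 branch for pre-1.18 clusters is gone.
func schedulerConfigAPIVersion(isKubernetesGTE119 bool) string {
    if isKubernetesGTE119 {
        return "kubescheduler.config.k8s.io/v1beta1"
    }
    return "kubescheduler.config.k8s.io/v1alpha2"
}

func main() {
    fmt.Println(schedulerConfigAPIVersion(true))
    fmt.Println(schedulerConfigAPIVersion(false))
}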

View File

@ -25,7 +25,6 @@ import (
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/pkg/rbac"
@ -98,28 +97,6 @@ func (t *ProtokubeBuilder) Build(c *fi.ModelBuilderContext) error {
Type: nodetasks.FileType_File,
Mode: s("0400"),
})
// retrieve the etcd peer certificates and private keys from the keystore
if !t.UseEtcdManager() && t.UseEtcdTLS() {
for _, x := range []string{"etcd", "etcd-peer"} {
if err := t.BuildCertificateTask(c, x, fmt.Sprintf("%s.pem", x), nil); err != nil {
return err
}
}
for _, x := range []string{"etcd", "etcd-peer"} {
if err := t.BuildLegacyPrivateKeyTask(c, x, fmt.Sprintf("%s-key.pem", x), nil); err != nil {
return err
}
}
pathEtcdClient := filepath.Join(t.PathSrvKubernetes(), "kube-apiserver", "etcd-client")
if err := t.BuildCertificateTask(c, "etcd-client", pathEtcdClient+".crt", nil); err != nil {
return err
}
if err := t.BuildLegacyPrivateKeyTask(c, "etcd-client", pathEtcdClient+".key", nil); err != nil {
return err
}
}
}
envFile, err := t.buildEnvFile()
@ -179,33 +156,16 @@ func (t *ProtokubeBuilder) buildSystemdService() (*nodetasks.Service, error) {
// ProtokubeFlags are the flags for protokube
type ProtokubeFlags struct {
ApplyTaints *bool `json:"applyTaints,omitempty" flag:"apply-taints"`
Channels []string `json:"channels,omitempty" flag:"channels"`
Cloud *string `json:"cloud,omitempty" flag:"cloud"`
Containerized *bool `json:"containerized,omitempty" flag:"containerized"`
DNSInternalSuffix *string `json:"dnsInternalSuffix,omitempty" flag:"dns-internal-suffix"`
DNSProvider *string `json:"dnsProvider,omitempty" flag:"dns"`
DNSServer *string `json:"dns-server,omitempty" flag:"dns-server"`
EtcdBackupImage string `json:"etcd-backup-image,omitempty" flag:"etcd-backup-image"`
EtcdBackupStore string `json:"etcd-backup-store,omitempty" flag:"etcd-backup-store"`
EtcdImage *string `json:"etcd-image,omitempty" flag:"etcd-image"`
EtcdLeaderElectionTimeout *string `json:"etcd-election-timeout,omitempty" flag:"etcd-election-timeout"`
EtcdHearbeatInterval *string `json:"etcd-heartbeat-interval,omitempty" flag:"etcd-heartbeat-interval"`
InitializeRBAC *bool `json:"initializeRBAC,omitempty" flag:"initialize-rbac"`
LogLevel *int32 `json:"logLevel,omitempty" flag:"v"`
Master *bool `json:"master,omitempty" flag:"master"`
PeerTLSCaFile *string `json:"peer-ca,omitempty" flag:"peer-ca"`
PeerTLSCertFile *string `json:"peer-cert,omitempty" flag:"peer-cert"`
PeerTLSKeyFile *string `json:"peer-key,omitempty" flag:"peer-key"`
TLSAuth *bool `json:"tls-auth,omitempty" flag:"tls-auth"`
TLSCAFile *string `json:"tls-ca,omitempty" flag:"tls-ca"`
TLSCertFile *string `json:"tls-cert,omitempty" flag:"tls-cert"`
TLSKeyFile *string `json:"tls-key,omitempty" flag:"tls-key"`
Zone []string `json:"zone,omitempty" flag:"zone"`
// ManageEtcd is true if protokube should manage etcd; being replaced by etcd-manager
ManageEtcd bool `json:"manageEtcd,omitempty" flag:"manage-etcd"`
// RemoveDNSNames allows us to remove dns records, so that they can be managed elsewhere
// We use it e.g. for the switch to etcd-manager
RemoveDNSNames string `json:"removeDNSNames,omitempty" flag:"remove-dns-names"`
@ -229,84 +189,13 @@ type ProtokubeFlags struct {
// ProtokubeFlags is responsible for building the command line flags for protokube
func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) (*ProtokubeFlags, error) {
imageVersion := t.Cluster.Spec.EtcdClusters[0].Version
// overrides imageVersion if set
etcdContainerImage := t.Cluster.Spec.EtcdClusters[0].Image
var leaderElectionTimeout string
var heartbeatInterval string
if v := t.Cluster.Spec.EtcdClusters[0].LeaderElectionTimeout; v != nil {
leaderElectionTimeout = convEtcdSettingsToMs(v)
}
if v := t.Cluster.Spec.EtcdClusters[0].HeartbeatInterval; v != nil {
heartbeatInterval = convEtcdSettingsToMs(v)
}
f := &ProtokubeFlags{
Channels: t.NodeupConfig.Channels,
Containerized: fi.Bool(false),
EtcdLeaderElectionTimeout: s(leaderElectionTimeout),
EtcdHearbeatInterval: s(heartbeatInterval),
LogLevel: fi.Int32(4),
Master: b(t.IsMaster),
}
f.ManageEtcd = false
if len(t.NodeupConfig.EtcdManifests) == 0 {
klog.V(4).Infof("no EtcdManifests; protokube will manage etcd")
f.ManageEtcd = true
}
if f.ManageEtcd {
for _, e := range t.Cluster.Spec.EtcdClusters {
// Because we can only specify a single EtcdBackupStore at the moment, we only backup main, not events
if e.Name != "main" {
continue
}
if e.Backups != nil {
if f.EtcdBackupImage == "" {
f.EtcdBackupImage = e.Backups.Image
}
if f.EtcdBackupStore == "" {
f.EtcdBackupStore = e.Backups.BackupStore
}
}
}
// TODO this is duplicate code with etcd model
image := fmt.Sprintf("k8s.gcr.io/etcd:%s", imageVersion)
// override image if set as API value
if etcdContainerImage != "" {
image = etcdContainerImage
}
assets := assets.NewAssetBuilder(t.Cluster, false)
remapped, err := assets.RemapImage(image)
if err != nil {
return nil, fmt.Errorf("unable to remap container %q: %v", image, err)
}
image = remapped
f.EtcdImage = s(image)
// check if we are using tls and add the options to protokube
if t.UseEtcdTLS() {
f.PeerTLSCaFile = s(filepath.Join(t.PathSrvKubernetes(), "ca.crt"))
f.PeerTLSCertFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd-peer.pem"))
f.PeerTLSKeyFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd-peer-key.pem"))
f.TLSCAFile = s(filepath.Join(t.PathSrvKubernetes(), "ca.crt"))
f.TLSCertFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd.pem"))
f.TLSKeyFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd-key.pem"))
}
if t.UseEtcdTLSAuth() {
enableAuth := true
f.TLSAuth = b(enableAuth)
}
}
f.InitializeRBAC = fi.Bool(true)
zone := t.Cluster.Spec.DNSZone
@ -376,8 +265,8 @@ func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) (*Protokube
f.NodeName = nodeName
}
// Remove DNS names if we're using etcd-manager
if !f.ManageEtcd {
// Remove DNS names since we're using etcd-manager
{
var names []string
// Mirroring the logic used to construct DNS names in protokube/pkg/protokube/etcd_cluster.go
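The pruned ProtokubeFlags struct still follows kops' flagbuilder pattern, where `flag:"..."` struct tags become protokube command-line arguments. A rough, self-contained sketch of that mechanism; the real logic lives in pkg/flagbuilder and handles many more cases:

package main

import (
    "fmt"
    "reflect"
    "strings"
)

// Hypothetical simplified flag builder: walk exported fields, read the
// `flag` tag, skip nil pointers and empty slices, emit --name=value args.
type exampleFlags struct {
    Master   *bool    `flag:"master"`
    LogLevel *int32   `flag:"v"`
    Channels []string `flag:"channels"`
}

func buildFlags(v interface{}) []string {
    var args []string
    val := reflect.ValueOf(v).Elem()
    typ := val.Type()
    for i := 0; i < val.NumField(); i++ {
        name := typ.Field(i).Tag.Get("flag")
        field := val.Field(i)
        switch field.Kind() {
        case reflect.Ptr:
            if field.IsNil() {
                continue
            }
            args = append(args, fmt.Sprintf("--%s=%v", name, field.Elem().Interface()))
        case reflect.Slice:
            if field.Len() == 0 {
                continue
            }
            var parts []string
            for j := 0; j < field.Len(); j++ {
                parts = append(parts, fmt.Sprint(field.Index(j).Interface()))
            }
            args = append(args, fmt.Sprintf("--%s=%s", name, strings.Join(parts, ",")))
        }
    }
    return args
}

func main() {
    master := true
    logLevel := int32(4)
    f := &exampleFlags{Master: &master, LogLevel: &logLevel, Channels: []string{"s3://example-state/channel"}}
    fmt.Println(strings.Join(buildFlags(f), " "))
    // --master=true --v=4 --channels=s3://example-state/channel
}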

View File

@ -1,41 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: minimal.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/minimal.example.com
docker:
version: 18.06.3
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: master-us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: master-us-test-1a
name: events
iam:
legacy: false
kubernetesVersion: v1.13.6
masterInternalName: api.internal.minimal.example.com
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
kubenet: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a

View File

@ -1,428 +0,0 @@
file: /usr/bin/docker-runc
mode: +i
---
contents: |-
DOCKER_OPTS=--ip-masq=false --iptables=false --log-driver=json-file --log-level=info --log-opt=max-file=5 --log-opt=max-size=10m --storage-driver=overlay2,overlay,aufs
DOCKER_NOFILE=1000000
path: /etc/sysconfig/docker
type: file
---
contents: |
#!/bin/bash
# Copyright 2019 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is intended to be run periodically, to check the health
# of docker. If it detects a failure, it will restart docker using systemctl.
healthcheck() {
if output=`timeout 60 docker network ls`; then
echo "$output" | fgrep -qw host || {
echo "docker 'host' network missing"
return 1
}
else
echo "docker returned $?"
return 1
fi
}
if healthcheck; then
echo "docker healthy"
exit 0
fi
echo "docker failed"
echo "Giving docker 30 seconds grace before restarting"
sleep 30
if healthcheck; then
echo "docker recovered"
exit 0
fi
echo "docker still unresponsive; triggering docker restart"
systemctl stop docker
echo "wait all tcp sockets to close"
sleep `cat /proc/sys/net/ipv4/tcp_fin_timeout`
sleep 10
systemctl start docker
echo "Waiting 120 seconds to give docker time to start"
sleep 60
if healthcheck; then
echo "docker recovered"
exit 0
fi
echo "docker still failing"
mode: "0755"
path: /opt/kops/bin/docker-healthcheck
type: file
---
contents:
Asset:
AssetPath: docker/docker
Key: docker
mode: "0755"
path: /usr/bin/docker
type: file
---
contents:
Asset:
AssetPath: docker/docker-containerd
Key: docker-containerd
mode: "0755"
path: /usr/bin/docker-containerd
type: file
---
contents:
Asset:
AssetPath: docker/docker-containerd-ctr
Key: docker-containerd-ctr
mode: "0755"
path: /usr/bin/docker-containerd-ctr
type: file
---
contents:
Asset:
AssetPath: docker/docker-containerd-shim
Key: docker-containerd-shim
mode: "0755"
path: /usr/bin/docker-containerd-shim
type: file
---
contents:
Asset:
AssetPath: docker/docker-init
Key: docker-init
mode: "0755"
path: /usr/bin/docker-init
type: file
---
contents:
Asset:
AssetPath: docker/docker-proxy
Key: docker-proxy
mode: "0755"
path: /usr/bin/docker-proxy
type: file
---
contents:
Asset:
AssetPath: docker/docker-runc
Key: docker-runc
mode: "0755"
path: /usr/bin/docker-runc
type: file
---
contents:
Asset:
AssetPath: docker/dockerd
Key: dockerd
mode: "0755"
path: /usr/bin/dockerd
type: file
---
contents: |2
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2018 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
path: /usr/share/doc/docker/apache.txt
type: file
---
GID: null
Name: docker
System: true
---
Name: docker-healthcheck.service
definition: |
[Unit]
Description=Run docker-healthcheck once
Documentation=https://kops.sigs.k8s.io
[Service]
Type=oneshot
ExecStart=/opt/kops/bin/docker-healthcheck
[Install]
WantedBy=multi-user.target
enabled: true
manageState: true
running: true
smartRestart: true
---
Name: docker-healthcheck.timer
definition: |
[Unit]
Description=Trigger docker-healthcheck periodically
Documentation=https://kops.sigs.k8s.io
[Timer]
OnUnitInactiveSec=10s
Unit=docker-healthcheck.service
[Install]
WantedBy=multi-user.target
enabled: true
manageState: true
running: true
smartRestart: true
---
Name: docker.service
definition: |
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
Requires=docker.socket
[Service]
EnvironmentFile=/etc/sysconfig/docker
EnvironmentFile=/etc/environment
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// "$DOCKER_OPTS"
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
enabled: true
manageState: true
running: true
smartRestart: true
---
Name: docker.socket
definition: |
[Unit]
Description=Docker Socket for the API
PartOf=docker.service
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
enabled: true
manageState: true
running: true
smartRestart: true

View File

@ -10,7 +10,7 @@ spec:
cloudProvider: aws
configBase: memfs://logflags.example.com/minimal.example.com
docker:
version: 17.03.2
version: 19.03.11
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
@ -22,7 +22,7 @@ spec:
name: events
iam:
legacy: false
kubernetesVersion: v1.17.0
kubernetesVersion: v1.18.0
masterInternalName: api.internal.logflags.example.com
masterPublicName: api.logflags.example.com
networkCIDR: 172.20.0.0/16

View File

@ -1,41 +0,0 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: minimal.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/minimal.example.com
docker:
version: 17.03.2
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: master-us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: master-us-test-1a
name: events
iam:
legacy: false
kubernetesVersion: v1.14.6
masterInternalName: api.internal.minimal.example.com
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
kubenet: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a

View File

@ -1,428 +0,0 @@
file: /usr/bin/docker-runc
mode: +i
---
contents: |-
DOCKER_OPTS=--ip-masq=false --iptables=false --log-driver=json-file --log-level=info --log-opt=max-file=5 --log-opt=max-size=10m --storage-driver=overlay2,overlay,aufs
DOCKER_NOFILE=1000000
path: /etc/sysconfig/docker
type: file
---
contents: |
#!/bin/bash
# Copyright 2019 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is intended to be run periodically, to check the health
# of docker. If it detects a failure, it will restart docker using systemctl.
healthcheck() {
if output=`timeout 60 docker network ls`; then
echo "$output" | fgrep -qw host || {
echo "docker 'host' network missing"
return 1
}
else
echo "docker returned $?"
return 1
fi
}
if healthcheck; then
echo "docker healthy"
exit 0
fi
echo "docker failed"
echo "Giving docker 30 seconds grace before restarting"
sleep 30
if healthcheck; then
echo "docker recovered"
exit 0
fi
echo "docker still unresponsive; triggering docker restart"
systemctl stop docker
echo "wait all tcp sockets to close"
sleep `cat /proc/sys/net/ipv4/tcp_fin_timeout`
sleep 10
systemctl start docker
echo "Waiting 120 seconds to give docker time to start"
sleep 60
if healthcheck; then
echo "docker recovered"
exit 0
fi
echo "docker still failing"
mode: "0755"
path: /opt/kops/bin/docker-healthcheck
type: file
---
contents:
Asset:
AssetPath: docker/docker
Key: docker
mode: "0755"
path: /usr/bin/docker
type: file
---
contents:
Asset:
AssetPath: docker/docker-containerd
Key: docker-containerd
mode: "0755"
path: /usr/bin/docker-containerd
type: file
---
contents:
Asset:
AssetPath: docker/docker-containerd-ctr
Key: docker-containerd-ctr
mode: "0755"
path: /usr/bin/docker-containerd-ctr
type: file
---
contents:
Asset:
AssetPath: docker/docker-containerd-shim
Key: docker-containerd-shim
mode: "0755"
path: /usr/bin/docker-containerd-shim
type: file
---
contents:
Asset:
AssetPath: docker/docker-init
Key: docker-init
mode: "0755"
path: /usr/bin/docker-init
type: file
---
contents:
Asset:
AssetPath: docker/docker-proxy
Key: docker-proxy
mode: "0755"
path: /usr/bin/docker-proxy
type: file
---
contents:
Asset:
AssetPath: docker/docker-runc
Key: docker-runc
mode: "0755"
path: /usr/bin/docker-runc
type: file
---
contents:
Asset:
AssetPath: docker/dockerd
Key: dockerd
mode: "0755"
path: /usr/bin/dockerd
type: file
---
contents: |2
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2018 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
path: /usr/share/doc/docker/apache.txt
type: file
---
GID: null
Name: docker
System: true
---
Name: docker-healthcheck.service
definition: |
[Unit]
Description=Run docker-healthcheck once
Documentation=https://kops.sigs.k8s.io
[Service]
Type=oneshot
ExecStart=/opt/kops/bin/docker-healthcheck
[Install]
WantedBy=multi-user.target
enabled: true
manageState: true
running: true
smartRestart: true
---
Name: docker-healthcheck.timer
definition: |
[Unit]
Description=Trigger docker-healthcheck periodically
Documentation=https://kops.sigs.k8s.io
[Timer]
OnUnitInactiveSec=10s
Unit=docker-healthcheck.service
[Install]
WantedBy=multi-user.target
enabled: true
manageState: true
running: true
smartRestart: true
---
Name: docker.service
definition: |
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
Requires=docker.socket
[Service]
EnvironmentFile=/etc/sysconfig/docker
EnvironmentFile=/etc/environment
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// "$DOCKER_OPTS"
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
enabled: true
manageState: true
running: true
smartRestart: true
---
Name: docker.socket
definition: |
[Unit]
Description=Docker Socket for the API
PartOf=docker.service
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
enabled: true
manageState: true
running: true
smartRestart: true

View File

@ -26,7 +26,7 @@ spec:
iam: {}
kubelet:
hostnameOverride: master.hostname.invalid
kubernetesVersion: v1.17.0
kubernetesVersion: v1.18.0
masterInternalName: api.internal.minimal.example.com
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16

View File

@ -540,12 +540,10 @@ type EtcdProviderType string
const (
EtcdProviderTypeManager EtcdProviderType = "Manager"
EtcdProviderTypeLegacy EtcdProviderType = "Legacy"
)
var SupportedEtcdProviderTypes = []string{
string(EtcdProviderTypeManager),
string(EtcdProviderTypeLegacy),
}
// EtcdClusterSpec is the etcd cluster specification

View File

@ -53,10 +53,6 @@ func awsValidateExternalCloudControllerManager(cluster *kops.Cluster) (allErrs f
return allErrs
}
fldPath := field.NewPath("spec", "externalCloudControllerManager")
if !cluster.IsKubernetesGTE("1.18") {
allErrs = append(allErrs, field.Forbidden(fldPath, "AWS external CCM requires kubernetes 1.18+"))
}
if !hasAWSEBSCSIDriver(c) {
allErrs = append(allErrs, field.Forbidden(fldPath,
"AWS external CCM cannot be used without enabling spec.cloudConfig.AWSEBSCSIDriver."))

View File

@ -637,13 +637,10 @@ func validateKubelet(k *kops.KubeletConfigSpec, c *kops.Cluster, kubeletPath *fi
if k.TopologyManagerPolicy != "" {
allErrs = append(allErrs, IsValidValue(kubeletPath.Child("topologyManagerPolicy"), &k.TopologyManagerPolicy, []string{"none", "best-effort", "restricted", "single-numa-node"})...)
if !c.IsKubernetesGTE("1.18") {
allErrs = append(allErrs, field.Forbidden(kubeletPath.Child("topologyManagerPolicy"), "topologyManagerPolicy requires at least Kubernetes 1.18"))
}
}
if k.EnableCadvisorJsonEndpoints != nil {
if c.IsKubernetesLT("1.18") || c.IsKubernetesGTE("1.21") {
if c.IsKubernetesGTE("1.21") {
allErrs = append(allErrs, field.Forbidden(kubeletPath.Child("enableCadvisorJsonEndpoints"), "enableCadvisorJsonEndpoints requires Kubernetes 1.18-1.20"))
}
}
@ -920,9 +917,6 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe
hasCiliumCluster := false
for _, cluster := range c.EtcdClusters {
if cluster.Name == "cilium" {
if cluster.Provider == kops.EtcdProviderTypeLegacy {
allErrs = append(allErrs, field.Invalid(fldPath.Root().Child("etcdClusters"), kops.EtcdProviderTypeLegacy, "Legacy etcd provider is not supported for the cilium cluster"))
}
hasCiliumCluster = true
break
}
@ -1002,9 +996,6 @@ func validateEtcdClusterSpec(spec kops.EtcdClusterSpec, c *kops.Cluster, fieldPa
if spec.Provider != "" {
value := string(spec.Provider)
allErrs = append(allErrs, IsValidValue(fieldPath.Child("provider"), &value, kops.SupportedEtcdProviderTypes)...)
if spec.Provider == kops.EtcdProviderTypeLegacy && c.IsKubernetesGTE("1.18") {
allErrs = append(allErrs, field.Forbidden(fieldPath.Child("provider"), "support for Legacy mode removed as of Kubernetes 1.18"))
}
}
if len(spec.Members) == 0 {
allErrs = append(allErrs, field.Required(fieldPath.Child("etcdMembers"), "No members defined in etcd cluster"))
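With the Legacy-specific checks removed, provider validation reduces to membership in the supported provider list plus a non-empty member list. A self-contained sketch of that shape, with error handling collapsed to plain errors rather than the apimachinery field.ErrorList used by kops, and a hypothetical supported list:

package main

import (
    "errors"
    "fmt"
)

// Simplified sketch of the remaining etcd cluster validation.
type etcdClusterSpec struct {
    Provider string
    Members  []string
}

func validateEtcdClusterSpec(spec etcdClusterSpec, supported []string) error {
    if spec.Provider != "" {
        ok := false
        for _, s := range supported {
            if spec.Provider == s {
                ok = true
                break
            }
        }
        if !ok {
            return fmt.Errorf("unsupported etcd provider %q", spec.Provider)
        }
    }
    if len(spec.Members) == 0 {
        return errors.New("no members defined in etcd cluster")
    }
    return nil
}

func main() {
    supported := []string{"Manager"} // hypothetical: only etcd-manager remains
    fmt.Println(validateEtcdClusterSpec(etcdClusterSpec{Provider: "Legacy", Members: []string{"a"}}, supported))
    fmt.Println(validateEtcdClusterSpec(etcdClusterSpec{Provider: "Manager", Members: []string{"a"}}, supported))
}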

View File

@ -192,27 +192,6 @@ func TestSetClusterFields(t *testing.T) {
},
},
},
{
Fields: []string{
"cluster.spec.etcdClusters[*].provider=Manager",
},
Input: kops.Cluster{
Spec: kops.ClusterSpec{
EtcdClusters: []kops.EtcdClusterSpec{
{Name: "one", Provider: kops.EtcdProviderTypeLegacy},
{Name: "two"},
},
},
},
Output: kops.Cluster{
Spec: kops.ClusterSpec{
EtcdClusters: []kops.EtcdClusterSpec{
{Name: "one", Provider: kops.EtcdProviderTypeManager},
{Name: "two", Provider: kops.EtcdProviderTypeManager},
},
},
},
},
{
Fields: []string{
"cluster.spec.etcdClusters[*].image=etcd-manager:v1.2.3",

View File

@ -175,11 +175,7 @@ func BuildKubecfg(cluster *kops.Cluster, keyStore fi.Keystore, secretStore fi.Se
}
basicAuthEnabled := false
if !util.IsKubernetesGTE("1.18", *k8sVersion) {
if cluster.Spec.KubeAPIServer == nil || cluster.Spec.KubeAPIServer.DisableBasicAuth == nil || !*cluster.Spec.KubeAPIServer.DisableBasicAuth {
basicAuthEnabled = true
}
} else if !util.IsKubernetesGTE("1.19", *k8sVersion) {
if !util.IsKubernetesGTE("1.19", *k8sVersion) {
if cluster.Spec.KubeAPIServer != nil && cluster.Spec.KubeAPIServer.DisableBasicAuth != nil && !*cluster.Spec.KubeAPIServer.DisableBasicAuth {
basicAuthEnabled = true
}

View File

@ -229,10 +229,7 @@ func (b *BootstrapScriptBuilder) ResourceNodeUp(c *fi.ModelBuilderContext, ig *k
keypairs = append(keypairs, "etcd-client-cilium")
}
if ig.HasAPIServer() {
keypairs = append(keypairs, "apiserver-aggregator-ca", "service-account")
if b.UseEtcdManager() {
keypairs = append(keypairs, "etcd-clients-ca")
}
keypairs = append(keypairs, "apiserver-aggregator-ca", "service-account", "etcd-clients-ca")
} else if !model.UseKopsControllerForNodeBootstrap(b.Cluster) {
keypairs = append(keypairs, "kubelet", "kube-proxy")
if b.Cluster.Spec.Networking.Kuberouter != nil {

View File

@ -24,8 +24,6 @@ import (
"k8s.io/kops/upup/pkg/fi/loader"
)
const DefaultBackupImage = "k8s.gcr.io/etcdadm/etcd-backup:3.0.20210707"
// EtcdOptionsBuilder adds options for etcd to the model
type EtcdOptionsBuilder struct {
*OptionsContext
@ -67,49 +65,9 @@ func (b *EtcdOptionsBuilder) BuildOptions(o interface{}) error {
return fmt.Errorf("unexpected etcd version %q", c.Version)
}
// We enable TLS if we're running EtcdManager
if c.Provider == kops.EtcdProviderTypeManager {
c.EnableEtcdTLS = true
c.EnableTLSAuth = true
}
// We remap the etcd manager image when we build the manifest,
// but we need to map the standalone images here because protokube launches them
if c.Provider == kops.EtcdProviderTypeLegacy {
// remap etcd image
{
image := c.Image
if image == "" {
image = fmt.Sprintf("k8s.gcr.io/etcd:%s", c.Version)
}
if image != "" {
image, err := b.AssetBuilder.RemapImage(image)
if err != nil {
return fmt.Errorf("unable to remap container %q: %v", image, err)
}
c.Image = image
}
}
// remap backup manager image
if c.Backups != nil {
image := c.Backups.Image
if image == "" {
image = DefaultBackupImage
}
if image != "" {
image, err := b.AssetBuilder.RemapImage(image)
if err != nil {
return fmt.Errorf("unable to remap container %q: %v", image, err)
}
c.Backups.Image = image
}
}
}
}
return nil
}
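The removed block remapped the standalone etcd and backup images because protokube launched those containers directly; etcd-manager remaps its own images when its manifest is built. A rough sketch of what image remapping means here, assuming a cluster-configured container registry (hypothetical values; the real logic in pkg/assets also handles digests and file repositories):

package main

import (
    "fmt"
    "strings"
)

// Hypothetical RemapImage-style behavior: swap the default registry prefix
// for a mirror so air-gapped registries can serve the image.
func remapImage(image, containerRegistry string) string {
    if containerRegistry == "" {
        return image
    }
    parts := strings.SplitN(image, "/", 2)
    if len(parts) == 2 {
        return containerRegistry + "/" + parts[1]
    }
    return containerRegistry + "/" + image
}

func main() {
    fmt.Println(remapImage("k8s.gcr.io/etcd:3.4.13", ""))                            // unchanged
    fmt.Println(remapImage("k8s.gcr.io/etcd:3.4.13", "registry.example.com/mirror")) // remapped
}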

View File

@ -57,10 +57,6 @@ var _ fi.ModelBuilder = &EtcdManagerBuilder{}
// Build creates the tasks
func (b *EtcdManagerBuilder) Build(c *fi.ModelBuilderContext) error {
for _, etcdCluster := range b.Cluster.Spec.EtcdClusters {
if etcdCluster.Provider != kops.EtcdProviderTypeManager {
continue
}
name := etcdCluster.Name
version := etcdCluster.Version

View File

@ -41,10 +41,6 @@ func (b *EtcdManagerOptionsBuilder) BuildOptions(o interface{}) error {
for i := range clusterSpec.EtcdClusters {
etcdCluster := &clusterSpec.EtcdClusters[i]
if etcdCluster.Provider != kops.EtcdProviderTypeManager {
continue
}
if etcdCluster.Backups == nil {
etcdCluster.Backups = &kops.EtcdBackupSpec{}
}

View File

@ -310,17 +310,6 @@ func (b *KopsModelContext) UseNetworkLoadBalancer() bool {
return b.Cluster.Spec.API.LoadBalancer.Class == kops.LoadBalancerClassNetwork
}
// UseEtcdManager checks to see if etcd manager is enabled
func (b *KopsModelContext) UseEtcdManager() bool {
for _, x := range b.Cluster.Spec.EtcdClusters {
if x.Provider == kops.EtcdProviderTypeManager {
return true
}
}
return false
}
// UseEtcdTLS checks to see if etcd tls is enabled
func (b *KopsModelContext) UseEtcdTLS() bool {
for _, x := range b.Cluster.Spec.EtcdClusters {

View File

@ -17,7 +17,6 @@ limitations under the License.
package model
import (
"fmt"
"strings"
"k8s.io/kops/pkg/rbac"
@ -72,63 +71,6 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error {
c.AddTask(t)
}
if b.UseEtcdManager() {
// We generate keypairs in the etcdmanager task itself
} else if b.UseEtcdTLS() {
// check if we need to generate certificates for etcd peers certificates from a different CA?
// @question i think we should use another KeyStore for this, perhaps registering a EtcdKeyStore given
// that mutual tls used to verify between the peers we don't want certificates for kubernetes able to act as a peer.
// For clients assuming we are using etcdv3 is can switch on user authentication and map the common names for auth.
servingNames := []string{fmt.Sprintf("*.internal.%s", b.ClusterName()), "localhost", "127.0.0.1"}
// @question should wildcard's be here instead of generating per node. If we ever provide the
// ability to resize the master, this will become a blocker
c.AddTask(&fitasks.Keypair{
AlternateNames: servingNames,
Lifecycle: b.Lifecycle,
Name: fi.String("etcd"),
Subject: "cn=etcd",
// TODO: Can this be "server" now that we're not using it for peer connectivity?
Type: "clientServer",
Signer: defaultCA,
})
// For peer authentication, the same cert is used both as a client
// cert and as a server cert (which is unusual). Moreover, etcd
// 3.2 introduces some breaking changes to certificate validation
// where it tries to match any IP or DNS names to the client IP
// (including reverse DNS lookups!) We _could_ include a wildcard
// reverse DNS name e.g. *.ec2.internal for EC2, but it seems
// better just to list the names that we expect peer connectivity
// to happen on.
var peerNames []string
for _, etcdCluster := range b.Cluster.Spec.EtcdClusters {
prefix := "etcd-" + etcdCluster.Name + "-"
if prefix == "etcd-main-" {
prefix = "etcd-"
}
for _, m := range etcdCluster.Members {
peerNames = append(peerNames, prefix+m.Name+".internal."+b.ClusterName())
}
}
c.AddTask(&fitasks.Keypair{
AlternateNames: peerNames,
Lifecycle: b.Lifecycle,
Name: fi.String("etcd-peer"),
Subject: "cn=etcd-peer",
Type: "clientServer",
Signer: defaultCA,
})
c.AddTask(&fitasks.Keypair{
Name: fi.String("etcd-client"),
Lifecycle: b.Lifecycle,
Subject: "cn=etcd-client",
Type: "client",
Signer: defaultCA,
})
}
if b.KopsModelContext.Cluster.Spec.Networking.Kuberouter != nil && !b.UseKopsControllerForNodeBootstrap() {
t := &fitasks.Keypair{
Name: fi.String("kube-router"),

View File

@ -24,9 +24,6 @@ const (
// Used by e.g. dns-controller
Generic = 10001
// LegacyEtcd is the user id for the etcd user under the legacy provider
LegacyEtcd = 10002
// AWSAuthenticator is the user-id for the aws-iam-authenticator (built externally)
AWSAuthenticator = 10000

View File

@ -63,20 +63,17 @@ func main() {
// run is responsible for running the protokube service controller
func run() error {
var zones []string
var applyTaints, initializeRBAC, containerized, master, tlsAuth bool
var cloud, clusterID, dnsServer, dnsProviderID, dnsInternalSuffix, gossipSecret, gossipListen, gossipProtocol, gossipSecretSecondary, gossipListenSecondary, gossipProtocolSecondary string
var flagChannels, tlsCert, tlsKey, tlsCA, peerCert, peerKey, peerCA string
var etcdBackupImage, etcdBackupStore, etcdImageSource, etcdElectionTimeout, etcdHeartbeatInterval string
var initializeRBAC, containerized, master bool
var cloud, clusterID, dnsProviderID, dnsInternalSuffix, gossipSecret, gossipListen, gossipProtocol, gossipSecretSecondary, gossipListenSecondary, gossipProtocolSecondary string
var flagChannels string
var dnsUpdateInterval int
flag.BoolVar(&applyTaints, "apply-taints", applyTaints, "Apply taints to nodes based on the role")
flag.BoolVar(&containerized, "containerized", containerized, "Set if we are running containerized.")
flag.BoolVar(&initializeRBAC, "initialize-rbac", initializeRBAC, "Set if we should initialize RBAC")
flag.BoolVar(&master, "master", master, "Whether or not this node is a master")
flag.StringVar(&cloud, "cloud", "aws", "CloudProvider we are using (aws,digitalocean,gce,openstack)")
flag.StringVar(&clusterID, "cluster-id", clusterID, "Cluster ID")
flag.StringVar(&dnsInternalSuffix, "dns-internal-suffix", dnsInternalSuffix, "DNS suffix for internal domain names")
flag.StringVar(&dnsServer, "dns-server", dnsServer, "DNS Server")
flags.IntVar(&dnsUpdateInterval, "dns-update-interval", 5, "Configure interval at which to update DNS records.")
flag.StringVar(&flagChannels, "channels", flagChannels, "channels to install")
flag.StringVar(&gossipProtocol, "gossip-protocol", "mesh", "mesh/memberlist")
@ -85,23 +82,8 @@ func run() error {
flag.StringVar(&gossipProtocolSecondary, "gossip-protocol-secondary", "memberlist", "mesh/memberlist")
flag.StringVar(&gossipListenSecondary, "gossip-listen-secondary", fmt.Sprintf("0.0.0.0:%d", wellknownports.ProtokubeGossipMemberlist), "address:port on which to bind for gossip")
flags.StringVar(&gossipSecretSecondary, "gossip-secret-secondary", gossipSecret, "Secret to use to secure gossip")
flag.StringVar(&peerCA, "peer-ca", peerCA, "Path to a file containing the peer ca in PEM format")
flag.StringVar(&peerCert, "peer-cert", peerCert, "Path to a file containing the peer certificate")
flag.StringVar(&peerKey, "peer-key", peerKey, "Path to a file containing the private key for the peers")
flag.BoolVar(&tlsAuth, "tls-auth", tlsAuth, "Indicates the peers and client should enforce authentication via CA")
flag.StringVar(&tlsCA, "tls-ca", tlsCA, "Path to a file containing the ca for client certificates")
flag.StringVar(&tlsCert, "tls-cert", tlsCert, "Path to a file containing the certificate for etcd server")
flag.StringVar(&tlsKey, "tls-key", tlsKey, "Path to a file containing the private key for etcd server")
flags.StringSliceVarP(&zones, "zone", "z", []string{}, "Configure permitted zones and their mappings")
flags.StringVar(&dnsProviderID, "dns", "aws-route53", "DNS provider we should use (aws-route53, google-clouddns, digitalocean)")
flags.StringVar(&etcdBackupImage, "etcd-backup-image", "", "Set to override the image for (experimental) etcd backups")
flags.StringVar(&etcdBackupStore, "etcd-backup-store", "", "Set to enable (experimental) etcd backups")
flags.StringVar(&etcdImageSource, "etcd-image", "k8s.gcr.io/etcd:2.2.1", "Etcd Source Container Registry")
flags.StringVar(&etcdElectionTimeout, "etcd-election-timeout", etcdElectionTimeout, "time in ms for an election to timeout")
flags.StringVar(&etcdHeartbeatInterval, "etcd-heartbeat-interval", etcdHeartbeatInterval, "time in ms of a heartbeat interval")
manageEtcd := false
flag.BoolVar(&manageEtcd, "manage-etcd", manageEtcd, "Set to manage etcd (deprecated in favor of etcd-manager)")
bootstrapMasterNodeLabels := false
flag.BoolVar(&bootstrapMasterNodeLabels, "bootstrap-master-node-labels", bootstrapMasterNodeLabels, "Bootstrap the labels for master nodes (required in k8s 1.16)")
@ -239,7 +221,6 @@ func run() error {
}
protokube.RootFS = rootfs
protokube.Containerized = containerized
var dnsProvider protokube.DNSProvider
@ -384,42 +365,23 @@ func run() error {
removeDNSRecords(removeDNSNames, dnsProvider)
}()
modelDir := "model/etcd"
var channels []string
if flagChannels != "" {
channels = strings.Split(flagChannels, ",")
}
k := &protokube.KubeBoot{
ApplyTaints: applyTaints,
BootstrapMasterNodeLabels: bootstrapMasterNodeLabels,
NodeName: nodeName,
Channels: channels,
DNS: dnsProvider,
ManageEtcd: manageEtcd,
EtcdBackupImage: etcdBackupImage,
EtcdBackupStore: etcdBackupStore,
EtcdImageSource: etcdImageSource,
EtcdElectionTimeout: etcdElectionTimeout,
EtcdHeartbeatInterval: etcdHeartbeatInterval,
InitializeRBAC: initializeRBAC,
InternalDNSSuffix: dnsInternalSuffix,
InternalIP: internalIP,
Kubernetes: protokube.NewKubernetesContext(),
Master: master,
ModelDir: modelDir,
PeerCA: peerCA,
PeerCert: peerCert,
PeerKey: peerKey,
TLSAuth: tlsAuth,
TLSCA: tlsCA,
TLSCert: tlsCert,
TLSKey: tlsKey,
}
k.Init(volumes)
if dnsProvider != nil {
go dnsProvider.Run()
}

View File

@ -1,4 +1,4 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
@ -8,11 +8,8 @@ go_library(
"azure_volume.go",
"channels.go",
"do_volume.go",
"etcd_cluster.go",
"etcd_manifest.go",
"gce_volume.go",
"gossipdns.go",
"helper.go",
"kube_boot.go",
"kube_boot_task.go",
"kube_context.go",
@ -22,15 +19,12 @@ go_library(
"rbac.go",
"tainter.go",
"utils.go",
"volume_mounter.go",
"volumes.go",
],
importpath = "k8s.io/kops/protokube/pkg/protokube",
visibility = ["//visibility:public"],
deps = [
"//dns-controller/pkg/dns:go_default_library",
"//pkg/k8scodecs:go_default_library",
"//pkg/kubemanifest:go_default_library",
"//pkg/nodelabels:go_default_library",
"//protokube/pkg/etcd:go_default_library",
"//protokube/pkg/gossip:go_default_library",
@ -41,13 +35,11 @@ go_library(
"//protokube/pkg/gossip/do:go_default_library",
"//protokube/pkg/gossip/gce:go_default_library",
"//protokube/pkg/gossip/openstack:go_default_library",
"//protokube/pkg/hostmount:go_default_library",
"//upup/pkg/fi/cloudup/aliup:go_default_library",
"//upup/pkg/fi/cloudup/awsup:go_default_library",
"//upup/pkg/fi/cloudup/azure:go_default_library",
"//upup/pkg/fi/cloudup/gce:go_default_library",
"//upup/pkg/fi/cloudup/openstack:go_default_library",
"//util/pkg/exec:go_default_library",
"//vendor/cloud.google.com/go/compute/metadata:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2020-06-01/network:go_default_library",
@ -67,24 +59,10 @@ go_library(
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/mount-utils:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/nsenter:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["volume_mounter_test.go"],
embed = [":go_default_library"],
deps = ["//protokube/pkg/etcd:go_default_library"],
)

View File

@ -1,344 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protokube
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/k8scodecs"
"k8s.io/kops/protokube/pkg/etcd"
)
// EtcdCluster is the configuration for the etcd cluster
type EtcdCluster struct {
// ClientPort is the incoming ports for client
ClientPort int
// ClusterName is the cluster name
ClusterName string
// ClusterToken is the cluster token
ClusterToken string
// CPURequest is the pod request for CPU
CPURequest *resource.Quantity
// MemoryRequest is the pod request for Memory
MemoryRequest *resource.Quantity
// DataDirName is the path to the data directory
DataDirName string
// ImageSource is the docker image to use
ImageSource string
// LogFile is the location of the logfile
LogFile string
// Me is the node that we will be in the cluster
Me *EtcdNode
// Nodes is a list of nodes in the cluster (including the self-node, Me)
Nodes []*EtcdNode
// PeerPort is the port for peers to connect
PeerPort int
// PodName is the name given to the pod
PodName string
// ProxyMode indicates we are running in proxy mode
ProxyMode bool
// Spec is the specification found from the volumes
Spec *etcd.EtcdClusterSpec
// VolumeMountPath is the mount path
VolumeMountPath string
// TLSAuth indicates we should enforce peer and client verification
TLSAuth bool
// TLSCA is the path to a client ca for etcd clients
TLSCA string
// TLSCert is the path to a client certificate for etcd
TLSCert string
// TLSKey is the path to a client private key for etcd
TLSKey string
// PeerCA is the path to a peer ca for etcd
PeerCA string
// PeerCert is the path to a peer certificate for etcd
PeerCert string
// PeerKey is the path to a peer private key for etcd
PeerKey string
// ElectionTimeout is the leader election timeout
ElectionTimeout string
// HeartbeatInterval is the heartbeat interval
HeartbeatInterval string
// BackupImage is the image to use for backing up etcd
BackupImage string
// BackupStore is a VFS path for backing up etcd
BackupStore string
}
// EtcdNode is a definition for the etcd node
type EtcdNode struct {
Name string
InternalName string
}
// EtcdController defines the etcd controller
type EtcdController struct {
kubeBoot *KubeBoot
//volume *Volume
//volumeSpec *etcd.EtcdClusterSpec
cluster *EtcdCluster
}
// newEtcdController creates and returns a new etcd controller
func newEtcdController(kubeBoot *KubeBoot, v *Volume, spec *etcd.EtcdClusterSpec) (*EtcdController, error) {
k := &EtcdController{
kubeBoot: kubeBoot,
}
// prepare parse variables for cpu and memory requests
cpuRequest100 := resource.MustParse("100m")
cpuRequest200 := resource.MustParse("200m")
memoryRequest := resource.MustParse("100Mi")
cluster := &EtcdCluster{
CPURequest: &cpuRequest100,
MemoryRequest: &memoryRequest,
ClientPort: 4001,
ClusterName: "etcd-" + spec.ClusterKey,
DataDirName: "data-" + spec.ClusterKey,
ElectionTimeout: kubeBoot.EtcdElectionTimeout,
HeartbeatInterval: kubeBoot.EtcdHeartbeatInterval,
ImageSource: kubeBoot.EtcdImageSource,
PeerCA: kubeBoot.PeerCA,
PeerCert: kubeBoot.PeerCert,
PeerKey: kubeBoot.PeerKey,
PeerPort: 2380,
PodName: "etcd-server-" + spec.ClusterKey,
Spec: spec,
TLSAuth: kubeBoot.TLSAuth,
TLSCA: kubeBoot.TLSCA,
TLSCert: kubeBoot.TLSCert,
TLSKey: kubeBoot.TLSKey,
VolumeMountPath: v.Mountpoint,
}
// We used to build this through text files ... it turns out to just be more complicated than code!
switch spec.ClusterKey {
case "main":
cluster.ClusterName = "etcd"
cluster.DataDirName = "data"
cluster.PodName = "etcd-server"
cluster.CPURequest = &cpuRequest200
// Because we can only specify a single EtcdBackupStore at the moment, we only backup main, not events
cluster.BackupImage = kubeBoot.EtcdBackupImage
cluster.BackupStore = kubeBoot.EtcdBackupStore
case "events":
cluster.ClientPort = 4002
cluster.PeerPort = 2381
default:
return nil, fmt.Errorf("unknown etcd cluster key %q", spec.ClusterKey)
}
k.cluster = cluster
return k, nil
}
// RunSyncLoop is responsible for managing the etcd sync loop
func (k *EtcdController) RunSyncLoop() {
for {
if err := k.syncOnce(); err != nil {
klog.Warningf("error during attempt to bootstrap (will sleep and retry): %v", err)
}
time.Sleep(1 * time.Minute)
}
}
func (k *EtcdController) syncOnce() error {
return k.cluster.configure(k.kubeBoot)
}
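RunSyncLoop above is a level-triggered reconcile loop: it calls syncOnce, logs any failure, and retries on a fixed interval rather than exiting. A minimal, self-contained sketch of that pattern (the resync function and interval here are illustrative, not taken from kops):

package main

import (
    "fmt"
    "time"
)

// runSyncLoop retries resync forever, logging errors and sleeping between attempts.
func runSyncLoop(resync func() error, interval time.Duration) {
    for {
        if err := resync(); err != nil {
            fmt.Printf("error during resync (will sleep and retry): %v\n", err)
        }
        time.Sleep(interval)
    }
}

func main() {
    runSyncLoop(func() error { return nil }, time.Minute)
}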
func (c *EtcdCluster) configure(k *KubeBoot) error {
name := c.ClusterName
if !strings.HasPrefix(name, "etcd") {
// For sanity, and to avoid collisions in directories / dns
return fmt.Errorf("unexpected name for etcd cluster (must start with etcd): %q", name)
}
if c.LogFile == "" {
c.LogFile = "/var/log/" + name + ".log"
}
if c.PodName == "" {
c.PodName = c.ClusterName
}
err := touchFile(pathFor(c.LogFile))
if err != nil {
return fmt.Errorf("error touching log-file %q: %v", c.LogFile, err)
}
if c.ClusterToken == "" {
c.ClusterToken = "etcd-cluster-token-" + name
}
// By default we use 100Mi for etcd memory
if c.MemoryRequest == nil || c.MemoryRequest.IsZero() {
memoryRequest, err := resource.ParseQuantity("100Mi")
if err != nil {
return fmt.Errorf("error parsing memory request for etcd (%s): %v", "100Mi", err)
}
c.MemoryRequest = &memoryRequest
}
// By default we use 100m for etcd cpu, unless the name is 'main', then we use 200m by default
if c.CPURequest == nil || c.CPURequest.IsZero() {
if c.ClusterName == "main" {
cpuRequest, err := resource.ParseQuantity("200m")
if err != nil {
return fmt.Errorf("error parsing cpu request for etcd (%s): %v", "200m", err)
}
c.CPURequest = &cpuRequest
} else {
cpuRequest, err := resource.ParseQuantity("100m")
if err != nil {
return fmt.Errorf("error parsing cpu request for etcd (%s): %v", "100m", err)
}
c.CPURequest = &cpuRequest
}
}
var nodes []*EtcdNode
for _, nodeName := range c.Spec.NodeNames {
name := name + "-" + nodeName
fqdn := k.BuildInternalDNSName(name)
node := &EtcdNode{
Name: name,
InternalName: fqdn,
}
nodes = append(nodes, node)
if nodeName == c.Spec.NodeName {
c.Me = node
if err = k.CreateInternalDNSNameRecord(fqdn); err != nil {
return fmt.Errorf("error mapping internal dns name for %q: %v", name, err)
}
}
}
c.Nodes = nodes
if c.Me == nil {
return fmt.Errorf("my node name %s not found in cluster %v", c.Spec.NodeName, strings.Join(c.Spec.NodeNames, ","))
}
pod := BuildEtcdManifest(c)
manifest, err := k8scodecs.ToVersionedYaml(pod)
if err != nil {
return fmt.Errorf("error marshaling pod to yaml: %v", err)
}
// Time to write the manifest!
// To avoid a possible race condition where the manifest survives a reboot but the volume
// is not mounted or not yet mounted, we use a symlink from /etc/kubernetes/manifests/<name>.manifest
// to a file on the volume itself. Thus kubelet cannot launch the manifest unless the volume is mounted.
manifestSource := "/etc/kubernetes/manifests/" + name + ".manifest"
manifestTargetDir := path.Join(c.VolumeMountPath, "k8s.io", "manifests")
manifestTarget := path.Join(manifestTargetDir, name+".manifest")
writeManifest := true
{
// See if the manifest has changed
existingManifest, err := ioutil.ReadFile(pathFor(manifestTarget))
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("error reading manifest file %q: %v", manifestTarget, err)
}
} else if bytes.Equal(existingManifest, manifest) {
writeManifest = false
} else {
klog.Infof("Need to update manifest file: %q", manifestTarget)
}
}
createSymlink := true
{
// See if the symlink is correct
stat, err := os.Lstat(pathFor(manifestSource))
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("error reading manifest symlink %q: %v", manifestSource, err)
}
} else if (stat.Mode() & os.ModeSymlink) != 0 {
// It's a symlink, make sure the target matches
target, err := os.Readlink(pathFor(manifestSource))
if err != nil {
return fmt.Errorf("error reading manifest symlink %q: %v", manifestSource, err)
}
if target == manifestTarget {
createSymlink = false
} else {
klog.Infof("Need to update manifest symlink (wrong target %q): %q", target, manifestSource)
}
} else {
klog.Infof("Need to update manifest symlink (not a symlink): %q", manifestSource)
}
}
if createSymlink || writeManifest {
err = os.Remove(pathFor(manifestSource))
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("error removing etcd manifest symlink (for strict creation) %q: %v", manifestSource, err)
}
err = os.MkdirAll(pathFor(manifestTargetDir), 0755)
if err != nil {
return fmt.Errorf("error creating directories for etcd manifest %q: %v", manifestTargetDir, err)
}
err = ioutil.WriteFile(pathFor(manifestTarget), manifest, 0644)
if err != nil {
return fmt.Errorf("error writing etcd manifest %q: %v", manifestTarget, err)
}
// Note: no pathFor on the target, because it's a symlink and we want it to evaluate on the host
err = os.Symlink(manifestTarget, pathFor(manifestSource))
if err != nil {
return fmt.Errorf("error creating etcd manifest symlink %q -> %q: %v", manifestSource, manifestTarget, err)
}
klog.Infof("Updated etcd manifest: %s", manifestSource)
}
return nil
}
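The manifest handling above relies on a simple indirection: the static pod manifest is written onto the mounted etcd volume, and /etc/kubernetes/manifests only holds a symlink to it, so kubelet cannot launch the pod before the volume is mounted. A stripped-down sketch of that pattern, with illustrative paths and without the rootfs remapping done by pathFor:

package main

import (
    "fmt"
    "os"
    "path"
)

// writeManifestViaSymlink writes manifest onto the volume and points the static
// pod directory at it via a symlink, so the manifest is only visible to kubelet
// once the volume is actually mounted.
func writeManifestViaSymlink(name string, manifest []byte, volumeMountPath string) error {
    targetDir := path.Join(volumeMountPath, "k8s.io", "manifests")
    target := path.Join(targetDir, name+".manifest")
    source := "/etc/kubernetes/manifests/" + name + ".manifest"

    if err := os.MkdirAll(targetDir, 0755); err != nil {
        return fmt.Errorf("error creating manifest directory: %v", err)
    }
    if err := os.WriteFile(target, manifest, 0644); err != nil {
        return fmt.Errorf("error writing manifest: %v", err)
    }
    // Recreate the symlink so it always points at the file on the volume.
    if err := os.Remove(source); err != nil && !os.IsNotExist(err) {
        return fmt.Errorf("error removing old symlink: %v", err)
    }
    return os.Symlink(target, source)
}

func main() {
    if err := writeManifestViaSymlink("etcd-server", []byte("apiVersion: v1\nkind: Pod\n"), "/mnt/master-vol"); err != nil {
        fmt.Println(err)
    }
}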
// isTLS indicates the etcd cluster should be configured to use tls
func (c *EtcdCluster) isTLS() bool {
return notEmpty(c.TLSCert) && notEmpty(c.TLSKey)
}
// String returns the debug string
func (c *EtcdCluster) String() string {
return DebugString(c)
}
func (e *EtcdNode) String() string {
return DebugString(e)
}

View File

@ -1,311 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protokube
import (
"fmt"
"path/filepath"
"strings"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kops/pkg/kubemanifest"
"k8s.io/kops/util/pkg/exec"
)
// BuildEtcdManifest creates the pod spec, based on the etcd cluster
func BuildEtcdManifest(c *EtcdCluster) *v1.Pod {
pod := &v1.Pod{}
pod.APIVersion = "v1"
pod.Kind = "Pod"
pod.Name = c.PodName
pod.Namespace = "kube-system"
pod.Labels = map[string]string{"k8s-app": c.PodName}
pod.Spec.HostNetwork = true
// dereference our resource requests if they exist
// cpu
var cpuRequest resource.Quantity
if c.CPURequest != nil {
cpuRequest = *c.CPURequest
}
// memory
var memoryRequest resource.Quantity
if c.MemoryRequest != nil {
memoryRequest = *c.MemoryRequest
}
{
container := v1.Container{
Name: "etcd-container",
Image: c.ImageSource,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpuRequest,
v1.ResourceMemory: memoryRequest,
},
},
Command: exec.WithTee("/usr/local/bin/etcd", []string{}, "/var/log/etcd.log"),
}
// build the environment variables for etcd service
container.Env = buildEtcdEnvironmentOptions(c)
container.LivenessProbe = &v1.Probe{
InitialDelaySeconds: 15,
TimeoutSeconds: 15,
}
// ensure we have the correct probe scheme
if c.isTLS() {
container.LivenessProbe.TCPSocket = &v1.TCPSocketAction{
Host: "127.0.0.1",
Port: intstr.FromInt(c.ClientPort),
}
} else {
container.LivenessProbe.HTTPGet = &v1.HTTPGetAction{
Host: "127.0.0.1",
Port: intstr.FromInt(c.ClientPort),
Path: "/health",
Scheme: v1.URISchemeHTTP,
}
}
container.Ports = append(container.Ports, v1.ContainerPort{
Name: "serverport",
ContainerPort: int32(c.PeerPort),
HostPort: int32(c.PeerPort),
})
container.Ports = append(container.Ports, v1.ContainerPort{
Name: "clientport",
ContainerPort: int32(c.ClientPort),
HostPort: int32(c.ClientPort),
})
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: "varetcdata",
MountPath: "/var/etcd/" + c.DataDirName,
ReadOnly: false,
})
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: "varlogetcd",
MountPath: "/var/log/etcd.log",
ReadOnly: false,
})
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: "hosts",
MountPath: "/etc/hosts",
ReadOnly: true,
})
// add the host path mount to the pod spec
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: "varetcdata",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: c.VolumeMountPath + "/var/etcd/" + c.DataDirName,
},
},
})
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: "varlogetcd",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: c.LogFile,
},
},
})
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: "hosts",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/hosts",
},
},
})
// @check if tls is enabled and mount the directory. It might be worth considering
// if we use our own directory in /srv, i.e. /srv/etcd, rather than the default /srv/kubernetes
if c.isTLS() {
for _, dirname := range buildCertificateDirectories(c) {
normalized := strings.Replace(dirname, "/", "", -1)
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
Name: normalized,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: dirname,
},
},
})
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: normalized,
MountPath: dirname,
ReadOnly: true,
})
}
}
pod.Spec.Containers = append(pod.Spec.Containers, container)
}
if c.BackupStore != "" && c.BackupImage != "" {
backupContainer := buildEtcdBackupManagerContainer(c)
pod.Spec.Containers = append(pod.Spec.Containers, *backupContainer)
}
kubemanifest.MarkPodAsCritical(pod)
kubemanifest.MarkPodAsClusterCritical(pod)
return pod
}
// buildEtcdEnvironmentOptions is responsible for building the environment variables for etcd
// @question should we perhaps make this version specific in prep for v3 support?
func buildEtcdEnvironmentOptions(c *EtcdCluster) []v1.EnvVar {
var options []v1.EnvVar
// @check if we are using TLS
scheme := "http"
if c.isTLS() {
scheme = "https"
}
// add the default setting for masters - http or https
options = append(options, []v1.EnvVar{
{Name: "ETCD_NAME", Value: c.Me.Name},
{Name: "ETCD_DATA_DIR", Value: "/var/etcd/" + c.DataDirName},
{Name: "ETCD_LISTEN_PEER_URLS", Value: fmt.Sprintf("%s://0.0.0.0:%d", scheme, c.PeerPort)},
{Name: "ETCD_LISTEN_CLIENT_URLS", Value: fmt.Sprintf("%s://0.0.0.0:%d", scheme, c.ClientPort)},
{Name: "ETCD_ADVERTISE_CLIENT_URLS", Value: fmt.Sprintf("%s://%s:%d", scheme, c.Me.InternalName, c.ClientPort)},
{Name: "ETCD_INITIAL_ADVERTISE_PEER_URLS", Value: fmt.Sprintf("%s://%s:%d", scheme, c.Me.InternalName, c.PeerPort)},
{Name: "ETCD_INITIAL_CLUSTER_STATE", Value: "new"},
{Name: "ETCD_INITIAL_CLUSTER_TOKEN", Value: c.ClusterToken}}...)
// add timeout/heartbeat settings
if notEmpty(c.ElectionTimeout) {
options = append(options, v1.EnvVar{Name: "ETCD_ELECTION_TIMEOUT", Value: c.ElectionTimeout})
}
if notEmpty(c.HeartbeatInterval) {
options = append(options, v1.EnvVar{Name: "ETCD_HEARTBEAT_INTERVAL", Value: c.HeartbeatInterval})
}
// @check if we are using peer certificates
if notEmpty(c.PeerCA) {
options = append(options, []v1.EnvVar{
{Name: "ETCD_PEER_TRUSTED_CA_FILE", Value: c.PeerCA}}...)
}
if notEmpty(c.PeerCert) {
options = append(options, v1.EnvVar{Name: "ETCD_PEER_CERT_FILE", Value: c.PeerCert})
}
if notEmpty(c.PeerKey) {
options = append(options, v1.EnvVar{Name: "ETCD_PEER_KEY_FILE", Value: c.PeerKey})
}
if notEmpty(c.TLSCA) {
options = append(options, v1.EnvVar{Name: "ETCD_TRUSTED_CA_FILE", Value: c.TLSCA})
}
if notEmpty(c.TLSCert) {
options = append(options, v1.EnvVar{Name: "ETCD_CERT_FILE", Value: c.TLSCert})
}
if notEmpty(c.TLSKey) {
options = append(options, v1.EnvVar{Name: "ETCD_KEY_FILE", Value: c.TLSKey})
}
if c.isTLS() {
if c.TLSAuth {
options = append(options, v1.EnvVar{Name: "ETCD_CLIENT_CERT_AUTH", Value: "true"})
options = append(options, v1.EnvVar{Name: "ETCD_PEER_CLIENT_CERT_AUTH", Value: "true"})
}
}
// @step: generate the initial cluster
var hosts []string
for _, node := range c.Nodes {
hosts = append(hosts, node.Name+"="+fmt.Sprintf("%s://%s:%d", scheme, node.InternalName, c.PeerPort))
}
options = append(options, v1.EnvVar{Name: "ETCD_INITIAL_CLUSTER", Value: strings.Join(hosts, ",")})
return options
}
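For example, for a three-node cluster with members node0, node1 and node2 and the default peer port, the generated ETCD_INITIAL_CLUSTER value is node0=https://node0.internal:2380,node1=https://node1.internal:2380,node2=https://node2.internal:2380 when TLS is enabled (http URLs otherwise), as the YAML fixtures further down illustrate.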
// buildCertificateDirectories generates a list of the base directories in which the certificates are located,
// so we can map them in as volumes. They will probably all be placed into /srv/kubernetes, but we keep it
// generic.
func buildCertificateDirectories(c *EtcdCluster) []string {
tracked := make(map[string]bool)
for _, x := range []string{c.TLSCA, c.TLSCert, c.TLSKey, c.PeerCA, c.PeerCert, c.PeerKey} {
dir := filepath.ToSlash(filepath.Dir(x))
if x == "" || tracked[dir] {
continue
}
tracked[dir] = true
}
var list []string
for k := range tracked {
list = append(list, k)
}
return list
}
// notEmpty is just a prettier version of string != ""
func notEmpty(v string) bool {
return v != ""
}
// buildEtcdBackupManagerContainer builds a container for the standalone etcd backup manager
func buildEtcdBackupManagerContainer(c *EtcdCluster) *v1.Container {
command := []string{"/etcd-backup"}
command = append(command, "--backup-store", c.BackupStore)
command = append(command, "--cluster-name", c.ClusterName)
command = append(command, "--data-dir", "/var/etcd/"+c.DataDirName)
if c.isTLS() {
command = append(command, "--client-url", "https://127.0.0.1:4001")
command = append(command, "--client-ca-file", c.TLSCA)
command = append(command, "--client-cert-file", c.TLSCert)
command = append(command, "--client-key-file", c.TLSKey)
}
container := v1.Container{
Name: "etcd-backup",
Image: c.BackupImage,
Command: command,
}
// TODO: TLS options
// TODO: Liveness probe?
// volume should already have been registered
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: "varetcdata",
MountPath: "/var/etcd/" + c.DataDirName,
ReadOnly: false,
})
if c.isTLS() {
for _, dirname := range buildCertificateDirectories(c) {
normalized := strings.Replace(dirname, "/", "", -1)
// pod volume already registered for etcd container above
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: normalized,
MountPath: dirname,
ReadOnly: true,
})
}
}
return &container
}
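A minimal usage sketch of BuildEtcdManifest, mirroring the integration test below: populate an EtcdCluster (the values here are illustrative; a real cluster is assembled by newEtcdController), build the pod, and render it to versioned YAML.

package main

import (
    "fmt"

    "k8s.io/kops/pkg/k8scodecs"
    "k8s.io/kops/protokube/pkg/protokube"
)

func main() {
    // Illustrative single-node cluster; TLS, backups and resource requests omitted.
    cluster := &protokube.EtcdCluster{
        ClusterName:     "etcd",
        PodName:         "etcd-server",
        DataDirName:     "data",
        ImageSource:     "k8s.gcr.io/etcd:2.2.1",
        ClientPort:      4001,
        PeerPort:        2380,
        LogFile:         "/var/log/etcd.log",
        VolumeMountPath: "/mnt/master-vol",
        Me:              &protokube.EtcdNode{Name: "node0", InternalName: "node0.internal"},
        Nodes:           []*protokube.EtcdNode{{Name: "node0", InternalName: "node0.internal"}},
    }

    pod := protokube.BuildEtcdManifest(cluster)
    yaml, err := k8scodecs.ToVersionedYaml(pod)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(yaml))
}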

View File

@ -1,45 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protokube
import (
"fmt"
"os"
)
// touchFile does what it says on the tin: it touches a file
func touchFile(p string) error {
_, err := os.Lstat(p)
if err == nil {
return nil
}
if !os.IsNotExist(err) {
return fmt.Errorf("error getting state of file %q: %v", p, err)
}
f, err := os.Create(p)
if err != nil {
return fmt.Errorf("error touching file %q: %v", p, err)
}
if err = f.Close(); err != nil {
return fmt.Errorf("error closing touched file %q: %v", p, err)
}
return nil
}

View File

@ -20,7 +20,6 @@ import (
"context"
"fmt"
"net"
"path/filepath"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -28,8 +27,6 @@ import (
)
var (
// Containerized indicates the etcd is containerized
Containerized = false
// RootFS is the root fs path
RootFS = "/"
)
@ -44,45 +41,13 @@ type KubeBoot struct {
InternalDNSSuffix string
// InternalIP is the internal ip address of the node
InternalIP net.IP
// ApplyTaints controls whether we set taints based on the master label
ApplyTaints bool
// DNS is the dns provider
DNS DNSProvider
// ModelDir is the model directory
ModelDir string
// Kubernetes holds a kubernetes client
Kubernetes *KubernetesContext
// Master indicates we are a master node
Master bool
// ManageEtcd is true if we should manage etcd.
// Deprecated in favor of etcd-manager.
ManageEtcd bool
// EtcdBackupImage is the image to use for backing up etcd
EtcdBackupImage string
// EtcdBackupStore is the VFS path to which we should backup etcd
EtcdBackupStore string
// Etcd container registry location.
EtcdImageSource string
// EtcdElectionTimeout is the leader election timeout
EtcdElectionTimeout string
// EtcdHeartbeatInterval is the heartbeat interval
EtcdHeartbeatInterval string
// TLSAuth indicates we should enforce peer and client verification
TLSAuth bool
// TLSCA is the path to a client ca for etcd
TLSCA string
// TLSCert is the path to a tls certificate for etcd
TLSCert string
// TLSKey is the path to a tls private key for etcd
TLSKey string
// PeerCA is the path to a peer ca for etcd
PeerCA string
// PeerCert is the path to a peer certificate for etcd
PeerCert string
// PeerKey is the path to a peer private key for etcd
PeerKey string
// BootstrapMasterNodeLabels controls the initial application of node labels to our node
// The node is found by matching NodeName
BootstrapMasterNodeLabels bool
@ -90,15 +55,6 @@ type KubeBoot struct {
// NodeName is the name of our node as it will be registered in k8s.
// Used by BootstrapMasterNodeLabels
NodeName string
volumeMounter *VolumeMountController
etcdControllers map[string]*EtcdController
}
// Init is responsible for initializing the controllers
func (k *KubeBoot) Init(volumesProvider Volumes) {
k.volumeMounter = newVolumeMountController(volumesProvider)
k.etcdControllers = make(map[string]*EtcdController)
}
// RunSyncLoop is responsible for provision the cluster
@ -130,42 +86,12 @@ func (k *KubeBoot) RunSyncLoop() {
}
func (k *KubeBoot) syncOnce(ctx context.Context) error {
if k.Master && k.ManageEtcd {
// attempt to mount the volumes
volumes, err := k.volumeMounter.mountMasterVolumes()
if err != nil {
return err
}
for _, v := range volumes {
for _, etcdSpec := range v.Info.EtcdClusters {
key := etcdSpec.ClusterKey + "::" + etcdSpec.NodeName
etcdController := k.etcdControllers[key]
if etcdController == nil {
klog.Infof("Found etcd cluster spec on volume %q: %v", v.ID, etcdSpec)
etcdController, err := newEtcdController(k, v, etcdSpec)
if err != nil {
klog.Warningf("error building etcd controller: %v", err)
} else {
k.etcdControllers[key] = etcdController
go etcdController.RunSyncLoop()
}
}
}
}
}
if k.Master {
if k.BootstrapMasterNodeLabels {
if err := bootstrapMasterNodeLabels(ctx, k.Kubernetes, k.NodeName); err != nil {
klog.Warningf("error bootstrapping master node labels: %v", err)
}
}
if k.ApplyTaints {
if err := applyMasterTaints(ctx, k.Kubernetes); err != nil {
klog.Warningf("error updating master taints: %v", err)
}
}
if k.InitializeRBAC {
if err := applyRBAC(ctx, k.Kubernetes); err != nil {
klog.Warningf("error initializing rbac: %v", err)
@ -188,17 +114,6 @@ func pathFor(hostPath string) string {
return RootFS + hostPath[1:]
}
func pathForSymlinks(hostPath string) string {
path := pathFor(hostPath)
symlink, err := filepath.EvalSymlinks(path)
if err != nil {
return path
}
return symlink
}
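pathFor maps an absolute host path onto the mounted root filesystem, which matters when protokube runs containerized with the host root bind-mounted under RootFS. A tiny illustrative sketch of that mapping (re-implemented locally, since pathFor is unexported; the /rootfs/ value is an assumption for the example):

package main

import "fmt"

// hostPathFor mirrors the RootFS + hostPath[1:] mapping used by pathFor.
func hostPathFor(rootFS, hostPath string) string {
    return rootFS + hostPath[1:]
}

func main() {
    // With the host filesystem mounted at /rootfs/, /var/log/etcd.log
    // resolves to /rootfs/var/log/etcd.log.
    fmt.Println(hostPathFor("/rootfs/", "/var/log/etcd.log"))
}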
func (k *KubeBoot) String() string {
return DebugString(k)
}

View File

@ -67,10 +67,8 @@ const (
KubeProxyClusterRoleName = "system:node-proxier"
clusterRoleKind = "ClusterRole"
roleKind = "Role"
serviceAccountKind = "ServiceAccount"
rbacAPIGroup = "rbac.authorization.k8s.io"
//anonymousUser = "system:anonymous"
// Constants for what we name our ServiceAccounts with limited access to the cluster in case of RBAC
KubeDNSServiceAccountName = "kube-dns"
@ -106,38 +104,6 @@ func createServiceAccounts(ctx context.Context, clientset kubernetes.Interface)
func createClusterRoleBindings(ctx context.Context, clientset *kubernetes.Clientset) error {
clusterRoleBindings := []rbac.ClusterRoleBinding{
//{
// ObjectMeta: metav1.ObjectMeta{
// Name: "kubeadm:kubelet-bootstrap",
// },
// RoleRef: rbac.RoleRef{
// APIGroup: rbacAPIGroup,
// Kind: clusterRoleKind,
// Name: NodeBootstrapperClusterRoleName,
// },
// Subjects: []rbac.Subject{
// {
// Kind: "Group",
// Name: bootstrapapi.BootstrapGroup,
// },
// },
//},
//{
// ObjectMeta: metav1.ObjectMeta{
// Name: nodeAutoApproveBootstrap,
// },
// RoleRef: rbac.RoleRef{
// APIGroup: rbacAPIGroup,
// Kind: clusterRoleKind,
// Name: nodeAutoApproveBootstrap,
// },
// Subjects: []rbac.Subject{
// {
// Kind: "Group",
// Name: bootstrapapi.BootstrapGroup,
// },
// },
//},
{
ObjectMeta: metav1.ObjectMeta{
Name: "kubeadm:node-proxier",

View File

@ -16,18 +16,6 @@ limitations under the License.
package protokube
import (
"context"
"encoding/json"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
)
type nodePatch struct {
Metadata *nodePatchMetadata `json:"metadata,omitempty"`
Spec *nodePatchSpec `json:"spec,omitempty"`
@ -41,71 +29,3 @@ type nodePatchMetadata struct {
type nodePatchSpec struct {
Unschedulable *bool `json:"unschedulable,omitempty"`
}
// TaintsAnnotationKey represents the key of taints data (json serialized)
// in the Annotations of a Node.
// Note that this is for k8s <= 1.5 only
const TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
// applyMasterTaints finds masters that have not yet been tainted, and applies the master taint.
// Once all supported kubelet versions accept the --register-with-taints flag introduced in 1.6.0, this can probably
// go away entirely. It also sets the unschedulable flag to false, so pods (with a toleration) can target the node
func applyMasterTaints(ctx context.Context, kubeContext *KubernetesContext) error {
client, err := kubeContext.KubernetesClient()
if err != nil {
return err
}
options := metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{"kubernetes.io/role": "master"}).String(),
}
klog.V(2).Infof("Querying k8s for nodes with selector %q", options.LabelSelector)
nodes, err := client.CoreV1().Nodes().List(ctx, options)
if err != nil {
return fmt.Errorf("error querying nodes: %v", err)
}
taint := []v1.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}}
taintJSON, err := json.Marshal(taint)
if err != nil {
return fmt.Errorf("error serializing taint: %v", err)
}
for i := range nodes.Items {
node := &nodes.Items[i]
nodeTaintJSON := node.Annotations[TaintsAnnotationKey]
if nodeTaintJSON != "" {
if nodeTaintJSON != string(taintJSON) {
klog.Infof("Node %q is registered with taint: %v", node.Name, nodeTaintJSON)
}
continue
}
nodePatchMetadata := &nodePatchMetadata{
Annotations: map[string]string{TaintsAnnotationKey: string(taintJSON)},
}
unschedulable := false
nodePatchSpec := &nodePatchSpec{
Unschedulable: &unschedulable,
}
nodePatch := &nodePatch{
Metadata: nodePatchMetadata,
Spec: nodePatchSpec,
}
nodePatchJson, err := json.Marshal(nodePatch)
if err != nil {
return fmt.Errorf("error building node patch: %v", err)
}
klog.V(2).Infof("sending patch for node %q: %q", node.Name, string(nodePatchJson))
_, err = client.CoreV1().Nodes().Patch(ctx, node.Name, types.StrategicMergePatchType, nodePatchJson, metav1.PatchOptions{})
if err != nil {
// TODO: Should we keep going?
return fmt.Errorf("error applying patch to node: %v", err)
}
}
return nil
}
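The patch sent above is a strategic merge patch that both records the taint annotation and flips unschedulable to false. A standalone sketch of the payload it produces (types re-declared locally for illustration; field tags are assumptions following the usage above):

package main

import (
    "encoding/json"
    "fmt"
)

type nodePatchMetadata struct {
    Annotations map[string]string `json:"annotations,omitempty"`
}

type nodePatchSpec struct {
    Unschedulable *bool `json:"unschedulable,omitempty"`
}

type nodePatch struct {
    Metadata *nodePatchMetadata `json:"metadata,omitempty"`
    Spec     *nodePatchSpec     `json:"spec,omitempty"`
}

func main() {
    // Serialized form of []v1.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}}.
    taintJSON := `[{"key":"dedicated","value":"master","effect":"NoSchedule"}]`
    unschedulable := false
    patch := &nodePatch{
        Metadata: &nodePatchMetadata{
            Annotations: map[string]string{"scheduler.alpha.kubernetes.io/taints": taintJSON},
        },
        Spec: &nodePatchSpec{Unschedulable: &unschedulable},
    }
    b, _ := json.Marshal(patch)
    // Prints the JSON body passed to Nodes().Patch with a strategic merge patch type.
    fmt.Println(string(b))
}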

View File

@ -1,315 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protokube
import (
"fmt"
"os"
"sort"
"time"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"k8s.io/kops/protokube/pkg/hostmount"
"k8s.io/mount-utils"
utilexec "k8s.io/utils/exec"
"k8s.io/utils/nsenter"
)
type VolumeMountController struct {
mounted map[string]*Volume
provider Volumes
}
func newVolumeMountController(provider Volumes) *VolumeMountController {
c := &VolumeMountController{}
c.mounted = make(map[string]*Volume)
c.provider = provider
return c
}
func (k *VolumeMountController) mountMasterVolumes() ([]*Volume, error) {
// TODO: mount ephemeral volumes (particularly on AWS)?
// Mount master volumes
attached, err := k.attachMasterVolumes()
if err != nil {
return nil, fmt.Errorf("unable to attach master volumes: %v", err)
}
for _, v := range attached {
existing := k.mounted[v.ID]
if existing != nil {
continue
}
klog.V(2).Infof("Master volume %q is attached at %q", v.ID, v.LocalDevice)
mountpoint := "/mnt/master-" + v.ID
// On ContainerOS, we mount to /mnt/disks instead (/mnt is readonly)
_, err := os.Stat(pathFor("/mnt/disks"))
if err != nil {
if !os.IsNotExist(err) {
return nil, fmt.Errorf("error checking for /mnt/disks: %v", err)
}
} else {
mountpoint = "/mnt/disks/master-" + v.ID
}
klog.Infof("Doing safe-format-and-mount of %s to %s", v.LocalDevice, mountpoint)
fstype := ""
err = k.safeFormatAndMount(v, mountpoint, fstype)
if err != nil {
klog.Warningf("unable to mount master volume: %q", err)
continue
}
klog.Infof("mounted master volume %q on %s", v.ID, mountpoint)
v.Mountpoint = mountpoint
k.mounted[v.ID] = v
}
var volumes []*Volume
for _, v := range k.mounted {
volumes = append(volumes, v)
}
return volumes, nil
}
func (k *VolumeMountController) safeFormatAndMount(volume *Volume, mountpoint string, fstype string) error {
// Wait for the device to show up
device := ""
for {
found, err := k.provider.FindMountedVolume(volume)
if err != nil {
return err
}
if found != "" {
device = found
break
}
klog.Infof("Waiting for volume %q to be attached", volume.ID)
time.Sleep(1 * time.Second)
}
klog.Infof("Found volume %q mounted at device %q", volume.ID, device)
safeFormatAndMount := &mount.SafeFormatAndMount{}
if Containerized {
ne, err := nsenter.NewNsenter(pathFor("/"), utilexec.New())
if err != nil {
return fmt.Errorf("error building ns-enter helper: %v", err)
}
// Build mount & exec implementations that execute in the host namespaces
safeFormatAndMount.Interface = hostmount.New(ne)
safeFormatAndMount.Exec = ne
// Note that we don't use PathFor for operations going through safeFormatAndMount,
// because our mounter and nsenter will operate in the host
} else {
safeFormatAndMount.Interface = mount.New("")
safeFormatAndMount.Exec = utilexec.New()
}
// Check if it is already mounted
// TODO: can we now use IsLikelyNotMountPoint or IsMountPointMatch instead here
mounts, err := safeFormatAndMount.List()
if err != nil {
return fmt.Errorf("error listing existing mounts: %v", err)
}
var existing []*mount.MountPoint
for i := range mounts {
m := &mounts[i]
klog.V(8).Infof("found existing mount: %v", m)
// Note: when containerized, we still list mounts in the host, so we don't need to call pathFor(mountpoint)
if m.Path == mountpoint {
existing = append(existing, m)
}
}
// Mount only if isn't mounted already
if len(existing) == 0 {
options := []string{}
klog.Infof("Creating mount directory %q", pathFor(mountpoint))
if err := os.MkdirAll(pathFor(mountpoint), 0750); err != nil {
return err
}
klog.Infof("Mounting device %q on %q", device, mountpoint)
err = safeFormatAndMount.FormatAndMount(device, mountpoint, fstype, options)
if err != nil {
return fmt.Errorf("error formatting and mounting disk %q on %q: %v", device, mountpoint, err)
}
} else {
klog.Infof("Device already mounted on %q, verifying it is our device", mountpoint)
if len(existing) != 1 {
klog.Infof("Existing mounts unexpected")
for i := range mounts {
m := &mounts[i]
klog.Infof("%s\t%s", m.Device, m.Path)
}
return fmt.Errorf("found multiple existing mounts of %q at %q", device, mountpoint)
}
klog.Infof("Found existing mount of %q at %q", device, mountpoint)
}
// If we're containerized we also want to mount the device (again) into our container
// We could also do this with mount propagation, but this is simple
if Containerized {
source := pathFor(device)
target := pathFor(mountpoint)
options := []string{}
mounter := mount.New("")
mountedDevice, _, err := mount.GetDeviceNameFromMount(mounter, target)
if err != nil {
return fmt.Errorf("error checking for mounts of %s inside container: %v", target, err)
}
if mountedDevice != "" {
// We check that it is the correct device. We also tolerate /dev/X as well as /root/dev/X and any symlinks to them
if mountedDevice != source && mountedDevice != device && pathFor(mountedDevice) != pathForSymlinks(device) {
return fmt.Errorf("device already mounted at %s, but is %s and we want %s or %s", target, mountedDevice, source, device)
}
} else {
klog.Infof("mounting inside container: %s -> %s", source, target)
if err := mounter.Mount(source, target, fstype, options); err != nil {
return fmt.Errorf("error mounting %s inside container at %s: %v", source, target, err)
}
}
}
return nil
}
func (k *VolumeMountController) attachMasterVolumes() ([]*Volume, error) {
volumes, err := k.provider.FindVolumes()
if err != nil {
return nil, err
}
var tryAttach []*Volume
var attached []*Volume
for _, v := range volumes {
if doNotMountVolume(v) {
continue
}
if v.AttachedTo == "" {
tryAttach = append(tryAttach, v)
}
if v.LocalDevice != "" {
attached = append(attached, v)
}
}
if len(tryAttach) == 0 {
return attached, nil
}
// Make sure we don't try to mount multiple volumes from the same cluster
attachedClusters := sets.NewString()
for _, attached := range attached {
for _, etcdCluster := range attached.Info.EtcdClusters {
attachedClusters.Insert(etcdCluster.ClusterKey)
}
}
// Mount in a consistent order
sort.Stable(ByEtcdClusterName(tryAttach))
// Actually attempt the mounting
for _, v := range tryAttach {
alreadyMounted := ""
for _, etcdCluster := range v.Info.EtcdClusters {
if attachedClusters.Has(etcdCluster.ClusterKey) {
alreadyMounted = etcdCluster.ClusterKey
}
}
if alreadyMounted != "" {
klog.V(2).Infof("Skipping mount of master volume %q, because etcd cluster %q is already mounted", v.ID, alreadyMounted)
continue
}
klog.V(2).Infof("Trying to mount master volume: %q", v.ID)
err := k.provider.AttachVolume(v)
if err != nil {
// We are racing with other instances here; this can happen
klog.Warningf("Error attaching volume %q: %v", v.ID, err)
} else {
if v.LocalDevice == "" {
klog.Fatalf("AttachVolume did not set LocalDevice")
}
attached = append(attached, v)
// Mark this cluster as attached now
for _, etcdCluster := range v.Info.EtcdClusters {
attachedClusters.Insert(etcdCluster.ClusterKey)
}
}
}
klog.V(2).Infof("Currently attached volumes: %v", attached)
return attached, nil
}
// doNotMountVolume returns true if the volume should be skipped because it has no etcd cluster associated
func doNotMountVolume(v *Volume) bool {
if len(v.Info.EtcdClusters) == 0 {
klog.Warningf("Local device: %q, volume id: %q is being skipped and will not mounted, since it does not have a etcd cluster", v.LocalDevice, v.ID)
return true
}
return false
}
// ByEtcdClusterName sorts volumes so that we mount in a consistent order,
// and in addition we try to mount the main etcd volume before the events etcd volume
type ByEtcdClusterName []*Volume
func (a ByEtcdClusterName) Len() int {
return len(a)
}
func (a ByEtcdClusterName) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func (a ByEtcdClusterName) Less(i, j int) bool {
nameI := ""
if len(a[i].Info.EtcdClusters) > 0 {
nameI = a[i].Info.EtcdClusters[0].ClusterKey
}
nameJ := ""
if len(a[j].Info.EtcdClusters) > 0 {
nameJ = a[j].Info.EtcdClusters[0].ClusterKey
}
// reverse so "main" comes before "events"
return nameI > nameJ
}

View File

@ -1,100 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protokube
import (
"sort"
"strings"
"testing"
"k8s.io/kops/protokube/pkg/etcd"
)
func getIDs(volumes []*Volume) string {
var ids []string
for _, v := range volumes {
ids = append(ids, v.ID)
}
return strings.Join(ids, ",")
}
func Test_VolumeSort_ByEtcdClusterName(t *testing.T) {
v1 := &Volume{}
v1.ID = "1"
v2 := &Volume{}
v2.ID = "2"
v3 := &Volume{}
v3.ID = "3"
volumes := []*Volume{v1, v2, v3}
sort.Stable(ByEtcdClusterName(volumes))
if getIDs(volumes) != "1,2,3" {
t.Fatalf("Fail at sort 1: %v", getIDs(volumes))
}
v2.Info.EtcdClusters = append(v2.Info.EtcdClusters, &etcd.EtcdClusterSpec{ClusterKey: "events"})
sort.Stable(ByEtcdClusterName(volumes))
if getIDs(volumes) != "2,1,3" {
t.Fatalf("Fail at sort 2: %v", getIDs(volumes))
}
v3.Info.EtcdClusters = append(v3.Info.EtcdClusters, &etcd.EtcdClusterSpec{ClusterKey: "main"})
sort.Stable(ByEtcdClusterName(volumes))
if getIDs(volumes) != "3,2,1" {
t.Fatalf("Fail at sort 3: %v", getIDs(volumes))
}
}
func Test_Mount_Volumes(t *testing.T) {
grid := []struct {
volume *Volume
doNotMount bool
description string
}{
{
&Volume{
LocalDevice: "/dev/xvda",
},
true,
"xda without a etcd cluster, do not mount",
},
{
&Volume{
LocalDevice: "/dev/xvdb",
Info: VolumeInfo{
EtcdClusters: []*etcd.EtcdClusterSpec{
{
ClusterKey: "foo",
NodeName: "bar",
},
},
},
},
false,
"xvdb with an etcd cluster, mount",
},
}
for _, g := range grid {
d := doNotMountVolume(g.volume)
if d && !g.doNotMount {
t.Fatalf("volume mount should not have mounted: %s, description: %s", g.volume.LocalDevice, g.description)
}
}
}

View File

@ -1,13 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_test")
go_test(
name = "go_default_test",
srcs = ["integration_test.go"],
data = glob(["main/*"]), #keep
deps = [
"//pkg/apis/kops:go_default_library",
"//pkg/diff:go_default_library",
"//pkg/k8scodecs:go_default_library",
"//protokube/pkg/protokube:go_default_library",
],
)

View File

@ -1,79 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"io/ioutil"
"path"
"strings"
"testing"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/diff"
"k8s.io/kops/pkg/k8scodecs"
"k8s.io/kops/protokube/pkg/protokube"
)
func TestBuildEtcdManifest(t *testing.T) {
cs := []struct {
TestFile string
}{
{TestFile: "non_tls.yaml"},
{TestFile: "tls.yaml"},
{TestFile: "etcd_env_vars.yaml"},
}
for i, x := range cs {
cluster, expected := loadTestIntegration(t, path.Join("main", x.TestFile))
definition := protokube.BuildEtcdManifest(cluster)
generated, err := k8scodecs.ToVersionedYaml(definition)
if err != nil {
t.Errorf("case %d, unable to convert to yaml, error: %v", i, err)
continue
}
rendered := strings.TrimSpace(string(generated))
expected = strings.TrimSpace(expected)
rendered = strings.Replace(rendered, "\r", "", -1)
expected = strings.Replace(expected, "\r", "", -1)
if rendered != expected {
diffString := diff.FormatDiff(expected, string(rendered))
t.Logf("diff:\n%s\n", diffString)
t.Errorf("case %d, failed, manifest differed from expected", i)
}
}
}
// loadTestIntegration is responsible for loading the integration files
func loadTestIntegration(t *testing.T, path string) (*protokube.EtcdCluster, string) {
content, err := ioutil.ReadFile(path)
if err != nil {
t.Fatalf("unable to read in the integretion file: %s, error: %v", path, err)
}
documents := strings.Split(string(content), "---")
if len(documents) != 2 {
t.Fatalf("unable to find both documents in the integration file: %s, error %v:", path, err)
}
// read the specification into a etcd spec
cluster := &protokube.EtcdCluster{}
err = kops.ParseRawYaml([]byte(documents[0]), cluster)
if err != nil {
t.Fatalf("error parsing etcd specification in file: %s, error: %v", path, err)
}
return cluster, documents[1]
}

View File

@ -1,110 +0,0 @@
clientPort: 4001
clusterName: etcd-main
clusterToken: token-main
cpuRequest: "200m"
memoryRequest: "100Mi"
dataDirName: data-main
imageSource: k8s.gcr.io/etcd:2.2.1
logFile: /var/log/etcd.log
peerPort: 2380
podName: etcd-server-main
volumeMountPath: /mnt/main
electionTimeout: "1000"
heartbeatInterval: "100"
me:
name: node0
internalName: node0.internal
nodes:
- name: node0
internalName: node0.internal
- name: node1
internalName: node1.internal
- name: node2
internalName: node2.internal
spec: {}
---
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: etcd-server-main
name: etcd-server-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /usr/local/bin/etcd >
/tmp/pipe 2>&1
env:
- name: ETCD_NAME
value: node0
- name: ETCD_DATA_DIR
value: /var/etcd/data-main
- name: ETCD_LISTEN_PEER_URLS
value: http://0.0.0.0:2380
- name: ETCD_LISTEN_CLIENT_URLS
value: http://0.0.0.0:4001
- name: ETCD_ADVERTISE_CLIENT_URLS
value: http://node0.internal:4001
- name: ETCD_INITIAL_ADVERTISE_PEER_URLS
value: http://node0.internal:2380
- name: ETCD_INITIAL_CLUSTER_STATE
value: new
- name: ETCD_INITIAL_CLUSTER_TOKEN
value: token-main
- name: ETCD_ELECTION_TIMEOUT
value: "1000"
- name: ETCD_HEARTBEAT_INTERVAL
value: "100"
- name: ETCD_INITIAL_CLUSTER
value: node0=http://node0.internal:2380,node1=http://node1.internal:2380,node2=http://node2.internal:2380
image: k8s.gcr.io/etcd:2.2.1
livenessProbe:
httpGet:
host: 127.0.0.1
path: /health
port: 4001
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
name: etcd-container
ports:
- containerPort: 2380
hostPort: 2380
name: serverport
- containerPort: 4001
hostPort: 4001
name: clientport
resources:
requests:
cpu: 200m
memory: 100Mi
volumeMounts:
- mountPath: /var/etcd/data-main
name: varetcdata
- mountPath: /var/log/etcd.log
name: varlogetcd
- mountPath: /etc/hosts
name: hosts
readOnly: true
hostNetwork: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /mnt/main/var/etcd/data-main
name: varetcdata
- hostPath:
path: /var/log/etcd.log
name: varlogetcd
- hostPath:
path: /etc/hosts
name: hosts
status: {}

View File

@ -1,104 +0,0 @@
clientPort: 4001
clusterName: etcd-main
clusterToken: token-main
cpuRequest: "200m"
memoryRequest: "100Mi"
dataDirName: data-main
imageSource: k8s.gcr.io/etcd:2.2.1
logFile: /var/log/etcd.log
peerPort: 2380
podName: etcd-server-main
volumeMountPath: /mnt/main
me:
name: node0
internalName: node0.internal
nodes:
- name: node0
internalName: node0.internal
- name: node1
internalName: node1.internal
- name: node2
internalName: node2.internal
spec: {}
---
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: etcd-server-main
name: etcd-server-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /usr/local/bin/etcd >
/tmp/pipe 2>&1
env:
- name: ETCD_NAME
value: node0
- name: ETCD_DATA_DIR
value: /var/etcd/data-main
- name: ETCD_LISTEN_PEER_URLS
value: http://0.0.0.0:2380
- name: ETCD_LISTEN_CLIENT_URLS
value: http://0.0.0.0:4001
- name: ETCD_ADVERTISE_CLIENT_URLS
value: http://node0.internal:4001
- name: ETCD_INITIAL_ADVERTISE_PEER_URLS
value: http://node0.internal:2380
- name: ETCD_INITIAL_CLUSTER_STATE
value: new
- name: ETCD_INITIAL_CLUSTER_TOKEN
value: token-main
- name: ETCD_INITIAL_CLUSTER
value: node0=http://node0.internal:2380,node1=http://node1.internal:2380,node2=http://node2.internal:2380
image: k8s.gcr.io/etcd:2.2.1
livenessProbe:
httpGet:
host: 127.0.0.1
path: /health
port: 4001
scheme: HTTP
initialDelaySeconds: 15
timeoutSeconds: 15
name: etcd-container
ports:
- containerPort: 2380
hostPort: 2380
name: serverport
- containerPort: 4001
hostPort: 4001
name: clientport
resources:
requests:
cpu: 200m
memory: 100Mi
volumeMounts:
- mountPath: /var/etcd/data-main
name: varetcdata
- mountPath: /var/log/etcd.log
name: varlogetcd
- mountPath: /etc/hosts
name: hosts
readOnly: true
hostNetwork: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /mnt/main/var/etcd/data-main
name: varetcdata
- hostPath:
path: /var/log/etcd.log
name: varlogetcd
- hostPath:
path: /etc/hosts
name: hosts
status: {}

View File

@ -1,126 +0,0 @@
tlsCA: /srv/kubernetes/ca.crt
tlsCert: /srv/kubernetes/etcd.pem
tlsKey: /srv/kubernetes/etcd-key.pem
clientPort: 4001
clusterName: etcd-main
clusterToken: token-main
cpuRequest: "200m"
memoryRequest: "100Mi"
dataDirName: data-main
imageSource: k8s.gcr.io/etcd:2.2.1
logFile: /var/log/etcd.log
peerCA: /srv/kubernetes/ca.crt
peerCert: /srv/kubernetes/etcd.pem
peerKey: /srv/kubernetes/etcd-key.pem
peerPort: 2380
podName: etcd-server-main
volumeMountPath: /mnt/main
me:
name: node0
internalName: node0.internal
nodes:
- name: node0
internalName: node0.internal
- name: node1
internalName: node1.internal
- name: node2
internalName: node2.internal
spec: {}
---
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: etcd-server-main
name: etcd-server-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /usr/local/bin/etcd >
/tmp/pipe 2>&1
env:
- name: ETCD_NAME
value: node0
- name: ETCD_DATA_DIR
value: /var/etcd/data-main
- name: ETCD_LISTEN_PEER_URLS
value: https://0.0.0.0:2380
- name: ETCD_LISTEN_CLIENT_URLS
value: https://0.0.0.0:4001
- name: ETCD_ADVERTISE_CLIENT_URLS
value: https://node0.internal:4001
- name: ETCD_INITIAL_ADVERTISE_PEER_URLS
value: https://node0.internal:2380
- name: ETCD_INITIAL_CLUSTER_STATE
value: new
- name: ETCD_INITIAL_CLUSTER_TOKEN
value: token-main
- name: ETCD_PEER_TRUSTED_CA_FILE
value: /srv/kubernetes/ca.crt
- name: ETCD_PEER_CERT_FILE
value: /srv/kubernetes/etcd.pem
- name: ETCD_PEER_KEY_FILE
value: /srv/kubernetes/etcd-key.pem
- name: ETCD_TRUSTED_CA_FILE
value: /srv/kubernetes/ca.crt
- name: ETCD_CERT_FILE
value: /srv/kubernetes/etcd.pem
- name: ETCD_KEY_FILE
value: /srv/kubernetes/etcd-key.pem
- name: ETCD_INITIAL_CLUSTER
value: node0=https://node0.internal:2380,node1=https://node1.internal:2380,node2=https://node2.internal:2380
image: k8s.gcr.io/etcd:2.2.1
livenessProbe:
initialDelaySeconds: 15
tcpSocket:
host: 127.0.0.1
port: 4001
timeoutSeconds: 15
name: etcd-container
ports:
- containerPort: 2380
hostPort: 2380
name: serverport
- containerPort: 4001
hostPort: 4001
name: clientport
resources:
requests:
cpu: 200m
memory: 100Mi
volumeMounts:
- mountPath: /var/etcd/data-main
name: varetcdata
- mountPath: /var/log/etcd.log
name: varlogetcd
- mountPath: /etc/hosts
name: hosts
readOnly: true
- mountPath: /srv/kubernetes
name: srvkubernetes
readOnly: true
hostNetwork: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /mnt/main/var/etcd/data-main
name: varetcdata
- hostPath:
path: /var/log/etcd.log
name: varlogetcd
- hostPath:
path: /etc/hosts
name: hosts
- hostPath:
path: /srv/kubernetes
name: srvkubernetes
status: {}

View File

@ -1244,13 +1244,11 @@ func newNodeUpConfigBuilder(cluster *kops.Cluster, assetBuilder *assets.AssetBui
if isMaster {
for _, etcdCluster := range cluster.Spec.EtcdClusters {
if etcdCluster.Provider == kops.EtcdProviderTypeManager {
p := configBase.Join("manifests/etcd/" + etcdCluster.Name + ".yaml").Path()
etcdManifests[role] = append(etcdManifests[role], p)
}
}
}
}
configBuilder := nodeUpConfigBuilder{
assetBuilder: assetBuilder,

View File

@ -898,7 +898,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann
if kops.CloudProviderID(b.Cluster.Spec.CloudProvider) == kops.CloudProviderAWS {
if b.IsKubernetesGTE("1.18") && b.Cluster.Spec.ExternalCloudControllerManager != nil {
if b.Cluster.Spec.ExternalCloudControllerManager != nil {
key := "aws-cloud-controller.addons.k8s.io"
{

View File

@ -233,8 +233,6 @@ func precreateDNS(ctx context.Context, cluster *kops.Cluster, cloud fi.Cloud) er
// buildPrecreateDNSHostnames returns the hostnames we should precreate
func buildPrecreateDNSHostnames(cluster *kops.Cluster) []string {
dnsInternalSuffix := ".internal." + cluster.ObjectMeta.Name
var dnsHostnames []string
if cluster.Spec.MasterPublicName != "" {
@ -249,21 +247,6 @@ func buildPrecreateDNSHostnames(cluster *kops.Cluster) []string {
klog.Warningf("cannot pre-create MasterInternalName - not set")
}
for _, etcdCluster := range cluster.Spec.EtcdClusters {
if etcdCluster.Provider == kops.EtcdProviderTypeManager {
continue
}
etcClusterName := "etcd-" + etcdCluster.Name
if etcdCluster.Name == "main" {
// Special case
etcClusterName = "etcd"
}
for _, etcdClusterMember := range etcdCluster.Members {
name := etcClusterName + "-" + etcdClusterMember.Name + dnsInternalSuffix
dnsHostnames = append(dnsHostnames, name)
}
}
if apimodel.UseKopsControllerForNodeBootstrap(cluster) {
name := "kops-controller.internal." + cluster.ObjectMeta.Name
dnsHostnames = append(dnsHostnames, name)

View File

@ -53,12 +53,6 @@ func TestPrecreateDNSNames(t *testing.T) {
expected := []string{
"api.cluster1.example.com",
"api.internal.cluster1.example.com",
"etcd-events-zonea.internal.cluster1.example.com",
"etcd-events-zoneb.internal.cluster1.example.com",
"etcd-events-zonec.internal.cluster1.example.com",
"etcd-zone1.internal.cluster1.example.com",
"etcd-zone2.internal.cluster1.example.com",
"etcd-zone3.internal.cluster1.example.com",
}
sort.Strings(actual)

View File

@ -30,7 +30,6 @@ import (
"k8s.io/kops"
api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/model"
version "k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/client/simple"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
@ -1196,20 +1195,6 @@ func createEtcdCluster(etcdCluster string, masters []*api.InstanceGroup, encrypt
func addCiliumNetwork(cluster *api.Cluster) {
cilium := &api.CiliumNetworkingSpec{}
cluster.Spec.Networking.Cilium = cilium
nodeport := false
if cluster.Spec.KubernetesVersion == "" {
nodeport = true
} else {
k8sVersion, err := version.ParseKubernetesVersion(cluster.Spec.KubernetesVersion)
if err == nil {
if version.IsKubernetesGTE("1.18", *k8sVersion) {
nodeport = true
}
} else {
klog.Error(err.Error())
}
}
if nodeport {
cilium.EnableNodePort = true
if cluster.Spec.KubeProxy == nil {
cluster.Spec.KubeProxy = &api.KubeProxyConfig{}
@ -1217,4 +1202,3 @@ func addCiliumNetwork(cluster *api.Cluster) {
enabled := false
cluster.Spec.KubeProxy.Enabled = &enabled
}
}

View File

@ -289,7 +289,6 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
loader.Builders = append(loader.Builders, &model.HookBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubeletBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubectlBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.EtcdBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.LogrotateBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.ManifestsBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.PackagesBuilder{NodeupModelContext: modelContext})