Create env-var helper function

Refactor to begin centralizing the env-var configuration for system
components, and start adding test coverage so we can be sure we
haven't broken anything!
This commit is contained in:
Justin SB 2019-09-02 09:39:15 -07:00
parent 2d860e6c2d
commit 3fbc906cbc
No known key found for this signature in database
GPG Key ID: 8DEC5C8217494E37
15 changed files with 891 additions and 64 deletions

View File

@ -183,6 +183,7 @@ k8s.io/kops/upup/pkg/fi/utils
k8s.io/kops/upup/pkg/kutil k8s.io/kops/upup/pkg/kutil
k8s.io/kops/upup/tools/generators/fitask k8s.io/kops/upup/tools/generators/fitask
k8s.io/kops/upup/tools/generators/pkg/codegen k8s.io/kops/upup/tools/generators/pkg/codegen
k8s.io/kops/util/pkg/env
k8s.io/kops/util/pkg/exec k8s.io/kops/util/pkg/exec
k8s.io/kops/util/pkg/hashing k8s.io/kops/util/pkg/hashing
k8s.io/kops/util/pkg/maps k8s.io/kops/util/pkg/maps

View File

@ -26,8 +26,8 @@ go_library(
"//upup/pkg/fi/cloudup/openstack:go_default_library", "//upup/pkg/fi/cloudup/openstack:go_default_library",
"//upup/pkg/fi/fitasks:go_default_library", "//upup/pkg/fi/fitasks:go_default_library",
"//upup/pkg/fi/loader:go_default_library", "//upup/pkg/fi/loader:go_default_library",
"//util/pkg/env:go_default_library",
"//util/pkg/exec:go_default_library", "//util/pkg/exec:go_default_library",
"//util/pkg/proxy:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",

View File

@ -43,8 +43,8 @@ import (
"k8s.io/kops/upup/pkg/fi/cloudup/gce" "k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/openstack" "k8s.io/kops/upup/pkg/fi/cloudup/openstack"
"k8s.io/kops/upup/pkg/fi/fitasks" "k8s.io/kops/upup/pkg/fi/fitasks"
"k8s.io/kops/util/pkg/env"
"k8s.io/kops/util/pkg/exec" "k8s.io/kops/util/pkg/exec"
"k8s.io/kops/util/pkg/proxy"
) )
const metaFilename = "_etcd_backup.meta" const metaFilename = "_etcd_backup.meta"
@ -225,14 +225,6 @@ spec:
name: pki name: pki
` `
func appendEnvVariableIfExist(variable string, envs []v1.EnvVar) []v1.EnvVar {
envVarValue := os.Getenv(variable)
if envVarValue != "" {
envs = append(envs, v1.EnvVar{Name: variable, Value: envVarValue})
}
return envs
}
// buildPod creates the pod spec, based on the EtcdClusterSpec // buildPod creates the pod spec, based on the EtcdClusterSpec
func (b *EtcdManagerBuilder) buildPod(etcdCluster *kops.EtcdClusterSpec) (*v1.Pod, error) { func (b *EtcdManagerBuilder) buildPod(etcdCluster *kops.EtcdClusterSpec) (*v1.Pod, error) {
var pod *v1.Pod var pod *v1.Pod
@ -457,29 +449,9 @@ func (b *EtcdManagerBuilder) buildPod(etcdCluster *kops.EtcdClusterSpec) (*v1.Po
}) })
} }
container.Env = proxy.GetProxyEnvVars(b.Cluster.Spec.EgressProxy) envMap := env.BuildSystemComponentEnvVars(&b.Cluster.Spec)
// Custom S3 endpoint container.Env = envMap.ToEnvVars()
container.Env = appendEnvVariableIfExist("S3_ENDPOINT", container.Env)
container.Env = appendEnvVariableIfExist("S3_ACCESS_KEY_ID", container.Env)
container.Env = appendEnvVariableIfExist("S3_SECRET_ACCESS_KEY", container.Env)
// Openstack related values
container.Env = appendEnvVariableIfExist("OS_TENANT_ID", container.Env)
container.Env = appendEnvVariableIfExist("OS_TENANT_NAME", container.Env)
container.Env = appendEnvVariableIfExist("OS_PROJECT_ID", container.Env)
container.Env = appendEnvVariableIfExist("OS_PROJECT_NAME", container.Env)
container.Env = appendEnvVariableIfExist("OS_PROJECT_DOMAIN_NAME", container.Env)
container.Env = appendEnvVariableIfExist("OS_PROJECT_DOMAIN_ID", container.Env)
container.Env = appendEnvVariableIfExist("OS_DOMAIN_NAME", container.Env)
container.Env = appendEnvVariableIfExist("OS_DOMAIN_ID", container.Env)
container.Env = appendEnvVariableIfExist("OS_USERNAME", container.Env)
container.Env = appendEnvVariableIfExist("OS_PASSWORD", container.Env)
container.Env = appendEnvVariableIfExist("OS_AUTH_URL", container.Env)
container.Env = appendEnvVariableIfExist("OS_REGION_NAME", container.Env)
// Digital Ocean related values.
container.Env = appendEnvVariableIfExist("DIGITALOCEAN_ACCESS_TOKEN", container.Env)
{ {
foundPKI := false foundPKI := false

View File

@ -27,28 +27,32 @@ import (
) )
func Test_RunEtcdManagerBuilder(t *testing.T) { func Test_RunEtcdManagerBuilder(t *testing.T) {
basedir := "tests/minimal" for _, basedir := range []string{"tests/minimal", "tests/proxy"} {
basedir := basedir
context := &fi.ModelBuilderContext{ t.Run(fmt.Sprintf("basedir=%s", basedir), func(t *testing.T) {
Tasks: make(map[string]fi.Task), context := &fi.ModelBuilderContext{
} Tasks: make(map[string]fi.Task),
kopsModelContext, err := LoadKopsModelContext(basedir) }
if err != nil { kopsModelContext, err := LoadKopsModelContext(basedir)
t.Fatalf("error loading model %q: %v", basedir, err) if err != nil {
return t.Fatalf("error loading model %q: %v", basedir, err)
} return
}
builder := EtcdManagerBuilder{ builder := EtcdManagerBuilder{
KopsModelContext: kopsModelContext, KopsModelContext: kopsModelContext,
AssetBuilder: assets.NewAssetBuilder(kopsModelContext.Cluster, ""), AssetBuilder: assets.NewAssetBuilder(kopsModelContext.Cluster, ""),
} }
if err := builder.Build(context); err != nil { if err := builder.Build(context); err != nil {
t.Fatalf("error from Build: %v", err) t.Fatalf("error from Build: %v", err)
return return
} }
testutils.ValidateTasks(t, basedir, context) testutils.ValidateTasks(t, basedir, context)
})
}
} }
func LoadKopsModelContext(basedir string) (*model.KopsModelContext, error) { func LoadKopsModelContext(basedir string) (*model.KopsModelContext, error) {

View File

@ -0,0 +1,89 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: minimal.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/minimal.example.com
egressProxy:
httpProxy:
host: proxy.example.com
excludes: noproxy.example.com
etcdClusters:
- cpuRequest: 200m
etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
memoryRequest: 100Mi
name: main
provider: Manager
backups:
backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd-main
- cpuRequest: 100m
etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
memoryRequest: 100Mi
name: events
provider: Manager
backups:
backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd-events
kubernetesVersion: v1.12.0
masterInternalName: api.internal.minimal.example.com
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
kubenet: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: minimal.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: minimal.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a

View File

@ -0,0 +1,224 @@
Lifecycle: null
Name: etcd-clients-ca
Signer: null
alternateNameTasks: null
alternateNames: null
format: v1alpha2
subject: cn=etcd-clients-ca
type: ca
---
Lifecycle: null
Name: etcd-manager-ca-events
Signer: null
alternateNameTasks: null
alternateNames: null
format: v1alpha2
subject: cn=etcd-manager-ca-events
type: ca
---
Lifecycle: null
Name: etcd-manager-ca-main
Signer: null
alternateNameTasks: null
alternateNames: null
format: v1alpha2
subject: cn=etcd-manager-ca-main
type: ca
---
Lifecycle: null
Name: etcd-peers-ca-events
Signer: null
alternateNameTasks: null
alternateNames: null
format: v1alpha2
subject: cn=etcd-peers-ca-events
type: ca
---
Lifecycle: null
Name: etcd-peers-ca-main
Signer: null
alternateNameTasks: null
alternateNames: null
format: v1alpha2
subject: cn=etcd-peers-ca-main
type: ca
---
Contents:
Name: ""
Resource: |-
{
"memberCount": 1
}
Lifecycle: null
Location: backups/etcd/events/control/etcd-cluster-spec
Name: etcd-cluster-spec-events
---
Contents:
Name: ""
Resource: |-
{
"memberCount": 1
}
Lifecycle: null
Location: backups/etcd/main/control/etcd-cluster-spec
Name: etcd-cluster-spec-main
---
Contents:
Name: ""
Resource: |
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: etcd-manager-events
name: etcd-manager-events
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-events
--client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
--dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3997
--insecure=false --peer-urls=https://__name__:2381 --quarantine-client-urls=https://__name__:3995
--v=6 --volume-name-tag=k8s.io/etcd/events --volume-provider=aws --volume-tag=k8s.io/etcd/events
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
env:
- name: NO_PROXY
value: noproxy.example.com
- name: http_proxy
value: http://proxy.example.com
- name: https_proxy
value: http://proxy.example.com
- name: no_proxy
value: noproxy.example.com
image: kopeio/etcd-manager:3.0.20190816
name: etcd-manager
resources:
requests:
cpu: 100m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /etc/hosts
name: hosts
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /etc/hosts
type: File
name: hosts
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-events
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd-events.log
type: FileOrCreate
name: varlogetcd
status: {}
Lifecycle: null
Location: manifests/etcd/events.yaml
Name: manifests-etcdmanager-events
---
Contents:
Name: ""
Resource: |
apiVersion: v1
kind: Pod
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
creationTimestamp: null
labels:
k8s-app: etcd-manager-main
name: etcd-manager-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd-main
--client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
--dns-suffix=.internal.minimal.example.com --etcd-insecure=true --grpc-port=3996
--insecure=false --peer-urls=https://__name__:2380 --quarantine-client-urls=https://__name__:3994
--v=6 --volume-name-tag=k8s.io/etcd/main --volume-provider=aws --volume-tag=k8s.io/etcd/main
--volume-tag=k8s.io/role/master=1 --volume-tag=kubernetes.io/cluster/minimal.example.com=owned
> /tmp/pipe 2>&1
env:
- name: NO_PROXY
value: noproxy.example.com
- name: http_proxy
value: http://proxy.example.com
- name: https_proxy
value: http://proxy.example.com
- name: no_proxy
value: noproxy.example.com
image: kopeio/etcd-manager:3.0.20190816
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /etc/hosts
name: hosts
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /etc/hosts
type: File
name: hosts
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-main
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd.log
type: FileOrCreate
name: varlogetcd
status: {}
Lifecycle: null
Location: manifests/etcd/main.yaml
Name: manifests-etcdmanager-main

View File

@ -105,19 +105,40 @@ func runChannelBuilderTest(t *testing.T, key string) {
t.Fatalf("error from BootstrapChannelBuilder Build: %v", err) t.Fatalf("error from BootstrapChannelBuilder Build: %v", err)
} }
name := cluster.ObjectMeta.Name + "-addons-bootstrap" {
manifestTask := context.Tasks[name] name := cluster.ObjectMeta.Name + "-addons-bootstrap"
if manifestTask == nil { manifestTask := context.Tasks[name]
t.Fatalf("manifest task not found (%q)", name) if manifestTask == nil {
t.Fatalf("manifest task not found (%q)", name)
}
manifestFileTask := manifestTask.(*fitasks.ManagedFile)
actualManifest, err := manifestFileTask.Contents.AsString()
if err != nil {
t.Fatalf("error getting manifest as string: %v", err)
}
expectedManifestPath := path.Join(basedir, "manifest.yaml")
testutils.AssertMatchesFile(t, actualManifest, expectedManifestPath)
} }
manifestFileTask := manifestTask.(*fitasks.ManagedFile) for _, k := range []string{"dns-controller.addons.k8s.io-k8s-1.12" /*, "kops-controller.addons.k8s.io-k8s-1.16"*/} {
actualManifest, err := manifestFileTask.Contents.AsString() name := cluster.ObjectMeta.Name + "-addons-" + k
if err != nil { manifestTask := context.Tasks[name]
t.Fatalf("error getting manifest as string: %v", err) if manifestTask == nil {
for k := range context.Tasks {
t.Logf("found task %s", k)
}
t.Fatalf("manifest task not found (%q)", name)
}
manifestFileTask := manifestTask.(*fitasks.ManagedFile)
actualManifest, err := manifestFileTask.Contents.AsString()
if err != nil {
t.Fatalf("error getting manifest as string: %v", err)
}
expectedManifestPath := path.Join(basedir, k+".yaml")
testutils.AssertMatchesFile(t, actualManifest, expectedManifestPath)
} }
expectedManifestPath := path.Join(basedir, "manifest.yaml")
testutils.AssertMatchesFile(t, actualManifest, expectedManifestPath)
} }

View File

@ -0,0 +1,102 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.14.0-alpha.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.14.0-alpha.1
spec:
containers:
- command:
- /usr/bin/dns-controller
- --watch-ingress=false
- --dns=aws-route53
- --zone=*/Z1AFAKE1ZON3YO
- --zone=*/*
- -v=2
image: kope/dns-controller:1.14.0-alpha.1
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: Default
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
serviceAccount: dns-controller
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller

View File

@ -20,6 +20,10 @@ spec:
- instanceGroup: master-us-test-1a - instanceGroup: master-us-test-1a
name: master-us-test-1a name: master-us-test-1a
name: events name: events
egressProxy:
httpProxy:
host: proxy.example.com
excludes: noproxy.example.com
kubernetesVersion: v1.4.6 kubernetesVersion: v1.4.6
masterInternalName: api.internal.minimal.example.com masterInternalName: api.internal.minimal.example.com
masterPublicName: api.minimal.example.com masterPublicName: api.minimal.example.com

View File

@ -0,0 +1,111 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.14.0-alpha.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.14.0-alpha.1
spec:
containers:
- command:
- /usr/bin/dns-controller
- --watch-ingress=false
- --dns=aws-route53
- --zone=*/Z1AFAKE1ZON3YO
- --zone=*/*
- -v=2
env:
- name: NO_PROXY
value: noproxy.example.com,127.0.0.1,localhost,api.minimal.example.com,minimal.example.com,100.64.0.1,100.64.0.0/10,169.254.169.254,172.20.0.0/16
- name: http_proxy
value: http://proxy.example.com
- name: https_proxy
value: http://proxy.example.com
- name: no_proxy
value: noproxy.example.com,127.0.0.1,localhost,api.minimal.example.com,minimal.example.com,100.64.0.1,100.64.0.0/10,169.254.169.254,172.20.0.0/16
image: kope/dns-controller:1.14.0-alpha.1
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: Default
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
serviceAccount: dns-controller
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller

View File

@ -67,7 +67,7 @@ spec:
- id: k8s-1.6 - id: k8s-1.6
kubernetesVersion: '>=1.6.0 <1.12.0' kubernetesVersion: '>=1.6.0 <1.12.0'
manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml
manifestHash: 1e6ad361396158a93c3f59e939265f74bb003586 manifestHash: 4ba33ea15d601523c39b18c2481376e7036a6dac
name: dns-controller.addons.k8s.io name: dns-controller.addons.k8s.io
selector: selector:
k8s-addon: dns-controller.addons.k8s.io k8s-addon: dns-controller.addons.k8s.io
@ -75,7 +75,7 @@ spec:
- id: k8s-1.12 - id: k8s-1.12
kubernetesVersion: '>=1.12.0' kubernetesVersion: '>=1.12.0'
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: aaf42d7dcff21f7e32177e933fde10cff8b03bc3 manifestHash: 91914c7727ec59000c4d759d83e6cdfe30cc6751
name: dns-controller.addons.k8s.io name: dns-controller.addons.k8s.io
selector: selector:
k8s-addon: dns-controller.addons.k8s.io k8s-addon: dns-controller.addons.k8s.io

View File

@ -0,0 +1,102 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.14.0-alpha.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.14.0-alpha.1
spec:
containers:
- command:
- /usr/bin/dns-controller
- --watch-ingress=false
- --dns=aws-route53
- --zone=*/Z1AFAKE1ZON3YO
- --zone=*/*
- -v=2
image: kope/dns-controller:1.14.0-alpha.1
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: Default
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
serviceAccount: dns-controller
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller

View File

@ -0,0 +1,102 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.14.0-alpha.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.14.0-alpha.1
spec:
containers:
- command:
- /usr/bin/dns-controller
- --watch-ingress=false
- --dns=aws-route53
- --zone=*/Z1AFAKE1ZON3YO
- --zone=*/*
- -v=2
image: kope/dns-controller:1.14.0-alpha.1
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
dnsPolicy: Default
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
serviceAccount: dns-controller
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller

13
util/pkg/env/BUILD.bazel vendored Normal file
View File

@ -0,0 +1,13 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
# Library rule for k8s.io/kops/util/pkg/env: the shared helper that builds
# the environment-variable set passed to system components (etcd-manager,
# dns-controller, ...).
go_library(
name = "go_default_library",
srcs = ["standard.go"],
importpath = "k8s.io/kops/util/pkg/env",
visibility = ["//visibility:public"],
deps = [
# Cluster spec types (EgressProxy config).
"//pkg/apis/kops:go_default_library",
# Converts the egress-proxy spec into proxy env vars.
"//util/pkg/proxy:go_default_library",
# corev1.EnvVar, the output type consumed by pod builders.
"//vendor/k8s.io/api/core/v1:go_default_library",
],
)

82
util/pkg/env/standard.go vendored Normal file
View File

@ -0,0 +1,82 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package env
import (
"os"
"sort"
corev1 "k8s.io/api/core/v1"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/util/pkg/proxy"
)
// EnvVars is a set of environment variables, keyed by variable name.
type EnvVars map[string]string

// addEnvVariableIfExist copies the named variable from the local process
// environment into the map. Variables that are unset or empty are skipped,
// so the map only ever holds non-empty values.
func (m EnvVars) addEnvVariableIfExist(name string) {
	if value := os.Getenv(name); value != "" {
		m[name] = value
	}
}
// BuildSystemComponentEnvVars returns the environment variables that should
// be set on system components (etcd-manager, dns-controller, ...). It
// combines the proxy settings derived from the cluster's EgressProxy spec
// with a fixed set of cloud-credential variables passed through from the
// local process environment (only those that are actually set).
func BuildSystemComponentEnvVars(spec *kops.ClusterSpec) EnvVars {
	vars := make(EnvVars)

	// Proxy configuration comes from the cluster spec, not the local env.
	for _, envVar := range proxy.GetProxyEnvVars(spec.EgressProxy) {
		vars[envVar.Name] = envVar.Value
	}

	// Variables passed through from the local environment when present.
	passthrough := []string{
		// Custom S3 endpoint
		"S3_ENDPOINT",
		"S3_ACCESS_KEY_ID",
		"S3_SECRET_ACCESS_KEY",

		// Openstack related values
		"OS_TENANT_ID",
		"OS_TENANT_NAME",
		"OS_PROJECT_ID",
		"OS_PROJECT_NAME",
		"OS_PROJECT_DOMAIN_NAME",
		"OS_PROJECT_DOMAIN_ID",
		"OS_DOMAIN_NAME",
		"OS_DOMAIN_ID",
		"OS_USERNAME",
		"OS_PASSWORD",
		"OS_AUTH_URL",
		"OS_REGION_NAME",

		// Digital Ocean related values.
		"DIGITALOCEAN_ACCESS_TOKEN",
	}
	for _, name := range passthrough {
		vars.addEnvVariableIfExist(name)
	}

	return vars
}
// ToEnvVars converts the map into a []corev1.EnvVar suitable for a container
// spec, sorted by variable name so the output (and hence any generated
// manifest) is deterministic across runs. An empty map yields a nil slice.
func (m EnvVars) ToEnvVars() []corev1.EnvVar {
	// Pre-size keys: the final length is known up front.
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	// Map iteration order is random; sort for stable output.
	sort.Strings(keys)

	// Deliberately left nil when m is empty, matching the previous behavior
	// (nil vs empty slice can matter to encoders downstream).
	var envVars []corev1.EnvVar
	for _, k := range keys {
		envVars = append(envVars, corev1.EnvVar{Name: k, Value: m[k]})
	}
	return envVars
}