Merge branch 'master' into tenancy

Commit 7e8ed66620 by Justin Santa Barbara, 2017-03-27 21:31:16 -04:00 (committed by GitHub)
95 changed files with 1541 additions and 446 deletions

.gitignore

@ -1,3 +1,6 @@
# Compiled python files
*.pyc
# OSX leaves these everywhere on SMB shares
._*


@ -183,7 +183,8 @@ push-aws-dry: push
ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --dryrun --v=8
push-gce-run: push
ssh ${TARGET} sudo cp /tmp/nodeup /home/kubernetes/bin/nodeup
ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /home/kubernetes/bin/nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --v=8
# -t is for CentOS http://unix.stackexchange.com/questions/122616/why-do-i-need-a-tty-to-run-sudo-if-i-can-sudo-without-a-password
push-aws-run: push
@ -276,6 +277,11 @@ gofmt:
gofmt -w -s dns-controller/cmd
gofmt -w -s dns-controller/pkg
goimports:
sh -c hack/update-goimports
verify-goimports:
sh -c hack/verify-goimports
govet:
go vet \


@ -16,9 +16,14 @@ The project is created by wearemolecule, and maintained at
```
# Version 1.2.0
# https://github.com/wearemolecule/route53-kubernetes/tree/v1.2.0
$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/route53-mapper/v1.2.0.yml
```
**Important:**
This addon requires [additional IAM permissions](../../docs/iam_roles.md) on the master instances.
The required permissions are described [here](https://github.com/wearemolecule/route53-kubernetes).
These can be configured using `kops edit cluster` or `kops create -f [...]`.
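On newer versions of kops, one way to grant these is through `additionalPolicies` in the cluster spec. The snippet below is only a sketch: the `additionalPolicies` field and the exact actions listed are assumptions used to illustrate the shape, so verify them against the permission list linked above.
```yaml
# Hypothetical sketch - confirm the required actions in the route53-kubernetes README
spec:
  additionalPolicies:
    master: |
      [
        {
          "Effect": "Allow",
          "Action": [
            "route53:ListHostedZonesByName",
            "route53:ChangeResourceRecordSets",
            "elasticloadbalancing:DescribeLoadBalancers"
          ],
          "Resource": ["*"]
        }
      ]
```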
### Service Configuration
Add the `dns: route53` label and your target DNS entry in a `domainName`


@ -7,6 +7,8 @@ spec:
- name: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2017-01-09
providerID: aws
kubernetesVersion: ">=1.5.0"
- providerID: gce
name: "cos-cloud/cos-stable-56-9000-84-2"
cluster:
kubernetesVersion: v1.5.4
networking:


@ -362,6 +362,7 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
g := &api.InstanceGroup{}
g.Spec.Role = api.InstanceGroupRoleMaster
g.Spec.Taints = []string{api.TaintNoScheduleMaster}
g.Spec.Subnets = []string{subnet.Name}
g.Spec.MinSize = fi.Int32(1)
g.Spec.MaxSize = fi.Int32(1)


@ -29,8 +29,9 @@ In short:
## Using Kops HA
We can create HA clusters using kops, but it's important to note that migrating from a single-master
cluster to a multi-master cluster is a complicated operation (described [here](./single-to-multi-master.md)).
If possible, try to plan this at time of cluster creation.
When you first call `kops create cluster`, you specify the `--master-zones` flag listing the zones you want your masters
to run in, for example:


@ -131,6 +131,27 @@ So the procedure is:
* Rolling-update, only if you want to apply changes immediately: `kops rolling-update cluster`
## Adding Taints to an Instance Group
If you're running Kubernetes 1.6.0 or later, you can also control taints in the InstanceGroup.
The taints property takes a list of strings. The following example would add two taints to an IG,
using the same `edit` -> `update` -> `rolling-update` process as above.
```
metadata:
creationTimestamp: "2016-07-10T15:47:14Z"
name: nodes
spec:
machineType: m3.medium
maxSize: 3
minSize: 3
role: Node
taints:
- dedicated=gpu:NoSchedule
- team=search:PreferNoSchedule
```
## Resizing the master
(This procedure should be pretty familiar by now!)


@ -0,0 +1,220 @@
# Migrating from single to multi-master
This document describes how to go from a single-master cluster (created by kops)
to a multi-master cluster.
## Warnings
This is a risky procedure that **can lead to data-loss** in the etcd cluster.
Please follow all the backup steps before attempting it. Please read the
[etcd admin guide](https://github.com/coreos/etcd/blob/v2.2.1/Documentation/admin_guide.md)
before attempting it.
During this procedure, you will experience **downtime** on the API server, but
not on the end user services. During this downtime, existing pods will continue
to work, but you will not be able to create new pods and any existing pod that
dies will not be restarted.
## 1 - Backups
### a - Backup main etcd cluster
```bash
$ kubectl --namespace=kube-system get pods | grep etcd
etcd-server-events-ip-172-20-36-161.ec2.internal 1/1 Running 4 2h
etcd-server-ip-172-20-36-161.ec2.internal 1/1 Running 4 2h
$ kubectl --namespace=kube-system exec etcd-server-ip-172-20-36-161.ec2.internal -it -- sh
/ # etcdctl backup --data-dir /var/etcd/data --backup-dir /var/etcd/backup
/ # mv /var/etcd/backup/ /var/etcd/data/
/ # exit
$ kubectl --namespace=kube-system get pod etcd-server-ip-172-20-36-161.ec2.internal -o json | jq '.spec.volumes[] | select(.name | contains("varetcdata")) | .hostPath.path'
"/mnt/master-vol-0ea119c15602cbb57/var/etcd/data"
$ ssh admin@<master-node>
admin@ip-172-20-36-161:~$ sudo -i
root@ip-172-20-36-161:~# mv /mnt/master-vol-0ea119c15602cbb57/var/etcd/data/backup /home/admin/
root@ip-172-20-36-161:~# chown -R admin: /home/admin/backup/
root@ip-172-20-36-161:~# exit
admin@ip-172-20-36-161:~$ exit
$ scp -r admin@<master-node>:backup/ .
```
### b - Backup event etcd cluster
```bash
$ kubectl --namespace=kube-system exec etcd-server-events-ip-172-20-36-161.ec2.internal -it -- sh
/ # etcdctl backup --data-dir /var/etcd/data-events --backup-dir /var/etcd/backup
/ # mv /var/etcd/backup/ /var/etcd/data-events/
/ # exit
$ kubectl --namespace=kube-system get pod etcd-server-events-ip-172-20-36-161.ec2.internal -o json | jq '.spec.volumes[] | select(.name | contains("varetcdata")) | .hostPath.path'
"/mnt/master-vol-0bb5ad222911c6777/var/etcd/data-events"
$ ssh admin@<master-node>
admin@ip-172-20-36-161:~$ sudo -i
root@ip-172-20-36-161:~# mv /mnt/master-vol-0bb5ad222911c6777/var/etcd/data-events/backup/ /home/admin/backup-events
root@ip-172-20-36-161:~# chown -R admin: /home/admin/backup-events/
root@ip-172-20-36-161:~# exit
admin@ip-172-20-36-161:~$ exit
$ scp -r admin@<master-node>:backup-events/ .
```
## 2 - Add a new master
### a - Create the instance group
Create one kops instance group for the first of your new masters, in
a different AZ from the existing one.
```bash
$ kops create instancegroup master-<availability-zone2>
```
* ``maxSize`` and ``minSize`` should be 1,
* ``role`` should be ``Master``,
* only one zone should be listed (see the sketch below).
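A minimal sketch of what the resulting instance group might look like (the group name, zone, and machine type below are placeholders, and the field names assume the v1alpha2 API):
```yaml
# Hypothetical example: a second master in us-east-1b
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  name: master-us-east-1b
spec:
  role: Master
  machineType: m3.medium
  minSize: 1
  maxSize: 1
  subnets:
  - us-east-1b
```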
### b - Reference the new masters in your cluster configuration
*kops will refuse to have only 2 members in the etcd clusters, so we have to
reference a third one, even if we have not created it yet.*
```bash
$ kops edit cluster example.com
```
* In ``.spec.etcdClusters``, add 2 new members to each cluster, one for each new
availability zone (see the sketch below).
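A sketch of what the edited section might look like afterwards, assuming zones ``us-east-1a/b/c`` (member names and fields are illustrative; keep the naming scheme your cluster already uses):
```yaml
etcdClusters:
- name: main
  etcdMembers:
  - name: us-east-1a
    instanceGroup: master-us-east-1a
  - name: us-east-1b
    instanceGroup: master-us-east-1b
  - name: us-east-1c
    instanceGroup: master-us-east-1c
- name: events
  etcdMembers:
  - name: us-east-1a
    instanceGroup: master-us-east-1a
  - name: us-east-1b
    instanceGroup: master-us-east-1b
  - name: us-east-1c
    instanceGroup: master-us-east-1c
```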
### c - Add a new member to the etcd clusters
**The clusters will stop working until the new member is started**.
```bash
$ kubectl --namespace=kube-system exec etcd-server-ip-172-20-36-161.ec2.internal -- etcdctl member add etcd-<availability-zone2> http://etcd-<availability-zone2>.internal.example.com:2380
$ kubectl --namespace=kube-system exec etcd-server-events-ip-172-20-36-161.ec2.internal -- etcdctl --endpoint http://127.0.0.1:4002 member add etcd-events-<availability-zone2> http://etcd-events-<availability-zone2>.internal.example.com:2381
```
### d - Launch the new master
```bash
$ kops update cluster example.com --yes
# wait for the new master to boot and initialize
$ ssh admin@<new-master>
admin@ip-172-20-116-230:~$ sudo -i
root@ip-172-20-116-230:~# systemctl stop kubelet
root@ip-172-20-116-230:~# systemctl stop protokube
```
Reinitialize the etcd instances:
* In both ``/etc/kubernetes/manifests/etcd-events.manifest`` and
``/etc/kubernetes/manifests/etcd.manifest``, edit the
``ETCD_INITIAL_CLUSTER_STATE`` variable to ``existing`` (a sketch follows this list).
* In the same files, remove the third non-existing member from
``ETCD_INITIAL_CLUSTER``.
* Delete the containers and the data directories:
```bash
root@ip-172-20-116-230:~# docker stop $(docker ps | grep "etcd:2.2.1" | awk '{print $1}')
root@ip-172-20-116-230:~# rm -r /mnt/master-vol-03b97b1249caf379a/var/etcd/data-events/member/
root@ip-172-20-116-230:~# rm -r /mnt/master-vol-0dbfd1f3c60b8c509/var/etcd/data/member/
```
Launch them again:
```bash
root@ip-172-20-116-230:~# systemctl start kubelet
```
At this point, both etcd clusters should be healthy with two members:
```bash
$ kubectl --namespace=kube-system exec etcd-server-ip-172-20-36-161.ec2.internal -- etcdctl member list
$ kubectl --namespace=kube-system exec etcd-server-ip-172-20-36-161.ec2.internal -- etcdctl cluster-health
$ kubectl --namespace=kube-system exec etcd-server-events-ip-172-20-36-161.ec2.internal -- etcdctl --endpoint http://127.0.0.1:4002 member list
$ kubectl --namespace=kube-system exec etcd-server-events-ip-172-20-36-161.ec2.internal -- etcdctl --endpoint http://127.0.0.1:4002 cluster-health
```
If not, check ``/var/log/etcd.log`` for problems.
Restart protokube on the new master:
```bash
root@ip-172-20-116-230:~# systemctl start protokube
```
## 3 - Add the third master
### a - Create the instance group
Create 1 kops instance group for the third master, in
a different AZ from the existing ones.
```bash
$ kops create instancegroup master-<availability-zone3>
```
* ``maxSize`` and ``minSize`` should be 1,
* ``role`` should be ``Master``,
* only one zone should be listed.
### b - Add a new member to the etcd clusters
```bash
$ kubectl --namespace=kube-system exec etcd-server-ip-172-20-36-161.ec2.internal -- etcdctl member add etcd-<availability-zone3> http://etcd-<availability-zone3>.internal.example.com:2380
$ kubectl --namespace=kube-system exec etcd-server-events-ip-172-20-36-161.ec2.internal -- etcdctl --endpoint http://127.0.0.1:4002 member add etcd-events-<availability-zone3> http://etcd-events-<availability-zone3>.internal.example.com:2381
```
### c - Launch the third master
```bash
$ kops update cluster example.com --yes
# wait for the third master to boot and initialize
$ ssh admin@<third-master>
admin@ip-172-20-139-130:~$ sudo -i
root@ip-172-20-139-130:~# systemctl stop kubelet
root@ip-172-20-139-130:~# systemctl stop protokube
```
Reinitialize the etcd instances:
* In both ``/etc/kubernetes/manifests/etcd-events.manifest`` and
``/etc/kubernetes/manifests/etcd.manifest``, edit the
``ETCD_INITIAL_CLUSTER_STATE`` variable to ``existing``.
* Delete the containers and the data directories:
```bash
root@ip-172-20-139-130:~# docker stop $(docker ps | grep "etcd:2.2.1" | awk '{print $1}')
root@ip-172-20-139-130:~# rm -r /mnt/master-vol-019796c3511a91b4f//var/etcd/data-events/member/
root@ip-172-20-139-130:~# rm -r /mnt/master-vol-0c89fd6f6a256b686/var/etcd/data/member/
```
Launch them again:
```bash
root@ip-172-20-139-130:~# systemctl start kubelet
```
At this point, both etcd clusters should be healthy with three members:
```bash
$ kubectl --namespace=kube-system exec etcd-server-ip-172-20-36-161.ec2.internal -- etcdctl member list
$ kubectl --namespace=kube-system exec etcd-server-ip-172-20-36-161.ec2.internal -- etcdctl cluster-health
$ kubectl --namespace=kube-system exec etcd-server-events-ip-172-20-36-161.ec2.internal -- etcdctl --endpoint http://127.0.0.1:4002 member list
$ kubectl --namespace=kube-system exec etcd-server-events-ip-172-20-36-161.ec2.internal -- etcdctl --endpoint http://127.0.0.1:4002 cluster-health
```
If not, check ``/var/log/etcd.log`` for problems.
Restart protokube on the third master:
```bash
root@ip-172-20-139-130:~# systemctl start protokube
```
## 4 - Cleanup
To be sure that everything runs smoothly and is set up correctly, it is advised
to terminate the masters one after the other (always keeping 2 of them up and
running). They will be restarted with a clean config and should join the others
without any problems.
While optional, this last step allows you to be sure that your masters are
fully configured by Kops and that there is no residual manual configuration.
If there are any configuration problems, they will be detected during this step
and not during a future upgrade or, worse, during a master failure.


@ -28,6 +28,7 @@ k8s.io/kops/pkg/apis/kops/util
k8s.io/kops/pkg/apis/kops/v1alpha1
k8s.io/kops/pkg/apis/kops/v1alpha2
k8s.io/kops/pkg/apis/kops/validation
k8s.io/kops/pkg/apis/nodeup
k8s.io/kops/pkg/client/simple
k8s.io/kops/pkg/client/simple/vfsclientset
k8s.io/kops/pkg/diff

hack/deps.py

@ -0,0 +1,61 @@
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This python script helps sync godeps from the k8s repos into our git submodules
# It generates bash commands where changes are needed
# We can probably also use it for deps when the time comes!
import json
import sys
import subprocess
from pprint import pprint
from os.path import expanduser, join
kops_dir = expanduser('~/k8s/src/k8s.io/kops')
k8s_dir = expanduser('~/k8s/src/k8s.io/kubernetes')
with open(join(k8s_dir, 'Godeps/Godeps.json')) as data_file:
godeps = json.load(data_file)
#pprint(godeps)
godep_map = {}
for godep in godeps['Deps']:
#print("%s %s" % (godep['ImportPath'], godep['Rev']))
godep_map[godep['ImportPath']] = godep['Rev']
process = subprocess.Popen(['git', 'submodule', 'status'], stdout=subprocess.PIPE, cwd=kops_dir)
submodule_status, err = process.communicate()
for submodule_line in submodule_status.splitlines():
tokens = submodule_line.split()
dep = tokens[1]
dep = dep.replace('_vendor/', '')
sha = tokens[0]
sha = sha.replace('+', '')
godep_sha = godep_map.get(dep)
if not godep_sha:
for k in godep_map:
if k.startswith(dep):
godep_sha = godep_map[k]
break
if godep_sha:
if godep_sha != sha:
print("# update needed: %s vs %s" % (godep_sha, sha))
print("pushd _vendor/{dep}; git fetch; git checkout {sha}; popd".format(dep=dep, sha=godep_sha))
else:
print("# UNKNOWN dep %s" % dep)

hack/lib/__init__.py

@ -0,0 +1,17 @@
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["kubernetes"]


@ -0,0 +1,17 @@
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["devtools"]


@ -0,0 +1,27 @@
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os import path
gopath=os.environ['GOPATH']
def read_packages_file(package_name):
packages = []
with open(path.join(gopath, 'src', package_name, 'hack/.packages')) as packages_file:
for package in packages_file:
packages.append(package.replace('\n', ''))
return packages

hack/update-goimports

@ -0,0 +1,39 @@
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os import path
import subprocess
import sys
from lib.kubernetes import devtools
gopath=os.environ['GOPATH']
package_name='k8s.io/kops'
packages = devtools.read_packages_file(package_name)
paths = []
for package in packages:
if package == package_name:
continue
paths.append(package)
print("packages %s" % paths)
subprocess.call(['goimports', '-w'] + paths, cwd=path.join(gopath, 'src'))

hack/verify-goimports

@ -0,0 +1,46 @@
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os import path
import subprocess
import sys
from lib.kubernetes import devtools
gopath=os.environ['GOPATH']
package_name='k8s.io/kops'
packages = devtools.read_packages_file(package_name)
paths = []
for package in packages:
if package == package_name:
continue
paths.append(package)
print("packages %s" % paths)
process = subprocess.Popen(['goimports', '-l'] + paths, stdout=subprocess.PIPE, cwd=path.join(gopath, 'src'))
stdout, stderr = process.communicate()
if stdout != "":
print("!!! 'goimports -w' needs to be run on the following files: ")
print(stdout)
print('!!! Please run: make goimports')
sys.exit(1)


@ -24,11 +24,12 @@ import (
type Distribution string
var (
DistributionJessie Distribution = "jessie"
DistributionXenial Distribution = "xenial"
DistributionRhel7 Distribution = "rhel7"
DistributionCentos7 Distribution = "centos7"
DistributionCoreOS Distribution = "coreos"
DistributionContainerOS Distribution = "containeros"
)
func (d Distribution) BuildTags() []string {
@ -45,6 +46,8 @@ func (d Distribution) BuildTags() []string {
t = []string{"_rhel7"} t = []string{"_rhel7"}
case DistributionCoreOS: case DistributionCoreOS:
t = []string{"_coreos"} t = []string{"_coreos"}
case DistributionContainerOS:
t = []string{"_containeros"}
default:
glog.Fatalf("unknown distribution: %s", d)
return nil
@ -67,7 +70,7 @@ func (d Distribution) IsDebianFamily() bool {
switch d {
case DistributionJessie, DistributionXenial:
return true
case DistributionCentos7, DistributionRhel7, DistributionCoreOS, DistributionContainerOS:
return false
default:
glog.Fatalf("unknown distribution: %s", d)
@ -79,7 +82,7 @@ func (d Distribution) IsRHELFamily() bool {
switch d {
case DistributionCentos7, DistributionRhel7:
return true
case DistributionJessie, DistributionXenial, DistributionCoreOS, DistributionContainerOS:
return false
default:
glog.Fatalf("unknown distribution: %s", d)
@ -95,6 +98,8 @@ func (d Distribution) IsSystemd() bool {
return true
case DistributionCoreOS:
return true
case DistributionContainerOS:
return true
default:
glog.Fatalf("unknown distribution: %s", d)
return false


@ -87,5 +87,21 @@ func FindDistribution(rootfs string) (Distribution, error) {
glog.Warningf("error reading /usr/lib/os-release: %v", err) glog.Warningf("error reading /usr/lib/os-release: %v", err)
} }
// ContainerOS uses /etc/os-release
{
osRelease, err := ioutil.ReadFile(path.Join(rootfs, "etc/os-release"))
if err == nil {
for _, line := range strings.Split(string(osRelease), "\n") {
line = strings.TrimSpace(line)
if line == "ID=cos" {
return DistributionContainerOS, nil
}
}
glog.Warningf("unhandled /etc/os-release info %q", string(osRelease))
} else if !os.IsNotExist(err) {
glog.Warningf("error reading /etc/os-release: %v", err)
}
}
return "", fmt.Errorf("cannot identify distro") return "", fmt.Errorf("cannot identify distro")
} }


@ -19,10 +19,13 @@ package model
import (
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi"
) )
type NodeupModelContext struct { type NodeupModelContext struct {
NodeupConfig *nodeup.NodeUpConfig
Cluster *kops.Cluster
InstanceGroup *kops.InstanceGroup
Architecture Architecture
@ -46,9 +49,39 @@ func (c *NodeupModelContext) SSLHostPaths() []string {
paths = append(paths, "/usr/share/ca-certificates") paths = append(paths, "/usr/share/ca-certificates")
case distros.DistributionContainerOS:
paths = append(paths, "/usr/share/ca-certificates")
default:
paths = append(paths, "/usr/share/ssl", "/usr/ssl", "/usr/lib/ssl", "/usr/local/openssl", "/var/ssl", "/etc/openssl")
}
return paths
}
func (c *NodeupModelContext) PathSrvKubernetes() string {
switch c.Distribution {
case distros.DistributionContainerOS:
return "/etc/srv/kubernetes"
default:
return "/srv/kubernetes"
}
}
func (c *NodeupModelContext) PathSrvSshproxy() string {
switch c.Distribution {
case distros.DistributionContainerOS:
return "/etc/srv/sshproxy"
default:
return "/srv/sshproxy"
}
}
func (c *NodeupModelContext) NetworkPluginDir() string {
switch c.Distribution {
case distros.DistributionContainerOS:
return "/home/kubernetes/bin/"
default:
return "/opt/cni/bin/"
}
}


@ -0,0 +1,50 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
)
// DirectoryBuilder creates required directories
type DirectoryBuilder struct {
*NodeupModelContext
}
var _ fi.ModelBuilder = &DirectoryBuilder{}
func (b *DirectoryBuilder) Build(c *fi.ModelBuilderContext) error {
if b.Distribution == distros.DistributionContainerOS {
dir := "/home/kubernetes/bin"
t := &nodetasks.File{
Path: dir,
Type: nodetasks.FileType_Directory,
Mode: s("0755"),
OnChangeExecute: [][]string{
{"/bin/mount", "--bind", "/home/kubernetes/bin", "/home/kubernetes/bin"},
{"/bin/mount", "-o", "remount,exec", "/home/kubernetes/bin"},
},
}
c.AddTask(t)
}
return nil
}


@ -227,9 +227,14 @@ func (d *dockerVersion) matches(arch Architecture, dockerVersion string, distro
}
func (b *DockerBuilder) Build(c *fi.ModelBuilderContext) error {
switch b.Distribution {
case distros.DistributionCoreOS:
glog.Infof("Detected CoreOS; won't install Docker") glog.Infof("Detected CoreOS; won't install Docker")
return nil return nil
case distros.DistributionContainerOS:
glog.Infof("Detected ContainerOS; won't install Docker")
return nil
}
// Add Apache2 license


@ -35,9 +35,14 @@ func (b *EtcdBuilder) Build(c *fi.ModelBuilderContext) error {
return nil
}
switch b.Distribution {
case distros.DistributionCoreOS:
glog.Infof("Detected CoreOS; skipping etcd user installation") glog.Infof("Detected CoreOS; skipping etcd user installation")
return nil return nil
case distros.DistributionContainerOS:
glog.Infof("Detected ContainerOS; skipping etcd user installation")
return nil
}
// TODO: Do we actually use the user anywhere?


@ -0,0 +1,100 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"github.com/golang/glog"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/pkg/systemd"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
)
// FirewallBuilder configures the firewall (iptables)
type FirewallBuilder struct {
*NodeupModelContext
}
var _ fi.ModelBuilder = &FirewallBuilder{}
func (b *FirewallBuilder) Build(c *fi.ModelBuilderContext) error {
if b.Distribution == distros.DistributionContainerOS {
c.AddTask(b.buildFirewallScript())
c.AddTask(b.buildSystemdService())
}
return nil
}
func (b *FirewallBuilder) buildSystemdService() *nodetasks.Service {
manifest := &systemd.Manifest{}
manifest.Set("Unit", "Description", "Configure iptables for kubernetes")
manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kops")
manifest.Set("Unit", "Before", "network.target")
manifest.Set("Service", "Type", "oneshot")
manifest.Set("Service", "RemainAfterExit", "yes")
manifest.Set("Service", "ExecStart", "/home/kubernetes/bin/iptables-setup")
manifest.Set("Install", "WantedBy", "basic.target")
manifestString := manifest.Render()
glog.V(8).Infof("Built service manifest %q\n%s", "kubernetes-iptables-setup", manifestString)
service := &nodetasks.Service{
Name: "kubernetes-iptables-setup.service",
Definition: s(manifestString),
}
service.InitDefaults()
return service
}
func (b *FirewallBuilder) buildFirewallScript() *nodetasks.File {
// TODO: Do we want to rely on running nodeup on every boot, or do we want to install systemd units?
// TODO: The if statement in the script doesn't make it idempotent
// This is borrowed from gce/gci/configure-helper.sh
script := `#!/bin/bash
# Built by kops - do not edit
# The GCI image has host firewall which drop most inbound/forwarded packets.
# We need to add rules to accept all TCP/UDP/ICMP packets.
if iptables -L INPUT | grep "Chain INPUT (policy DROP)" > /dev/null; then
echo "Add rules to accept all inbound TCP/UDP/ICMP packets"
iptables -A INPUT -w -p TCP -j ACCEPT
iptables -A INPUT -w -p UDP -j ACCEPT
iptables -A INPUT -w -p ICMP -j ACCEPT
fi
if iptables -L FORWARD | grep "Chain FORWARD (policy DROP)" > /dev/null; then
echo "Add rules to accept all forwarded TCP/UDP/ICMP packets"
iptables -A FORWARD -w -p TCP -j ACCEPT
iptables -A FORWARD -w -p UDP -j ACCEPT
iptables -A FORWARD -w -p ICMP -j ACCEPT
fi
`
t := &nodetasks.File{
Path: "/home/kubernetes/bin/iptables-setup",
Contents: fi.NewStringResource(script),
Type: nodetasks.FileType_File,
Mode: s("0755"),
}
return t
}


@ -18,6 +18,7 @@ package model
import (
"fmt"
"path/filepath"
"strings" "strings"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
@ -64,6 +65,15 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
}
func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
kubeAPIServer := b.Cluster.Spec.KubeAPIServer
kubeAPIServer.ClientCAFile = filepath.Join(b.PathSrvKubernetes(), "ca.crt")
kubeAPIServer.TLSCertFile = filepath.Join(b.PathSrvKubernetes(), "server.cert")
kubeAPIServer.TLSPrivateKeyFile = filepath.Join(b.PathSrvKubernetes(), "server.key")
kubeAPIServer.BasicAuthFile = filepath.Join(b.PathSrvKubernetes(), "basic_auth.csv")
kubeAPIServer.TokenAuthFile = filepath.Join(b.PathSrvKubernetes(), "known_tokens.csv")
flags, err := flagbuilder.BuildFlags(b.Cluster.Spec.KubeAPIServer)
if err != nil {
return nil, fmt.Errorf("error building kube-apiserver flags: %v", err)
@ -141,12 +151,14 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) {
addHostPathMapping(pod, container, "cloudconfig", CloudConfigFilePath, true) addHostPathMapping(pod, container, "cloudconfig", CloudConfigFilePath, true)
} }
if b.Cluster.Spec.KubeAPIServer.PathSrvKubernetes != "" { pathSrvKubernetes := b.PathSrvKubernetes()
addHostPathMapping(pod, container, "srvkube", b.Cluster.Spec.KubeAPIServer.PathSrvKubernetes, true) if pathSrvKubernetes != "" {
addHostPathMapping(pod, container, "srvkube", pathSrvKubernetes, true)
} }
if b.Cluster.Spec.KubeAPIServer.PathSrvSshproxy != "" { pathSrvSshproxy := b.PathSrvSshproxy()
addHostPathMapping(pod, container, "srvsshproxy", b.Cluster.Spec.KubeAPIServer.PathSrvSshproxy, false) if pathSrvSshproxy != "" {
addHostPathMapping(pod, container, "srvsshproxy", pathSrvSshproxy, false)
} }
addHostPathMapping(pod, container, "logfile", "/var/log/kube-apiserver.log", false) addHostPathMapping(pod, container, "logfile", "/var/log/kube-apiserver.log", false)


@ -18,6 +18,9 @@ package model
import (
"fmt"
"path/filepath"
"strings"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
@ -25,7 +28,6 @@ import (
"k8s.io/kops/pkg/flagbuilder" "k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks" "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"strings"
) )
// KubeControllerManagerBuilder install kube-controller-manager (just the manifest at the moment) // KubeControllerManagerBuilder install kube-controller-manager (just the manifest at the moment)
@ -63,7 +65,12 @@ func (b *KubeControllerManagerBuilder) Build(c *fi.ModelBuilderContext) error {
}
func (b *KubeControllerManagerBuilder) buildPod() (*v1.Pod, error) {
kcm := b.Cluster.Spec.KubeControllerManager
kcm.RootCAFile = filepath.Join(b.PathSrvKubernetes(), "ca.crt")
kcm.ServiceAccountPrivateKeyFile = filepath.Join(b.PathSrvKubernetes(), "server.key")
flags, err := flagbuilder.BuildFlags(kcm)
if err != nil {
return nil, fmt.Errorf("error building kube-controller-manager flags: %v", err)
}
@ -127,8 +134,9 @@ func (b *KubeControllerManagerBuilder) buildPod() (*v1.Pod, error) {
addHostPathMapping(pod, container, "cloudconfig", CloudConfigFilePath, true) addHostPathMapping(pod, container, "cloudconfig", CloudConfigFilePath, true)
} }
if b.Cluster.Spec.KubeControllerManager.PathSrvKubernetes != "" { pathSrvKubernetes := b.PathSrvKubernetes()
addHostPathMapping(pod, container, "srvkube", b.Cluster.Spec.KubeControllerManager.PathSrvKubernetes, true) if pathSrvKubernetes != "" {
addHostPathMapping(pod, container, "srvkube", pathSrvKubernetes, true)
} }
addHostPathMapping(pod, container, "logfile", "/var/log/kube-controller-manager.log", false) addHostPathMapping(pod, container, "logfile", "/var/log/kube-controller-manager.log", false)


@ -66,5 +66,8 @@ func (b *KubectlBuilder) kubectlPath() string {
if b.Distribution == distros.DistributionCoreOS {
kubeletCommand = "/opt/bin/kubectl"
}
if b.Distribution == distros.DistributionContainerOS {
kubeletCommand = "/home/kubernetes/bin/kubectl"
}
return kubeletCommand
}


@ -57,6 +57,8 @@ func (b *KubeletBuilder) Build(c *fi.ModelBuilderContext) error {
flags += " --cloud-config=" + CloudConfigFilePath flags += " --cloud-config=" + CloudConfigFilePath
} }
flags += " --network-plugin-dir=" + b.NetworkPluginDir()
sysconfig := "DAEMON_ARGS=\"" + flags + "\"\n" sysconfig := "DAEMON_ARGS=\"" + flags + "\"\n"
t := &nodetasks.File{ t := &nodetasks.File{
@ -126,6 +128,9 @@ func (b *KubeletBuilder) kubeletPath() string {
if b.Distribution == distros.DistributionCoreOS {
kubeletCommand = "/opt/kubernetes/bin/kubelet"
}
if b.Distribution == distros.DistributionContainerOS {
kubeletCommand = "/home/kubernetes/bin/kubelet"
}
return kubeletCommand
}
@ -138,7 +143,7 @@ func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {
manifest.Set("Unit", "After", "docker.service") manifest.Set("Unit", "After", "docker.service")
if b.Distribution == distros.DistributionCoreOS { if b.Distribution == distros.DistributionCoreOS {
// We add /opt/kubernetes/bin for our utilities // We add /opt/kubernetes/bin for our utilities (socat)
manifest.Set("Service", "Environment", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/kubernetes/bin") manifest.Set("Service", "Environment", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/kubernetes/bin")
} }
@ -150,7 +155,7 @@ func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {
manifest.Set("Service", "KillMode", "process") manifest.Set("Service", "KillMode", "process")
manifestString := manifest.Render() manifestString := manifest.Render()
glog.V(8).Infof("Built service manifest %q\n%s", "docker", manifestString) glog.V(8).Infof("Built service manifest %q\n%s", "kubelet", manifestString)
service := &nodetasks.Service{ service := &nodetasks.Service{
Name: "kubelet.service", Name: "kubelet.service",


@ -36,6 +36,11 @@ func (b *LogrotateBuilder) Build(c *fi.ModelBuilderContext) error {
return nil
}
if b.Distribution == distros.DistributionContainerOS {
glog.Infof("Detected ContainerOS; won't install logrotate")
return nil
}
c.AddTask(&nodetasks.Package{Name: "logrotate"}) c.AddTask(&nodetasks.Package{Name: "logrotate"})
return nil return nil


@ -0,0 +1,85 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"fmt"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"path/filepath"
)
// NetworkBuilder writes CNI assets
type NetworkBuilder struct {
*NodeupModelContext
}
var _ fi.ModelBuilder = &NetworkBuilder{}
func (b *NetworkBuilder) Build(c *fi.ModelBuilderContext) error {
var assetNames []string
networking := b.Cluster.Spec.Networking
if networking == nil || networking.Classic != nil {
} else if networking.Kubenet != nil {
assetNames = append(assetNames, "bridge", "host-local", "loopback")
} else if networking.External != nil {
// external is based on kubenet
assetNames = append(assetNames, "bridge", "host-local", "loopback")
} else if networking.CNI != nil || networking.Weave != nil || networking.Flannel != nil || networking.Calico != nil || networking.Canal != nil {
assetNames = append(assetNames, "bridge", "host-local", "loopback", "ptp")
// Do we need tuning?
// TODO: Only when using flannel ?
assetNames = append(assetNames, "flannel")
} else if networking.Kopeio != nil {
// TODO combine with External
// Kopeio is based on kubenet / external
assetNames = append(assetNames, "bridge", "host-local", "loopback")
} else {
return fmt.Errorf("no networking mode set")
}
for _, assetName := range assetNames {
if err := b.addAsset(c, assetName); err != nil {
return err
}
}
return nil
}
func (b *NetworkBuilder) addAsset(c *fi.ModelBuilderContext, assetName string) error {
assetPath := ""
asset, err := b.Assets.Find(assetName, assetPath)
if err != nil {
return fmt.Errorf("error trying to locate asset %q: %v", assetName, err)
}
if asset == nil {
return fmt.Errorf("unable to locate asset %q", assetName)
}
t := &nodetasks.File{
Path: filepath.Join(b.NetworkPluginDir(), assetName),
Contents: asset,
Type: nodetasks.FileType_File,
Mode: s("0755"),
}
c.AddTask(t)
return nil
}


@ -0,0 +1,182 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"fmt"
"github.com/blang/semver"
"github.com/golang/glog"
"k8s.io/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/pkg/systemd"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"strings"
)
// ProtokubeBuilder configures protokube
type ProtokubeBuilder struct {
*NodeupModelContext
}
var _ fi.ModelBuilder = &ProtokubeBuilder{}
func (b *ProtokubeBuilder) Build(c *fi.ModelBuilderContext) error {
// TODO: Should we run _protokube on the nodes?
service, err := b.buildSystemdService()
if err != nil {
return err
}
c.AddTask(service)
return nil
}
func (b *ProtokubeBuilder) buildSystemdService() (*nodetasks.Service, error) {
k8sVersion, err := util.ParseKubernetesVersion(b.Cluster.Spec.KubernetesVersion)
if err != nil || k8sVersion == nil {
return nil, fmt.Errorf("unable to parse KubernetesVersion %q", b.Cluster.Spec.KubernetesVersion)
}
protokubeFlags := b.ProtokubeFlags(*k8sVersion)
protokubeFlagsArgs, err := flagbuilder.BuildFlags(protokubeFlags)
if err != nil {
return nil, err
}
protokubeCommand := "/usr/bin/docker run -v /:/rootfs/ -v /var/run/dbus:/var/run/dbus -v /run/systemd:/run/systemd --net=host --privileged "
protokubeCommand += b.ProtokubeImageName() + " /usr/bin/protokube "
protokubeCommand += protokubeFlagsArgs
manifest := &systemd.Manifest{}
manifest.Set("Unit", "Description", "Kubernetes Protokube Service")
manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kops")
//manifest.Set("Service", "EnvironmentFile", "/etc/sysconfig/protokube")
manifest.Set("Service", "ExecStartPre", b.ProtokubeImagePullCommand())
manifest.Set("Service", "ExecStart", protokubeCommand)
manifest.Set("Service", "Restart", "always")
manifest.Set("Service", "RestartSec", "2s")
manifest.Set("Service", "StartLimitInterval", "0")
manifest.Set("Install", "WantedBy", "multi-user.target")
manifestString := manifest.Render()
glog.V(8).Infof("Built service manifest %q\n%s", "protokube", manifestString)
service := &nodetasks.Service{
Name: "protokube.service",
Definition: s(manifestString),
}
service.InitDefaults()
return service, nil
}
// ProtokubeImageName returns the docker image for protokube
func (t *ProtokubeBuilder) ProtokubeImageName() string {
name := ""
if t.NodeupConfig.ProtokubeImage != nil && t.NodeupConfig.ProtokubeImage.Name != "" {
name = t.NodeupConfig.ProtokubeImage.Name
}
if name == "" {
// use current default corresponding to this version of nodeup
name = kops.DefaultProtokubeImageName()
}
return name
}
// ProtokubeImagePullCommand returns the command to pull the image
func (t *ProtokubeBuilder) ProtokubeImagePullCommand() string {
source := ""
if t.NodeupConfig.ProtokubeImage != nil {
source = t.NodeupConfig.ProtokubeImage.Source
}
if source == "" {
// Nothing to pull; return dummy value
return "/bin/true"
}
if strings.HasPrefix(source, "http:") || strings.HasPrefix(source, "https:") || strings.HasPrefix(source, "s3:") {
// We preloaded the image; return a dummy value
return "/bin/true"
}
return "/usr/bin/docker pull " + t.NodeupConfig.ProtokubeImage.Source
}
type ProtokubeFlags struct {
Master *bool `json:"master,omitempty" flag:"master"`
Containerized *bool `json:"containerized,omitempty" flag:"containerized"`
LogLevel *int32 `json:"logLevel,omitempty" flag:"v"`
DNSProvider *string `json:"dnsProvider,omitempty" flag:"dns"`
Zone []string `json:"zone,omitempty" flag:"zone"`
Channels []string `json:"channels,omitempty" flag:"channels"`
DNSInternalSuffix *string `json:"dnsInternalSuffix,omitempty" flag:"dns-internal-suffix"`
Cloud *string `json:"cloud,omitempty" flag:"cloud"`
}
// ProtokubeFlags returns the flags object for protokube
func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) *ProtokubeFlags {
f := &ProtokubeFlags{}
master := t.IsMaster
f.Master = fi.Bool(master)
if master {
f.Channels = t.NodeupConfig.Channels
}
f.LogLevel = fi.Int32(4)
f.Containerized = fi.Bool(true)
zone := t.Cluster.Spec.DNSZone
if zone != "" {
if strings.Contains(zone, ".") {
// match by name
f.Zone = append(f.Zone, zone)
} else {
// match by id
f.Zone = append(f.Zone, "*/"+zone)
}
} else {
glog.Warningf("DNSZone not specified; protokube won't be able to update DNS")
// TODO: Should we permit wildcard updates if zone is not specified?
//argv = append(argv, "--zone=*/*")
}
if t.Cluster.Spec.CloudProvider != "" {
f.Cloud = fi.String(t.Cluster.Spec.CloudProvider)
switch fi.CloudProviderID(t.Cluster.Spec.CloudProvider) {
case fi.CloudProviderAWS:
f.DNSProvider = fi.String("aws-route53")
case fi.CloudProviderGCE:
f.DNSProvider = fi.String("google-clouddns")
default:
glog.Warningf("Unknown cloudprovider %q; won't set DNS provider")
}
}
f.DNSInternalSuffix = fi.String(".internal." + t.Cluster.ObjectMeta.Name)
return f
}

nodeup/pkg/model/secrets.go

@ -0,0 +1,154 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"fmt"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"path/filepath"
"strings"
)
// SecretBuilder writes secrets
type SecretBuilder struct {
*NodeupModelContext
}
var _ fi.ModelBuilder = &SecretBuilder{}
func (b *SecretBuilder) Build(c *fi.ModelBuilderContext) error {
if b.KeyStore == nil {
return fmt.Errorf("KeyStore not set")
}
{
ca, err := b.KeyStore.CertificatePool(fi.CertificateId_CA)
if err != nil {
return err
}
serialized, err := ca.AsString()
if err != nil {
return err
}
t := &nodetasks.File{
Path: filepath.Join(b.PathSrvKubernetes(), "ca.crt"),
Contents: fi.NewStringResource(serialized),
Type: nodetasks.FileType_File,
}
c.AddTask(t)
}
{
cert, err := b.KeyStore.Cert("master")
if err != nil {
return err
}
serialized, err := cert.AsString()
if err != nil {
return err
}
t := &nodetasks.File{
Path: filepath.Join(b.PathSrvKubernetes(), "server.cert"),
Contents: fi.NewStringResource(serialized),
Type: nodetasks.FileType_File,
}
c.AddTask(t)
}
{
k, err := b.KeyStore.PrivateKey("master")
if err != nil {
return err
}
serialized, err := k.AsString()
if err != nil {
return err
}
t := &nodetasks.File{
Path: filepath.Join(b.PathSrvKubernetes(), "server.key"),
Contents: fi.NewStringResource(serialized),
Type: nodetasks.FileType_File,
}
c.AddTask(t)
}
if b.SecretStore != nil {
key := "kube"
token, err := b.SecretStore.FindSecret(key)
if err != nil {
return err
}
if token == nil {
return fmt.Errorf("token not found: %q", key)
}
csv := string(token.Data) + ",admin,admin"
t := &nodetasks.File{
Path: filepath.Join(b.PathSrvKubernetes(), "basic_auth.csv"),
Contents: fi.NewStringResource(csv),
Type: nodetasks.FileType_File,
Mode: s("0600"),
}
c.AddTask(t)
}
if b.SecretStore != nil {
allTokens, err := b.allTokens()
if err != nil {
return err
}
var lines []string
for id, token := range allTokens {
lines = append(lines, token+","+id+","+id)
}
csv := strings.Join(lines, "\n")
t := &nodetasks.File{
Path: filepath.Join(b.PathSrvKubernetes(), "known_tokens.csv"),
Contents: fi.NewStringResource(csv),
Type: nodetasks.FileType_File,
Mode: s("0600"),
}
c.AddTask(t)
}
return nil
}
// allTokens returns a map of all tokens
func (b *SecretBuilder) allTokens() (map[string]string, error) {
tokens := make(map[string]string)
ids, err := b.SecretStore.ListSecrets()
if err != nil {
return nil, err
}
for _, id := range ids {
token, err := b.SecretStore.FindSecret(id)
if err != nil {
return nil, err
}
tokens[id] = string(token.Data)
}
return tokens, nil
}


@ -123,7 +123,7 @@ func (b *SysctlBuilder) Build(c *fi.ModelBuilderContext) error {
Path: "/etc/sysctl.d/99-k8s-general.conf", Path: "/etc/sysctl.d/99-k8s-general.conf",
Contents: fi.NewStringResource(strings.Join(sysctls, "\n")), Contents: fi.NewStringResource(strings.Join(sysctls, "\n")),
Type: nodetasks.FileType_File, Type: nodetasks.FileType_File,
OnChangeExecute: []string{"sysctl", "--system"}, OnChangeExecute: [][]string{{"sysctl", "--system"}},
} }
c.AddTask(t) c.AddTask(t)


@ -314,6 +314,9 @@ type KubeletConfigSpec struct {
// The full path of the directory in which to search for additional third party volume plugins
VolumePluginDirectory string `json:"volumePluginDirectory,omitempty" flag:"volume-plugin-dir"`
// Taints to add when registering a node in the cluster
Taints []string `json:"taints,omitempty" flag:"register-with-taints"`
}
type KubeProxyConfig struct {
@ -374,9 +377,12 @@ type KubeProxyConfig struct {
}
type KubeAPIServerConfig struct {
// TODO: Remove PathSrvKubernetes - unused
PathSrvKubernetes string `json:"pathSrvKubernetes,omitempty"`
// TODO: Remove PathSrvSshProxy - unused
PathSrvSshproxy string `json:"pathSrvSshproxy,omitempty"`
Image string `json:"image,omitempty"`
LogLevel int32 `json:"logLevel,omitempty" flag:"v"`
@ -387,13 +393,22 @@ type KubeAPIServerConfig struct {
EtcdServersOverrides []string `json:"etcdServersOverrides,omitempty" flag:"etcd-servers-overrides"`
AdmissionControl []string `json:"admissionControl,omitempty" flag:"admission-control"`
ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty" flag:"service-cluster-ip-range"`
// TODO: Remove unused BasicAuthFile
BasicAuthFile string `json:"basicAuthFile,omitempty" flag:"basic-auth-file"`
// TODO: Remove unused ClientCAFile
ClientCAFile string `json:"clientCAFile,omitempty" flag:"client-ca-file"`
// TODO: Remove unused TLSCertFile
TLSCertFile string `json:"tlsCertFile,omitempty" flag:"tls-cert-file"`
// TODO: Remove unused TLSPrivateKeyFile
TLSPrivateKeyFile string `json:"tlsPrivateKeyFile,omitempty" flag:"tls-private-key-file"`
// TODO: Remove unused TokenAuthFile
TokenAuthFile string `json:"tokenAuthFile,omitempty" flag:"token-auth-file"`
AllowPrivileged *bool `json:"allowPrivileged,omitempty" flag:"allow-privileged"`
APIServerCount *int32 `json:"apiServerCount,omitempty" flag:"apiserver-count"`
// keys and values in RuntimeConfig are parsed into the `--runtime-config` parameter
// for KubeAPIServer, concatenated with commas. ex: `--runtime-config=key1=value1,key2=value2`.
// Use this to enable alpha resources on kube-apiserver
@ -442,10 +457,12 @@ type KubeControllerManagerConfig struct {
Master string `json:"master,omitempty" flag:"master"`
LogLevel int32 `json:"logLevel,omitempty" flag:"v" flag-empty:"0"`
// TODO: Remove as unused
ServiceAccountPrivateKeyFile string `json:"serviceAccountPrivateKeyFile,omitempty" flag:"service-account-private-key-file"`
Image string `json:"image,omitempty"`
// TODO: Remove PathSrvKubernetes - unused
PathSrvKubernetes string `json:"pathSrvKubernetes,omitempty"`
// Configuration flags - a subset of https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
@ -560,9 +577,12 @@ type KubeControllerManagerConfig struct {
// configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs
// to be configured on the cloud provider.
ConfigureCloudRoutes *bool `json:"configureCloudRoutes,omitempty" flag:"configure-cloud-routes"`
// TODO: Remove as unused
// rootCAFile is the root certificate authority will be included in service
// account's token secret. This must be a valid PEM-encoded CA bundle.
RootCAFile string `json:"rootCAFile,omitempty" flag:"root-ca-file"`
//// contentType is contentType of requests sent to apiserver.
//ContentType string `json:"contentType"`
//// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.


@ -22,9 +22,11 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kops/pkg/apis/kops/util"
)
const LabelClusterName = "kops.k8s.io/cluster"
const TaintNoScheduleMaster = "dedicated=master:NoSchedule"
// InstanceGroup represents a group of instances (either nodes or masters) with the same configuration
type InstanceGroup struct {
@ -96,6 +98,9 @@ type InstanceGroupSpec struct {
// Kubelet overrides kubelet config from the ClusterSpec
Kubelet *KubeletConfigSpec `json:"kubelet,omitempty"`
// Taints indicates the kubernetes taints for nodes in this group
Taints []string `json:"taints,omitempty"`
} }
// PerformAssignmentsInstanceGroups populates InstanceGroups with default values // PerformAssignmentsInstanceGroups populates InstanceGroups with default values
@ -181,6 +186,11 @@ func (g *InstanceGroup) CrossValidate(cluster *Cluster, strict bool) error {
return err return err
} }
err = g.ValidateTaintsForKubeVersion(cluster)
if err != nil {
return err
}
// Check that instance groups are defined in valid zones // Check that instance groups are defined in valid zones
{ {
clusterSubnets := make(map[string]*ClusterSubnetSpec) clusterSubnets := make(map[string]*ClusterSubnetSpec)
@ -201,3 +211,19 @@ func (g *InstanceGroup) CrossValidate(cluster *Cluster, strict bool) error {
return nil return nil
} }
// ValidateTaintsForKubeVersion ensures that users don't try to specify custom taints on pre-1.6.0 IGs
func (g *InstanceGroup) ValidateTaintsForKubeVersion(cluster *Cluster) error {
kv, err := util.ParseKubernetesVersion(cluster.Spec.KubernetesVersion)
if err != nil {
return fmt.Errorf("Unable to determine kubernetes version from %q", cluster.Spec.KubernetesVersion)
}
if kv.Major == 1 && kv.Minor <= 5 && len(g.Spec.Taints) > 0 {
if !(g.IsMaster() && g.Spec.Taints[0] == TaintNoScheduleMaster && len(g.Spec.Taints) == 1) {
return fmt.Errorf("User-specified taints are not supported before kubernetes version 1.6.0")
}
}
return nil
}
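
A minimal standalone sketch of the gate above (the standalone shape and names are illustrative; the real code parses the version via util.ParseKubernetesVersion): only the default master taint is accepted on pre-1.6 instance groups.

```go
package main

import "fmt"

const taintNoScheduleMaster = "dedicated=master:NoSchedule"

// allowTaints mirrors the pre-1.6 gate: custom taints are rejected, but a
// master group carrying only the default NoSchedule taint still passes.
func allowTaints(major, minor int, isMaster bool, taints []string) error {
	if major == 1 && minor <= 5 && len(taints) > 0 {
		if !(isMaster && len(taints) == 1 && taints[0] == taintNoScheduleMaster) {
			return fmt.Errorf("user-specified taints are not supported before kubernetes version 1.6.0")
		}
	}
	return nil
}

func main() {
	fmt.Println(allowTaints(1, 5, true, []string{taintNoScheduleMaster})) // <nil>
	fmt.Println(allowTaints(1, 5, false, []string{"foo=bar:NoSchedule"})) // error
	fmt.Println(allowTaints(1, 6, false, []string{"foo=bar:NoSchedule"})) // <nil>
}
```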

View File

@ -16,7 +16,12 @@ limitations under the License.
package kops package kops
import "k8s.io/kops/upup/pkg/fi/utils" import (
"fmt"
"github.com/blang/semver"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/upup/pkg/fi/utils"
)
const RoleLabelName = "kubernetes.io/role" const RoleLabelName = "kubernetes.io/role"
const RoleMasterLabelValue = "master" const RoleMasterLabelValue = "master"
@ -60,5 +65,33 @@ func BuildKubeletConfigSpec(cluster *Cluster, instanceGroup *InstanceGroup) (*Ku
utils.JsonMergeStruct(c, instanceGroup.Spec.Kubelet) utils.JsonMergeStruct(c, instanceGroup.Spec.Kubelet)
} }
sv, err := util.ParseKubernetesVersion(cluster.Spec.KubernetesVersion)
if err != nil {
return c, fmt.Errorf("Failed to lookup kubernetes version: %v", err)
}
// --register-with-taints was available in the first 1.6.0 alpha, no need to rely on semver's pre/build ordering
sv.Pre = nil
sv.Build = nil
if sv.GTE(semver.Version{Major: 1, Minor: 6, Patch: 0, Pre: nil, Build: nil}) {
if len(instanceGroup.Spec.Taints) > 0 {
c.Taints = append([]string(nil), instanceGroup.Spec.Taints...)
}
// Enable scheduling since it can be controlled via taints.
// For pre-1.6.0 clusters, this is handled by tainter.go
registerSchedulable := true
c.RegisterSchedulable = &registerSchedulable
} else {
err = instanceGroup.ValidateTaintsForKubeVersion(cluster)
if err != nil {
return nil, err
}
}
return c, nil return c, nil
} }
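
Why Pre and Build are cleared before the comparison: under semver ordering a pre-release sorts below the release it precedes, so `1.6.0-alpha.1` would otherwise fail a `>= 1.6.0` check even though it already supports `--register-with-taints`. A small sketch using the same blang/semver package:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	v160 := semver.Version{Major: 1, Minor: 6, Patch: 0}

	sv := semver.MustParse("1.6.0-alpha.1")
	fmt.Println(sv.GTE(v160)) // false: pre-releases sort below the release

	// Clearing Pre/Build compares on the numeric triple only, which is what
	// the kubelet-config builder does before deciding to emit taints.
	sv.Pre = nil
	sv.Build = nil
	fmt.Println(sv.GTE(v160)) // true
}
```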

View File

@ -20,14 +20,18 @@ import (
"testing" "testing"
) )
var taintValidationError = "User-specified taints are not supported before kubernetes version 1.6.0"
func Test_InstanceGroupKubeletMerge(t *testing.T) { func Test_InstanceGroupKubeletMerge(t *testing.T) {
var cluster = &Cluster{} var cluster = &Cluster{}
cluster.Spec.Kubelet = &KubeletConfigSpec{} cluster.Spec.Kubelet = &KubeletConfigSpec{}
cluster.Spec.Kubelet.NvidiaGPUs = 0 cluster.Spec.Kubelet.NvidiaGPUs = 0
cluster.Spec.KubernetesVersion = "1.6.0"
var instanceGroup = &InstanceGroup{} var instanceGroup = &InstanceGroup{}
instanceGroup.Spec.Kubelet = &KubeletConfigSpec{} instanceGroup.Spec.Kubelet = &KubeletConfigSpec{}
instanceGroup.Spec.Kubelet.NvidiaGPUs = 1 instanceGroup.Spec.Kubelet.NvidiaGPUs = 1
instanceGroup.Spec.Role = InstanceGroupRoleNode
var mergedKubeletSpec, err = BuildKubeletConfigSpec(cluster, instanceGroup) var mergedKubeletSpec, err = BuildKubeletConfigSpec(cluster, instanceGroup)
if err != nil { if err != nil {
@ -41,3 +45,99 @@ func Test_InstanceGroupKubeletMerge(t *testing.T) {
t.Errorf("InstanceGroup kubelet value (%d) should be reflected in merged output", instanceGroup.Spec.Kubelet.NvidiaGPUs) t.Errorf("InstanceGroup kubelet value (%d) should be reflected in merged output", instanceGroup.Spec.Kubelet.NvidiaGPUs)
} }
} }
func TestTaintsAppliedAfter160(t *testing.T) {
exp := map[string]bool{
"1.4.9": false,
"1.5.2": false,
"1.6.0-alpha.1": true,
"1.6.0": true,
"1.6.5": true,
"1.7.0": true,
}
for ver, e := range exp {
helpTestTaintsForV(t, ver, e)
}
}
func TestDefaultTaintsEnforcedBefore160(t *testing.T) {
type param struct {
ver string
role InstanceGroupRole
taints []string
shouldErr bool
}
params := []param{
{"1.5.0", InstanceGroupRoleNode, []string{TaintNoScheduleMaster}, true},
{"1.5.1", InstanceGroupRoleNode, nil, false},
{"1.5.2", InstanceGroupRoleNode, []string{}, false},
{"1.6.0", InstanceGroupRoleNode, []string{TaintNoScheduleMaster}, false},
{"1.6.1", InstanceGroupRoleNode, []string{"Foo"}, false},
}
for _, p := range params {
cluster := &Cluster{Spec: ClusterSpec{KubernetesVersion: p.ver}}
ig := &InstanceGroup{Spec: InstanceGroupSpec{
Taints: p.taints,
Role: p.role,
}}
_, err := BuildKubeletConfigSpec(cluster, ig)
if p.shouldErr {
if err == nil {
t.Fatal("Expected error building kubelet config, received nil.")
} else if err.Error() != taintValidationError {
t.Fatalf("Received an unexpected error validating taints: '%s'", err.Error())
}
} else {
if err != nil {
t.Fatalf("Received an unexpected error validating taints: '%s', params: '%v'", err.Error(), p)
}
}
}
}
func helpTestTaintsForV(t *testing.T, version string, shouldApply bool) {
cluster := &Cluster{Spec: ClusterSpec{KubernetesVersion: version}}
ig := &InstanceGroup{Spec: InstanceGroupSpec{Role: InstanceGroupRoleMaster, Taints: []string{"foo", "bar", "baz"}}}
c, err := BuildKubeletConfigSpec(cluster, ig)
var expTaints []string
if shouldApply {
expTaints = []string{"foo", "bar", "baz"}
if c.RegisterSchedulable == nil || !*c.RegisterSchedulable {
t.Fatalf("Expected RegisterSchedulable == &true, got %v", c.RegisterSchedulable)
}
if !aEqual(expTaints, c.Taints) {
t.Fatalf("Expected taints %v, got %v", expTaints, c.Taints)
}
} else if err == nil {
t.Fatalf("Expected taint validation error, received nil")
} else if err.Error() != taintValidationError {
t.Fatalf("Received an unexpected error: '%s'", err.Error())
}
}
func aEqual(exp, other []string) bool {
if exp == nil && other != nil {
return false
}
if exp != nil && other == nil {
return false
}
if len(exp) != len(other) {
return false
}
for i, e := range exp {
if other[i] != e {
return false
}
}
return true
}

View File

@ -313,6 +313,9 @@ type KubeletConfigSpec struct {
// The full path of the directory in which to search for additional third party volume plugins // The full path of the directory in which to search for additional third party volume plugins
VolumePluginDirectory string `json:"volumePluginDirectory,omitempty" flag:"volume-plugin-dir"` VolumePluginDirectory string `json:"volumePluginDirectory,omitempty" flag:"volume-plugin-dir"`
// Taints to add when registering a node in the cluster
Taints []string `json:"taints,omitempty" flag:"register-with-taints"`
} }
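
The `flag:"register-with-taints"` tag is what turns this field into a kubelet argument; kubelet 1.6+ accepts the taints as a comma-separated `key=value:effect` list. A hedged sketch of the expected command-line rendering (the comma-joining of the slice is an assumption about how the flag builder flattens it):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	taints := []string{"dedicated=master:NoSchedule", "gpu=true:PreferNoSchedule"}
	// Roughly what ends up on the kubelet command line for a 1.6+ node.
	fmt.Printf("--register-with-taints=%s\n", strings.Join(taints, ","))
}
```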
type KubeProxyConfig struct { type KubeProxyConfig struct {

View File

@ -80,4 +80,10 @@ type InstanceGroupSpec struct {
// Describes the tenancy of the instance group. Can be either default or dedicated. // Describes the tenancy of the instance group. Can be either default or dedicated.
// Currently only applies to AWS. // Currently only applies to AWS.
Tenancy string `json:"tenancy,omitempty"` Tenancy string `json:"tenancy,omitempty"`
// Kubelet overrides kubelet config from the ClusterSpec
Kubelet *KubeletConfigSpec `json:"kubelet,omitempty"`
// Taints indicates the kubernetes taints for nodes in this group
Taints []string `json:"taints,omitempty"`
} }

View File

@ -928,6 +928,16 @@ func autoConvert_v1alpha1_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan
out.CloudLabels = in.CloudLabels out.CloudLabels = in.CloudLabels
out.NodeLabels = in.NodeLabels out.NodeLabels = in.NodeLabels
out.Tenancy = in.Tenancy out.Tenancy = in.Tenancy
if in.Kubelet != nil {
in, out := &in.Kubelet, &out.Kubelet
*out = new(kops.KubeletConfigSpec)
if err := Convert_v1alpha1_KubeletConfigSpec_To_kops_KubeletConfigSpec(*in, *out, s); err != nil {
return err
}
} else {
out.Kubelet = nil
}
out.Taints = in.Taints
return nil return nil
} }
@ -946,6 +956,16 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha1_InstanceGroupSpec(in *kops.I
out.CloudLabels = in.CloudLabels out.CloudLabels = in.CloudLabels
out.NodeLabels = in.NodeLabels out.NodeLabels = in.NodeLabels
out.Tenancy = in.Tenancy out.Tenancy = in.Tenancy
if in.Kubelet != nil {
in, out := &in.Kubelet, &out.Kubelet
*out = new(KubeletConfigSpec)
if err := Convert_kops_KubeletConfigSpec_To_v1alpha1_KubeletConfigSpec(*in, *out, s); err != nil {
return err
}
} else {
out.Kubelet = nil
}
out.Taints = in.Taints
return nil return nil
} }
@ -1237,6 +1257,7 @@ func autoConvert_v1alpha1_KubeletConfigSpec_To_kops_KubeletConfigSpec(in *Kubele
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = in.EvictionMinimumReclaim out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
out.VolumePluginDirectory = in.VolumePluginDirectory out.VolumePluginDirectory = in.VolumePluginDirectory
out.Taints = in.Taints
return nil return nil
} }
@ -1282,6 +1303,7 @@ func autoConvert_kops_KubeletConfigSpec_To_v1alpha1_KubeletConfigSpec(in *kops.K
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = in.EvictionMinimumReclaim out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
out.VolumePluginDirectory = in.VolumePluginDirectory out.VolumePluginDirectory = in.VolumePluginDirectory
out.Taints = in.Taints
return nil return nil
} }

View File

@ -135,6 +135,9 @@ type KubeletConfigSpec struct {
// The full path of the directory in which to search for additional third party volume plugins // The full path of the directory in which to search for additional third party volume plugins
VolumePluginDirectory string `json:"volumePluginDirectory,omitempty" flag:"volume-plugin-dir"` VolumePluginDirectory string `json:"volumePluginDirectory,omitempty" flag:"volume-plugin-dir"`
// Taints to add when registering a node in the cluster
Taints []string `json:"taints,omitempty" flag:"register-with-taints"`
} }
type KubeProxyConfig struct { type KubeProxyConfig struct {

View File

@ -84,7 +84,13 @@ type InstanceGroupSpec struct {
// NodeLabels indicates the kubernetes labels for nodes in this group // NodeLabels indicates the kubernetes labels for nodes in this group
NodeLabels map[string]string `json:"nodeLabels,omitempty"` NodeLabels map[string]string `json:"nodeLabels,omitempty"`
// Describes the tenancy of the instance group. Can be either default or dedicated. // Describes the tenancy of the instance group. Can be either default or dedicated.
// Currently only applies to AWS. // Currently only applies to AWS.
Tenancy string `json:"tenancy,omitempty"` Tenancy string `json:"tenancy,omitempty"`
// Kubelet overrides kubelet config from the ClusterSpec
Kubelet *KubeletConfigSpec `json:"kubelet,omitempty"`
// Taints indicates the kubernetes taints for nodes in this group
Taints []string `json:"taints,omitempty"`
} }

View File

@ -1018,6 +1018,16 @@ func autoConvert_v1alpha2_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan
out.CloudLabels = in.CloudLabels out.CloudLabels = in.CloudLabels
out.NodeLabels = in.NodeLabels out.NodeLabels = in.NodeLabels
out.Tenancy = in.Tenancy out.Tenancy = in.Tenancy
if in.Kubelet != nil {
in, out := &in.Kubelet, &out.Kubelet
*out = new(kops.KubeletConfigSpec)
if err := Convert_v1alpha2_KubeletConfigSpec_To_kops_KubeletConfigSpec(*in, *out, s); err != nil {
return err
}
} else {
out.Kubelet = nil
}
out.Taints = in.Taints
return nil return nil
} }
@ -1040,6 +1050,16 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha2_InstanceGroupSpec(in *kops.I
out.CloudLabels = in.CloudLabels out.CloudLabels = in.CloudLabels
out.NodeLabels = in.NodeLabels out.NodeLabels = in.NodeLabels
out.Tenancy = in.Tenancy out.Tenancy = in.Tenancy
if in.Kubelet != nil {
in, out := &in.Kubelet, &out.Kubelet
*out = new(KubeletConfigSpec)
if err := Convert_kops_KubeletConfigSpec_To_v1alpha2_KubeletConfigSpec(*in, *out, s); err != nil {
return err
}
} else {
out.Kubelet = nil
}
out.Taints = in.Taints
return nil return nil
} }
@ -1335,6 +1355,7 @@ func autoConvert_v1alpha2_KubeletConfigSpec_To_kops_KubeletConfigSpec(in *Kubele
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = in.EvictionMinimumReclaim out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
out.VolumePluginDirectory = in.VolumePluginDirectory out.VolumePluginDirectory = in.VolumePluginDirectory
out.Taints = in.Taints
return nil return nil
} }
@ -1380,6 +1401,7 @@ func autoConvert_kops_KubeletConfigSpec_To_v1alpha2_KubeletConfigSpec(in *kops.K
out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod out.EvictionMaxPodGracePeriod = in.EvictionMaxPodGracePeriod
out.EvictionMinimumReclaim = in.EvictionMinimumReclaim out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
out.VolumePluginDirectory = in.VolumePluginDirectory out.VolumePluginDirectory = in.VolumePluginDirectory
out.Taints = in.Taints
return nil return nil
} }

View File

@ -18,17 +18,16 @@ package model
import ( import (
"k8s.io/kops/pkg/apis/kops" "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/model/resources" "k8s.io/kops/pkg/model/resources"
"k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup"
"text/template" "text/template"
) )
// BootstrapScript creates the bootstrap script // BootstrapScript creates the bootstrap script
type BootstrapScript struct { type BootstrapScript struct {
NodeUpSource string NodeUpSource string
NodeUpSourceHash string NodeUpSourceHash string
NodeUpConfigBuilder func(ig *kops.InstanceGroup) (*nodeup.NodeUpConfig, error) NodeUpConfigBuilder func(ig *kops.InstanceGroup) (*nodeup.NodeUpConfig, error)
} }

View File

@ -67,7 +67,7 @@ func UsesKubenet(clusterSpec *kops.ClusterSpec) (bool, error) {
// Kopeio is based on kubenet / external // Kopeio is based on kubenet / external
return true, nil return true, nil
} else { } else {
return false, fmt.Errorf("No networking mode set") return false, fmt.Errorf("no networking mode set")
} }
} }

View File

@ -108,10 +108,6 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error
return fmt.Errorf("unknown cloud provider %q", clusterSpec.CloudProvider) return fmt.Errorf("unknown cloud provider %q", clusterSpec.CloudProvider)
} }
kcm.PathSrvKubernetes = "/srv/kubernetes"
kcm.RootCAFile = "/srv/kubernetes/ca.crt"
kcm.ServiceAccountPrivateKeyFile = "/srv/kubernetes/server.key"
kcm.Master = "127.0.0.1:8080" kcm.Master = "127.0.0.1:8080"
kcm.LogLevel = 2 kcm.LogLevel = 2
@ -140,7 +136,7 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error
// Kopeio is based on kubenet / external // Kopeio is based on kubenet / external
kcm.ConfigureCloudRoutes = fi.Bool(true) kcm.ConfigureCloudRoutes = fi.Bool(true)
} else { } else {
return fmt.Errorf("No networking mode set") return fmt.Errorf("no networking mode set")
} }
return nil return nil

View File

@ -40,6 +40,10 @@ NODEUP_HASH={{ NodeUpSourceHash }}
function ensure-install-dir() { function ensure-install-dir() {
INSTALL_DIR="/var/cache/kubernetes-install" INSTALL_DIR="/var/cache/kubernetes-install"
# On ContainerOS, we install to /var/lib/toolbox instead (because of noexec)
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kubernetes-install"
fi
mkdir -p ${INSTALL_DIR} mkdir -p ${INSTALL_DIR}
cd ${INSTALL_DIR} cd ${INSTALL_DIR}
} }
@ -122,7 +126,7 @@ function download-release() {
echo "Running nodeup" echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793 # We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=/var/cache/kubernetes-install/kube_env.yaml --v=8 ) ( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/kube_env.yaml --v=8 )
} }
#################################################################################### ####################################################################################

View File

@ -73,7 +73,7 @@ func BuildEtcdManifest(c *EtcdCluster) *v1.Pod {
container.Env = append(container.Env, v1.EnvVar{Name: "ETCD_INITIAL_CLUSTER", Value: strings.Join(initialCluster, ",")}) container.Env = append(container.Env, v1.EnvVar{Name: "ETCD_INITIAL_CLUSTER", Value: strings.Join(initialCluster, ",")})
container.LivenessProbe = &v1.Probe{ container.LivenessProbe = &v1.Probe{
InitialDelaySeconds: 600, InitialDelaySeconds: 15,
TimeoutSeconds: 15, TimeoutSeconds: 15,
} }
container.LivenessProbe.HTTPGet = &v1.HTTPGetAction{ container.LivenessProbe.HTTPGet = &v1.HTTPGetAction{

View File

@ -44,9 +44,9 @@ type nodePatchSpec struct {
// Note that this is for k8s <= 1.5 only // Note that this is for k8s <= 1.5 only
const TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" const TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
// ApplyMasterTaints finds masters that have not yet been tainted, and applies the master taint // ApplyMasterTaints finds masters that have not yet been tainted, and applies the master taint.
// Once the kubelet support --taints (like --labels) this can probably go away entirely. // Once all supported kubelet versions accept the --register-with-taints flag introduced in 1.6.0, this can probably
// It also sets the unschedulable flag to false, so pods (with a toleration) can target the node // go away entirely. It also sets the unschedulable flag to false, so pods (with a toleration) can target the node
func ApplyMasterTaints(kubeContext *KubernetesContext) error { func ApplyMasterTaints(kubeContext *KubernetesContext) error {
client, err := kubeContext.KubernetesClient() client, err := kubeContext.KubernetesClient()
if err != nil { if err != nil {
@ -74,7 +74,7 @@ func ApplyMasterTaints(kubeContext *KubernetesContext) error {
nodeTaintJSON := node.Annotations[TaintsAnnotationKey] nodeTaintJSON := node.Annotations[TaintsAnnotationKey]
if nodeTaintJSON != "" { if nodeTaintJSON != "" {
if nodeTaintJSON != string(taintJSON) { if nodeTaintJSON != string(taintJSON) {
glog.Infof("Node %q had unexpected taint: %v", node.Name, nodeTaintJSON) glog.Infof("Node %q is registered with taint: %v", node.Name, nodeTaintJSON)
} }
continue continue
} }
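
For pre-1.6 nodes the taint lives in the `scheduler.alpha.kubernetes.io/taints` annotation rather than the node spec. A rough sketch of the JSON payload the tainter writes (field names follow the historical alpha annotation format and are shown for illustration only):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type taint struct {
	Key    string `json:"key"`
	Value  string `json:"value"`
	Effect string `json:"effect"`
}

func main() {
	taints := []taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}}
	b, _ := json.Marshal(taints)
	// Stored under the scheduler.alpha.kubernetes.io/taints node annotation.
	fmt.Println(string(b))
}
```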

View File

@ -58,6 +58,17 @@ func (k *VolumeMountController) mountMasterVolumes() ([]*Volume, error) {
glog.V(2).Infof("Master volume %q is attached at %q", v.ID, v.LocalDevice) glog.V(2).Infof("Master volume %q is attached at %q", v.ID, v.LocalDevice)
mountpoint := "/mnt/master-" + v.ID mountpoint := "/mnt/master-" + v.ID
// On ContainerOS, we mount to /mnt/disks instead (/mnt is readonly)
_, err := os.Stat(PathFor("/mnt/disks"))
if err != nil {
if !os.IsNotExist(err) {
return nil, fmt.Errorf("error checking for /mnt/disks: %v", err)
}
} else {
mountpoint = "/mnt/disks/master-" + v.ID
}
glog.Infof("Doing safe-format-and-mount of %s to %s", v.LocalDevice, mountpoint) glog.Infof("Doing safe-format-and-mount of %s to %s", v.LocalDevice, mountpoint)
fstype := "" fstype := ""
err = k.safeFormatAndMount(v.LocalDevice, mountpoint, fstype) err = k.safeFormatAndMount(v.LocalDevice, mountpoint, fstype)
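
A standalone sketch of the mountpoint selection above, assuming a plain `os.Stat` (the real code goes through `PathFor` to handle running in a chroot):

```go
package main

import (
	"fmt"
	"os"
)

// mountpointFor picks /mnt/disks/master-<id> on ContainerOS (where /mnt is
// read-only) and /mnt/master-<id> everywhere else.
func mountpointFor(volumeID string) (string, error) {
	mountpoint := "/mnt/master-" + volumeID
	if _, err := os.Stat("/mnt/disks"); err != nil {
		if !os.IsNotExist(err) {
			return "", fmt.Errorf("error checking for /mnt/disks: %v", err)
		}
	} else {
		mountpoint = "/mnt/disks/master-" + volumeID
	}
	return mountpoint, nil
}

func main() {
	mp, err := mountpointFor("vol-0123")
	fmt.Println(mp, err)
}
```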

View File

@ -37,7 +37,7 @@ spec:
host: 127.0.0.1 host: 127.0.0.1
path: /health path: /health
port: 4001 port: 4001
initialDelaySeconds: 600 initialDelaySeconds: 15
timeoutSeconds: 15 timeoutSeconds: 15
name: etcd-container name: etcd-container
ports: ports:

View File

@ -62,6 +62,8 @@ spec:
maxSize: 1 maxSize: 1
minSize: 1 minSize: 1
role: Master role: Master
taints:
- dedicated=master:NoSchedule
zones: zones:
- us-test-1a - us-test-1a
@ -80,6 +82,8 @@ spec:
maxSize: 1 maxSize: 1
minSize: 1 minSize: 1
role: Master role: Master
taints:
- dedicated=master:NoSchedule
zones: zones:
- us-test-1b - us-test-1b
@ -98,6 +102,8 @@ spec:
maxSize: 1 maxSize: 1
minSize: 1 minSize: 1
role: Master role: Master
taints:
- dedicated=master:NoSchedule
zones: zones:
- us-test-1c - us-test-1c

View File

@ -72,6 +72,8 @@ spec:
role: Master role: Master
subnets: subnets:
- us-test-1a - us-test-1a
taints:
- dedicated=master:NoSchedule
--- ---
@ -90,6 +92,8 @@ spec:
role: Master role: Master
subnets: subnets:
- us-test-1b - us-test-1b
taints:
- dedicated=master:NoSchedule
--- ---
@ -108,6 +112,8 @@ spec:
role: Master role: Master
subnets: subnets:
- us-test-1c - us-test-1c
taints:
- dedicated=master:NoSchedule
--- ---

View File

@ -76,6 +76,8 @@ spec:
role: Master role: Master
subnets: subnets:
- us-test-1a - us-test-1a
taints:
- dedicated=master:NoSchedule
--- ---
@ -94,6 +96,8 @@ spec:
role: Master role: Master
subnets: subnets:
- us-test-1a - us-test-1a
taints:
- dedicated=master:NoSchedule
--- ---
@ -112,6 +116,8 @@ spec:
role: Master role: Master
subnets: subnets:
- us-test-1a - us-test-1a
taints:
- dedicated=master:NoSchedule
--- ---
@ -130,6 +136,8 @@ spec:
role: Master role: Master
subnets: subnets:
- us-test-1b - us-test-1b
taints:
- dedicated=master:NoSchedule
--- ---
@ -148,6 +156,8 @@ spec:
role: Master role: Master
subnets: subnets:
- us-test-1b - us-test-1b
taints:
- dedicated=master:NoSchedule
--- ---

View File

@ -50,6 +50,8 @@ spec:
maxSize: 1 maxSize: 1
minSize: 1 minSize: 1
role: Master role: Master
taints:
- dedicated=master:NoSchedule
zones: zones:
- us-test-1a - us-test-1a

View File

@ -56,6 +56,8 @@ spec:
role: Master role: Master
subnets: subnets:
- us-test-1a - us-test-1a
taints:
- dedicated=master:NoSchedule
--- ---

View File

@ -74,6 +74,8 @@ spec:
maxSize: 1 maxSize: 1
minSize: 1 minSize: 1
role: Master role: Master
taints:
- dedicated=master:NoSchedule
zones: zones:
- us-test-1a - us-test-1a

View File

@ -82,6 +82,8 @@ spec:
role: Master role: Master
subnets: subnets:
- us-test-1a - us-test-1a
taints:
- dedicated=master:NoSchedule
--- ---

View File

@ -80,6 +80,8 @@ spec:
maxSize: 1 maxSize: 1
minSize: 1 minSize: 1
role: Master role: Master
taints:
- dedicated=master:NoSchedule
zones: zones:
- us-test-1a - us-test-1a

View File

@ -88,6 +88,8 @@ spec:
role: Master role: Master
subnets: subnets:
- us-test-1a - us-test-1a
taints:
- dedicated=master:NoSchedule
--- ---

File diff suppressed because one or more lines are too long

View File

@ -1,18 +1,11 @@
KubeAPIServer: KubeAPIServer:
SecurePort: 443 SecurePort: 443
PathSrvKubernetes: /srv/kubernetes
PathSrvSshproxy: /srv/sshproxy
Address: 127.0.0.1 Address: 127.0.0.1
EtcdServers: EtcdServers:
- http://127.0.0.1:4001 - http://127.0.0.1:4001
EtcdServersOverrides: EtcdServersOverrides:
- /events#http://127.0.0.1:4002 - /events#http://127.0.0.1:4002
ServiceClusterIPRange: {{ .ServiceClusterIPRange }} ServiceClusterIPRange: {{ .ServiceClusterIPRange }}
ClientCAFile: /srv/kubernetes/ca.crt
BasicAuthFile: /srv/kubernetes/basic_auth.csv
TLSCertFile: /srv/kubernetes/server.cert
TLSPrivateKeyFile: /srv/kubernetes/server.key
TokenAuthFile: /srv/kubernetes/known_tokens.csv
LogLevel: 2 LogLevel: 2
AllowPrivileged: true AllowPrivileged: true
Image: {{ Image "kube-apiserver" }} Image: {{ Image "kube-apiserver" }}

View File

@ -1 +0,0 @@
{{ CACertificatePool.AsString }}

View File

@ -1 +0,0 @@
{{ (Certificate "master").AsString }}

View File

@ -1 +0,0 @@
{{ (PrivateKey "master").AsString }}

View File

@ -1,68 +0,0 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"etcd-server-events",
"namespace": "kube-system",
"labels": {
"k8s-app" : "etcd-server-events"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "etcd-container",
"image": "gcr.io/google_containers/etcd:2.2.1",
"resources": {
"requests": {
"cpu": "100m"
}
},
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/etcd --listen-peer-urls http://127.0.0.1:2381 --addr 127.0.0.1:4002 --bind-addr 127.0.0.1:4002 --data-dir /var/etcd/data-events 1>>/var/log/etcd-events.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 4002,
"path": "/health"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"ports":[
{ "name": "serverport",
"containerPort": 2381,
"hostPort": 2381
},{
"name": "clientport",
"containerPort": 4002,
"hostPort": 4002
}
],
"volumeMounts": [
{"name": "varetcd",
"mountPath": "/var/etcd",
"readOnly": false
},
{"name": "varlogetcd",
"mountPath": "/var/log/etcd-events.log",
"readOnly": false
}
]
}
],
"volumes":[
{ "name": "varetcd",
"hostPath": {
"path": "/mnt/master-pd/var/etcd"}
},
{ "name": "varlogetcd",
"hostPath": {
"path": "/var/log/etcd-events.log"}
}
]
}}

View File

@ -1,68 +0,0 @@
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"etcd-server",
"namespace": "kube-system",
"labels": {
"k8s-app" : "etcd-server"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "etcd-container",
"image": "gcr.io/google_containers/etcd:2.2.1",
"resources": {
"requests": {
"cpu": "200m"
}
},
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/etcd --listen-peer-urls http://127.0.0.1:2380 --addr 127.0.0.1:4001 --bind-addr 127.0.0.1:4001 --data-dir /var/etcd/data 1>>/var/log/etcd.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 4001,
"path": "/health"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"ports":[
{ "name": "serverport",
"containerPort": 2380,
"hostPort": 2380
},{
"name": "clientport",
"containerPort": 4001,
"hostPort": 4001
}
],
"volumeMounts": [
{"name": "varetcd",
"mountPath": "/var/etcd",
"readOnly": false
},
{"name": "varlogetcd",
"mountPath": "/var/log/etcd.log",
"readOnly": false
}
]
}
],
"volumes":[
{ "name": "varetcd",
"hostPath": {
"path": "/mnt/master-pd/var/etcd"}
},
{ "name": "varlogetcd",
"hostPath": {
"path": "/var/log/etcd.log"}
}
]
}}

View File

@ -1,3 +0,0 @@
{
"ifNotExists": true
}

View File

@ -1,3 +0,0 @@
{{ range $id, $token := AllTokens }}
{{ $token }},{{ $id }},{{ $id }}
{{ end }}

View File

@ -1 +0,0 @@
DAEMON_ARGS="{{ BuildFlags ProtokubeFlags }}"

View File

@ -1,15 +0,0 @@
[Unit]
Description=Kubernetes Protokube Service
Documentation=https://github.com/kubernetes/kube-deploy/protokube
After=docker.service
[Service]
EnvironmentFile=/etc/sysconfig/protokube
ExecStartPre={{ ProtokubeImagePullCommand }}
ExecStart=/usr/bin/docker run -v /:/rootfs/ -v /var/run/dbus:/var/run/dbus -v /run/systemd:/run/systemd --net=host --privileged {{ ProtokubeImageName }} /usr/bin/protokube "$DAEMON_ARGS"
Restart=always
RestartSec=2s
StartLimitInterval=0
[Install]
WantedBy=multi-user.target

View File

@ -1,3 +0,0 @@
{
"mode": "0755"
}

View File

@ -30,6 +30,7 @@ import (
"k8s.io/kops/pkg/apis/kops/registry" "k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/apis/kops/util" "k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/apis/kops/validation" "k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/client/simple" "k8s.io/kops/pkg/client/simple"
"k8s.io/kops/pkg/featureflag" "k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model" "k8s.io/kops/pkg/model"
@ -44,7 +45,6 @@ import (
"k8s.io/kops/upup/pkg/fi/cloudup/gcetasks" "k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
"k8s.io/kops/upup/pkg/fi/cloudup/terraform" "k8s.io/kops/upup/pkg/fi/cloudup/terraform"
"k8s.io/kops/upup/pkg/fi/fitasks" "k8s.io/kops/upup/pkg/fi/fitasks"
"k8s.io/kops/upup/pkg/fi/nodeup"
"k8s.io/kops/util/pkg/hashing" "k8s.io/kops/util/pkg/hashing"
"k8s.io/kops/util/pkg/vfs" "k8s.io/kops/util/pkg/vfs"
) )

View File

@ -53,7 +53,7 @@ func buildCloudupTags(cluster *api.Cluster) (sets.String, error) {
// TODO combine with External // TODO combine with External
tags.Insert("_networking_kubenet", "_networking_external") tags.Insert("_networking_kubenet", "_networking_external")
} else { } else {
return nil, fmt.Errorf("No networking mode set") return nil, fmt.Errorf("no networking mode set")
} }
switch cluster.Spec.CloudProvider { switch cluster.Spec.CloudProvider {
@ -116,9 +116,6 @@ func buildNodeupTags(role api.InstanceGroupRole, cluster *api.Cluster, clusterTa
case api.InstanceGroupRoleNode: case api.InstanceGroupRoleNode:
tags.Insert("_kubernetes_pool") tags.Insert("_kubernetes_pool")
// TODO: Should we run _protokube on the nodes?
tags.Insert("_protokube")
case api.InstanceGroupRoleMaster: case api.InstanceGroupRoleMaster:
tags.Insert("_kubernetes_master") tags.Insert("_kubernetes_master")
@ -127,8 +124,6 @@ func buildNodeupTags(role api.InstanceGroupRole, cluster *api.Cluster, clusterTa
tags.Insert("_kubernetes_pool") tags.Insert("_kubernetes_pool")
} }
tags.Insert("_protokube")
case api.InstanceGroupRoleBastion: case api.InstanceGroupRoleBastion:
// No tags // No tags
@ -136,12 +131,6 @@ func buildNodeupTags(role api.InstanceGroupRole, cluster *api.Cluster, clusterTa
return nil, fmt.Errorf("Unrecognized role: %v", role) return nil, fmt.Errorf("Unrecognized role: %v", role)
} }
// TODO: Replace with list of CNI plugins ?
if usesCNI(cluster) {
tags.Insert("_cni_bridge", "_cni_host_local", "_cni_loopback", "_cni_ptp", "_cni_flannel")
//tags.Insert("_cni_tuning")
}
switch fi.StringValue(cluster.Spec.UpdatePolicy) { switch fi.StringValue(cluster.Spec.UpdatePolicy) {
case "": // default case "": // default
tags.Insert("_automatic_upgrades") tags.Insert("_automatic_upgrades")

View File

@ -31,6 +31,7 @@ import (
"k8s.io/kops/nodeup/pkg/model" "k8s.io/kops/nodeup/pkg/model"
api "k8s.io/kops/pkg/apis/kops" api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/registry" "k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/cloudinit" "k8s.io/kops/upup/pkg/fi/nodeup/cloudinit"
"k8s.io/kops/upup/pkg/fi/nodeup/local" "k8s.io/kops/upup/pkg/fi/nodeup/local"
@ -44,7 +45,7 @@ import (
const MaxTaskDuration = 365 * 24 * time.Hour const MaxTaskDuration = 365 * 24 * time.Hour
type NodeUpCommand struct { type NodeUpCommand struct {
config *NodeUpConfig config *nodeup.NodeUpConfig
cluster *api.Cluster cluster *api.Cluster
instanceGroup *api.InstanceGroup instanceGroup *api.InstanceGroup
ConfigLocation string ConfigLocation string
@ -195,6 +196,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
} }
modelContext := &model.NodeupModelContext{ modelContext := &model.NodeupModelContext{
NodeupConfig: c.config,
Cluster: c.cluster, Cluster: c.cluster,
Distribution: distribution, Distribution: distribution,
Architecture: model.ArchitectureAmd64, Architecture: model.ArchitectureAmd64,
@ -207,12 +209,18 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
} }
loader := NewLoader(c.config, c.cluster, assets, nodeTags) loader := NewLoader(c.config, c.cluster, assets, nodeTags)
loader.Builders = append(loader.Builders, &model.DirectoryBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.DockerBuilder{NodeupModelContext: modelContext}) loader.Builders = append(loader.Builders, &model.DockerBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.ProtokubeBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.CloudConfigBuilder{NodeupModelContext: modelContext}) loader.Builders = append(loader.Builders, &model.CloudConfigBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubeletBuilder{NodeupModelContext: modelContext}) loader.Builders = append(loader.Builders, &model.KubeletBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubectlBuilder{NodeupModelContext: modelContext}) loader.Builders = append(loader.Builders, &model.KubectlBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.EtcdBuilder{NodeupModelContext: modelContext}) loader.Builders = append(loader.Builders, &model.EtcdBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.LogrotateBuilder{NodeupModelContext: modelContext}) loader.Builders = append(loader.Builders, &model.LogrotateBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.PackagesBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.SecretBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.FirewallBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.NetworkBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.SysctlBuilder{NodeupModelContext: modelContext}) loader.Builders = append(loader.Builders, &model.SysctlBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubeAPIServerBuilder{NodeupModelContext: modelContext}) loader.Builders = append(loader.Builders, &model.KubeAPIServerBuilder{NodeupModelContext: modelContext})
loader.Builders = append(loader.Builders, &model.KubeControllerManagerBuilder{NodeupModelContext: modelContext}) loader.Builders = append(loader.Builders, &model.KubeControllerManagerBuilder{NodeupModelContext: modelContext})

View File

@ -23,6 +23,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
api "k8s.io/kops/pkg/apis/kops" api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/loader" "k8s.io/kops/upup/pkg/fi/loader"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks" "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
@ -35,7 +36,7 @@ type Loader struct {
Builders []fi.ModelBuilder Builders []fi.ModelBuilder
templates []*template.Template templates []*template.Template
config *NodeUpConfig config *nodeup.NodeUpConfig
cluster *api.Cluster cluster *api.Cluster
assets *fi.AssetStore assets *fi.AssetStore
@ -45,7 +46,7 @@ type Loader struct {
TemplateFunctions template.FuncMap TemplateFunctions template.FuncMap
} }
func NewLoader(config *NodeUpConfig, cluster *api.Cluster, assets *fi.AssetStore, tags sets.String) *Loader { func NewLoader(config *nodeup.NodeUpConfig, cluster *api.Cluster, assets *fi.AssetStore, tags sets.String) *Loader {
l := &Loader{} l := &Loader{}
l.assets = assets l.assets = assets
l.tasks = make(map[string]fi.Task) l.tasks = make(map[string]fi.Task)

View File

@ -42,7 +42,7 @@ type File struct {
Mode *string `json:"mode,omitempty"` Mode *string `json:"mode,omitempty"`
IfNotExists bool `json:"ifNotExists,omitempty"` IfNotExists bool `json:"ifNotExists,omitempty"`
OnChangeExecute []string `json:"onChangeExecute,omitempty"` OnChangeExecute [][]string `json:"onChangeExecute,omitempty"`
Symlink *string `json:"symlink,omitempty"` Symlink *string `json:"symlink,omitempty"`
Owner *string `json:"owner,omitempty"` Owner *string `json:"owner,omitempty"`
@ -96,6 +96,23 @@ func (f *File) GetDependencies(tasks map[string]fi.Task) []fi.Task {
} }
} }
// Files depend on parent directories
for _, v := range tasks {
dir, ok := v.(*File)
if !ok {
continue
}
if dir.Type == FileType_Directory {
dirPath := dir.Path
if !strings.HasSuffix(dirPath, "/") {
dirPath += "/"
}
if strings.HasPrefix(f.Path, dirPath) {
deps = append(deps, v)
}
}
}
return deps return deps
} }
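
The new dependency rule in a nutshell: a file task depends on any directory task whose path is a parent of the file's path. A minimal sketch of the prefix check, with hypothetical paths:

```go
package main

import (
	"fmt"
	"strings"
)

// dependsOnDir mirrors the check above: a file task depends on a directory
// task whose path is a parent of the file's path.
func dependsOnDir(filePath, dirPath string) bool {
	if !strings.HasSuffix(dirPath, "/") {
		dirPath += "/"
	}
	return strings.HasPrefix(filePath, dirPath)
}

func main() {
	fmt.Println(dependsOnDir("/etc/kubernetes/manifests/etcd.manifest", "/etc/kubernetes/manifests")) // true
	fmt.Println(dependsOnDir("/etc/kubernetes/manifests/etcd.manifest", "/var/lib/kubelet"))          // false
}
```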
@ -261,15 +278,16 @@ func (_ *File) RenderLocal(t *local.LocalTarget, a, e, changes *File) error {
} }
if changed && e.OnChangeExecute != nil { if changed && e.OnChangeExecute != nil {
args := e.OnChangeExecute for _, args := range e.OnChangeExecute {
human := strings.Join(args, " ") human := strings.Join(args, " ")
glog.Infof("Changed; will execute OnChangeExecute command: %q", human) glog.Infof("Changed; will execute OnChangeExecute command: %q", human)
cmd := exec.Command(args[0], args[1:]...) cmd := exec.Command(args[0], args[1:]...)
output, err := cmd.CombinedOutput() output, err := cmd.CombinedOutput()
if err != nil { if err != nil {
return fmt.Errorf("error executing command %q: %v\nOutput: %s", human, err, output) return fmt.Errorf("error executing command %q: %v\nOutput: %s", human, err, output)
}
} }
} }
@ -303,7 +321,8 @@ func (_ *File) RenderCloudInit(t *cloudinit.CloudInitTarget, a, e, changes *File
} }
if e.OnChangeExecute != nil { if e.OnChangeExecute != nil {
t.AddCommand(cloudinit.Always, e.OnChangeExecute...) return fmt.Errorf("OnChangeExecute not supported with CloudInit")
//t.AddCommand(cloudinit.Always, e.OnChangeExecute...)
} }
return nil return nil
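
With `OnChangeExecute` now a `[][]string`, a single changed file can trigger several commands in order. An illustrative sketch (the commands shown are examples, not what kops configures):

```go
package main

import "fmt"

func main() {
	// Each inner slice is one argv-style command; they run in order after the file changes.
	onChangeExecute := [][]string{
		{"systemctl", "daemon-reload"},
		{"systemctl", "restart", "kubelet"},
	}
	for _, args := range onChangeExecute {
		fmt.Println("would run:", args)
	}
}
```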

View File

@ -42,6 +42,8 @@ const (
centosSystemdSystemPath = "/usr/lib/systemd/system" centosSystemdSystemPath = "/usr/lib/systemd/system"
coreosSystemdSystemPath = "/etc/systemd/system" coreosSystemdSystemPath = "/etc/systemd/system"
containerosSystemdSystemPath = "/etc/systemd/system"
) )
type Service struct { type Service struct {
@ -146,6 +148,8 @@ func (e *Service) systemdSystemPath(target tags.HasTags) (string, error) {
return centosSystemdSystemPath, nil return centosSystemdSystemPath, nil
} else if target.HasTag("_coreos") { } else if target.HasTag("_coreos") {
return coreosSystemdSystemPath, nil return coreosSystemdSystemPath, nil
} else if target.HasTag("_containeros") {
return containerosSystemdSystemPath, nil
} else { } else {
return "", fmt.Errorf("unsupported systemd system") return "", fmt.Errorf("unsupported systemd system")
} }

View File

@ -1,32 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodeup
type ProtokubeFlags struct {
Master *bool `json:"master,omitempty" flag:"master"`
Containerized *bool `json:"containerized,omitempty" flag:"containerized"`
LogLevel *int32 `json:"logLevel,omitempty" flag:"v"`
DNSProvider *string `json:"dnsProvider,omitempty" flag:"dns"`
Zone []string `json:"zone,omitempty" flag:"zone"`
Channels []string `json:"channels,omitempty" flag:"channels"`
DNSInternalSuffix *string `json:"dnsInternalSuffix,omitempty" flag:"dns-internal-suffix"`
Cloud *string `json:"cloud,omitempty" flag:"cloud"`
}

View File

@ -20,13 +20,12 @@ import (
"encoding/base64" "encoding/base64"
"fmt" "fmt"
"runtime" "runtime"
"strings"
"text/template" "text/template"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kops"
api "k8s.io/kops/pkg/apis/kops" api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/flagbuilder" "k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/secrets" "k8s.io/kops/upup/pkg/fi/secrets"
@ -37,7 +36,7 @@ const TagMaster = "_kubernetes_master"
// templateFunctions is a simple helper-class for the functions accessible to templates // templateFunctions is a simple helper-class for the functions accessible to templates
type templateFunctions struct { type templateFunctions struct {
nodeupConfig *NodeUpConfig nodeupConfig *nodeup.NodeUpConfig
// cluster is populated with the current cluster // cluster is populated with the current cluster
cluster *api.Cluster cluster *api.Cluster
@ -53,7 +52,7 @@ type templateFunctions struct {
} }
// newTemplateFunctions is the constructor for templateFunctions // newTemplateFunctions is the constructor for templateFunctions
func newTemplateFunctions(nodeupConfig *NodeUpConfig, cluster *api.Cluster, instanceGroup *api.InstanceGroup, tags sets.String) (*templateFunctions, error) { func newTemplateFunctions(nodeupConfig *nodeup.NodeUpConfig, cluster *api.Cluster, instanceGroup *api.InstanceGroup, tags sets.String) (*templateFunctions, error) {
t := &templateFunctions{ t := &templateFunctions{
nodeupConfig: nodeupConfig, nodeupConfig: nodeupConfig,
cluster: cluster, cluster: cluster,
@ -93,11 +92,9 @@ func (t *templateFunctions) populate(dest template.FuncMap) {
return runtime.GOARCH return runtime.GOARCH
} }
dest["CACertificatePool"] = t.CACertificatePool
dest["CACertificate"] = t.CACertificate dest["CACertificate"] = t.CACertificate
dest["PrivateKey"] = t.PrivateKey dest["PrivateKey"] = t.PrivateKey
dest["Certificate"] = t.Certificate dest["Certificate"] = t.Certificate
dest["AllTokens"] = t.AllTokens
dest["GetToken"] = t.GetToken dest["GetToken"] = t.GetToken
dest["BuildFlags"] = flagbuilder.BuildFlags dest["BuildFlags"] = flagbuilder.BuildFlags
@ -123,31 +120,6 @@ func (t *templateFunctions) populate(dest template.FuncMap) {
dest["ClusterName"] = func() string { dest["ClusterName"] = func() string {
return t.cluster.ObjectMeta.Name return t.cluster.ObjectMeta.Name
} }
dest["ProtokubeImageName"] = t.ProtokubeImageName
dest["ProtokubeImagePullCommand"] = t.ProtokubeImagePullCommand
dest["ProtokubeFlags"] = t.ProtokubeFlags
}
// CACertificatePool returns the set of valid CA certificates for the cluster
func (t *templateFunctions) CACertificatePool() (*fi.CertificatePool, error) {
if t.keyStore != nil {
return t.keyStore.CertificatePool(fi.CertificateId_CA)
}
// Fallback to direct properties
glog.Infof("Falling back to direct configuration for keystore")
cert, err := t.CACertificate()
if err != nil {
return nil, err
}
if cert == nil {
return nil, fmt.Errorf("CA certificate not found (with fallback)")
}
pool := &fi.CertificatePool{}
pool.Primary = cert
return pool, nil
} }
// CACertificate returns the primary CA certificate for the cluster // CACertificate returns the primary CA certificate for the cluster
@ -165,23 +137,6 @@ func (t *templateFunctions) Certificate(id string) (*fi.Certificate, error) {
return t.keyStore.Cert(id) return t.keyStore.Cert(id)
} }
// AllTokens returns a map of all tokens
func (t *templateFunctions) AllTokens() (map[string]string, error) {
tokens := make(map[string]string)
ids, err := t.secretStore.ListSecrets()
if err != nil {
return nil, err
}
for _, id := range ids {
token, err := t.secretStore.FindSecret(id)
if err != nil {
return nil, err
}
tokens[id] = string(token.Data)
}
return tokens, nil
}
// GetToken returns the specified token // GetToken returns the specified token
func (t *templateFunctions) GetToken(key string) (string, error) { func (t *templateFunctions) GetToken(key string) (string, error) {
token, err := t.secretStore.FindSecret(key) token, err := t.secretStore.FindSecret(key)
@ -194,36 +149,6 @@ func (t *templateFunctions) GetToken(key string) (string, error) {
return string(token.Data), nil return string(token.Data), nil
} }
// ProtokubeImageName returns the docker image for protokube
func (t *templateFunctions) ProtokubeImageName() string {
name := ""
if t.nodeupConfig.ProtokubeImage != nil && t.nodeupConfig.ProtokubeImage.Name != "" {
name = t.nodeupConfig.ProtokubeImage.Name
}
if name == "" {
// use current default corresponding to this version of nodeup
name = kops.DefaultProtokubeImageName()
}
return name
}
// ProtokubeImagePullCommand returns the command to pull the image
func (t *templateFunctions) ProtokubeImagePullCommand() string {
source := ""
if t.nodeupConfig.ProtokubeImage != nil {
source = t.nodeupConfig.ProtokubeImage.Source
}
if source == "" {
// Nothing to pull; return dummy value
return "/bin/true"
}
if strings.HasPrefix(source, "http:") || strings.HasPrefix(source, "https:") || strings.HasPrefix(source, "s3:") {
// We preloaded the image; return a dummy value
return "/bin/true"
}
return "/usr/bin/docker pull " + t.nodeupConfig.ProtokubeImage.Source
}
// IsMaster returns true if we are tagged as a master // IsMaster returns true if we are tagged as a master
func (t *templateFunctions) isMaster() bool { func (t *templateFunctions) isMaster() bool {
return t.hasTag(TagMaster) return t.hasTag(TagMaster)
@ -235,53 +160,6 @@ func (t *templateFunctions) hasTag(tag string) bool {
return found return found
} }
// ProtokubeFlags returns the flags object for protokube
func (t *templateFunctions) ProtokubeFlags() *ProtokubeFlags {
f := &ProtokubeFlags{}
master := t.isMaster()
f.Master = fi.Bool(master)
if master {
f.Channels = t.nodeupConfig.Channels
}
f.LogLevel = fi.Int32(4)
f.Containerized = fi.Bool(true)
zone := t.cluster.Spec.DNSZone
if zone != "" {
if strings.Contains(zone, ".") {
// match by name
f.Zone = append(f.Zone, zone)
} else {
// match by id
f.Zone = append(f.Zone, "*/"+zone)
}
} else {
glog.Warningf("DNSZone not specified; protokube won't be able to update DNS")
// TODO: Should we permit wildcard updates if zone is not specified?
//argv = append(argv, "--zone=*/*")
}
if t.cluster.Spec.CloudProvider != "" {
f.Cloud = fi.String(t.cluster.Spec.CloudProvider)
switch fi.CloudProviderID(t.cluster.Spec.CloudProvider) {
case fi.CloudProviderAWS:
f.DNSProvider = fi.String("aws-route53")
case fi.CloudProviderGCE:
f.DNSProvider = fi.String("google-clouddns")
default:
glog.Warningf("Unknown cloudprovider %q; won't set DNS provider")
}
}
f.DNSInternalSuffix = fi.String(".internal." + t.cluster.ObjectMeta.Name)
return f
}
// KubeProxyConfig builds the KubeProxyConfig configuration object // KubeProxyConfig builds the KubeProxyConfig configuration object
func (t *templateFunctions) KubeProxyConfig() *api.KubeProxyConfig { func (t *templateFunctions) KubeProxyConfig() *api.KubeProxyConfig {
config := &api.KubeProxyConfig{} config := &api.KubeProxyConfig{}

View File

@ -203,22 +203,33 @@ func (c *RollingUpdateCluster) RollingUpdate(groups map[string]*CloudInstanceGro
{ {
var wg sync.WaitGroup var wg sync.WaitGroup
for k, nodeGroup := range nodeGroups { // We run nodes in series, even if they are in separate instance groups
wg.Add(1) // typically they will not be in separate instance groups. If you roll the nodes in parallel
go func(k string, group *CloudInstanceGroup) { // you can get into a scenario where you can evict multiple statefulset pods from the same
// statefulset at the same time. Further improvements need to be made to protect against this as
// well.
wg.Add(1)
go func() {
for k := range nodeGroups {
resultsMutex.Lock() resultsMutex.Lock()
results[k] = fmt.Errorf("function panic nodes") results[k] = fmt.Errorf("function panic nodes")
resultsMutex.Unlock() resultsMutex.Unlock()
}
defer wg.Done() defer wg.Done()
for k, group := range nodeGroups {
err := group.RollingUpdate(c, instanceGroups, false, c.NodeInterval) err := group.RollingUpdate(c, instanceGroups, false, c.NodeInterval)
resultsMutex.Lock() resultsMutex.Lock()
results[k] = err results[k] = err
resultsMutex.Unlock() resultsMutex.Unlock()
}(k, nodeGroup)
} // TODO: Bail on error?
}
}()
wg.Wait() wg.Wait()
} }
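
The pattern above, reduced to its essentials: a single worker goroutine walks the node groups in order so only one group is rolled at a time, while results are recorded under a mutex. A minimal sketch with illustrative names:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	groups := []string{"nodes-a", "nodes-b"}
	results := map[string]error{}
	var mu sync.Mutex
	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		defer wg.Done()
		for _, g := range groups { // one group at a time, never in parallel
			var err error // stand-in for group.RollingUpdate(c, instanceGroups, false, interval)
			mu.Lock()
			results[g] = err
			mu.Unlock()
		}
	}()
	wg.Wait()
	fmt.Println(results)
}
```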