mirror of https://github.com/kubernetes/kops.git

commit 968cf784a9
Merge branch 'master' of https://github.com/kubernetes/kops into feature/extend_PR_398
@@ -274,3 +274,15 @@
 [submodule "_vendor/github.com/golang/protobuf"]
 	path = _vendor/github.com/golang/protobuf
 	url = https://github.com/golang/protobuf
+[submodule "_vendor/github.com/vmware/govmomi"]
+	path = _vendor/github.com/vmware/govmomi
+	url = https://github.com/vmware/govmomi.git
+[submodule "_vendor/github.com/coreos/go-semver"]
+	path = _vendor/github.com/coreos/go-semver
+	url = https://github.com/coreos/go-semver.git
+[submodule "_vendor/github.com/miekg/coredns"]
+	path = _vendor/github.com/miekg/coredns
+	url = https://github.com/miekg/coredns.git
+[submodule "_vendor/github.com/miekg/dns"]
+	path = _vendor/github.com/miekg/dns
+	url = https://github.com/miekg/dns.git
Makefile (9 changed lines)

@@ -31,8 +31,8 @@ MAKEDIR:=$(strip $(shell dirname "$(realpath $(lastword $(MAKEFILE_LIST)))"))
 # Keep in sync with upup/models/cloudup/resources/addons/dns-controller/
 DNS_CONTROLLER_TAG=1.6.0
 
-KOPS_RELEASE_VERSION=1.6.0-alpha.1
-KOPS_CI_VERSION=1.6.0-alpha.2
+KOPS_RELEASE_VERSION=1.6.0-alpha.2
+KOPS_CI_VERSION=1.6.0-alpha.3
 
 GITSHA := $(shell cd ${GOPATH_1ST}/src/k8s.io/kops; git describe --always)
@@ -111,7 +111,8 @@ test:
 	go test k8s.io/kops/protokube/... -args -v=1 -logtostderr
 	go test k8s.io/kops/dns-controller/pkg/... -args -v=1 -logtostderr
 	go test k8s.io/kops/cmd/... -args -v=1 -logtostderr
-	go test k8s.io/kops/tests/... -args -v=1 -logtostderr
+	go test k8s.io/kops/cmd/... -args -v=1 -logtostderr
+	go test k8s.io/kops/channels/... -args -v=1 -logtostderr
 	go test k8s.io/kops/util/... -args -v=1 -logtostderr
 
 crossbuild-nodeup:
@@ -174,7 +175,7 @@ gcs-publish-ci: gcs-upload
 	gsutil -h "Cache-Control:private, max-age=0, no-transform" cp .build/upload/${LATEST_FILE} ${GCS_LOCATION}
 
 gen-cli-docs:
-	KOPS_STATE_STORE= kops genhelpdocs --out docs/cli
+	@kops genhelpdocs --out docs/cli
 
 # Will always push a linux-based build up to the server
 push: crossbuild-nodeup
@@ -0,0 +1 @@
+Subproject commit 5e3acbb5668c4c3deb4842615c4098eb61fb6b1e

@@ -0,0 +1 @@
+Subproject commit 757f49d8ff2687d289468b00835f360614357252

@@ -0,0 +1 @@
+Subproject commit 25ac7f171497271bc74ad3c6b5e1f86b4bab54fa

@@ -0,0 +1 @@
+Subproject commit 2d7d7b3702fddc23a76ac283c3a3d56bb0375e62
@@ -15,6 +15,7 @@ spec:
       labels:
         k8s-app: cluster-autoscaler
       annotations:
+        # For 1.6, we keep the old tolerations in case of a downgrade to 1.5
         scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated", "value":"master"}]'
     spec:
       containers:
@@ -43,4 +44,7 @@ spec:
         hostPath:
           path: {{SSL_CERT_PATH}}
       nodeSelector:
-        kubernetes.io/role: master
+        node-role.kubernetes.io/master: ""
+      tolerations:
+      - key: "node-role.kubernetes.io/master"
+        effect: NoSchedule
@@ -14,9 +14,7 @@ The project is created by wearemolecule, and maintained at
 ### Deploy To Cluster
 
 ```
-# Version 1.2.0
-# https://github.com/wearemolecule/route53-kubernetes/tree/v1.2.0
-$ kubectl apply -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/route53-mapper/v1.2.0.yml
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/kops/master/addons/route53-mapper/v1.3.0.yml
 ```
 
 **Important:**
@@ -7,3 +7,7 @@ spec:
     selector:
       k8s-addon: route53-mapper.addons.k8s.io
     manifest: v1.2.0.yaml
+  - version: 1.3.0
+    selector:
+      k8s-addon: route53-mapper.addons.k8s.io
+    manifest: v1.3.0.yaml
@@ -0,0 +1,26 @@
+---
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: route53-mapper
+  namespace: kube-system
+  labels:
+    app: route53-mapper
+    k8s-addon: route53-mapper.addons.k8s.io
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: route53-mapper
+  template:
+    metadata:
+      labels:
+        app: route53-mapper
+      annotations:
+        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated", "value":"master"}]'
+    spec:
+      nodeSelector:
+        kubernetes.io/role: master
+      containers:
+        - image: quay.io/molecule/route53-kubernetes:v1.3.0
+          name: route53-mapper
@@ -18,16 +18,14 @@ package main
 
 import (
 	"fmt"
+	"k8s.io/kops/channels/pkg/cmd"
 	"os"
 )
 
 func main() {
-	Execute()
-}
-
-// exitWithError will terminate execution with an error result
-// It prints the error to stderr and exits with a non-zero exit code
-func exitWithError(err error) {
-	fmt.Fprintf(os.Stderr, "\n%v\n", err)
-	os.Exit(1)
+	f := &cmd.DefaultFactory{}
+	if err := cmd.Execute(f, os.Stdout); err != nil {
+		fmt.Fprintf(os.Stderr, "\n%v\n", err)
+		os.Exit(1)
+	}
 }
@@ -47,4 +47,15 @@ type AddonSpec struct {
 
 	// Manifest is the URL to the manifest that should be applied
 	Manifest *string `json:"manifest,omitempty"`
+
+	// KubernetesVersion is a semver version range on which this version of the addon can be applied
+	KubernetesVersion string `json:"kubernetesVersion,omitempty"`
+
+	// Id is an optional value which can be used to force a refresh even if the Version matches
+	// This is useful for when we have two manifests expressing the same addon version for two
+	// different kubernetes api versions.  For example, we might label the 1.5 version "k8s-1.5"
+	// and the 1.6 version "k8s-1.6".  Both would have the same Version, determined by the
+	// version of the software we are packaging.  But we always want to reinstall when we
+	// switch kubernetes versions.
+	Id string `json:"id,omitempty"`
 }
@@ -25,6 +25,7 @@ import (
 	"net/url"
 )
 
+// Addon is a wrapper around a single version of an addon
 type Addon struct {
 	Name        string
 	ChannelName string
@@ -32,16 +33,42 @@ type Addon struct {
 	Spec            *api.AddonSpec
 }
 
+// AddonUpdate holds data about a proposed update to an addon
 type AddonUpdate struct {
 	Name            string
 	ExistingVersion *ChannelVersion
 	NewVersion      *ChannelVersion
 }
 
+// AddonMenu is a collection of addons, with helpers for computing the latest versions
+type AddonMenu struct {
+	Addons map[string]*Addon
+}
+
+func NewAddonMenu() *AddonMenu {
+	return &AddonMenu{
+		Addons: make(map[string]*Addon),
+	}
+}
+
+func (m *AddonMenu) MergeAddons(o *AddonMenu) {
+	for k, v := range o.Addons {
+		existing := m.Addons[k]
+		if existing == nil {
+			m.Addons[k] = v
+		} else {
+			if existing.ChannelVersion().replaces(v.ChannelVersion()) {
+				m.Addons[k] = v
+			}
+		}
+	}
+}
+
 func (a *Addon) ChannelVersion() *ChannelVersion {
 	return &ChannelVersion{
 		Channel: &a.ChannelName,
 		Version: a.Spec.Version,
+		Id:      a.Spec.Id,
 	}
 }
@@ -67,7 +94,7 @@ func (a *Addon) GetRequiredUpdates(k8sClient kubernetes.Interface) (*AddonUpdate, error) {
 		return nil, err
 	}
 
-	if existingVersion != nil && !newVersion.Replaces(existingVersion) {
+	if existingVersion != nil && !newVersion.replaces(existingVersion) {
 		return nil, nil
 	}
@@ -18,6 +18,7 @@ package channels
 
 import (
 	"fmt"
+	"github.com/blang/semver"
 	"github.com/golang/glog"
 	"k8s.io/kops/channels/pkg/api"
 	"k8s.io/kops/upup/pkg/fi/utils"
@@ -58,28 +59,29 @@ func ParseAddons(name string, location *url.URL, data []byte) (*Addons, error) {
 	return &Addons{ChannelName: name, ChannelLocation: *location, APIObject: apiObject}, nil
 }
 
-func (a *Addons) GetCurrent() ([]*Addon, error) {
-	all, err := a.All()
+func (a *Addons) GetCurrent(kubernetesVersion semver.Version) (*AddonMenu, error) {
+	all, err := a.wrapInAddons()
 	if err != nil {
 		return nil, err
 	}
-	specs := make(map[string]*Addon)
+
+	menu := NewAddonMenu()
 	for _, addon := range all {
+		if !addon.matches(kubernetesVersion) {
+			continue
+		}
 		name := addon.Name
-		existing := specs[name]
-		if existing == nil || addon.ChannelVersion().Replaces(existing.ChannelVersion()) {
-			specs[name] = addon
+		existing := menu.Addons[name]
+		if existing == nil || addon.ChannelVersion().replaces(existing.ChannelVersion()) {
+			menu.Addons[name] = addon
 		}
 	}
 
-	var addons []*Addon
-	for _, addon := range specs {
-		addons = append(addons, addon)
-	}
-	return addons, nil
+	return menu, nil
 }
 
-func (a *Addons) All() ([]*Addon, error) {
+func (a *Addons) wrapInAddons() ([]*Addon, error) {
 	var addons []*Addon
 	for _, s := range a.APIObject.Spec.Addons {
 		name := a.APIObject.ObjectMeta.Name
@@ -98,3 +100,19 @@ func (a *Addons) All() ([]*Addon, error) {
 	}
 	return addons, nil
 }
+
+func (s *Addon) matches(kubernetesVersion semver.Version) bool {
+	if s.Spec.KubernetesVersion != "" {
+		versionRange, err := semver.ParseRange(s.Spec.KubernetesVersion)
+		if err != nil {
+			glog.Warningf("unable to parse KubernetesVersion %q; skipping", s.Spec.KubernetesVersion)
+			return false
+		}
+		if !versionRange(kubernetesVersion) {
+			glog.V(4).Infof("Skipping version range %q that does not match current version %s", s.Spec.KubernetesVersion, kubernetesVersion)
+			return false
+		}
+	}
+
+	return true
+}
@@ -0,0 +1,154 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package channels
+
+import (
+	"github.com/blang/semver"
+	"k8s.io/kops/channels/pkg/api"
+	"testing"
+)
+
+func Test_Filtering(t *testing.T) {
+	grid := []struct {
+		Input             api.AddonSpec
+		KubernetesVersion string
+		Expected          bool
+	}{
+		{
+			Input: api.AddonSpec{
+				KubernetesVersion: ">=1.6.0",
+			},
+			KubernetesVersion: "1.6.0",
+			Expected:          true,
+		},
+		{
+			Input: api.AddonSpec{
+				KubernetesVersion: "<1.6.0",
+			},
+			KubernetesVersion: "1.6.0",
+			Expected:          false,
+		},
+		{
+			Input: api.AddonSpec{
+				KubernetesVersion: ">=1.6.0",
+			},
+			KubernetesVersion: "1.5.9",
+			Expected:          false,
+		},
+		{
+			Input: api.AddonSpec{
+				KubernetesVersion: ">=1.4.0 <1.6.0",
+			},
+			KubernetesVersion: "1.5.9",
+			Expected:          true,
+		},
+		{
+			Input: api.AddonSpec{
+				KubernetesVersion: ">=1.4.0 <1.6.0",
+			},
+			KubernetesVersion: "1.6.0",
+			Expected:          false,
+		},
+	}
+	for _, g := range grid {
+		k8sVersion := semver.MustParse(g.KubernetesVersion)
+		addon := &Addon{
+			Spec: &g.Input,
+		}
+		actual := addon.matches(k8sVersion)
+		if actual != g.Expected {
+			t.Errorf("unexpected result from %v, %s.  got %v", g.Input.KubernetesVersion, g.KubernetesVersion, actual)
+		}
+	}
+}
+
+func Test_Replacement(t *testing.T) {
+	grid := []struct {
+		Old      *ChannelVersion
+		New      *ChannelVersion
+		Replaces bool
+	}{
+		// With no id, update iff newer semver
+		{
+			Old:      &ChannelVersion{Version: s("1.0.0"), Id: ""},
+			New:      &ChannelVersion{Version: s("1.0.0"), Id: ""},
+			Replaces: false,
+		},
+		{
+			Old:      &ChannelVersion{Version: s("1.0.0"), Id: ""},
+			New:      &ChannelVersion{Version: s("1.0.1"), Id: ""},
+			Replaces: true,
+		},
+		{
+			Old:      &ChannelVersion{Version: s("1.0.1"), Id: ""},
+			New:      &ChannelVersion{Version: s("1.0.0"), Id: ""},
+			Replaces: false,
+		},
+		{
+			Old:      &ChannelVersion{Version: s("1.1.0"), Id: ""},
+			New:      &ChannelVersion{Version: s("1.1.1"), Id: ""},
+			Replaces: true,
+		},
+		{
+			Old:      &ChannelVersion{Version: s("1.1.1"), Id: ""},
+			New:      &ChannelVersion{Version: s("1.1.0"), Id: ""},
+			Replaces: false,
+		},
+
+		// With id, update if different id and same version, otherwise follow semver
+		{
+			Old:      &ChannelVersion{Version: s("1.0.0"), Id: "a"},
+			New:      &ChannelVersion{Version: s("1.0.0"), Id: "a"},
+			Replaces: false,
+		},
+		{
+			Old:      &ChannelVersion{Version: s("1.0.0"), Id: "a"},
+			New:      &ChannelVersion{Version: s("1.0.0"), Id: "b"},
+			Replaces: true,
+		},
+		{
+			Old:      &ChannelVersion{Version: s("1.0.0"), Id: "b"},
+			New:      &ChannelVersion{Version: s("1.0.0"), Id: "a"},
+			Replaces: true,
+		},
+		{
+			Old:      &ChannelVersion{Version: s("1.0.0"), Id: "a"},
+			New:      &ChannelVersion{Version: s("1.0.1"), Id: "a"},
+			Replaces: true,
+		},
+		{
+			Old:      &ChannelVersion{Version: s("1.0.0"), Id: "a"},
+			New:      &ChannelVersion{Version: s("1.0.1"), Id: "a"},
+			Replaces: true,
+		},
+		{
+			Old:      &ChannelVersion{Version: s("1.0.0"), Id: "a"},
+			New:      &ChannelVersion{Version: s("1.0.1"), Id: "a"},
+			Replaces: true,
+		},
+	}
+	for _, g := range grid {
+		actual := g.New.replaces(g.Old)
+		if actual != g.Replaces {
+			t.Errorf("unexpected result from %v -> %v, expect %t.  actual %v", g.Old, g.New, g.Replaces, actual)
+		}
+	}
+}
+
+func s(v string) *string {
+	return &v
+}
@@ -38,6 +38,7 @@ type Channel struct {
 type ChannelVersion struct {
 	Version *string `json:"version,omitempty"`
 	Channel *string `json:"channel,omitempty"`
+	Id      string  `json:"id,omitempty"`
 }
 
 func stringValue(s *string) string {
@@ -48,7 +49,11 @@ func stringValue(s *string) string {
 }
 
 func (c *ChannelVersion) String() string {
-	return "Version=" + stringValue(c.Version) + " Channel=" + stringValue(c.Channel)
+	s := "Version=" + stringValue(c.Version) + " Channel=" + stringValue(c.Channel)
+	if c.Id != "" {
+		s += " Id=" + c.Id
+	}
+	return s
 }
 
 func ParseChannelVersion(s string) (*ChannelVersion, error) {
@@ -91,7 +96,7 @@ func (c *Channel) AnnotationName() string {
 	return AnnotationPrefix + c.Name
 }
 
-func (c *ChannelVersion) Replaces(existing *ChannelVersion) bool {
+func (c *ChannelVersion) replaces(existing *ChannelVersion) bool {
 	if existing.Version != nil {
 		if c.Version == nil {
 			return false
@@ -106,13 +111,25 @@ func (c *ChannelVersion) Replaces(existing *ChannelVersion) bool {
 			glog.Warningf("error parsing existing version %q", *existing.Version)
 			return true
 		}
-		return cVersion.GT(existingVersion)
+		if cVersion.LT(existingVersion) {
+			return false
+		} else if cVersion.GT(existingVersion) {
+			return true
+		} else {
+			// Same version; check ids
+			if c.Id == existing.Id {
+				return false
+			} else {
+				glog.V(4).Infof("Channels had same version %q but different ids (%q vs %q); will replace", *c.Version, c.Id, existing.Id)
+			}
+		}
 	}
 
 	glog.Warningf("ChannelVersion did not have a version; can't perform real version check")
 	if c.Version == nil {
 		return false
 	}
 
 	return true
 }
@@ -14,18 +14,21 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package main
+package cmd
 
 import (
 	"github.com/spf13/cobra"
+	"io"
 )
 
-// applyCmd represents the apply command
-var applyCmd = &cobra.Command{
-	Use:   "apply",
-	Short: "apply resources from a channel",
-}
-
-func init() {
-	rootCommand.AddCommand(applyCmd)
+func NewCmdApply(f Factory, out io.Writer) *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "apply",
+		Short: "apply resources from a channel",
+	}
+
+	// create subcommands
+	cmd.AddCommand(NewCmdApplyChannel(f, out))
+
+	return cmd
 }
@@ -14,11 +14,13 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package main
+package cmd
 
 import (
 	"fmt"
+	"github.com/blang/semver"
 	"github.com/spf13/cobra"
+	"io"
 	"k8s.io/kops/channels/pkg/channels"
 	"k8s.io/kops/util/pkg/tables"
 	"net/url"
@@ -26,38 +28,54 @@ import (
 	"strings"
 )
 
-type ApplyChannelCmd struct {
+type ApplyChannelOptions struct {
 	Yes   bool
 	Files []string
 }
 
-var applyChannel ApplyChannelCmd
-
-func init() {
+func NewCmdApplyChannel(f Factory, out io.Writer) *cobra.Command {
+	var options ApplyChannelOptions
+
 	cmd := &cobra.Command{
 		Use:   "channel",
 		Short: "Apply channel",
-		Run: func(cmd *cobra.Command, args []string) {
-			err := applyChannel.Run(args)
-			if err != nil {
-				exitWithError(err)
-			}
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return RunApplyChannel(f, out, &options, args)
 		},
 	}
 
-	cmd.Flags().BoolVar(&applyChannel.Yes, "yes", false, "Apply update")
-	cmd.Flags().StringSliceVar(&applyChannel.Files, "f", []string{}, "Apply from a local file")
+	cmd.Flags().BoolVar(&options.Yes, "yes", false, "Apply update")
+	cmd.Flags().StringSliceVar(&options.Files, "f", []string{}, "Apply from a local file")
 
-	applyCmd.AddCommand(cmd)
+	return cmd
 }
 
-func (c *ApplyChannelCmd) Run(args []string) error {
-	k8sClient, err := rootCommand.KubernetesClient()
+func RunApplyChannel(f Factory, out io.Writer, options *ApplyChannelOptions, args []string) error {
+	k8sClient, err := f.KubernetesClient()
 	if err != nil {
 		return err
 	}
 
-	var addons []*channels.Addon
+	kubernetesVersionInfo, err := k8sClient.Discovery().ServerVersion()
+	if err != nil {
+		return fmt.Errorf("error querying kubernetes version: %v", err)
+	}
+
+	//kubernetesVersion, err := semver.Parse(kubernetesVersionInfo.Major + "." + kubernetesVersionInfo.Minor + ".0")
+	//if err != nil {
+	//	return fmt.Errorf("cannot parse kubernetes version %q", kubernetesVersionInfo.Major+"."+kubernetesVersionInfo.Minor + ".0")
+	//}
+
+	kubernetesVersion, err := semver.ParseTolerant(kubernetesVersionInfo.GitVersion)
+	if err != nil {
+		return fmt.Errorf("cannot parse kubernetes version %q", kubernetesVersionInfo.GitVersion)
+	}
+
+	// Remove Pre and Patch, as they make semver comparisons impractical
+	kubernetesVersion.Pre = nil
+
+	menu := channels.NewAddonMenu()
+
 	for _, name := range args {
 		location, err := url.Parse(name)
 		if err != nil {
@@ -80,14 +98,14 @@ func (c *ApplyChannelCmd) Run(args []string) error {
 			return fmt.Errorf("error loading channel %q: %v", location, err)
 		}
 
-		current, err := o.GetCurrent()
+		current, err := o.GetCurrent(kubernetesVersion)
 		if err != nil {
 			return fmt.Errorf("error processing latest versions in %q: %v", location, err)
 		}
-		addons = append(addons, current...)
+		menu.MergeAddons(current)
 	}
 
-	for _, f := range c.Files {
+	for _, f := range options.Files {
 		location, err := url.Parse(f)
 		if err != nil {
 			return fmt.Errorf("unable to parse argument %q as url", f)
@@ -108,16 +126,16 @@ func (c *ApplyChannelCmd) Run(args []string) error {
 			return fmt.Errorf("error loading file %q: %v", f, err)
 		}
 
-		current, err := o.GetCurrent()
+		current, err := o.GetCurrent(kubernetesVersion)
 		if err != nil {
 			return fmt.Errorf("error processing latest versions in %q: %v", f, err)
 		}
-		addons = append(addons, current...)
+		menu.MergeAddons(current)
 	}
 
 	var updates []*channels.AddonUpdate
 	var needUpdates []*channels.Addon
-	for _, addon := range addons {
+	for _, addon := range menu.Addons {
 		// TODO: Cache lookups to prevent repeated lookups?
 		update, err := addon.GetRequiredUpdates(k8sClient)
 		if err != nil {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !c.Yes {
|
if !options.Yes {
|
||||||
fmt.Printf("\nMust specify --yes to update\n")
|
fmt.Printf("\nMust specify --yes to update\n")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
@@ -178,7 +196,7 @@ func (c *ApplyChannelCmd) Run(args []string) error {
 		// Could have been a concurrent request
 		if update != nil {
 			if update.NewVersion.Version != nil {
-				fmt.Printf("Updated %q to %d\n", update.Name, *update.NewVersion)
+				fmt.Printf("Updated %q to %s\n", update.Name, *update.NewVersion.Version)
 			} else {
 				fmt.Printf("Updated %q\n", update.Name)
 			}
@@ -0,0 +1,59 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"fmt"
+
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+type Factory interface {
+	KubernetesClient() (kubernetes.Interface, error)
+}
+
+type DefaultFactory struct {
+	kubernetesClient kubernetes.Interface
+}
+
+var _ Factory = &DefaultFactory{}
+
+func (f *DefaultFactory) KubernetesClient() (kubernetes.Interface, error) {
+	if f.kubernetesClient == nil {
+		loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+		loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
+
+		configOverrides := &clientcmd.ConfigOverrides{
+			ClusterDefaults: clientcmd.ClusterDefaults,
+		}
+
+		kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
+		config, err := kubeConfig.ClientConfig()
+		if err != nil {
+			return nil, fmt.Errorf("cannot load kubecfg settings: %v", err)
+		}
+
+		k8sClient, err := kubernetes.NewForConfig(config)
+		if err != nil {
+			return nil, fmt.Errorf("cannot build kube client: %v", err)
+		}
+
+		f.kubernetesClient = k8sClient
+	}
+
+	return f.kubernetesClient, nil
+}
@@ -14,36 +14,22 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package main
+package cmd
 
 import (
 	"github.com/spf13/cobra"
+	"io"
 )
 
-// GetCmd represents the get command
-type GetCmd struct {
-	output string
-
-	cobraCommand *cobra.Command
-}
-
-var getCmd = GetCmd{
-	cobraCommand: &cobra.Command{
+func NewCmdGet(f Factory, out io.Writer) *cobra.Command {
+	cmd := &cobra.Command{
 		Use:        "get",
 		SuggestFor: []string{"list"},
 		Short:      "list or get objects",
-	},
-}
-
-const (
-	OutputYaml  = "yaml"
-	OutputTable = "table"
-)
-
-func init() {
-	cmd := getCmd.cobraCommand
-
-	rootCommand.AddCommand(cmd)
-
-	cmd.PersistentFlags().StringVarP(&getCmd.output, "output", "o", OutputTable, "output format.  One of: table, yaml")
+	}
+
+	// create subcommands
+	cmd.AddCommand(NewCmdGetAddons(f, out))
+
+	return cmd
 }
@@ -14,11 +14,28 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package main
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
 
 import (
 	"fmt"
 	"github.com/spf13/cobra"
+	"io"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/pkg/api/v1"
 	"k8s.io/kops/channels/pkg/channels"
@@ -26,26 +43,23 @@ import (
 	"os"
 )
 
-type GetAddonsCmd struct {
+type GetAddonsOptions struct {
 }
 
-var getAddonsCmd GetAddonsCmd
-
-func init() {
+func NewCmdGetAddons(f Factory, out io.Writer) *cobra.Command {
+	var options GetAddonsOptions
+
 	cmd := &cobra.Command{
 		Use:     "addons",
 		Aliases: []string{"addon"},
 		Short:   "get addons",
 		Long:    `List or get addons.`,
-		Run: func(cmd *cobra.Command, args []string) {
-			err := getAddonsCmd.Run(args)
-			if err != nil {
-				exitWithError(err)
-			}
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return RunGetAddons(f, out, &options)
 		},
 	}
 
-	getCmd.cobraCommand.AddCommand(cmd)
+	return cmd
 }
 
 type addonInfo struct {
@@ -54,8 +68,8 @@ type addonInfo struct {
 	Namespace *v1.Namespace
 }
 
-func (c *GetAddonsCmd) Run(args []string) error {
-	k8sClient, err := rootCommand.KubernetesClient()
+func RunGetAddons(f Factory, out io.Writer, options *GetAddonsOptions) error {
+	k8sClient, err := f.KubernetesClient()
 	if err != nil {
 		return err
 	}
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package main
+package cmd
 
 import (
 	goflag "flag"
@@ -22,48 +22,44 @@ import (
 
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/tools/clientcmd"
+	"io"
 )
 
-type RootCmd struct {
+type CmdRootOptions struct {
 	configFile string
-
-	cobraCommand *cobra.Command
 }
 
-var rootCommand = RootCmd{
-	cobraCommand: &cobra.Command{
-		Use:   "channels",
-		Short: "channels applies software from a channel",
-	},
-}
-
-func Execute() {
-	goflag.Set("logtostderr", "true")
-	goflag.CommandLine.Parse([]string{})
-	if err := rootCommand.cobraCommand.Execute(); err != nil {
-		exitWithError(err)
-	}
-}
-
-func init() {
+func Execute(f Factory, out io.Writer) error {
 	cobra.OnInitialize(initConfig)
 
-	cmd := rootCommand.cobraCommand
+	cmd := NewCmdRoot(f, out)
+
+	goflag.Set("logtostderr", "true")
+	goflag.CommandLine.Parse([]string{})
+	return cmd.Execute()
+}
+
+func NewCmdRoot(f Factory, out io.Writer) *cobra.Command {
+	options := &CmdRootOptions{}
+
+	cmd := &cobra.Command{
+		Use:   "channels",
+		Short: "channels applies software from a channel",
+	}
 
 	cmd.PersistentFlags().AddGoFlagSet(goflag.CommandLine)
 
-	cmd.PersistentFlags().StringVar(&rootCommand.configFile, "config", "", "config file (default is $HOME/.channels.yaml)")
+	cmd.PersistentFlags().StringVar(&options.configFile, "config", "", "config file (default is $HOME/.channels.yaml)")
+
+	// create subcommands
+	cmd.AddCommand(NewCmdApply(f, out))
+	cmd.AddCommand(NewCmdGet(f, out))
+
+	return cmd
 }
 
 // initConfig reads in config file and ENV variables if set.
 func initConfig() {
-	if rootCommand.configFile != "" {
-		// enable ability to specify config file via flag
-		viper.SetConfigFile(rootCommand.configFile)
-	}
-
 	viper.SetConfigName(".channels") // name of config file (without extension)
 	viper.AddConfigPath("$HOME")     // adding home directory as first search path
 	viper.AutomaticEnv()             // read in environment variables that match
@@ -73,28 +69,3 @@ func initConfig() {
 		fmt.Println("Using config file:", viper.ConfigFileUsed())
 	}
 }
-
-func (c *RootCmd) AddCommand(cmd *cobra.Command) {
-	c.cobraCommand.AddCommand(cmd)
-}
-
-func (c *RootCmd) KubernetesClient() (kubernetes.Interface, error) {
-	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
-	loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
-
-	configOverrides := &clientcmd.ConfigOverrides{
-		ClusterDefaults: clientcmd.ClusterDefaults,
-	}
-
-	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
-	config, err := kubeConfig.ClientConfig()
-	if err != nil {
-		return nil, fmt.Errorf("cannot load kubecfg settings: %v", err)
-	}
-
-	k8sClient, err := kubernetes.NewForConfig(config)
-	if err != nil {
-		return nil, fmt.Errorf("cannot build kube client: %v", err)
-	}
-	return k8sClient, err
-}
@@ -690,14 +690,6 @@ func (m *MockEC2) DescribeInstancesPages(*ec2.DescribeInstancesInput, func(*ec2.DescribeInstancesOutput, bool) bool) error {
 	panic("Not implemented")
 	return nil
 }
-func (m *MockEC2) DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput) {
-	panic("Not implemented")
-	return nil, nil
-}
-func (m *MockEC2) DescribeInternetGateways(*ec2.DescribeInternetGatewaysInput) (*ec2.DescribeInternetGatewaysOutput, error) {
-	panic("Not implemented")
-	return nil, nil
-}
 func (m *MockEC2) DescribeMovingAddressesRequest(*ec2.DescribeMovingAddressesInput) (*request.Request, *ec2.DescribeMovingAddressesOutput) {
 	panic("Not implemented")
 	return nil, nil
@@ -144,3 +144,15 @@ func (m *MockEC2) DescribeVpcAttribute(request *ec2.DescribeVpcAttributeInput) (*ec2.DescribeVpcAttributeOutput, error) {
 
 	return response, nil
 }
+
+func (m *MockEC2) DescribeInternetGatewaysRequest(*ec2.DescribeInternetGatewaysInput) (*request.Request, *ec2.DescribeInternetGatewaysOutput) {
+	panic("Not implemented")
+	return nil, nil
+}
+func (m *MockEC2) DescribeInternetGateways(*ec2.DescribeInternetGatewaysInput) (*ec2.DescribeInternetGatewaysOutput, error) {
+	return &ec2.DescribeInternetGatewaysOutput{
+		InternetGateways: []*ec2.InternetGateway{{
+			InternetGatewayId: aws.String("fake-ig"),
+		}},
+	}, nil
+}
@@ -27,6 +27,7 @@ type zoneInfo struct {
 	ID         string
 	hostedZone *route53.HostedZone
 	records    []*route53.ResourceRecordSet
+	vpcs       []*route53.VPC
 }
 
 type MockRoute53 struct {
@@ -48,10 +49,11 @@ func (m *MockRoute53) findZone(hostedZoneId string) *zoneInfo {
 	return nil
 }
 
-func (m *MockRoute53) MockCreateZone(z *route53.HostedZone) {
+func (m *MockRoute53) MockCreateZone(z *route53.HostedZone, vpcs []*route53.VPC) {
 	zi := &zoneInfo{
 		ID:         aws.StringValue(z.Id),
 		hostedZone: z,
+		vpcs:       vpcs,
 	}
 	m.Zones = append(m.Zones, zi)
 }
@@ -45,7 +45,7 @@ func (m *MockRoute53) GetHostedZone(request *route53.GetHostedZoneInput) (*route53.GetHostedZoneOutput, error) {
 	response := &route53.GetHostedZoneOutput{
 		// DelegationSet ???
 		HostedZone: &copy,
-		// VPCs
+		VPCs:       zone.vpcs,
 	}
 	return response, nil
 }
@@ -89,6 +89,13 @@ func (m *MockRoute53) ListHostedZonesByNameRequest(*route53.ListHostedZonesByNameInput) (*request.Request, *route53.ListHostedZonesByNameOutput) {
 }
 
 func (m *MockRoute53) ListHostedZonesByName(*route53.ListHostedZonesByNameInput) (*route53.ListHostedZonesByNameOutput, error) {
-	panic("MockRoute53 ListHostedZonesByName not implemented")
-	return nil, nil
+	var zones []*route53.HostedZone
+	for _, z := range m.Zones {
+		zones = append(zones, z.hostedZone)
+	}
+
+	return &route53.ListHostedZonesByNameOutput{
+		HostedZones: zones,
+	}, nil
 }
@@ -17,25 +17,48 @@ limitations under the License.
 package main
 
 import (
+	"fmt"
 	"io"
 
+	"bytes"
+
+	"github.com/golang/glog"
 	"github.com/spf13/cobra"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/kops/cmd/kops/util"
+	kopsapi "k8s.io/kops/pkg/apis/kops"
+	"k8s.io/kops/pkg/apis/kops/v1alpha1"
+	"k8s.io/kops/util/pkg/vfs"
+	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
+	"k8s.io/kubernetes/pkg/kubectl/resource"
 )
 
 type DeleteOptions struct {
+	resource.FilenameOptions
+	Yes bool
 }
 
 func NewCmdDelete(f *util.Factory, out io.Writer) *cobra.Command {
-	//options := &DeleteOptions{}
+	options := &DeleteOptions{}
 
 	cmd := &cobra.Command{
-		Use:        "delete",
-		Short:      "Delete clusters and other resources.",
-		Long:       `Delete clusters`,
+		Use:        "delete -f FILENAME [--yes]",
+		Short:      "Delete clusters and instancegroups",
+		Long:       `Delete clusters and instancegroups`,
 		SuggestFor: []string{"rm"},
+		Run: func(cmd *cobra.Command, args []string) {
+			if cmdutil.IsFilenameEmpty(options.Filenames) {
+				cmd.Help()
+				return
+			}
+			cmdutil.CheckErr(RunDelete(f, out, options))
+		},
 	}
 
+	cmd.Flags().StringSliceVarP(&options.Filenames, "filename", "f", options.Filenames, "Filename to use to delete the resource")
+	cmd.Flags().BoolVarP(&options.Yes, "yes", "y", options.Yes, "Specify --yes to delete the resource")
+	cmd.MarkFlagRequired("filename")
+
 	// create subcommands
 	cmd.AddCommand(NewCmdDeleteCluster(f, out))
 	cmd.AddCommand(NewCmdDeleteInstanceGroup(f, out))
@@ -43,3 +66,68 @@ func NewCmdDelete(f *util.Factory, out io.Writer) *cobra.Command {
 
 	return cmd
 }
+
+func RunDelete(factory *util.Factory, out io.Writer, d *DeleteOptions) error {
+	// Codecs provides access to encoding and decoding for the scheme
+	codecs := kopsapi.Codecs //serializer.NewCodecFactory(scheme)
+
+	codec := codecs.UniversalDecoder(kopsapi.SchemeGroupVersion)
+
+	var sb bytes.Buffer
+	fmt.Fprintf(&sb, "\n")
+	for _, f := range d.Filenames {
+		contents, err := vfs.Context.ReadFile(f)
+		if err != nil {
+			return fmt.Errorf("error reading file %q: %v", f, err)
+		}
+
+		sections := bytes.Split(contents, []byte("\n---\n"))
+		for _, section := range sections {
+			defaults := &schema.GroupVersionKind{
+				Group:   v1alpha1.SchemeGroupVersion.Group,
+				Version: v1alpha1.SchemeGroupVersion.Version,
+			}
+			o, gvk, err := codec.Decode(section, defaults, nil)
+			if err != nil {
+				return fmt.Errorf("error parsing file %q: %v", f, err)
+			}
+
+			switch v := o.(type) {
+			case *kopsapi.Cluster:
+				options := &DeleteClusterOptions{}
+				options.ClusterName = v.ObjectMeta.Name
+				options.Yes = d.Yes
+				err = RunDeleteCluster(factory, out, options)
+				if err != nil {
+					exitWithError(err)
+				}
+				if d.Yes {
+					fmt.Fprintf(&sb, "Deleted cluster/%s\n", v.ObjectMeta.Name)
+				}
+			case *kopsapi.InstanceGroup:
+				options := &DeleteInstanceGroupOptions{}
+				options.GroupName = v.ObjectMeta.Name
+				options.ClusterName = v.ObjectMeta.Labels[kopsapi.LabelClusterName]
+				options.Yes = d.Yes
+				err := RunDeleteInstanceGroup(factory, out, options)
+				if err != nil {
+					exitWithError(err)
+				}
+				if d.Yes {
+					fmt.Fprintf(&sb, "Deleted instancegroup/%s\n", v.ObjectMeta.Name)
+				}
+			default:
+				glog.V(2).Infof("Type of object was %T", v)
+				return fmt.Errorf("Unhandled kind %q in %s", gvk, f)
+			}
+		}
+	}
+	{
+		_, err := out.Write(sb.Bytes())
+		if err != nil {
+			return fmt.Errorf("error writing to output: %v", err)
+		}
+	}
+
+	return nil
+}
||||||
|
|
@ -102,6 +102,16 @@ func TestPrivateKopeio(t *testing.T) {
|
||||||
runTest(t, "privatekopeio.example.com", "../../tests/integration/privatekopeio", "v1alpha2", true, 1)
|
runTest(t, "privatekopeio.example.com", "../../tests/integration/privatekopeio", "v1alpha2", true, 1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestPrivateDns runs the test on a configuration with private topology, private dns
|
||||||
|
func TestPrivateDns1(t *testing.T) {
|
||||||
|
runTest(t, "privatedns1.example.com", "../../tests/integration/privatedns1", "v1alpha2", true, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestPrivateDns runs the test on a configuration with private topology, private dns, extant vpc
|
||||||
|
func TestPrivateDns2(t *testing.T) {
|
||||||
|
runTest(t, "privatedns2.example.com", "../../tests/integration/privatedns2", "v1alpha2", true, 1)
|
||||||
|
}
|
||||||
|
|
||||||
func runTest(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int) {
|
func runTest(t *testing.T, clusterName string, srcDir string, version string, private bool, zones int) {
|
||||||
var stdout bytes.Buffer
|
var stdout bytes.Buffer
|
||||||
|
|
||||||
|
|
@@ -369,7 +379,28 @@ func (h *IntegrationTestHarness) SetupMockAWS() {
 	mockRoute53.MockCreateZone(&route53.HostedZone{
 		Id:   aws.String("/hostedzone/Z1AFAKE1ZON3YO"),
 		Name: aws.String("example.com."),
-	})
+		Config: &route53.HostedZoneConfig{
+			PrivateZone: aws.Bool(false),
+		},
+	}, nil)
+	mockRoute53.MockCreateZone(&route53.HostedZone{
+		Id:   aws.String("/hostedzone/Z2AFAKE1ZON3NO"),
+		Name: aws.String("internal.example.com."),
+		Config: &route53.HostedZoneConfig{
+			PrivateZone: aws.Bool(true),
+		},
+	}, []*route53.VPC{{
+		VPCId: aws.String("vpc-234"),
+	}})
+	mockRoute53.MockCreateZone(&route53.HostedZone{
+		Id:   aws.String("/hostedzone/Z3AFAKE1ZOMORE"),
+		Name: aws.String("private.example.com."),
+		Config: &route53.HostedZoneConfig{
+			PrivateZone: aws.Bool(true),
+		},
+	}, []*route53.VPC{{
+		VPCId: aws.String("vpc-123"),
+	}})
 
 	mockEC2.Images = append(mockEC2.Images, &ec2.Image{
 		ImageId: aws.String("ami-12345678"),
@@ -95,7 +95,7 @@ func NewCmdRollingUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command {
 		Short: "Rolling update a cluster",
 		Long: `Rolling update a cluster instance groups.
 
-This command updates a kubernetes cluseter to match the cloud, and kops specifications.
+This command updates a kubernetes cluster to match the cloud, and kops specifications.
 
 To perform rolling update, you need to update the cloud resources first with "kops update cluster"
@@ -86,7 +86,7 @@ func NewCmdUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command {
 	cmd.Flags().StringVar(&options.Models, "model", options.Models, "Models to apply (separate multiple models with commas)")
 	cmd.Flags().StringVar(&options.SSHPublicKey, "ssh-public-key", options.SSHPublicKey, "SSH public key to use (deprecated: use kops create secret instead)")
 	cmd.Flags().StringVar(&options.OutDir, "out", options.OutDir, "Path to write any local output")
+	cmd.Flags().BoolVar(&options.CreateKubecfg, "create-kube-config", options.CreateKubecfg, "Will control automatically creating the kube config file on your local filesystem")
 	return cmd
 }
@@ -89,6 +89,9 @@ func main() {
 	}
 
 	fi, err := os.Lstat(procSelfExe)
+	if err != nil {
+		glog.Fatalf("error doing lstat on %q: %v", procSelfExe, err)
+	}
 	if fi.Mode()&os.ModeSymlink != os.ModeSymlink {
 		glog.Fatalf("file %v is not a symlink", procSelfExe)
 	}
@@ -299,7 +299,7 @@ func (c *DNSController) runOnce() error {
 		for key, changeset := range op.changesets {
 			glog.V(2).Infof("applying DNS changeset for zone %s", key)
 			if err := changeset.Apply(); err != nil {
-				glog.Warningf("error applying DNS changset for zone %s: %v", key, err)
+				glog.Warningf("error applying DNS changeset for zone %s: %v", key, err)
 				errors = append(errors, fmt.Errorf("error applying DNS changeset for zone %s: %v", key, err))
 			}
 		}
@ -0,0 +1,137 @@
|
||||||
|
## Addons Management
|
||||||
|
|
||||||
|
kops incorporates management of some addons; we _have_ to manage some addons which are needed before
|
||||||
|
the kubernetes API is functional.
|
||||||
|
|
||||||
|
In addition, kops offers end-user management of addons via the `channels` tool (which is still experimental,
|
||||||
|
but we are working on making it a recommended part of kubernetes addon management). We ship some
|
||||||
|
curated addons in the [addons directory](/addons), more information in the [addons document](addons.md).
|
||||||
|
|
||||||
|
|
||||||
|
kops uses the `channels` tool for system addon management also. Because kops uses the same tool
|
||||||
|
for *system* addon management as it does for *user* addon management, this means that
|
||||||
|
addons installed by kops as part of cluster bringup can be managed alongside additional addons.
|
||||||
|
(Though note that bootstrap addons are much more likely to be replaced during a kops upgrade).
|
||||||
|
|
||||||
|
The general kops philosophy is to try to make the set of bootstrap addons minimal, and
|
||||||
|
to make installation of subsequent addons easy.
|
||||||
|
|
||||||
|
Thus, `kube-dns` and the networking overlay (if any) are the canonical bootstrap addons.
|
||||||
|
But addons such as the dashboard or the EFK stack are easily installed after kops bootstrap,
|
||||||
|
with a `kubectl apply -f https://...` or with the channels tool.

In the future, we may as a convenience make it easy to add optional addons to the kops manifest,
though this will just be a convenience wrapper around doing it manually.

## Versioning

The channels tool adds a manifest-of-manifests file, of `Kind: Addons`, which allows for a description
of the various manifest versions that are available. In this way kops can manage updates
as new versions of the addon are released. For example,
the [dashboard addon](https://github.com/kubernetes/kops/blob/master/addons/kubernetes-dashboard/addon.yaml)
lists multiple versions.

For example, a typical addons declaration might look like this:

```
- version: 1.4.0
  selector:
    k8s-addon: kubernetes-dashboard.addons.k8s.io
  manifest: v1.4.0.yaml
- version: 1.5.0
  selector:
    k8s-addon: kubernetes-dashboard.addons.k8s.io
  manifest: v1.5.0.yaml
```

That declares two versions of an addon, with manifests at `v1.4.0.yaml` and at `v1.5.0.yaml`.
These are evaluated as relative paths to the Addons file itself. (The channels tool supports
a few more protocols than `kubectl` - for example `s3://...` for S3 hosted manifests.)

The `version` field gives meaning to the alternative manifests. This is interpreted as a
semver. The channels tool keeps track of the current version installed (currently by means
of an annotation on the `kube-system` namespace), and it will not reapply the same version
of the manifest. This means that a user can edit a deployed addon, and changes will not
be replaced, until a new version of the addon is installed.
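
To see which addon versions the channels tool believes are installed, you can inspect those annotations directly. A minimal sketch (the exact annotation key is an implementation detail and may differ between versions):

```
# Installed addon versions are recorded as annotations on the kube-system namespace:
kubectl get namespace kube-system -o yaml | grep addons.k8s.io
```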

The long-term direction here is that addons will mostly be configured through a ConfigMap or Secret object,
and that the addon manager will (TODO) not replace the ConfigMap.

The `selector` determines the objects which make up the addon. This will be used
to construct a `--prune` argument (TODO), so that objects that existed in the
previous but not the new version will be removed as part of an upgrade.
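
The intended pruning behaviour is analogous to `kubectl apply --prune`. A hedged sketch, using the dashboard selector from the example above:

```
# Apply the new manifest, deleting objects that carry the addon's label
# but are absent from the new manifest:
kubectl apply -f v1.5.0.yaml --prune -l k8s-addon=kubernetes-dashboard.addons.k8s.io
```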

## Kubernetes Version Selection

The addon manager now supports a `kubernetesVersion` field, which is a semver range specifier
on the kubernetes version. If the targeted version of kubernetes does not match the semver
specified, the addon version will be ignored.

This allows you to have different versions of the manifest for significant changes to the
kubernetes API. For example, 1.6 changed the taints & tolerations to a field, and RBAC moved
to beta. As such it is easier to have two separate manifests.

For example:

```
- version: 1.5.0
  selector:
    k8s-addon: kube-dashboard.addons.k8s.io
  manifest: v1.5.0.yaml
  kubernetesVersion: "<1.6.0"
  id: "pre-k8s-16"
- version: 1.6.0
  selector:
    k8s-addon: kube-dashboard.addons.k8s.io
  manifest: v1.6.0.yaml
  kubernetesVersion: ">=1.6.0"
  id: "k8s-16"
```

On kubernetes versions before 1.6, we will install `v1.5.0.yaml`, whereas from kubernetes
versions 1.6 on we will install `v1.6.0.yaml`.

Note that we remove the `pre-release` field of the kubernetes semver, so that `1.6.0-beta.1`
will match `>=1.6.0`. This matches the way kubernetes does pre-releases.

## Semver is not enough: `id`

However, semver is insufficient here with the kubernetes version selection. The problem
arises in the following scenario:

* Install k8s 1.5, 1.5 version of manifest is installed
* Upgrade to k8s 1.6, 1.6 version of manifest is installed
* Downgrade to k8s 1.5; we want the 1.5 version of the manifest to be installed, but the 1.6 version
will have a semver that is greater than or equal to the 1.5 semver.

We need a way to break the ties between the semvers, and thus we introduce the `id` field.

Thus a manifest will actually look like this:

```
- version: 1.6.0
  selector:
    k8s-addon: kube-dns.addons.k8s.io
  manifest: pre-k8s-16.yaml
  kubernetesVersion: "<1.6.0"
  id: "pre-k8s-16"
- version: 1.6.0
  selector:
    k8s-addon: kube-dns.addons.k8s.io
  manifest: k8s-16.yaml
  kubernetesVersion: ">=1.6.0"
  id: "k8s-16"
```

Note that the two addons have the same version, but a different `kubernetesVersion` selector.
But they have different `id` values; addons with matching semvers but different `id`s will
be upgraded. (We will never downgrade to an older semver though, regardless of `id`.)

So now in the above scenario after the downgrade to 1.5, although the semver is the same,
the id will not match, and the `pre-k8s-16` version will be installed. (And when we upgrade back
to 1.6, the `k8s-16` version will be installed.)

A few tips:

* The `version` can now more closely mirror the upstream version.
* The manifest names should probably incorporate the `id`, for maintainability.
@ -2,6 +2,8 @@
With kops you manage addons by using kubectl.

+(For a description of the addon-manager, please see [addon_manager.md](addon_manager.md).)
+
Addons in kubernetes are traditionally done by copying files to `/etc/kubernetes/addons` on the master. But this
doesn't really make sense in HA master configurations. We also have kubectl available, and addons is just a thin
wrapper over calling kubectl.
@ -1,11 +1,21 @@
## kops delete

-delete clusters
+Deletes a resource by filename or stdin

### Synopsis

+Delete clusters or instancegroups by filename or stdin
+
-Delete clusters
+```
+kops delete -f FILENAME [--yes]
+```
+
+### Options
+
+```
+  -f, --filename stringSlice   Filename to use to delete the resource
+  -y, --yes                    Specify --yes to delete the resource
+```

### Options inherited from parent commands

@ -27,4 +37,3 @@ Delete clusters
* [kops delete cluster](kops_delete_cluster.md)	 - Delete cluster
* [kops delete instancegroup](kops_delete_instancegroup.md)	 - Delete instancegroup
* [kops delete secret](kops_delete_secret.md)	 - Delete secret
@ -7,7 +7,7 @@ Rolling update a cluster

Rolling update a cluster instance groups.

-This command updates a kubernetes cluseter to match the cloud, and kops specifications.
+This command updates a kubernetes cluster to match the cloud, and kops specifications.

To perform rolling update, you need to update the cloud resources first with "kops update cluster"
@ -14,6 +14,7 @@ kops update cluster
### Options

```
+      --create-kube-config       Will control automatically creating the kube config file on your local filesystem (default true)
      --model string             Models to apply (separate multiple models with commas) (default "config,proto,cloudup")
      --out string               Path to write any local output
      --ssh-public-key string    SSH public key to use (deprecated: use kops create secret instead)
@ -118,6 +118,23 @@ spec:

Will result in the flag `--runtime-config=batch/v2alpha1=true,apps/v1alpha1=true`. Note that `kube-apiserver` accepts `true` as a value for switch-like flags.

+### kubelet
+
+This block contains configurations for `kubelet`. See https://kubernetes.io/docs/admin/kubelet/
+
+#### Feature Gates
+
+```yaml
+spec:
+  kubelet:
+    featureGates:
+      ExperimentalCriticalPodAnnotation: "true"
+      AllowExtTrafficLocalEndpoints: "false"
+```
+
+Will result in the flag `--feature-gates=ExperimentalCriticalPodAnnotation=true,AllowExtTrafficLocalEndpoints=false`
+
### networkID

On AWS, this is the id of the VPC the cluster is created in. If creating a cluster from scratch, this field doesn't need to be specified at create time; `kops` will create a `VPC` for you.
@ -99,7 +99,7 @@ CoreOS support is highly experimental. Please report any issues.
The following steps are known:

* CoreOS AMIs can be found using `aws ec2 describe-images --region=us-east-1 --owner=595879546273 --filters Name=virtualization-type,Values=hvm`
-* You can specify the name using the 'coreos.com` owner alias, for example `coreos.com/CoreOS-stable-1235.9.0-hvm`
+* You can specify the name using the `coreos.com` owner alias, for example `coreos.com/CoreOS-stable-1235.9.0-hvm`
@ -15,7 +15,7 @@ By default, a cluster has:
## Listing instance groups

`kops get instancegroups`
-> ```
+```
NAME                    ROLE    MACHINETYPE     MIN     MAX     ZONES
master-us-east-1c       Master                  1       1       us-east-1c
nodes                   Node    t2.medium       2       2

@ -32,7 +32,7 @@ have not yet been applied (this may change soon though!).
To preview the change:

`kops update cluster <clustername>`
-> ```
+```
...
Will modify resources:
  *awstasks.LaunchConfiguration launchConfiguration/mycluster.mydomain.com
@ -7,7 +7,10 @@ There are two main types of labels that kops can create:

Both are specified at the InstanceGroup level.

-A nice use for CloudLabels is to specify [AWS cost allocation tags](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html)
+A nice use for cloudLabels is to specify [AWS cost allocation tags](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html).
+
+A good use for nodeLabels is to implement [nodeSelector labels](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#step-two-add-a-nodeselector-field-to-your-pod-configuration).
+cloudLabels and nodeLabels are automatically applied to new nodes created by [AWS EC2 auto scaling groups](https://aws.amazon.com/autoscaling/).

An example:

@ -28,3 +31,10 @@ Note that keys and values are strings, so you need quotes around values that YAML
would otherwise treat as numbers or booleans.

To apply changes, you'll need to do a `kops update cluster` and then likely a `kops rolling-update cluster`.

+For AWS, if `kops rolling-update cluster --instance-group nodes` returns "No rolling-update required." the
+[kops rolling-update cluster](https://github.com/kubernetes/kops/blob/8bc48ef10a44a3e481b604f5dbb663420c68dcab/docs/cli/kops_rolling-update_cluster.md) `--force` flag can be used to force a rolling update, even when no changes are identified.
+
+Example:
+
+`kops rolling-update cluster --instance-group nodes --force`
@ -1,6 +1,6 @@
## Building Kubernetes clusters with Terraform

-Kops can generate Terraform configurations, and then you can then apply them using the `terraform plan` and `terraform apply` tools. This is very handy if you are already using Terraform, or if you want to check in the Terraform output into version control.
+Kops can generate Terraform configurations, and then you can apply them using the `terraform plan` and `terraform apply` tools. This is very handy if you are already using Terraform, or if you want to check the Terraform output into version control.

The gist of it is that, instead of letting kops apply the changes, you tell kops what you want, and then kops spits out what it wants done into a `.tf` file. **_You_** are then responsible for turning those plans into reality.
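
A typical flow (a sketch; substitute your own cluster name and state store) is to ask kops for Terraform output, then drive Terraform yourself:

```
kops update cluster ${CLUSTER_NAME} --target=terraform --out=.
terraform plan
terraform apply
```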
@ -2,6 +2,7 @@ k8s.io/kops
k8s.io/kops/channels/cmd/channels
k8s.io/kops/channels/pkg/api
k8s.io/kops/channels/pkg/channels
+k8s.io/kops/channels/pkg/cmd
k8s.io/kops/cloudmock/aws/mockautoscaling
k8s.io/kops/cloudmock/aws/mockec2
k8s.io/kops/cloudmock/aws/mockroute53
@ -76,6 +76,7 @@ NODE_ZONES=${NODE_ZONES:-"us-west-2a,us-west-2b,us-west-2c"}
NODE_SIZE=${NODE_SIZE:-m4.xlarge}
MASTER_ZONES=${MASTER_ZONES:-"us-west-2a,us-west-2b,us-west-2c"}
MASTER_SIZE=${MASTER_SIZE:-m4.large}
+KOPS_CREATE=${KOPS_CREATE:-yes}

# NETWORK

@ -90,15 +91,18 @@ cd $KOPS_DIRECTORY/..
GIT_VER=git-$(git describe --always)
[ -z "$GIT_VER" ] && echo "we do not have GIT_VER something is very wrong" && exit 1;

echo ==========
echo "Starting build"

-make ci && S3_BUCKET=s3://${NODEUP_BUCKET} make upload
+export CI=1
+make && make test && S3_BUCKET=s3://${NODEUP_BUCKET} make upload

-KOPS_CHANNEL=$(kops version | awk '{ print $2 }')
+KOPS_CHANNEL=$(kops version | awk '{ print $2 }' | sed 's/\+/%2B/')
KOPS_BASE_URL="http://${NODEUP_BUCKET}.s3.amazonaws.com/kops/${KOPS_CHANNEL}/"

+echo "KOPS_BASE_URL=${KOPS_BASE_URL}"
+echo "NODEUP_URL=${KOPS_BASE_URL}linux/amd64/nodeup"
+
echo ==========
echo "Deleting cluster ${CLUSTER_NAME}. Elle est finie."
@ -111,25 +115,21 @@ kops delete cluster \
echo ==========
echo "Creating cluster ${CLUSTER_NAME}"

-NODEUP_URL=${KOPS_BASE_URL}linux/amd64/nodeup \
-KOPS_BASE_URL=${KOPS_BASE_URL} \
-kops create cluster \
-  --name $CLUSTER_NAME \
-  --state $KOPS_STATE_STORE \
-  --node-count $NODE_COUNT \
-  --zones $NODE_ZONES \
-  --master-zones $MASTER_ZONES \
-  --cloud aws \
-  --node-size $NODE_SIZE \
-  --master-size $MASTER_SIZE \
-  -v $VERBOSITY \
-  --image $IMAGE \
-  --kubernetes-version "1.5.2" \
-  --topology $TOPOLOGY \
-  --networking $NETWORKING \
-  --bastion="true" \
-  --yes
+kops_command="NODEUP_URL=${KOPS_BASE_URL}linux/amd64/nodeup KOPS_BASE_URL=${KOPS_BASE_URL} kops create cluster --name $CLUSTER_NAME --state $KOPS_STATE_STORE --node-count $NODE_COUNT --zones $NODE_ZONES --master-zones $MASTER_ZONES --node-size $NODE_SIZE --master-size $MASTER_SIZE -v $VERBOSITY --image $IMAGE --channel alpha --topology $TOPOLOGY --networking $NETWORKING"
+
+if [[ $TOPOLOGY == "private" ]]; then
+  kops_command+=" --bastion='true'"
+fi
+
+if [ -n "${KOPS_FEATURE_FLAGS+x}" ]; then
+  # Prepend the feature flags so eval runs the command with them set
+  kops_command="KOPS_FEATURE_FLAGS=\"${KOPS_FEATURE_FLAGS}\" $kops_command"
+fi
+
+if [[ $KOPS_CREATE == "yes" ]]; then
+  kops_command="$kops_command --yes"
+fi
+
+eval $kops_command

echo ==========
echo "Your k8s cluster ${CLUSTER_NAME}, awaits your bidding."
@ -16,7 +16,7 @@

. $(dirname "${BASH_SOURCE}")/common.sh

-BAD_HEADERS=$(${KUBE_ROOT}/hack/verify-boilerplate.sh | awk '{ print $6}')
+BAD_HEADERS=$((${KUBE_ROOT}/hack/verify-boilerplate.sh || true) | awk '{ print $6}')
FORMATS="sh go Makefile Dockerfile"

for i in ${FORMATS}
|
||||||
|
|
||||||
# channels uses protokube
|
# channels uses protokube
|
||||||
cd /src/.build/artifacts/
|
cd /src/.build/artifacts/
|
||||||
curl -O https://storage.googleapis.com/kubernetes-release/release/v1.6.0-beta.1/bin/linux/amd64/kubectl
|
curl -O https://storage.googleapis.com/kubernetes-release/release/v1.6.1/bin/linux/amd64/kubectl
|
||||||
chmod +x kubectl
|
chmod +x kubectl
|
||||||
|
|
|
||||||
|
|
@ -37,18 +37,28 @@ func (b *CloudConfigBuilder) Build(c *fi.ModelBuilderContext) error {
	// Add cloud config file if needed
	var lines []string

+	cloudProvider := b.Cluster.Spec.CloudProvider
	cloudConfig := b.Cluster.Spec.CloudConfig

	if cloudConfig == nil {
		cloudConfig = &kops.CloudConfiguration{}
	}
-	if cloudConfig.NodeTags != nil {
-		lines = append(lines, "node-tags = "+*cloudConfig.NodeTags)
-	}
-	if cloudConfig.NodeInstancePrefix != nil {
-		lines = append(lines, "node-instance-prefix = "+*cloudConfig.NodeInstancePrefix)
-	}
-	if cloudConfig.Multizone != nil {
-		lines = append(lines, fmt.Sprintf("multizone = %t", *cloudConfig.Multizone))
-	}
+	switch cloudProvider {
+	case "gce":
+		if cloudConfig.NodeTags != nil {
+			lines = append(lines, "node-tags = "+*cloudConfig.NodeTags)
+		}
+		if cloudConfig.NodeInstancePrefix != nil {
+			lines = append(lines, "node-instance-prefix = "+*cloudConfig.NodeInstancePrefix)
+		}
+		if cloudConfig.Multizone != nil {
+			lines = append(lines, fmt.Sprintf("multizone = %t", *cloudConfig.Multizone))
+		}
+	case "aws":
+		if cloudConfig.DisableSecurityGroupIngress != nil {
+			lines = append(lines, fmt.Sprintf("DisableSecurityGroupIngress = %t", *cloudConfig.DisableSecurityGroupIngress))
+		}
+	}

	config := "[global]\n" + strings.Join(lines, "\n") + "\n"
@ -36,7 +36,6 @@ type NodeupModelContext struct {
	Distribution distros.Distribution

	IsMaster bool
-	UsesCNI  bool

	Assets   *fi.AssetStore
	KeyStore fi.CAStore

@ -83,7 +82,7 @@ func (c *NodeupModelContext) PathSrvSshproxy() string {
	}
}

-func (c *NodeupModelContext) NetworkPluginDir() string {
+func (c *NodeupModelContext) CNIBinDir() string {
	switch c.Distribution {
	case distros.DistributionContainerOS:
		return "/home/kubernetes/bin/"

@ -92,6 +91,10 @@ func (c *NodeupModelContext) NetworkPluginDir() string {
	}
}

+func (c *NodeupModelContext) CNIConfDir() string {
+	return "/etc/cni/net.d/"
+}
+
func (c *NodeupModelContext) buildPKIKubeconfig(id string) (string, error) {
	caCertificate, err := c.KeyStore.Cert(fi.CertificateId_CA)
	if err != nil {

@ -171,3 +174,11 @@ func (c *NodeupModelContext) buildPKIKubeconfig(id string) (string, error) {
func (c *NodeupModelContext) IsKubernetesGTE(version string) bool {
	return util.IsKubernetesGTE(version, c.KubernetesVersion)
}

+func (c *NodeupModelContext) UsesCNI() bool {
+	networking := c.Cluster.Spec.Networking
+	if networking == nil || networking.Classic != nil {
+		return false
+	}
+	return true
+}
@ -141,6 +141,8 @@ var dockerVersions = []dockerVersion{
		Hash:          "52ec22128e70acc2f76b3a8e87ff96785995116a",
	},

+	// 1.12.3 - k8s 1.5
+
	// 1.12.3 - Jessie
	{
		DockerVersion: "1.12.3",

@ -199,6 +201,67 @@ var dockerVersions = []dockerVersion{
		Source:        "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-selinux-1.12.3-1.el7.centos.noarch.rpm",
		Hash:          "a6b0243af348140236ed96f2e902b259c590eefa",
	},

+	// 1.12.6 - k8s 1.6
+
+	// 1.12.6 - Jessie
+	{
+		DockerVersion: "1.12.6",
+		Name:          "docker-engine",
+		Distros:       []distros.Distribution{distros.DistributionJessie},
+		Architectures: []Architecture{ArchitectureAmd64},
+		Version:       "1.12.6-0~debian-jessie",
+		Source:        "http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.12.6-0~debian-jessie_amd64.deb",
+		Hash:          "1a8b0c4e3386e12964676a126d284cebf599cc8e",
+		Dependencies:  []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
+		//Depends: iptables, init-system-helpers (>= 1.18~), libapparmor1 (>= 2.6~devel), libc6 (>= 2.17), libdevmapper1.02.1 (>= 2:1.02.90), libltdl7 (>= 2.4.2), libsystemd0
+		//Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils
+	},
+
+	// 1.12.6 - Jessie on ARM
+	{
+		DockerVersion: "1.12.6",
+		Name:          "docker-engine",
+		Distros:       []distros.Distribution{distros.DistributionJessie},
+		Architectures: []Architecture{ArchitectureArm},
+		Version:       "1.12.6-0~debian-jessie",
+		Source:        "http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.12.6-0~debian-jessie_armhf.deb",
+		Hash:          "ac148e1f7381e4201e139584dd3c102372ad96fb",
+		Dependencies:  []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
+	},
+
+	// 1.12.6 - Xenial
+	{
+		DockerVersion: "1.12.6",
+		Name:          "docker-engine",
+		Distros:       []distros.Distribution{distros.DistributionXenial},
+		Architectures: []Architecture{ArchitectureAmd64},
+		Version:       "1.12.6-0~ubuntu-xenial",
+		Source:        "http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.12.6-0~ubuntu-xenial_amd64.deb",
+		Hash:          "fffc22da4ad5b20715bbb6c485b2d2bb7e84fd33",
+		Dependencies:  []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
+	},
+
+	// 1.12.6 - Centos / Rhel7 (two packages)
+	{
+		DockerVersion: "1.12.6",
+		Name:          "docker-engine",
+		Distros:       []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Architectures: []Architecture{ArchitectureAmd64},
+		Version:       "1.12.6",
+		Source:        "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-1.12.6-1.el7.centos.x86_64.rpm",
+		Hash:          "776dbefa9dc7733000e46049293555a9a422c50e",
+		Dependencies:  []string{"libtool-ltdl", "libseccomp"},
+	},
+	{
+		DockerVersion: "1.12.6",
+		Name:          "docker-engine-selinux",
+		Distros:       []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Architectures: []Architecture{ArchitectureAmd64},
+		Version:       "1.12.6",
+		Source:        "https://yum.dockerproject.org/repo/main/centos/7/Packages/docker-engine-selinux-1.12.6-1.el7.centos.noarch.rpm",
+		Hash:          "9a6ee0d631ca911b6927450a3c396e9a5be75047",
+	},
}

func (d *dockerVersion) matches(arch Architecture, dockerVersion string, distro distros.Distribution) bool {
@ -17,14 +17,9 @@ limitations under the License.
package model

import (
-	"io/ioutil"
-	"k8s.io/kops/nodeup/pkg/distros"
	"k8s.io/kops/pkg/apis/kops"
-	"k8s.io/kops/pkg/diff"
	"k8s.io/kops/upup/pkg/fi"
	"path"
-	"sort"
-	"strings"
	"testing"

	// Register our APIs

@ -90,64 +85,23 @@ func TestDockerBuilder_BuildFlags(t *testing.T) {
func runDockerBuilderTest(t *testing.T, key string) {
	basedir := path.Join("tests/dockerbuilder/", key)

-	clusterYamlPath := path.Join(basedir, "cluster.yaml")
-	clusterYaml, err := ioutil.ReadFile(clusterYamlPath)
+	nodeUpModelContext, err := LoadModel(basedir)
	if err != nil {
-		t.Fatalf("error reading cluster yaml file %q: %v", clusterYamlPath, err)
+		t.Fatalf("error parsing cluster yaml %q: %v", basedir, err)
+		return
	}
-	obj, _, err := kops.ParseVersionedYaml(clusterYaml)
-	if err != nil {
-		t.Fatalf("error parsing cluster yaml %q: %v", clusterYamlPath, err)
-	}
-	cluster := obj.(*kops.Cluster)

	context := &fi.ModelBuilderContext{
		Tasks: make(map[string]fi.Task),
	}
-	nodeUpModelContext := &NodeupModelContext{
-		Cluster:      cluster,
-		Architecture: "amd64",
-		Distribution: distros.DistributionXenial,
-	}

	builder := DockerBuilder{NodeupModelContext: nodeUpModelContext}

	err = builder.Build(context)
	if err != nil {
		t.Fatalf("error from DockerBuilder Build: %v", err)
+		return
	}

-	var keys []string
-	for key := range context.Tasks {
-		keys = append(keys, key)
-	}
-	sort.Strings(keys)
-
-	var yamls []string
-	for _, key := range keys {
-		task := context.Tasks[key]
-		yaml, err := kops.ToRawYaml(task)
-		if err != nil {
-			t.Fatalf("error serializing task: %v", err)
-		}
-		yamls = append(yamls, strings.TrimSpace(string(yaml)))
-	}
-
-	actualTasksYaml := strings.Join(yamls, "\n---\n")
-
-	tasksYamlPath := path.Join(basedir, "tasks.yaml")
-	expectedTasksYamlBytes, err := ioutil.ReadFile(tasksYamlPath)
-	if err != nil {
-		t.Fatalf("error reading file %q: %v", tasksYamlPath, err)
-	}
-
-	actualTasksYaml = strings.TrimSpace(actualTasksYaml)
-	expectedTasksYaml := strings.TrimSpace(string(expectedTasksYamlBytes))
-
-	if expectedTasksYaml != actualTasksYaml {
-		diffString := diff.FormatDiff(expectedTasksYaml, actualTasksYaml)
-		t.Logf("diff:\n%s\n", diffString)
-
-		t.Fatalf("tasks differed from expected for test %q", key)
-	}
+	ValidateTasks(t, basedir, context)
}
@ -61,6 +61,18 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
		c.AddTask(t)
	}

+	// Touch log file, so that docker doesn't create a directory instead
+	{
+		t := &nodetasks.File{
+			Path:        "/var/log/kube-apiserver.log",
+			Contents:    fi.NewStringResource(""),
+			Type:        nodetasks.FileType_File,
+			Mode:        s("0400"),
+			IfNotExists: true,
+		}
+		c.AddTask(t)
+	}
+
	return nil
}
@ -44,30 +44,10 @@ func (b *KubeletBuilder) Build(c *fi.ModelBuilderContext) error {
		return fmt.Errorf("error building kubelet config: %v", err)
	}

-	// Add sysconfig file
	{
-		// TODO: Dump this - just complexity!
-		flags, err := flagbuilder.BuildFlags(kubeletConfig)
+		t, err := b.buildSystemdEnvironmentFile(kubeletConfig)
		if err != nil {
-			return fmt.Errorf("error building kubelet flags: %v", err)
+			return err
		}
-
-		// Add cloud config file if needed
-		// We build this flag differently because it depends on CloudConfig, and to expose it directly
-		// would be a degree of freedom we don't have (we'd have to write the config to different files)
-		// We can always add this later if it is needed.
-		if b.Cluster.Spec.CloudConfig != nil {
-			flags += " --cloud-config=" + CloudConfigFilePath
-		}
-
-		flags += " --network-plugin-dir=" + b.NetworkPluginDir()
-
-		sysconfig := "DAEMON_ARGS=\"" + flags + "\"\n"
-
-		t := &nodetasks.File{
-			Path:     "/etc/sysconfig/kubelet",
-			Contents: fi.NewStringResource(sysconfig),
-			Type:     nodetasks.FileType_File,
-		}
		c.AddTask(t)
	}

@ -77,6 +57,7 @@ func (b *KubeletBuilder) Build(c *fi.ModelBuilderContext) error {
	// TODO: Extract to common function?
	assetName := "kubelet"
	assetPath := ""
+	// TODO make Find call to an interface, we cannot mock out this function because it finds a file on disk
	asset, err := b.Assets.Find(assetName, assetPath)
	if err != nil {
		return fmt.Errorf("error trying to locate asset %q: %v", assetName, err)

@ -111,9 +92,9 @@ func (b *KubeletBuilder) Build(c *fi.ModelBuilderContext) error {
		c.AddTask(t)
	}

-	if b.UsesCNI {
+	if b.UsesCNI() {
		t := &nodetasks.File{
-			Path: "/etc/cni/net.d/",
+			Path: b.CNIConfDir(),
			Type: nodetasks.FileType_Directory,
		}
		c.AddTask(t)

@ -139,6 +120,41 @@ func (b *KubeletBuilder) kubeletPath() string {
	return kubeletCommand
}

+func (b *KubeletBuilder) buildSystemdEnvironmentFile(kubeletConfig *kops.KubeletConfigSpec) (*nodetasks.File, error) {
+	// TODO: Dump the separate file for flags - just complexity!
+	flags, err := flagbuilder.BuildFlags(kubeletConfig)
+	if err != nil {
+		return nil, fmt.Errorf("error building kubelet flags: %v", err)
+	}
+
+	// Add cloud config file if needed
+	// We build this flag differently because it depends on CloudConfig, and to expose it directly
+	// would be a degree of freedom we don't have (we'd have to write the config to different files)
+	// We can always add this later if it is needed.
+	if b.Cluster.Spec.CloudConfig != nil {
+		flags += " --cloud-config=" + CloudConfigFilePath
+	}
+
+	if b.UsesCNI() {
+		flags += " --cni-bin-dir=" + b.CNIBinDir()
+		flags += " --cni-conf-dir=" + b.CNIConfDir()
+	}
+
+	if b.Cluster.Spec.Networking != nil && b.Cluster.Spec.Networking.Kubenet != nil {
+		// Kubenet is neither CNI nor not-CNI, so we need to pass it `--network-plugin-dir` also
+		flags += " --network-plugin-dir=" + b.CNIBinDir()
+	}
+
+	sysconfig := "DAEMON_ARGS=\"" + flags + "\"\n"
+
+	t := &nodetasks.File{
+		Path:     "/etc/sysconfig/kubelet",
+		Contents: fi.NewStringResource(sysconfig),
+		Type:     nodetasks.FileType_File,
+	}
+	return t, nil
+}
+
func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {
	kubeletCommand := b.kubeletPath()
@ -17,9 +17,20 @@ limitations under the License.
package model

import (
-	"k8s.io/kops/pkg/apis/kops"
-	"k8s.io/kops/upup/pkg/fi"
+	"bytes"
+	"io/ioutil"
+	"path"
+	"sort"
+	"strings"
	"testing"

+	"fmt"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/kops/nodeup/pkg/distros"
+	"k8s.io/kops/pkg/apis/kops"
+	"k8s.io/kops/pkg/apis/kops/v1alpha2"
+	"k8s.io/kops/pkg/diff"
+	"k8s.io/kops/upup/pkg/fi"
)

func Test_InstanceGroupKubeletMerge(t *testing.T) {

@ -142,3 +153,115 @@ func stringSlicesEqual(exp, other []string) bool {

	return true
}

+func Test_RunKubeletBuilder(t *testing.T) {
+	basedir := "tests/kubelet/featuregates"
+
+	context := &fi.ModelBuilderContext{
+		Tasks: make(map[string]fi.Task),
+	}
+	nodeUpModelContext, err := LoadModel(basedir)
+	if err != nil {
+		t.Fatalf("error loading model %q: %v", basedir, err)
+		return
+	}
+
+	builder := KubeletBuilder{NodeupModelContext: nodeUpModelContext}
+
+	kubeletConfig, err := builder.buildKubeletConfig()
+	if err != nil {
+		t.Fatalf("error from KubeletBuilder buildKubeletConfig: %v", err)
+		return
+	}
+
+	fileTask, err := builder.buildSystemdEnvironmentFile(kubeletConfig)
+	if err != nil {
+		t.Fatalf("error from KubeletBuilder buildSystemdEnvironmentFile: %v", err)
+		return
+	}
+	context.AddTask(fileTask)
+
+	ValidateTasks(t, basedir, context)
+}
+
+func LoadModel(basedir string) (*NodeupModelContext, error) {
+	clusterYamlPath := path.Join(basedir, "cluster.yaml")
+	clusterYaml, err := ioutil.ReadFile(clusterYamlPath)
+	if err != nil {
+		return nil, fmt.Errorf("error reading cluster yaml file %q: %v", clusterYamlPath, err)
+	}
+
+	var cluster *kops.Cluster
+	var instanceGroup *kops.InstanceGroup
+
+	// Codecs provides access to encoding and decoding for the scheme
+	codecs := kops.Codecs
+
+	codec := codecs.UniversalDecoder(kops.SchemeGroupVersion)
+
+	sections := bytes.Split(clusterYaml, []byte("\n---\n"))
+	for _, section := range sections {
+		defaults := &schema.GroupVersionKind{
+			Group:   v1alpha2.SchemeGroupVersion.Group,
+			Version: v1alpha2.SchemeGroupVersion.Version,
+		}
+		o, gvk, err := codec.Decode(section, defaults, nil)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing file %v", err)
+		}
+
+		switch v := o.(type) {
+		case *kops.Cluster:
+			cluster = v
+		case *kops.InstanceGroup:
+			instanceGroup = v
+		default:
+			return nil, fmt.Errorf("Unhandled kind %q", gvk)
+		}
+	}
+
+	nodeUpModelContext := &NodeupModelContext{
+		Cluster:       cluster,
+		Architecture:  "amd64",
+		Distribution:  distros.DistributionXenial,
+		InstanceGroup: instanceGroup,
+	}
+
+	return nodeUpModelContext, nil
+}
+
+func ValidateTasks(t *testing.T, basedir string, context *fi.ModelBuilderContext) {
+	var keys []string
+	for key := range context.Tasks {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	var yamls []string
+	for _, key := range keys {
+		task := context.Tasks[key]
+		yaml, err := kops.ToRawYaml(task)
+		if err != nil {
+			t.Fatalf("error serializing task: %v", err)
+		}
+		yamls = append(yamls, strings.TrimSpace(string(yaml)))
+	}
+
+	actualTasksYaml := strings.Join(yamls, "\n---\n")
+
+	tasksYamlPath := path.Join(basedir, "tasks.yaml")
+	expectedTasksYamlBytes, err := ioutil.ReadFile(tasksYamlPath)
+	if err != nil {
+		t.Fatalf("error reading file %q: %v", tasksYamlPath, err)
+	}
+
+	actualTasksYaml = strings.TrimSpace(actualTasksYaml)
+	expectedTasksYaml := strings.TrimSpace(string(expectedTasksYamlBytes))
+
+	if expectedTasksYaml != actualTasksYaml {
+		diffString := diff.FormatDiff(expectedTasksYaml, actualTasksYaml)
+		t.Logf("diff:\n%s\n", diffString)
+
+		t.Fatalf("tasks differed from expected for test %q", basedir)
+	}
+}
@ -125,7 +125,7 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
	}

	image := c.Image
-	cmd := "echo -998 > /proc/$$$/oom_score_adj && kube-proxy --kubeconfig=/var/lib/kube-proxy/kubeconfig --resource-container=\"\" "
+	cmd := "echo -998 > /proc/$$$/oom_score_adj && kube-proxy --kubeconfig=/var/lib/kube-proxy/kubeconfig --conntrack-max-per-core=131072 --resource-container=\"\" "
	cmd += flags

	// cmd += " 1>>/var/log/kube-proxy.log 2>&1"
@ -55,7 +55,7 @@ func (b *NetworkBuilder) Build(c *fi.ModelBuilderContext) error {
	}

	for _, assetName := range assetNames {
-		if err := b.addAsset(c, assetName); err != nil {
+		if err := b.addCNIBinAsset(c, assetName); err != nil {
			return err
		}
	}

@ -63,7 +63,7 @@ func (b *NetworkBuilder) Build(c *fi.ModelBuilderContext) error {
	return nil
}

-func (b *NetworkBuilder) addAsset(c *fi.ModelBuilderContext, assetName string) error {
+func (b *NetworkBuilder) addCNIBinAsset(c *fi.ModelBuilderContext, assetName string) error {
	assetPath := ""
	asset, err := b.Assets.Find(assetName, assetPath)
	if err != nil {

@ -74,7 +74,7 @@ func (b *NetworkBuilder) addAsset(c *fi.ModelBuilderContext, assetName string) e
	}

	t := &nodetasks.File{
-		Path:     filepath.Join(b.NetworkPluginDir(), assetName),
+		Path:     filepath.Join(b.CNIBinDir(), assetName),
		Contents: asset,
		Type:     nodetasks.FileType_File,
		Mode:     s("0755"),
@ -95,10 +95,6 @@ func (b *SysctlBuilder) Build(c *fi.ModelBuilderContext) error {
		"# Increase size of file handles and inode cache",
		"fs.file-max = 2097152",
		"",
-
-		"# Increase size of conntrack table size to avoid poor iptables performance",
-		"net.netfilter.nf_conntrack_max = 1000000",
-		"",
	)
}
@ -0,0 +1,60 @@
apiVersion: kops/v1alpha2
kind: Cluster
metadata:
  creationTimestamp: "2016-12-10T22:42:27Z"
  name: minimal.example.com
spec:
  kubernetesApiAccess:
  - 0.0.0.0/0
  channel: stable
  cloudProvider: aws
  configBase: memfs://clusters.example.com/minimal.example.com
  etcdClusters:
  - etcdMembers:
    - instanceGroup: master-us-test-1a
      name: master-us-test-1a
    name: main
  - etcdMembers:
    - instanceGroup: master-us-test-1a
      name: master-us-test-1a
    name: events
  kubelet:
    featureGates:
      ExperimentalCriticalPodAnnotation: "true"
      AllowExtTrafficLocalEndpoints: "false"
  kubernetesVersion: v1.5.0
  masterInternalName: api.internal.minimal.example.com
  masterPublicName: api.minimal.example.com
  networkCIDR: 172.20.0.0/16
  networking:
    kubenet: {}
  nonMasqueradeCIDR: 100.64.0.0/10
  sshAccess:
  - 0.0.0.0/0
  topology:
    masters: public
    nodes: public
  subnets:
  - cidr: 172.20.32.0/19
    name: us-test-1a
    type: Public
    zone: us-test-1a

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: "2016-12-10T22:42:28Z"
  name: nodes
  labels:
    kops.k8s.io/cluster: minimal.example.com
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: t2.medium
  maxSize: 2
  minSize: 2
  role: Node
  subnets:
  - us-test-1a
@ -0,0 +1,4 @@
contents: |
  DAEMON_ARGS="--feature-gates=AllowExtTrafficLocalEndpoints=false,ExperimentalCriticalPodAnnotation=true --node-labels=kubernetes.io/role=node,node-role.kubernetes.io/node= --cni-bin-dir=/opt/cni/bin/ --cni-conf-dir=/etc/cni/net.d/ --network-plugin-dir=/opt/cni/bin/"
path: /etc/sysconfig/kubelet
type: file
@ -319,6 +319,9 @@ type KubeletConfigSpec struct {

	// Taints to add when registering a node in the cluster
	Taints []string `json:"taints,omitempty" flag:"register-with-taints"`
+
+	// FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features.
+	FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"`
}

type KubeProxyConfig struct {

@ -686,4 +689,6 @@ type CloudConfiguration struct {
	Multizone          *bool   `json:"multizone,omitempty"`
	NodeTags           *string `json:"nodeTags,omitempty"`
	NodeInstancePrefix *string `json:"nodeInstancePrefix,omitempty"`
+	// AWS cloud-config options
+	DisableSecurityGroupIngress *bool `json:"disableSecurityGroupIngress,omitempty"`
}

@ -318,6 +318,9 @@ type KubeletConfigSpec struct {

	// Taints to add when registering a node in the cluster
	Taints []string `json:"taints,omitempty" flag:"register-with-taints"`
+
+	// FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features.
+	FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"`
}

type KubeProxyConfig struct {

@ -1365,6 +1365,7 @@ func autoConvert_v1alpha1_KubeletConfigSpec_To_kops_KubeletConfigSpec(in *Kubele
	out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
	out.VolumePluginDirectory = in.VolumePluginDirectory
	out.Taints = in.Taints
+	out.FeatureGates = in.FeatureGates
	return nil
}

@ -1411,6 +1412,7 @@ func autoConvert_kops_KubeletConfigSpec_To_v1alpha1_KubeletConfigSpec(in *kops.K
	out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
	out.VolumePluginDirectory = in.VolumePluginDirectory
	out.Taints = in.Taints
+	out.FeatureGates = in.FeatureGates
	return nil
}

@ -140,6 +140,9 @@ type KubeletConfigSpec struct {

	// Taints to add when registering a node in the cluster
	Taints []string `json:"taints,omitempty" flag:"register-with-taints"`
+
+	// FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features.
+	FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"`
}

type KubeProxyConfig struct {

@ -1463,6 +1463,7 @@ func autoConvert_v1alpha2_KubeletConfigSpec_To_kops_KubeletConfigSpec(in *Kubele
	out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
	out.VolumePluginDirectory = in.VolumePluginDirectory
	out.Taints = in.Taints
+	out.FeatureGates = in.FeatureGates
	return nil
}

@ -1509,6 +1510,7 @@ func autoConvert_kops_KubeletConfigSpec_To_v1alpha2_KubeletConfigSpec(in *kops.K
	out.EvictionMinimumReclaim = in.EvictionMinimumReclaim
	out.VolumePluginDirectory = in.VolumePluginDirectory
	out.Taints = in.Taints
+	out.FeatureGates = in.FeatureGates
	return nil
}
@ -89,6 +89,7 @@ func BuildFlags(options interface{}) (string, error) {
			arg := fmt.Sprintf("%s=%s", k, v)
			args = append(args, arg)
		}
+		sort.Strings(args)
		if len(args) != 0 {
			flag := fmt.Sprintf("--%s=%s", flagName, strings.Join(args, ","))
			flags = append(flags, flag)
@ -48,7 +48,9 @@ func (b *DockerOptionsBuilder) BuildOptions(o interface{}) error {
	}

	dockerVersion := ""
-	if sv.Major == 1 && sv.Minor >= 5 {
+	if sv.Major == 1 && sv.Minor >= 6 {
+		dockerVersion = "1.12.6"
+	} else if sv.Major == 1 && sv.Minor >= 5 {
		dockerVersion = "1.12.3"
	} else if sv.Major == 1 && sv.Minor <= 4 {
		dockerVersion = "1.11.2"
@ -140,7 +140,7 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error
		kcm.ConfigureCloudRoutes = fi.Bool(false)
	} else if networking.Kopeio != nil {
		// Kopeio is based on kubenet / external
-		kcm.ConfigureCloudRoutes = fi.Bool(true)
+		kcm.ConfigureCloudRoutes = fi.Bool(false)
	} else {
		return fmt.Errorf("no networking mode set")
	}
@ -180,7 +180,7 @@ func validateTheNodes(clusterName string, validationCluster *ValidationCluster)
	}

	validationCluster.NodesReady = true
-	if len(validationCluster.NodesNotReadyArray) != 0 || validationCluster.NodesCount != len(validationCluster.NodesReadyArray) {
+	if len(validationCluster.NodesNotReadyArray) != 0 || validationCluster.NodesCount > len(validationCluster.NodesReadyArray) {
		validationCluster.NodesReady = false
	}
@ -196,6 +196,11 @@ func dummyClient(masterReady string, nodeReady string) kubernetes.Interface {
			"kubernetes.io/role": "node",
			NODE_READY:           nodeReady,
		},
+		{
+			"name":               "node2",
+			"kubernetes.io/role": "node",
+			NODE_READY:           "true",
+		},
	},
	))
}
@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==
@ -0,0 +1,102 @@
apiVersion: kops/v1alpha2
kind: Cluster
metadata:
  creationTimestamp: "2016-12-12T04:13:14Z"
  name: privatedns1.example.com
spec:
  kubernetesApiAccess:
  - 0.0.0.0/0
  channel: stable
  cloudProvider: aws
  configBase: memfs://clusters.example.com/privatedns1.example.com
  dnsZone: internal.example.com
  etcdClusters:
  - etcdMembers:
    - instanceGroup: master-us-test-1a
      name: us-test-1a
    name: main
  - etcdMembers:
    - instanceGroup: master-us-test-1a
      name: us-test-1a
    name: events
  kubernetesVersion: v1.4.6
  masterInternalName: api.internal.privatedns1.example.com
  masterPublicName: api.privatedns1.example.com
  networkCIDR: 172.20.0.0/16
  networking:
    weave: {}
  nonMasqueradeCIDR: 100.64.0.0/10
  sshAccess:
  - 0.0.0.0/0
  topology:
    dns:
      type: Private
    masters: private
    nodes: private
  subnets:
  - cidr: 172.20.32.0/19
    name: us-test-1a
    type: Private
    zone: us-test-1a
  - cidr: 172.20.4.0/22
    name: utility-us-test-1a
    type: Utility
    zone: us-test-1a

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: "2016-12-12T04:13:15Z"
  name: master-us-test-1a
  labels:
    kops.k8s.io/cluster: privatedns1.example.com
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: m3.medium
  maxSize: 1
  minSize: 1
  role: Master
  subnets:
  - us-test-1a

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: "2016-12-12T04:13:15Z"
  name: nodes
  labels:
    kops.k8s.io/cluster: privatedns1.example.com
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: t2.medium
  maxSize: 2
  minSize: 2
  role: Node
  subnets:
  - us-test-1a

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: "2016-12-14T15:32:41Z"
  name: bastion
  labels:
    kops.k8s.io/cluster: privatedns1.example.com
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: t2.micro
  maxSize: 1
  minSize: 1
  role: Bastion
  subnets:
  - utility-us-test-1a
@@ -0,0 +1,653 @@
output "bastion_security_group_ids" {
  value = ["${aws_security_group.bastion-privatedns1-example-com.id}"]
}

output "cluster_name" {
  value = "privatedns1.example.com"
}

output "master_security_group_ids" {
  value = ["${aws_security_group.masters-privatedns1-example-com.id}"]
}

output "node_security_group_ids" {
  value = ["${aws_security_group.nodes-privatedns1-example-com.id}"]
}

output "node_subnet_ids" {
  value = ["${aws_subnet.us-test-1a-privatedns1-example-com.id}"]
}

output "region" {
  value = "us-test-1"
}

output "vpc_id" {
  value = "${aws_vpc.privatedns1-example-com.id}"
}

resource "aws_autoscaling_attachment" "bastion-privatedns1-example-com" {
  elb                    = "${aws_elb.bastion-privatedns1-example-com.id}"
  autoscaling_group_name = "${aws_autoscaling_group.bastion-privatedns1-example-com.id}"
}

resource "aws_autoscaling_attachment" "master-us-test-1a-masters-privatedns1-example-com" {
  elb                    = "${aws_elb.api-privatedns1-example-com.id}"
  autoscaling_group_name = "${aws_autoscaling_group.master-us-test-1a-masters-privatedns1-example-com.id}"
}

resource "aws_autoscaling_group" "bastion-privatedns1-example-com" {
  name                 = "bastion.privatedns1.example.com"
  launch_configuration = "${aws_launch_configuration.bastion-privatedns1-example-com.id}"
  max_size             = 1
  min_size             = 1
  vpc_zone_identifier  = ["${aws_subnet.utility-us-test-1a-privatedns1-example-com.id}"]

  tag = {
    key                 = "KubernetesCluster"
    value               = "privatedns1.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "Name"
    value               = "bastion.privatedns1.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "k8s.io/role/bastion"
    value               = "1"
    propagate_at_launch = true
  }
}

resource "aws_autoscaling_group" "master-us-test-1a-masters-privatedns1-example-com" {
  name                 = "master-us-test-1a.masters.privatedns1.example.com"
  launch_configuration = "${aws_launch_configuration.master-us-test-1a-masters-privatedns1-example-com.id}"
  max_size             = 1
  min_size             = 1
  vpc_zone_identifier  = ["${aws_subnet.us-test-1a-privatedns1-example-com.id}"]

  tag = {
    key                 = "KubernetesCluster"
    value               = "privatedns1.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "Name"
    value               = "master-us-test-1a.masters.privatedns1.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "k8s.io/role/master"
    value               = "1"
    propagate_at_launch = true
  }
}

resource "aws_autoscaling_group" "nodes-privatedns1-example-com" {
  name                 = "nodes.privatedns1.example.com"
  launch_configuration = "${aws_launch_configuration.nodes-privatedns1-example-com.id}"
  max_size             = 2
  min_size             = 2
  vpc_zone_identifier  = ["${aws_subnet.us-test-1a-privatedns1-example-com.id}"]

  tag = {
    key                 = "KubernetesCluster"
    value               = "privatedns1.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "Name"
    value               = "nodes.privatedns1.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "k8s.io/role/node"
    value               = "1"
    propagate_at_launch = true
  }
}

resource "aws_ebs_volume" "us-test-1a-etcd-events-privatedns1-example-com" {
  availability_zone = "us-test-1a"
  size              = 20
  type              = "gp2"
  encrypted         = false

  tags = {
    KubernetesCluster    = "privatedns1.example.com"
    Name                 = "us-test-1a.etcd-events.privatedns1.example.com"
    "k8s.io/etcd/events" = "us-test-1a/us-test-1a"
    "k8s.io/role/master" = "1"
  }
}

resource "aws_ebs_volume" "us-test-1a-etcd-main-privatedns1-example-com" {
  availability_zone = "us-test-1a"
  size              = 20
  type              = "gp2"
  encrypted         = false

  tags = {
    KubernetesCluster    = "privatedns1.example.com"
    Name                 = "us-test-1a.etcd-main.privatedns1.example.com"
    "k8s.io/etcd/main"   = "us-test-1a/us-test-1a"
    "k8s.io/role/master" = "1"
  }
}

resource "aws_eip" "us-test-1a-privatedns1-example-com" {
  vpc = true
}

resource "aws_elb" "api-privatedns1-example-com" {
  name = "api-privatedns1-example-c-lq96ht"

  listener = {
    instance_port     = 443
    instance_protocol = "TCP"
    lb_port           = 443
    lb_protocol       = "TCP"
  }

  security_groups = ["${aws_security_group.api-elb-privatedns1-example-com.id}"]
  subnets         = ["${aws_subnet.utility-us-test-1a-privatedns1-example-com.id}"]

  health_check = {
    target              = "TCP:443"
    healthy_threshold   = 2
    unhealthy_threshold = 2
    interval            = 10
    timeout             = 5
  }

  idle_timeout = 300

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "api.privatedns1.example.com"
  }
}

resource "aws_elb" "bastion-privatedns1-example-com" {
  name = "bastion-privatedns1-examp-mbgbef"

  listener = {
    instance_port     = 22
    instance_protocol = "TCP"
    lb_port           = 22
    lb_protocol       = "TCP"
  }

  security_groups = ["${aws_security_group.bastion-elb-privatedns1-example-com.id}"]
  subnets         = ["${aws_subnet.utility-us-test-1a-privatedns1-example-com.id}"]

  health_check = {
    target              = "TCP:22"
    healthy_threshold   = 2
    unhealthy_threshold = 2
    interval            = 10
    timeout             = 5
  }

  idle_timeout = 300

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "bastion.privatedns1.example.com"
  }
}

resource "aws_iam_instance_profile" "bastions-privatedns1-example-com" {
  name  = "bastions.privatedns1.example.com"
  roles = ["${aws_iam_role.bastions-privatedns1-example-com.name}"]
}

resource "aws_iam_instance_profile" "masters-privatedns1-example-com" {
  name  = "masters.privatedns1.example.com"
  roles = ["${aws_iam_role.masters-privatedns1-example-com.name}"]
}

resource "aws_iam_instance_profile" "nodes-privatedns1-example-com" {
  name  = "nodes.privatedns1.example.com"
  roles = ["${aws_iam_role.nodes-privatedns1-example-com.name}"]
}

resource "aws_iam_role" "bastions-privatedns1-example-com" {
  name               = "bastions.privatedns1.example.com"
  assume_role_policy = "${file("${path.module}/data/aws_iam_role_bastions.privatedns1.example.com_policy")}"
}

resource "aws_iam_role" "masters-privatedns1-example-com" {
  name               = "masters.privatedns1.example.com"
  assume_role_policy = "${file("${path.module}/data/aws_iam_role_masters.privatedns1.example.com_policy")}"
}

resource "aws_iam_role" "nodes-privatedns1-example-com" {
  name               = "nodes.privatedns1.example.com"
  assume_role_policy = "${file("${path.module}/data/aws_iam_role_nodes.privatedns1.example.com_policy")}"
}

resource "aws_iam_role_policy" "bastions-privatedns1-example-com" {
  name   = "bastions.privatedns1.example.com"
  role   = "${aws_iam_role.bastions-privatedns1-example-com.name}"
  policy = "${file("${path.module}/data/aws_iam_role_policy_bastions.privatedns1.example.com_policy")}"
}

resource "aws_iam_role_policy" "masters-privatedns1-example-com" {
  name   = "masters.privatedns1.example.com"
  role   = "${aws_iam_role.masters-privatedns1-example-com.name}"
  policy = "${file("${path.module}/data/aws_iam_role_policy_masters.privatedns1.example.com_policy")}"
}

resource "aws_iam_role_policy" "nodes-privatedns1-example-com" {
  name   = "nodes.privatedns1.example.com"
  role   = "${aws_iam_role.nodes-privatedns1-example-com.name}"
  policy = "${file("${path.module}/data/aws_iam_role_policy_nodes.privatedns1.example.com_policy")}"
}

resource "aws_internet_gateway" "privatedns1-example-com" {
  vpc_id = "${aws_vpc.privatedns1-example-com.id}"

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "privatedns1.example.com"
  }
}

resource "aws_key_pair" "kubernetes-privatedns1-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
  key_name   = "kubernetes.privatedns1.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
  public_key = "${file("${path.module}/data/aws_key_pair_kubernetes.privatedns1.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")}"
}

resource "aws_launch_configuration" "bastion-privatedns1-example-com" {
  name_prefix                 = "bastion.privatedns1.example.com-"
  image_id                    = "ami-12345678"
  instance_type               = "t2.micro"
  key_name                    = "${aws_key_pair.kubernetes-privatedns1-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
  iam_instance_profile        = "${aws_iam_instance_profile.bastions-privatedns1-example-com.id}"
  security_groups             = ["${aws_security_group.bastion-privatedns1-example-com.id}"]
  associate_public_ip_address = true

  root_block_device = {
    volume_type           = "gp2"
    volume_size           = 20
    delete_on_termination = true
  }

  lifecycle = {
    create_before_destroy = true
  }
}

resource "aws_launch_configuration" "master-us-test-1a-masters-privatedns1-example-com" {
  name_prefix                 = "master-us-test-1a.masters.privatedns1.example.com-"
  image_id                    = "ami-12345678"
  instance_type               = "m3.medium"
  key_name                    = "${aws_key_pair.kubernetes-privatedns1-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
  iam_instance_profile        = "${aws_iam_instance_profile.masters-privatedns1-example-com.id}"
  security_groups             = ["${aws_security_group.masters-privatedns1-example-com.id}"]
  associate_public_ip_address = false
  user_data                   = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1a.masters.privatedns1.example.com_user_data")}"

  root_block_device = {
    volume_type           = "gp2"
    volume_size           = 20
    delete_on_termination = true
  }

  ephemeral_block_device = {
    device_name  = "/dev/sdc"
    virtual_name = "ephemeral0"
  }

  lifecycle = {
    create_before_destroy = true
  }
}

resource "aws_launch_configuration" "nodes-privatedns1-example-com" {
  name_prefix                 = "nodes.privatedns1.example.com-"
  image_id                    = "ami-12345678"
  instance_type               = "t2.medium"
  key_name                    = "${aws_key_pair.kubernetes-privatedns1-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
  iam_instance_profile        = "${aws_iam_instance_profile.nodes-privatedns1-example-com.id}"
  security_groups             = ["${aws_security_group.nodes-privatedns1-example-com.id}"]
  associate_public_ip_address = false
  user_data                   = "${file("${path.module}/data/aws_launch_configuration_nodes.privatedns1.example.com_user_data")}"

  root_block_device = {
    volume_type           = "gp2"
    volume_size           = 20
    delete_on_termination = true
  }

  lifecycle = {
    create_before_destroy = true
  }
}

resource "aws_nat_gateway" "us-test-1a-privatedns1-example-com" {
  allocation_id = "${aws_eip.us-test-1a-privatedns1-example-com.id}"
  subnet_id     = "${aws_subnet.utility-us-test-1a-privatedns1-example-com.id}"
}

resource "aws_route" "0-0-0-0--0" {
  route_table_id         = "${aws_route_table.privatedns1-example-com.id}"
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = "${aws_internet_gateway.privatedns1-example-com.id}"
}

resource "aws_route" "private-us-test-1a-0-0-0-0--0" {
  route_table_id         = "${aws_route_table.private-us-test-1a-privatedns1-example-com.id}"
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = "${aws_nat_gateway.us-test-1a-privatedns1-example-com.id}"
}

resource "aws_route53_record" "api-privatedns1-example-com" {
  name = "api.privatedns1.example.com"
  type = "A"

  alias = {
    name                   = "${aws_elb.api-privatedns1-example-com.dns_name}"
    zone_id                = "${aws_elb.api-privatedns1-example-com.zone_id}"
    evaluate_target_health = false
  }

  zone_id = "/hostedzone/Z2AFAKE1ZON3NO"
}

resource "aws_route53_zone_association" "internal-example-com" {
  zone_id = "/hostedzone/Z2AFAKE1ZON3NO"
  vpc_id  = "${aws_vpc.privatedns1-example-com.id}"
}

resource "aws_route_table" "private-us-test-1a-privatedns1-example-com" {
  vpc_id = "${aws_vpc.privatedns1-example-com.id}"

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "private-us-test-1a.privatedns1.example.com"
  }
}

resource "aws_route_table" "privatedns1-example-com" {
  vpc_id = "${aws_vpc.privatedns1-example-com.id}"

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "privatedns1.example.com"
  }
}

resource "aws_route_table_association" "private-us-test-1a-privatedns1-example-com" {
  subnet_id      = "${aws_subnet.us-test-1a-privatedns1-example-com.id}"
  route_table_id = "${aws_route_table.private-us-test-1a-privatedns1-example-com.id}"
}

resource "aws_route_table_association" "utility-us-test-1a-privatedns1-example-com" {
  subnet_id      = "${aws_subnet.utility-us-test-1a-privatedns1-example-com.id}"
  route_table_id = "${aws_route_table.privatedns1-example-com.id}"
}

resource "aws_security_group" "api-elb-privatedns1-example-com" {
  name        = "api-elb.privatedns1.example.com"
  vpc_id      = "${aws_vpc.privatedns1-example-com.id}"
  description = "Security group for api ELB"

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "api-elb.privatedns1.example.com"
  }
}

resource "aws_security_group" "bastion-elb-privatedns1-example-com" {
  name        = "bastion-elb.privatedns1.example.com"
  vpc_id      = "${aws_vpc.privatedns1-example-com.id}"
  description = "Security group for bastion ELB"

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "bastion-elb.privatedns1.example.com"
  }
}

resource "aws_security_group" "bastion-privatedns1-example-com" {
  name        = "bastion.privatedns1.example.com"
  vpc_id      = "${aws_vpc.privatedns1-example-com.id}"
  description = "Security group for bastion"

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "bastion.privatedns1.example.com"
  }
}

resource "aws_security_group" "masters-privatedns1-example-com" {
  name        = "masters.privatedns1.example.com"
  vpc_id      = "${aws_vpc.privatedns1-example-com.id}"
  description = "Security group for masters"

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "masters.privatedns1.example.com"
  }
}

resource "aws_security_group" "nodes-privatedns1-example-com" {
  name        = "nodes.privatedns1.example.com"
  vpc_id      = "${aws_vpc.privatedns1-example-com.id}"
  description = "Security group for nodes"

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "nodes.privatedns1.example.com"
  }
}

resource "aws_security_group_rule" "all-master-to-master" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatedns1-example-com.id}"
  source_security_group_id = "${aws_security_group.masters-privatedns1-example-com.id}"
  from_port                = 0
  to_port                  = 0
  protocol                 = "-1"
}

resource "aws_security_group_rule" "all-master-to-node" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.nodes-privatedns1-example-com.id}"
  source_security_group_id = "${aws_security_group.masters-privatedns1-example-com.id}"
  from_port                = 0
  to_port                  = 0
  protocol                 = "-1"
}

resource "aws_security_group_rule" "all-node-to-node" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.nodes-privatedns1-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatedns1-example-com.id}"
  from_port                = 0
  to_port                  = 0
  protocol                 = "-1"
}

resource "aws_security_group_rule" "api-elb-egress" {
  type              = "egress"
  security_group_id = "${aws_security_group.api-elb-privatedns1-example-com.id}"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "bastion-egress" {
  type              = "egress"
  security_group_id = "${aws_security_group.bastion-privatedns1-example-com.id}"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "bastion-elb-egress" {
  type              = "egress"
  security_group_id = "${aws_security_group.bastion-elb-privatedns1-example-com.id}"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "bastion-to-master-ssh" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatedns1-example-com.id}"
  source_security_group_id = "${aws_security_group.bastion-privatedns1-example-com.id}"
  from_port                = 22
  to_port                  = 22
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "bastion-to-node-ssh" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.nodes-privatedns1-example-com.id}"
  source_security_group_id = "${aws_security_group.bastion-privatedns1-example-com.id}"
  from_port                = 22
  to_port                  = 22
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "https-api-elb-0-0-0-0--0" {
  type              = "ingress"
  security_group_id = "${aws_security_group.api-elb-privatedns1-example-com.id}"
  from_port         = 443
  to_port           = 443
  protocol          = "tcp"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "https-elb-to-master" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatedns1-example-com.id}"
  source_security_group_id = "${aws_security_group.api-elb-privatedns1-example-com.id}"
  from_port                = 443
  to_port                  = 443
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "master-egress" {
  type              = "egress"
  security_group_id = "${aws_security_group.masters-privatedns1-example-com.id}"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-egress" {
  type              = "egress"
  security_group_id = "${aws_security_group.nodes-privatedns1-example-com.id}"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatedns1-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatedns1-example-com.id}"
  from_port                = 1
  to_port                  = 4000
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-4003-65535" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatedns1-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatedns1-example-com.id}"
  from_port                = 4003
  to_port                  = 65535
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-udp-1-65535" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatedns1-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatedns1-example-com.id}"
  from_port                = 1
  to_port                  = 65535
  protocol                 = "udp"
}

resource "aws_security_group_rule" "ssh-elb-to-bastion" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.bastion-privatedns1-example-com.id}"
  source_security_group_id = "${aws_security_group.bastion-elb-privatedns1-example-com.id}"
  from_port                = 22
  to_port                  = 22
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "ssh-external-to-bastion-elb-0-0-0-0--0" {
  type              = "ingress"
  security_group_id = "${aws_security_group.bastion-elb-privatedns1-example-com.id}"
  from_port         = 22
  to_port           = 22
  protocol          = "tcp"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_subnet" "us-test-1a-privatedns1-example-com" {
  vpc_id            = "${aws_vpc.privatedns1-example-com.id}"
  cidr_block        = "172.20.32.0/19"
  availability_zone = "us-test-1a"

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "us-test-1a.privatedns1.example.com"
  }
}

resource "aws_subnet" "utility-us-test-1a-privatedns1-example-com" {
  vpc_id            = "${aws_vpc.privatedns1-example-com.id}"
  cidr_block        = "172.20.4.0/22"
  availability_zone = "us-test-1a"

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "utility-us-test-1a.privatedns1.example.com"
  }
}

resource "aws_vpc" "privatedns1-example-com" {
  cidr_block           = "172.20.0.0/16"
  enable_dns_hostnames = true
  enable_dns_support   = true

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "privatedns1.example.com"
  }
}

resource "aws_vpc_dhcp_options" "privatedns1-example-com" {
  domain_name         = "us-test-1.compute.internal"
  domain_name_servers = ["AmazonProvidedDNS"]

  tags = {
    KubernetesCluster = "privatedns1.example.com"
    Name              = "privatedns1.example.com"
  }
}

resource "aws_vpc_dhcp_options_association" "privatedns1-example-com" {
  vpc_id          = "${aws_vpc.privatedns1-example-com.id}"
  dhcp_options_id = "${aws_vpc_dhcp_options.privatedns1-example-com.id}"
}

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

@@ -0,0 +1,103 @@
apiVersion: kops/v1alpha2
kind: Cluster
metadata:
  creationTimestamp: "2016-12-12T04:13:14Z"
  name: privatedns2.example.com
spec:
  kubernetesApiAccess:
  - 0.0.0.0/0
  channel: stable
  cloudProvider: aws
  configBase: memfs://clusters.example.com/privatedns2.example.com
  dnsZone: private.example.com
  etcdClusters:
  - etcdMembers:
    - instanceGroup: master-us-test-1a
      name: us-test-1a
    name: main
  - etcdMembers:
    - instanceGroup: master-us-test-1a
      name: us-test-1a
    name: events
  kubernetesVersion: v1.4.6
  masterInternalName: api.internal.privatedns2.example.com
  masterPublicName: api.privatedns2.example.com
  networkCIDR: 172.20.0.0/16
  networkID: vpc-123
  networking:
    weave: {}
  nonMasqueradeCIDR: 100.64.0.0/10
  sshAccess:
  - 0.0.0.0/0
  topology:
    dns:
      type: Private
    masters: private
    nodes: private
  subnets:
  - cidr: 172.20.32.0/19
    name: us-test-1a
    type: Private
    zone: us-test-1a
  - cidr: 172.20.4.0/22
    name: utility-us-test-1a
    type: Utility
    zone: us-test-1a

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: "2016-12-12T04:13:15Z"
  name: master-us-test-1a
  labels:
    kops.k8s.io/cluster: privatedns2.example.com
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: m3.medium
  maxSize: 1
  minSize: 1
  role: Master
  subnets:
  - us-test-1a

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: "2016-12-12T04:13:15Z"
  name: nodes
  labels:
    kops.k8s.io/cluster: privatedns2.example.com
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: t2.medium
  maxSize: 2
  minSize: 2
  role: Node
  subnets:
  - us-test-1a

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: "2016-12-14T15:32:41Z"
  name: bastion
  labels:
    kops.k8s.io/cluster: privatedns2.example.com
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: t2.micro
  maxSize: 1
  minSize: 1
  role: Bastion
  subnets:
  - utility-us-test-1a

@@ -0,0 +1,613 @@
output "bastion_security_group_ids" {
  value = ["${aws_security_group.bastion-privatedns2-example-com.id}"]
}

output "cluster_name" {
  value = "privatedns2.example.com"
}

output "master_security_group_ids" {
  value = ["${aws_security_group.masters-privatedns2-example-com.id}"]
}

output "node_security_group_ids" {
  value = ["${aws_security_group.nodes-privatedns2-example-com.id}"]
}

output "node_subnet_ids" {
  value = ["${aws_subnet.us-test-1a-privatedns2-example-com.id}"]
}

output "region" {
  value = "us-test-1"
}

output "vpc_id" {
  value = "vpc-123"
}

resource "aws_autoscaling_attachment" "bastion-privatedns2-example-com" {
  elb                    = "${aws_elb.bastion-privatedns2-example-com.id}"
  autoscaling_group_name = "${aws_autoscaling_group.bastion-privatedns2-example-com.id}"
}

resource "aws_autoscaling_attachment" "master-us-test-1a-masters-privatedns2-example-com" {
  elb                    = "${aws_elb.api-privatedns2-example-com.id}"
  autoscaling_group_name = "${aws_autoscaling_group.master-us-test-1a-masters-privatedns2-example-com.id}"
}

resource "aws_autoscaling_group" "bastion-privatedns2-example-com" {
  name                 = "bastion.privatedns2.example.com"
  launch_configuration = "${aws_launch_configuration.bastion-privatedns2-example-com.id}"
  max_size             = 1
  min_size             = 1
  vpc_zone_identifier  = ["${aws_subnet.utility-us-test-1a-privatedns2-example-com.id}"]

  tag = {
    key                 = "KubernetesCluster"
    value               = "privatedns2.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "Name"
    value               = "bastion.privatedns2.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "k8s.io/role/bastion"
    value               = "1"
    propagate_at_launch = true
  }
}

resource "aws_autoscaling_group" "master-us-test-1a-masters-privatedns2-example-com" {
  name                 = "master-us-test-1a.masters.privatedns2.example.com"
  launch_configuration = "${aws_launch_configuration.master-us-test-1a-masters-privatedns2-example-com.id}"
  max_size             = 1
  min_size             = 1
  vpc_zone_identifier  = ["${aws_subnet.us-test-1a-privatedns2-example-com.id}"]

  tag = {
    key                 = "KubernetesCluster"
    value               = "privatedns2.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "Name"
    value               = "master-us-test-1a.masters.privatedns2.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "k8s.io/role/master"
    value               = "1"
    propagate_at_launch = true
  }
}

resource "aws_autoscaling_group" "nodes-privatedns2-example-com" {
  name                 = "nodes.privatedns2.example.com"
  launch_configuration = "${aws_launch_configuration.nodes-privatedns2-example-com.id}"
  max_size             = 2
  min_size             = 2
  vpc_zone_identifier  = ["${aws_subnet.us-test-1a-privatedns2-example-com.id}"]

  tag = {
    key                 = "KubernetesCluster"
    value               = "privatedns2.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "Name"
    value               = "nodes.privatedns2.example.com"
    propagate_at_launch = true
  }

  tag = {
    key                 = "k8s.io/role/node"
    value               = "1"
    propagate_at_launch = true
  }
}

resource "aws_ebs_volume" "us-test-1a-etcd-events-privatedns2-example-com" {
  availability_zone = "us-test-1a"
  size              = 20
  type              = "gp2"
  encrypted         = false

  tags = {
    KubernetesCluster    = "privatedns2.example.com"
    Name                 = "us-test-1a.etcd-events.privatedns2.example.com"
    "k8s.io/etcd/events" = "us-test-1a/us-test-1a"
    "k8s.io/role/master" = "1"
  }
}

resource "aws_ebs_volume" "us-test-1a-etcd-main-privatedns2-example-com" {
  availability_zone = "us-test-1a"
  size              = 20
  type              = "gp2"
  encrypted         = false

  tags = {
    KubernetesCluster    = "privatedns2.example.com"
    Name                 = "us-test-1a.etcd-main.privatedns2.example.com"
    "k8s.io/etcd/main"   = "us-test-1a/us-test-1a"
    "k8s.io/role/master" = "1"
  }
}

resource "aws_eip" "us-test-1a-privatedns2-example-com" {
  vpc = true
}

resource "aws_elb" "api-privatedns2-example-com" {
  name = "api-privatedns2-example-c-6jft30"

  listener = {
    instance_port     = 443
    instance_protocol = "TCP"
    lb_port           = 443
    lb_protocol       = "TCP"
  }

  security_groups = ["${aws_security_group.api-elb-privatedns2-example-com.id}"]
  subnets         = ["${aws_subnet.utility-us-test-1a-privatedns2-example-com.id}"]

  health_check = {
    target              = "TCP:443"
    healthy_threshold   = 2
    unhealthy_threshold = 2
    interval            = 10
    timeout             = 5
  }

  idle_timeout = 300

  tags = {
    KubernetesCluster = "privatedns2.example.com"
    Name              = "api.privatedns2.example.com"
  }
}

resource "aws_elb" "bastion-privatedns2-example-com" {
  name = "bastion-privatedns2-examp-e704o2"

  listener = {
    instance_port     = 22
    instance_protocol = "TCP"
    lb_port           = 22
    lb_protocol       = "TCP"
  }

  security_groups = ["${aws_security_group.bastion-elb-privatedns2-example-com.id}"]
  subnets         = ["${aws_subnet.utility-us-test-1a-privatedns2-example-com.id}"]

  health_check = {
    target              = "TCP:22"
    healthy_threshold   = 2
    unhealthy_threshold = 2
    interval            = 10
    timeout             = 5
  }

  idle_timeout = 300

  tags = {
    KubernetesCluster = "privatedns2.example.com"
    Name              = "bastion.privatedns2.example.com"
  }
}

resource "aws_iam_instance_profile" "bastions-privatedns2-example-com" {
  name  = "bastions.privatedns2.example.com"
  roles = ["${aws_iam_role.bastions-privatedns2-example-com.name}"]
}

resource "aws_iam_instance_profile" "masters-privatedns2-example-com" {
  name  = "masters.privatedns2.example.com"
  roles = ["${aws_iam_role.masters-privatedns2-example-com.name}"]
}

resource "aws_iam_instance_profile" "nodes-privatedns2-example-com" {
  name  = "nodes.privatedns2.example.com"
  roles = ["${aws_iam_role.nodes-privatedns2-example-com.name}"]
}

resource "aws_iam_role" "bastions-privatedns2-example-com" {
  name               = "bastions.privatedns2.example.com"
  assume_role_policy = "${file("${path.module}/data/aws_iam_role_bastions.privatedns2.example.com_policy")}"
}

resource "aws_iam_role" "masters-privatedns2-example-com" {
  name               = "masters.privatedns2.example.com"
  assume_role_policy = "${file("${path.module}/data/aws_iam_role_masters.privatedns2.example.com_policy")}"
}

resource "aws_iam_role" "nodes-privatedns2-example-com" {
  name               = "nodes.privatedns2.example.com"
  assume_role_policy = "${file("${path.module}/data/aws_iam_role_nodes.privatedns2.example.com_policy")}"
}

resource "aws_iam_role_policy" "bastions-privatedns2-example-com" {
  name   = "bastions.privatedns2.example.com"
  role   = "${aws_iam_role.bastions-privatedns2-example-com.name}"
  policy = "${file("${path.module}/data/aws_iam_role_policy_bastions.privatedns2.example.com_policy")}"
}

resource "aws_iam_role_policy" "masters-privatedns2-example-com" {
  name   = "masters.privatedns2.example.com"
  role   = "${aws_iam_role.masters-privatedns2-example-com.name}"
  policy = "${file("${path.module}/data/aws_iam_role_policy_masters.privatedns2.example.com_policy")}"
}

resource "aws_iam_role_policy" "nodes-privatedns2-example-com" {
  name   = "nodes.privatedns2.example.com"
  role   = "${aws_iam_role.nodes-privatedns2-example-com.name}"
  policy = "${file("${path.module}/data/aws_iam_role_policy_nodes.privatedns2.example.com_policy")}"
}

resource "aws_key_pair" "kubernetes-privatedns2-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
  key_name   = "kubernetes.privatedns2.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
  public_key = "${file("${path.module}/data/aws_key_pair_kubernetes.privatedns2.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")}"
}

resource "aws_launch_configuration" "bastion-privatedns2-example-com" {
  name_prefix                 = "bastion.privatedns2.example.com-"
  image_id                    = "ami-12345678"
  instance_type               = "t2.micro"
  key_name                    = "${aws_key_pair.kubernetes-privatedns2-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
  iam_instance_profile        = "${aws_iam_instance_profile.bastions-privatedns2-example-com.id}"
  security_groups             = ["${aws_security_group.bastion-privatedns2-example-com.id}"]
  associate_public_ip_address = true

  root_block_device = {
    volume_type           = "gp2"
    volume_size           = 20
    delete_on_termination = true
  }

  lifecycle = {
    create_before_destroy = true
  }
}

resource "aws_launch_configuration" "master-us-test-1a-masters-privatedns2-example-com" {
  name_prefix                 = "master-us-test-1a.masters.privatedns2.example.com-"
  image_id                    = "ami-12345678"
  instance_type               = "m3.medium"
  key_name                    = "${aws_key_pair.kubernetes-privatedns2-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
  iam_instance_profile        = "${aws_iam_instance_profile.masters-privatedns2-example-com.id}"
  security_groups             = ["${aws_security_group.masters-privatedns2-example-com.id}"]
  associate_public_ip_address = false
  user_data                   = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1a.masters.privatedns2.example.com_user_data")}"

  root_block_device = {
    volume_type           = "gp2"
    volume_size           = 20
    delete_on_termination = true
  }

  ephemeral_block_device = {
    device_name  = "/dev/sdc"
    virtual_name = "ephemeral0"
  }

  lifecycle = {
    create_before_destroy = true
  }
}

resource "aws_launch_configuration" "nodes-privatedns2-example-com" {
  name_prefix                 = "nodes.privatedns2.example.com-"
  image_id                    = "ami-12345678"
  instance_type               = "t2.medium"
  key_name                    = "${aws_key_pair.kubernetes-privatedns2-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
  iam_instance_profile        = "${aws_iam_instance_profile.nodes-privatedns2-example-com.id}"
  security_groups             = ["${aws_security_group.nodes-privatedns2-example-com.id}"]
  associate_public_ip_address = false
  user_data                   = "${file("${path.module}/data/aws_launch_configuration_nodes.privatedns2.example.com_user_data")}"

  root_block_device = {
    volume_type           = "gp2"
    volume_size           = 20
    delete_on_termination = true
  }

  lifecycle = {
    create_before_destroy = true
  }
}

resource "aws_nat_gateway" "us-test-1a-privatedns2-example-com" {
  allocation_id = "${aws_eip.us-test-1a-privatedns2-example-com.id}"
  subnet_id     = "${aws_subnet.utility-us-test-1a-privatedns2-example-com.id}"
}

resource "aws_route" "0-0-0-0--0" {
  route_table_id         = "${aws_route_table.privatedns2-example-com.id}"
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = "fake-ig"
}

resource "aws_route" "private-us-test-1a-0-0-0-0--0" {
  route_table_id         = "${aws_route_table.private-us-test-1a-privatedns2-example-com.id}"
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = "${aws_nat_gateway.us-test-1a-privatedns2-example-com.id}"
}

resource "aws_route53_record" "api-privatedns2-example-com" {
  name = "api.privatedns2.example.com"
  type = "A"

  alias = {
    name                   = "${aws_elb.api-privatedns2-example-com.dns_name}"
    zone_id                = "${aws_elb.api-privatedns2-example-com.zone_id}"
    evaluate_target_health = false
  }

  zone_id = "/hostedzone/Z3AFAKE1ZOMORE"
}

resource "aws_route_table" "private-us-test-1a-privatedns2-example-com" {
  vpc_id = "vpc-123"

  tags = {
    KubernetesCluster = "privatedns2.example.com"
    Name              = "private-us-test-1a.privatedns2.example.com"
  }
}

resource "aws_route_table" "privatedns2-example-com" {
  vpc_id = "vpc-123"

  tags = {
    KubernetesCluster = "privatedns2.example.com"
    Name              = "privatedns2.example.com"
  }
}

resource "aws_route_table_association" "private-us-test-1a-privatedns2-example-com" {
  subnet_id      = "${aws_subnet.us-test-1a-privatedns2-example-com.id}"
  route_table_id = "${aws_route_table.private-us-test-1a-privatedns2-example-com.id}"
}

resource "aws_route_table_association" "utility-us-test-1a-privatedns2-example-com" {
  subnet_id      = "${aws_subnet.utility-us-test-1a-privatedns2-example-com.id}"
  route_table_id = "${aws_route_table.privatedns2-example-com.id}"
}

resource "aws_security_group" "api-elb-privatedns2-example-com" {
  name        = "api-elb.privatedns2.example.com"
  vpc_id      = "vpc-123"
  description = "Security group for api ELB"

  tags = {
    KubernetesCluster = "privatedns2.example.com"
    Name              = "api-elb.privatedns2.example.com"
  }
}

resource "aws_security_group" "bastion-elb-privatedns2-example-com" {
  name        = "bastion-elb.privatedns2.example.com"
  vpc_id      = "vpc-123"
  description = "Security group for bastion ELB"

  tags = {
    KubernetesCluster = "privatedns2.example.com"
    Name              = "bastion-elb.privatedns2.example.com"
  }
}

resource "aws_security_group" "bastion-privatedns2-example-com" {
  name        = "bastion.privatedns2.example.com"
  vpc_id      = "vpc-123"
  description = "Security group for bastion"

  tags = {
    KubernetesCluster = "privatedns2.example.com"
    Name              = "bastion.privatedns2.example.com"
  }
}

resource "aws_security_group" "masters-privatedns2-example-com" {
  name        = "masters.privatedns2.example.com"
  vpc_id      = "vpc-123"
  description = "Security group for masters"

  tags = {
    KubernetesCluster = "privatedns2.example.com"
    Name              = "masters.privatedns2.example.com"
  }
}

resource "aws_security_group" "nodes-privatedns2-example-com" {
  name        = "nodes.privatedns2.example.com"
  vpc_id      = "vpc-123"
  description = "Security group for nodes"

  tags = {
    KubernetesCluster = "privatedns2.example.com"
    Name              = "nodes.privatedns2.example.com"
  }
}

resource "aws_security_group_rule" "all-master-to-master" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatedns2-example-com.id}"
  source_security_group_id = "${aws_security_group.masters-privatedns2-example-com.id}"
  from_port                = 0
  to_port                  = 0
  protocol                 = "-1"
}

resource "aws_security_group_rule" "all-master-to-node" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.nodes-privatedns2-example-com.id}"
  source_security_group_id = "${aws_security_group.masters-privatedns2-example-com.id}"
  from_port                = 0
  to_port                  = 0
  protocol                 = "-1"
}

resource "aws_security_group_rule" "all-node-to-node" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.nodes-privatedns2-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatedns2-example-com.id}"
  from_port                = 0
  to_port                  = 0
  protocol                 = "-1"
}

resource "aws_security_group_rule" "api-elb-egress" {
  type              = "egress"
  security_group_id = "${aws_security_group.api-elb-privatedns2-example-com.id}"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "bastion-egress" {
  type              = "egress"
  security_group_id = "${aws_security_group.bastion-privatedns2-example-com.id}"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "bastion-elb-egress" {
  type              = "egress"
  security_group_id = "${aws_security_group.bastion-elb-privatedns2-example-com.id}"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "bastion-to-master-ssh" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatedns2-example-com.id}"
  source_security_group_id = "${aws_security_group.bastion-privatedns2-example-com.id}"
  from_port                = 22
  to_port                  = 22
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "bastion-to-node-ssh" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.nodes-privatedns2-example-com.id}"
  source_security_group_id = "${aws_security_group.bastion-privatedns2-example-com.id}"
  from_port                = 22
  to_port                  = 22
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "https-api-elb-0-0-0-0--0" {
  type              = "ingress"
  security_group_id = "${aws_security_group.api-elb-privatedns2-example-com.id}"
  from_port         = 443
  to_port           = 443
  protocol          = "tcp"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "https-elb-to-master" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatedns2-example-com.id}"
  source_security_group_id = "${aws_security_group.api-elb-privatedns2-example-com.id}"
  from_port                = 443
  to_port                  = 443
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "master-egress" {
  type              = "egress"
  security_group_id = "${aws_security_group.masters-privatedns2-example-com.id}"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-egress" {
  type              = "egress"
  security_group_id = "${aws_security_group.nodes-privatedns2-example-com.id}"
  from_port         = 0
  to_port           = 0
  protocol          = "-1"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatedns2-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatedns2-example-com.id}"
  from_port                = 1
  to_port                  = 4000
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-4003-65535" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatedns2-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatedns2-example-com.id}"
  from_port                = 4003
  to_port                  = 65535
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-udp-1-65535" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatedns2-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatedns2-example-com.id}"
  from_port                = 1
  to_port                  = 65535
  protocol                 = "udp"
}

resource "aws_security_group_rule" "ssh-elb-to-bastion" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.bastion-privatedns2-example-com.id}"
  source_security_group_id = "${aws_security_group.bastion-elb-privatedns2-example-com.id}"
  from_port                = 22
  to_port                  = 22
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "ssh-external-to-bastion-elb-0-0-0-0--0" {
  type              = "ingress"
  security_group_id = "${aws_security_group.bastion-elb-privatedns2-example-com.id}"
  from_port         = 22
  to_port           = 22
  protocol          = "tcp"
  cidr_blocks       = ["0.0.0.0/0"]
}

resource "aws_subnet" "us-test-1a-privatedns2-example-com" {
  vpc_id            = "vpc-123"
  cidr_block        = "172.20.32.0/19"
  availability_zone = "us-test-1a"

  tags = {
    KubernetesCluster = "privatedns2.example.com"
    Name              = "us-test-1a.privatedns2.example.com"
  }
}

resource "aws_subnet" "utility-us-test-1a-privatedns2-example-com" {
  vpc_id            = "vpc-123"
  cidr_block        = "172.20.4.0/22"
  availability_zone = "us-test-1a"

  tags = {
    KubernetesCluster = "privatedns2.example.com"
    Name              = "utility-us-test-1a.privatedns2.example.com"
  }
}

@@ -20,6 +20,8 @@ spec:
        version: v1.6.0
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+        # For 1.6, we keep the old tolerations in case of a downgrade to 1.5
+        scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": "master"}]'
    spec:
      tolerations:
      - key: "node-role.kubernetes.io/master"

@@ -72,6 +74,14 @@ rules:
  - get
  - list
  - watch
+- apiGroups:
+  - "extensions"
+  resources:
+  - ingresses
+  verbs:
+  - get
+  - list
+  - watch

---

@@ -28,6 +28,7 @@ spec:
        k8s-app: kube-dns-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+        # For 1.6, we keep the old tolerations in case of a downgrade to 1.5
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      serviceAccountName: kube-dns-autoscaler

@@ -79,6 +80,7 @@ spec:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
+        # For 1.6, we keep the old tolerations in case of a downgrade to 1.5
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      dnsPolicy: Default # Don't use cluster DNS.

@@ -0,0 +1,109 @@
kind: ServiceAccount
apiVersion: v1
metadata:
  name: flannel
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    k8s-app: flannel
    role.kubernetes.io/networking: "1"
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "type": "flannel",
      "delegate": {
        "forceAddress": true,
        "isDefaultGateway": true
      }
    }
  net-conf.json: |
    {
      "Network": "100.64.0.0/10",
      "Backend": {
        "Type": "udp"
      }
    }
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    k8s-app: flannel
    role.kubernetes.io/networking: "1"
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
        role.kubernetes.io/networking: "1"
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      serviceAccountName: flannel
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.7.0
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      - name: install-cni
        image: quay.io/coreos/flannel:v0.7.0
        command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
        resources:
          limits:
            cpu: 10m
            memory: 25Mi
          requests:
            cpu: 10m
            memory: 25Mi
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg

@@ -0,0 +1,89 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kopeio-networking-agent
  namespace: kube-system
  labels:
    k8s-addon: networking.kope.io
    role.kubernetes.io/networking: "1"
spec:
  template:
    metadata:
      labels:
        name: kopeio-networking-agent
        role.kubernetes.io/networking: "1"
    spec:
      hostPID: true
      hostIPC: true
      hostNetwork: true
      containers:
      - resources:
          requests:
            cpu: 20m
            memory: 100Mi
          limits:
            cpu: 20m
            memory: 100Mi
        securityContext:
          privileged: true
        image: kopeio/networking-agent:1.0.20170406
        name: networking-agent
        volumeMounts:
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
      serviceAccountName: kopeio-networking-agent
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      volumes:
      - name: lib-modules
        hostPath:
          path: /lib/modules

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: kopeio-networking-agent
  namespace: kube-system
  labels:
    k8s-addon: networking.kope.io
    role.kubernetes.io/networking: "1"

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  labels:
    k8s-addon: networking.kope.io
  name: kopeio:networking-agent
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
  - patch

---

apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-addon: networking.kope.io
  name: kopeio:networking-agent
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kopeio:networking-agent
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:serviceaccount:kube-system:kopeio-networking-agent

@@ -4,7 +4,7 @@ metadata:
   name: kopeio-networking-agent
   namespace: kube-system
   labels:
-    k8s-addon: networking.addons.k8s.io
+    k8s-addon: networking.kope.io
     role.kubernetes.io/networking: "1"
 spec:
   template:

@@ -20,9 +20,13 @@ spec:
       - resources:
           requests:
             cpu: 20m
+            memory: 100Mi
+          limits:
+            cpu: 20m
+            memory: 100Mi
         securityContext:
           privileged: true
-        image: kopeio/networking-agent:1.0.20161116
+        image: kopeio/networking-agent:1.0.20170406
         name: networking-agent
         volumeMounts:
         - name: lib-modules

@@ -0,0 +1,201 @@
# This ConfigMap can be used to configure a self-hosted Canal installation.
# See `canal.yaml` for an example of a Canal deployment which uses
# the config in this ConfigMap.
kind: ConfigMap
apiVersion: v1
metadata:
  name: canal-config
  namespace: kube-system
data:
  # The interface used by canal for host <-> host communication.
  # If left blank, then the interface is chosen using the node's
  # default route.
  canal_iface: ""

  # Whether or not to masquerade traffic to destinations not within
  # the pod network.
  masquerade: "true"

  # The CNI network configuration to install on each node.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "type": "calico",
      "log_level": "info",
      "datastore_type": "kubernetes",
      "hostname": "__KUBERNETES_NODE_NAME__",
      "ipam": {
        "type": "host-local",
        "subnet": "usePodCidr"
      },
      "policy": {
        "type": "k8s",
        "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
      },
      "kubernetes": {
        "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
        "kubeconfig": "__KUBECONFIG_FILEPATH__",
        "node_name": "__KUBERNETES_NODE_NAME__"
      }
    }

  # Flannel network configuration.
  net-conf.json: |
    {
      "Network": "{{ .NonMasqueradeCIDR }}",
      "Backend": {
        "Type": "vxlan"
      }
    }

---

# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: canal
  namespace: kube-system
  labels:
    k8s-app: canal
    role.kubernetes.io/networking: "1"
spec:
  selector:
    matchLabels:
      k8s-app: canal
  template:
    metadata:
      labels:
        k8s-app: canal
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: CriticalAddonsOnly
        operator: Exists
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v1.0.2
          resources:
            requests:
              cpu: 10m
          env:
            # Use Kubernetes API as the backing datastore.
            - name: DATASTORE_TYPE
              value: "kubernetes"
            # Enable felix logging.
            - name: FELIX_LOGSEVERITYSYS
              value: "info"
            # Don't enable BGP.
            - name: CALICO_NETWORKING_BACKEND
              value: "none"
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # No IP address needed.
            - name: IP
              value: ""
            # Set the hostname based on the k8s node name.
            - name: HOSTNAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v1.5.6
          command: ["/install-cni.sh"]
          resources:
            requests:
              cpu: 10m
          env:
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: cni_network_config
            # Set the hostname based on the k8s node name.
            - name: KUBERNETES_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
        # This container runs flannel using the kube-subnet-mgr backend
        # for allocating subnets.
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.7.0
          command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 10m
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: FLANNELD_IFACE
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: canal_iface
            - name: FLANNELD_IP_MASQ
              valueFrom:
                configMapKeyRef:
                  name: canal-config
                  key: masquerade
          volumeMounts:
          - name: run
            mountPath: /run
          - name: flannel-cfg
            mountPath: /etc/kube-flannel/
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Used by flannel.
        - name: run
          hostPath:
            path: /run
        - name: flannel-cfg
          configMap:
            name: canal-config

@@ -61,6 +61,7 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: canal
+    role.kubernetes.io/networking: "1"
 spec:
   selector:
     matchLabels:

@@ -0,0 +1,327 @@
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # The calico-etcd PetSet service IP:port
  etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
                   {{- range $j, $member := $cluster.Members -}}
                       {{- if $j }},{{ end -}}
                       http://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001
                   {{- end }}"

  # True enables BGP networking, false tells Calico to enforce
  # policy only, using native networking.
  enable_bgp: "true"

  # The CNI network configuration to install on each node.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "type": "calico",
      "etcd_endpoints": "__ETCD_ENDPOINTS__",
      "log_level": "info",
      "ipam": {
        "type": "calico-ipam"
      },
      "policy": {
        "type": "k8s",
        "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
        "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
      },
      "kubernetes": {
        "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
      }
    }

  # The default IP Pool to be created for the cluster.
  # Pod IP addresses will be assigned from this pool.
  ippool.yaml: |
    apiVersion: v1
    kind: ipPool
    metadata:
      cidr: {{ .NonMasqueradeCIDR }}
    spec:
      ipip:
        enabled: true
      nat-outgoing: true

---

# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
    role.kubernetes.io/networking: "1"
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
        role.kubernetes.io/networking: "1"
    spec:
      hostNetwork: true
      serviceAccountName: calico
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: CriticalAddonsOnly
        operator: Exists
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: calico/node:v1.1.1
          resources:
            requests:
              cpu: 10m
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Enable BGP. Disable to enforce policy only.
            - name: CALICO_NETWORKING
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: enable_bgp
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Don't configure a default pool. This is done by the Job
            # below.
            - name: NO_DEFAULT_POOLS
              value: "true"
            # Auto-detect the BGP IP address.
            - name: IP
              value: ""
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: calico/cni:v1.6.1
          resources:
            requests:
              cpu: 10m
          imagePullPolicy: Always
          command: ["/install-cni.sh"]
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d

---

# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
    role.kubernetes.io/networking: "1"
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy-controller
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      serviceAccountName: calico
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: CriticalAddonsOnly
        operator: Exists
      containers:
        - name: calico-policy-controller
          image: calico/kube-policy-controller:v0.5.4
          resources:
            requests:
              cpu: 10m
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The location of the Kubernetes API. Use the default Kubernetes
            # service for API access.
            - name: K8S_API
              value: "https://kubernetes.default:443"
            # Since we're running in the host namespace and might not have KubeDNS
            # access, configure the container's /etc/hosts to resolve
            # kubernetes.default to the correct service clusterIP.
            - name: CONFIGURE_ETC_HOSTS
              value: "true"

---

# This manifest deploys a Job which performs one time
# configuration of Calico
apiVersion: batch/v1
kind: Job
metadata:
  name: configure-calico
  namespace: kube-system
  labels:
    k8s-app: calico
    role.kubernetes.io/networking: "1"
spec:
  template:
    metadata:
      name: configure-calico
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      serviceAccountName: calico
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: CriticalAddonsOnly
        operator: Exists
      restartPolicy: OnFailure
      containers:
        # Writes basic configuration to datastore.
        - name: configure-calico
          image: calico/ctl:v1.1.1
          args:
          - apply
          - -f
          - /etc/config/calico/ippool.yaml
          volumeMounts:
            - name: config-volume
              mountPath: /etc/config
          env:
            # The location of the etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
      volumes:
        - name: config-volume
          configMap:
            name: calico-config
            items:
            - key: ippool.yaml
              path: calico/ippool.yaml

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico
  labels:
    role.kubernetes.io/networking: "1"
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - networkpolicies
  verbs:
  - get
  - list
  - watch

---

apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"

---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico
  labels:
    role.kubernetes.io/networking: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico
subjects:
- kind: ServiceAccount
  name: calico
  namespace: kube-system

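A note on the etcd_endpoints value above: it is a Go text/template that joins one endpoint per etcd member with commas. A minimal, self-contained sketch of how it renders, where the EtcdCluster/EtcdMember types and the ClusterName function are simplified stand-ins for kops' real model:

package main

import (
	"os"
	"text/template"
)

// Simplified stand-ins for the kops model that backs the template.
type EtcdMember struct{ Name string }
type EtcdCluster struct{ Members []*EtcdMember }

func main() {
	tmpl := `{{ $cluster := index .EtcdClusters 0 -}}
{{- range $j, $member := $cluster.Members -}}
{{- if $j }},{{ end -}}
http://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001
{{- end }}`

	t := template.Must(template.New("etcd").Funcs(template.FuncMap{
		// In kops this function returns the cluster's DNS name;
		// the value here is a placeholder.
		"ClusterName": func() string { return "example.cluster.k8s.local" },
	}).Parse(tmpl))

	data := struct{ EtcdClusters []*EtcdCluster }{
		EtcdClusters: []*EtcdCluster{{Members: []*EtcdMember{{Name: "a"}, {Name: "b"}, {Name: "c"}}}},
	}
	// Prints a single comma-joined line:
	// http://etcd-a.internal.example.cluster.k8s.local:4001,http://etcd-b...,http://etcd-c...
	t.Execute(os.Stdout, data)
}

The leading/trailing "-" markers trim the surrounding whitespace, which is what collapses the multi-line template into the one-line string Calico expects in ETCD_ENDPOINTS.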
@@ -77,7 +77,7 @@ spec:
         # container programs network policy and routes on each
         # host.
         - name: calico-node
-          image: calico/node:v1.0.2
+          image: calico/node:v1.1.1
           resources:
             requests:
               cpu: 10m

@@ -116,7 +116,7 @@ spec:
         # This container installs the Calico CNI binaries
         # and CNI network config file on each node.
         - name: install-cni
-          image: calico/cni:v1.5.6
+          image: calico/cni:v1.6.1
           resources:
             requests:
               cpu: 10m

@@ -189,7 +189,7 @@ spec:
       hostNetwork: true
       containers:
         - name: calico-policy-controller
-          image: calico/kube-policy-controller:v0.5.2
+          image: calico/kube-policy-controller:v0.5.4
           resources:
             requests:
               cpu: 10m

@@ -237,7 +237,7 @@ spec:
       containers:
         # Writes basic configuration to datastore.
         - name: configure-calico
-          image: calico/ctl:v1.0.2
+          image: calico/ctl:v1.1.1
           args:
           - apply
           - -f

@@ -2,6 +2,8 @@ kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: weave-net
+  labels:
+    role.kubernetes.io/networking: "1"
 rules:
 - apiGroups:
   - ""

@@ -27,11 +29,15 @@ kind: ServiceAccount
 metadata:
   name: weave-net
   namespace: kube-system
+  labels:
+    role.kubernetes.io/networking: "1"
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: weave-net
+  labels:
+    role.kubernetes.io/networking: "1"
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole

@@ -46,11 +52,15 @@ kind: DaemonSet
 metadata:
   name: weave-net
   namespace: kube-system
+  labels:
+    name: weave-net
+    role.kubernetes.io/networking: "1"
 spec:
   template:
     metadata:
       labels:
         name: weave-net
+        role.kubernetes.io/networking: "1"
     spec:
       hostNetwork: true
       hostPID: true

@@ -82,12 +92,20 @@ spec:
             mountPath: /lib/modules
         resources:
           requests:
-            cpu: 10m
+            cpu: 100m
+            memory: 200Mi
+          limits:
+            cpu: 100m
+            memory: 200Mi
       - name: weave-npc
         image: weaveworks/weave-npc:1.9.4
         resources:
           requests:
-            cpu: 10m
+            cpu: 100m
+            memory: 200Mi
+          limits:
+            cpu: 100m
+            memory: 200Mi
         securityContext:
           privileged: true
       restartPolicy: Always

@@ -4,6 +4,7 @@ metadata:
   name: weave-net
   namespace: kube-system
   labels:
+    name: weave-net
     role.kubernetes.io/networking: "1"
 spec:
   template:

@@ -26,7 +27,7 @@ spec:
       hostPID: true
       containers:
         - name: weave
-          image: weaveworks/weave-kube:1.9.2
+          image: weaveworks/weave-kube:1.9.4
           command:
             - /home/weave/launch.sh
           livenessProbe:

@@ -58,7 +59,7 @@ spec:
               cpu: 100m
               memory: 200Mi
         - name: weave-npc
-          image: weaveworks/weave-npc:1.9.2
+          image: weaveworks/weave-npc:1.9.4
           resources:
             requests:
               cpu: 100m

@@ -1,84 +0,0 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: weave-net
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"
spec:
  template:
    metadata:
      labels:
        name: weave-net
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/tolerations: |
          [
            {
              "key": "dedicated",
              "operator": "Equal",
              "value": "master",
              "effect": "NoSchedule"
            }
          ]
    spec:
      hostNetwork: true
      hostPID: true
      containers:
        - name: weave
          image: weaveworks/weave-kube:1.9.0
          command:
            - /home/weave/launch.sh
          livenessProbe:
            initialDelaySeconds: 30
            httpGet:
              host: 127.0.0.1
              path: /status
              port: 6784
          securityContext:
            privileged: true
          volumeMounts:
            - name: weavedb
              mountPath: /weavedb
            - name: cni-bin
              mountPath: /host/opt
            - name: cni-bin2
              mountPath: /host/home
            - name: cni-conf
              mountPath: /host/etc
            - name: dbus
              mountPath: /host/var/lib/dbus
          resources:
            requests:
              cpu: 100m
              memory: 200Mi
            limits:
              cpu: 100m
              memory: 200Mi
        - name: weave-npc
          image: weaveworks/weave-npc:1.9.0
          resources:
            requests:
              cpu: 100m
              memory: 200Mi
            limits:
              cpu: 100m
              memory: 200Mi
          securityContext:
            privileged: true
      restartPolicy: Always
      volumes:
        - name: weavedb
          emptyDir: {}
        - name: cni-bin
          hostPath:
            path: /opt
        - name: cni-bin2
          hostPath:
            path: /home
        - name: cni-conf
          hostPath:
            path: /etc
        - name: dbus
          hostPath:
            path: /var/lib/dbus

@@ -1,89 +0,0 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: weave-net
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"
spec:
  template:
    metadata:
      labels:
        name: weave-net
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/tolerations: |
          [
            {
              "key": "dedicated",
              "operator": "Equal",
              "value": "master",
              "effect": "NoSchedule"
            }
          ]
    spec:
      hostNetwork: true
      hostPID: true
      containers:
        - name: weave
          image: weaveworks/weave-kube:1.9.3
          command:
            - /home/weave/launch.sh
          livenessProbe:
            initialDelaySeconds: 30
            httpGet:
              host: 127.0.0.1
              path: /status
              port: 6784
          securityContext:
            privileged: true
          volumeMounts:
            - name: weavedb
              mountPath: /weavedb
            - name: cni-bin
              mountPath: /host/opt
            - name: cni-bin2
              mountPath: /host/home
            - name: cni-conf
              mountPath: /host/etc
            - name: dbus
              mountPath: /host/var/lib/dbus
            - name: lib-modules
              mountPath: /lib/modules
          resources:
            requests:
              cpu: 100m
              memory: 200Mi
            limits:
              cpu: 100m
              memory: 200Mi
        - name: weave-npc
          image: weaveworks/weave-npc:1.9.3
          resources:
            requests:
              cpu: 100m
              memory: 200Mi
            limits:
              cpu: 100m
              memory: 200Mi
          securityContext:
            privileged: true
      restartPolicy: Always
      volumes:
        - name: weavedb
          emptyDir: {}
        - name: cni-bin
          hostPath:
            path: /opt
        - name: cni-bin2
          hostPath:
            path: /home
        - name: cni-conf
          hostPath:
            path: /etc
        - name: dbus
          hostPath:
            path: /var/lib/dbus
        - name: lib-modules
          hostPath:
            path: /lib/modules

@@ -1,3 +0,0 @@
{
  "ifNotExists": true
}

@@ -1,3 +0,0 @@
{
  "manageState": false
}

@@ -132,7 +132,7 @@ func (e *DNSZone) findExisting(cloud awsup.AWSCloud) (*route53.GetHostedZoneOutp
 	var zones []*route53.HostedZone
 	for _, zone := range response.HostedZones {
-		if aws.StringValue(zone.Name) == findName {
+		if aws.StringValue(zone.Name) == findName && fi.BoolValue(zone.Config.PrivateZone) == fi.BoolValue(e.Private) {
 			zones = append(zones, zone)
 		}
 	}

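The matching now also compares the zone's private flag against the desired one. Assuming fi.BoolValue follows the usual nil-as-false convention, a minimal sketch of the helper that makes the comparison safe when either pointer is unset:

// BoolValue returns the value of a *bool, treating nil as false.
// Sketch of the assumed semantics; a nil e.Private therefore only
// matches public hosted zones.
func BoolValue(b *bool) bool {
	if b == nil {
		return false
	}
	return *b
}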
@@ -219,10 +219,9 @@ func (_ *DNSZone) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *DNSZone) error
 	return nil
 }

-type terraformRoute53Zone struct {
-	Name      *string              `json:"name"`
-	VPCID     *terraform.Literal   `json:"vpc_id,omitempty"`
-	Tags      map[string]string    `json:"tags,omitempty"`
+type terraformRoute53ZoneAssociation struct {
+	ZoneID    *terraform.Literal   `json:"zone_id"`
+	VPCID     *terraform.Literal   `json:"vpc_id"`
 	Lifecycle *terraform.Lifecycle `json:"lifecycle,omitempty"`
 }

@@ -244,34 +243,45 @@ func (_ *DNSZone) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *DNSZone) error {
 		glog.Infof("Existing zone %q found; will configure TF to reuse", aws.StringValue(z.HostedZone.Name))

 		e.ZoneID = z.HostedZone.Id
-	}
-
-	if z == nil {
-		// Because we expect most users to create their zones externally,
-		// we now block hostedzone creation in terraform.
-		// This lets us perform deeper DNS validation, but also solves the problem
-		// that otherwise we don't know if TF created the hosted zone
-		// (in which case we should output it) or whether it already existed (in which case we should not)
-		// The root problem here is that TF doesn't have a strong notion of an unmanaged resource
-		return fmt.Errorf("Creation of Route53 hosted zones is not supported for terraform")
-		//tf := &terraformRoute53Zone{
-		//	Name: e.Name,
-		//	//Tags: cloud.BuildTags(e.Name, nil),
-		//}
-		//
-		//tf.Lifecycle = &terraform.Lifecycle{
-		//	PreventDestroy: fi.Bool(true),
-		//}
-		//
-		//return t.RenderResource("aws_route53_zone", *e.Name, tf)
-	} else {
-		// Same problem here also...
+
+		// If the user specifies dns=private we'll have a non-nil PrivateVPC that specifies the VPC
+		// that should be used with the private Route53 zone. If the zone doesn't already know about the
+		// VPC, we add that association.
 		if e.PrivateVPC != nil {
-			return fmt.Errorf("Route53 private hosted zones are not supported for terraform")
+			assocNeeded := true
+			var vpcName string
+			if e.PrivateVPC.ID != nil {
+				vpcName = *e.PrivateVPC.ID
+				for _, vpc := range z.VPCs {
+					if *vpc.VPCId == vpcName {
+						glog.Infof("VPC %q already associated with zone %q", vpcName, aws.StringValue(z.HostedZone.Name))
+						assocNeeded = false
+					}
+				}
+			} else {
+				vpcName = *e.PrivateVPC.Name
+			}
+
+			if assocNeeded {
+				glog.Infof("No association between VPC %q and zone %q; adding", vpcName, aws.StringValue(z.HostedZone.Name))
+				tf := &terraformRoute53ZoneAssociation{
+					ZoneID: terraform.LiteralFromStringValue(*e.ZoneID),
+					VPCID:  e.PrivateVPC.TerraformLink(),
+				}
+				return t.RenderResource("aws_route53_zone_association", *e.Name, tf)
+			}
 		}
+
+		return nil
 	}

-	return nil
+	// Because we expect most users to create their zones externally,
+	// we now block hostedzone creation in terraform.
+	// This lets us perform deeper DNS validation, but also solves the problem
+	// that otherwise we don't know if TF created the hosted zone
+	// (in which case we should output it) or whether it already existed (in which case we should not)
+	// The root problem here is that TF doesn't have a strong notion of an unmanaged resource
	return fmt.Errorf("Creation of Route53 hosted zones is not supported for terraform")
 }

 func (e *DNSZone) TerraformLink() *terraform.Literal {

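For reference, a rough sketch of the JSON shape the association renders to on the Terraform side. This uses plain strings and placeholder IDs; the real code goes through kops' terraform.Literal indirection, and the resource name comes from *e.Name:

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for terraformRoute53ZoneAssociation, with plain
// strings where kops uses *terraform.Literal references.
type zoneAssociation struct {
	ZoneID string `json:"zone_id"`
	VPCID  string `json:"vpc_id"`
}

func main() {
	// Placeholder IDs; in a real run these come from the discovered
	// hosted zone and the cluster's VPC task.
	assoc := zoneAssociation{ZoneID: "Z1AFAKEZONEID", VPCID: "vpc-123"}
	out, _ := json.MarshalIndent(map[string]interface{}{
		"resource": map[string]interface{}{
			"aws_route53_zone_association": map[string]interface{}{
				"privatedns2-example-com": assoc,
			},
		},
	}, "", "  ")
	fmt.Println(string(out))
}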
@@ -83,6 +83,9 @@ func (e *LaunchConfiguration) Find(c *fi.Context) (*LaunchConfiguration, error)
 		}
 		return true
 	})
+	if err != nil {
+		return nil, fmt.Errorf("error listing AutoscalingLaunchConfigurations: %v", err)
+	}

 	if len(configurations) == 0 {
 		return nil, nil

@@ -111,6 +114,7 @@ func (e *LaunchConfiguration) Find(c *fi.Context) (*LaunchConfiguration, error)
 		AssociatePublicIP:  lc.AssociatePublicIpAddress,
 		IAMInstanceProfile: &IAMInstanceProfile{Name: lc.IamInstanceProfile},
 		SpotPrice:          aws.StringValue(lc.SpotPrice),
+		Tenancy:            lc.PlacementTenancy,
 	}

 	securityGroups := []*SecurityGroup{}

@@ -339,6 +343,7 @@ type terraformLaunchConfiguration struct {
 	EphemeralBlockDevice []*terraformBlockDevice `json:"ephemeral_block_device,omitempty"`
 	Lifecycle            *terraform.Lifecycle    `json:"lifecycle,omitempty"`
 	SpotPrice            *string                 `json:"spot_price,omitempty"`
+	PlacementTenancy     *string                 `json:"placement_tenancy,omitempty"`
 }

 type terraformBlockDevice struct {

@@ -377,6 +382,10 @@ func (_ *LaunchConfiguration) RenderTerraform(t *terraform.TerraformTarget, a, e
 		tf.KeyName = e.SSHKey.TerraformLink()
 	}

+	if e.Tenancy != nil {
+		tf.PlacementTenancy = e.Tenancy
+	}
+
 	for _, sg := range e.SecurityGroups {
 		tf.SecurityGroups = append(tf.SecurityGroups, sg.TerraformLink())
 	}

@@ -450,6 +459,7 @@ type cloudformationLaunchConfiguration struct {
 	SecurityGroups   []*cloudformation.Literal `json:"SecurityGroups,omitempty"`
 	SpotPrice        *string                   `json:"SpotPrice,omitempty"`
 	UserData         *string                   `json:"UserData,omitempty"`
+	PlacementTenancy *string                   `json:"PlacementTenancy,omitempty"`

 	//NamePrefix *string `json:"name_prefix,omitempty"`
 	//Lifecycle *cloudformation.Lifecycle `json:"lifecycle,omitempty"`

@@ -498,6 +508,10 @@ func (_ *LaunchConfiguration) RenderCloudformation(t *cloudformation.Cloudformat
 		cf.KeyName = e.SSHKey.Name
 	}

+	if e.Tenancy != nil {
+		cf.PlacementTenancy = e.Tenancy
+	}
+
 	for _, sg := range e.SecurityGroups {
 		cf.SecurityGroups = append(cf.SecurityGroups, sg.CloudformationLink())
 	}

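Both new PlacementTenancy fields use omitempty, so launch configurations without an explicit tenancy render exactly as before. A quick, runnable sketch of that behavior with a pared-down stand-in struct:

package main

import (
	"encoding/json"
	"fmt"
)

// Pared-down stand-in for terraformLaunchConfiguration: with omitempty,
// a nil PlacementTenancy leaves the key out of the rendered JSON entirely.
type launchConfig struct {
	SpotPrice        *string `json:"spot_price,omitempty"`
	PlacementTenancy *string `json:"placement_tenancy,omitempty"`
}

func main() {
	dedicated := "dedicated"

	def, _ := json.Marshal(launchConfig{})
	fmt.Println(string(def)) // {}

	ded, _ := json.Marshal(launchConfig{PlacementTenancy: &dedicated})
	fmt.Println(string(ded)) // {"placement_tenancy":"dedicated"}
}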
@@ -21,7 +21,6 @@ import (

 	channelsapi "k8s.io/kops/channels/pkg/api"
 	"k8s.io/kops/pkg/apis/kops"
-	"k8s.io/kops/pkg/apis/kops/util"
 	"k8s.io/kops/upup/pkg/fi"
 	"k8s.io/kops/upup/pkg/fi/fitasks"
 	"k8s.io/kops/upup/pkg/fi/utils"

@@ -73,11 +72,6 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 	addons.Kind = "Addons"
 	addons.ObjectMeta.Name = "bootstrap"

-	kv, err := util.ParseKubernetesVersion(b.cluster.Spec.KubernetesVersion)
-	if err != nil {
-		return nil, nil, fmt.Errorf("unable to determine kubernetes version from %q", b.cluster.Spec.KubernetesVersion)
-	}
-
 	{
 		key := "core.addons.k8s.io"
 		version := "1.4.0"

@@ -95,25 +89,37 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 	{
 		key := "kube-dns.addons.k8s.io"
+		version := "1.6.1-alpha.2"

-		var version string
-		var location string
-		switch {
-		case kv.Major == 1 && kv.Minor <= 5:
-			version = "1.5.1"
-			location = key + "/k8s-1.5.yaml"
-		default:
-			version = "1.6.0"
-			location = key + "/k8s-1.6.yaml"
-		}
+		{
+			location := key + "/pre-k8s-1.6.yaml"
+			id := "pre-k8s-1.6"
+
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          map[string]string{"k8s-addon": key},
+				Manifest:          fi.String(location),
+				KubernetesVersion: "<1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}

-		addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-			Name:     fi.String(key),
-			Version:  fi.String(version),
-			Selector: map[string]string{"k8s-addon": key},
-			Manifest: fi.String(location),
-		})
-		manifests[key] = "addons/" + location
+		{
+			location := key + "/k8s-1.6.yaml"
+			id := "k8s-1.6"
+
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          map[string]string{"k8s-addon": key},
+				Manifest:          fi.String(location),
+				KubernetesVersion: ">=1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
 	}

 	{

@@ -133,27 +139,37 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 	{
 		key := "dns-controller.addons.k8s.io"
+		version := "1.6.1-alpha.2"

-		var version string
-		var location string
-		switch {
-		case kv.Major == 1 && kv.Minor <= 5:
-			// This is awkward... we would like to do version 1.6.0,
-			// but if we do then we won't get the new manifest when we upgrade to 1.6.0
-			version = "1.5.3"
-			location = key + "/k8s-1.5.yaml"
-		default:
-			version = "1.6.0"
-			location = key + "/k8s-1.6.yaml"
-		}
+		{
+			location := key + "/pre-k8s-1.6.yaml"
+			id := "pre-k8s-1.6"
+
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          map[string]string{"k8s-addon": key},
+				Manifest:          fi.String(location),
+				KubernetesVersion: "<1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}

-		addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-			Name:     fi.String(key),
-			Version:  fi.String(version),
-			Selector: map[string]string{"k8s-addon": key},
-			Manifest: fi.String(location),
-		})
-		manifests[key] = "addons/" + location
+		{
+			location := key + "/k8s-1.6.yaml"
+			id := "k8s-1.6"
+
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          map[string]string{"k8s-addon": key},
+				Manifest:          fi.String(location),
+				KubernetesVersion: ">=1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
 	}

 	{

@@ -184,95 +200,183 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 	// TODO: Create "empty" configurations for others, so we can delete e.g. the kopeio configuration
 	// if we switch to kubenet?

+	// TODO: Create configuration object for cni providers (maybe create it but orphan it)?
+
+	networkingSelector := map[string]string{"role.kubernetes.io/networking": "1"}
+
 	if b.cluster.Spec.Networking.Kopeio != nil {
 		key := "networking.kope.io"
-		version := "1.0.20161116"
+		version := "1.0.20170406"

-		// TODO: Create configuration object for cni providers (maybe create it but orphan it)?
-		location := key + "/v" + version + ".yaml"
+		{
+			location := key + "/pre-k8s-1.6.yaml"
+			id := "pre-k8s-1.6"

-		addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-			Name:     fi.String(key),
-			Version:  fi.String(version),
-			Selector: map[string]string{"role.kubernetes.io/networking": "1"},
-			Manifest: fi.String(location),
-		})
-
-		manifests[key] = "addons/" + location
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          networkingSelector,
+				Manifest:          fi.String(location),
+				KubernetesVersion: "<1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
+
+		{
+			location := key + "/k8s-1.6.yaml"
+			id := "k8s-1.6"
+
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          networkingSelector,
+				Manifest:          fi.String(location),
+				KubernetesVersion: ">=1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
 	}

 	if b.cluster.Spec.Networking.Weave != nil {
 		key := "networking.weave"
-		var version string
-		switch {
-		case kv.Major == 1 && kv.Minor <= 5:
-			version = "1.9.3"
-		default:
-			version = "1.9.4"
-		}
+		version := "1.9.4"

-		// TODO: Create configuration object for cni providers (maybe create it but orphan it)?
-		location := key + "/v" + version + ".yaml"
+		{
+			location := key + "/pre-k8s-1.6.yaml"
+			id := "pre-k8s-1.6"
+
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          networkingSelector,
+				Manifest:          fi.String(location),
+				KubernetesVersion: "<1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}

-		addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-			Name:     fi.String(key),
-			Version:  fi.String(version),
-			Selector: map[string]string{"role.kubernetes.io/networking": "1"},
-			Manifest: fi.String(location),
-		})
-
-		manifests[key] = "addons/" + location
+		{
+			location := key + "/k8s-1.6.yaml"
+			id := "k8s-1.6"
+
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          networkingSelector,
+				Manifest:          fi.String(location),
+				KubernetesVersion: ">=1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
 	}

 	if b.cluster.Spec.Networking.Flannel != nil {
 		key := "networking.flannel"
 		version := "0.7.0"

-		// TODO: Create configuration object for cni providers (maybe create it but orphan it)?
-		location := key + "/v" + version + ".yaml"
+		{
+			location := key + "/pre-k8s-1.6.yaml"
+			id := "pre-k8s-1.6"

-		addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-			Name:     fi.String(key),
-			Version:  fi.String(version),
-			Selector: map[string]string{"role.kubernetes.io/networking": "1"},
-			Manifest: fi.String(location),
-		})
-
-		manifests[key] = "addons/" + location
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          networkingSelector,
+				Manifest:          fi.String(location),
+				KubernetesVersion: "<1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
+
+		{
+			location := key + "/k8s-1.6.yaml"
+			id := "k8s-1.6"
+
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          networkingSelector,
+				Manifest:          fi.String(location),
+				KubernetesVersion: ">=1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
 	}

 	if b.cluster.Spec.Networking.Calico != nil {
 		key := "networking.projectcalico.org"
-		version := "2.0.2"
+		version := "2.1.1"

-		// TODO: Create configuration object for cni providers (maybe create it but orphan it)?
-		location := key + "/v" + version + ".yaml"
+		{
+			location := key + "/pre-k8s-1.6.yaml"
+			id := "pre-k8s-1.6"

-		addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-			Name:     fi.String(key),
-			Version:  fi.String(version),
-			Selector: map[string]string{"role.kubernetes.io/networking": "1"},
-			Manifest: fi.String(location),
-		})
-
-		manifests[key] = "addons/" + location
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          networkingSelector,
+				Manifest:          fi.String(location),
+				KubernetesVersion: "<1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
+
+		{
+			location := key + "/k8s-1.6.yaml"
+			id := "k8s-1.6"
+
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          networkingSelector,
+				Manifest:          fi.String(location),
+				KubernetesVersion: ">=1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
 	}

 	if b.cluster.Spec.Networking.Canal != nil {
 		key := "networking.projectcalico.org.canal"
 		version := "1.0"

-		// TODO: Create configuration object for cni providers (maybe create it but orphan it)?
-		location := key + "/v" + version + ".yaml"
+		{
+			location := key + "/pre-k8s-1.6.yaml"
+			id := "pre-k8s-1.6"

-		addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
-			Name:     fi.String(key),
-			Version:  fi.String(version),
-			Selector: map[string]string{"role.kubernetes.io/networking": "1"},
-			Manifest: fi.String(location),
-		})
-
-		manifests[key] = "addons/" + location
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          networkingSelector,
+				Manifest:          fi.String(location),
+				KubernetesVersion: "<1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
+
+		{
+			location := key + "/k8s-1.6.yaml"
+			id := "k8s-1.6"
+
+			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
+				Name:              fi.String(key),
+				Version:           fi.String(version),
+				Selector:          networkingSelector,
+				Manifest:          fi.String(location),
+				KubernetesVersion: ">=1.6.0",
+				Id:                id,
+			})
+			manifests[key+"-"+id] = "addons/" + location
+		}
 	}

 	return addons, manifests, nil

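The same pre-1.6/1.6 registration pattern now repeats verbatim for every provider. A hypothetical helper like the sketch below could collapse each pair into one call; the function name and signature are illustrative, not part of the change, and it assumes the file's existing imports (channelsapi, fi):

// addVersionedAddon registers both manifest flavours of an addon: one for
// clusters below Kubernetes 1.6 and one for 1.6 and later. Refactoring
// sketch only; not kops' actual code.
func addVersionedAddon(addons *channelsapi.Addons, manifests map[string]string,
	key, version string, selector map[string]string) {
	for _, v := range []struct{ id, k8sRange string }{
		{"pre-k8s-1.6", "<1.6.0"},
		{"k8s-1.6", ">=1.6.0"},
	} {
		// The manifest filename matches the id, e.g. "pre-k8s-1.6.yaml".
		location := key + "/" + v.id + ".yaml"
		addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
			Name:              fi.String(key),
			Version:           fi.String(version),
			Selector:          selector,
			Manifest:          fi.String(location),
			KubernetesVersion: v.k8sRange,
			Id:                v.id,
		})
		manifests[key+"-"+v.id] = "addons/" + location
	}
}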
@@ -50,7 +50,7 @@ func usesCNI(c *api.Cluster) bool {
 	}

 	if networkConfig.Flannel != nil {
-		// Weave uses CNI
+		// Flannel uses CNI
 		return true
 	}

@@ -43,9 +43,10 @@ const (

 var masterMachineTypeExceptions = map[string]string{
 	// Some regions do not (currently) support the m3 family; the c4.large is the cheapest non-burstable instance
 	"us-east-2":      "c4.large",
 	"ca-central-1":   "c4.large",
 	"eu-west-2":      "c4.large",
+	"ap-northeast-2": "c4.large",
 }

 var awsDedicatedInstanceExceptions = map[string]bool{

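A hypothetical sketch of how such an exception map is typically consulted, falling back to the default master size when a region has no entry (illustrative helper, not kops' actual code):

// pickMasterMachineType returns the per-region exception if one exists,
// otherwise the given default machine type.
func pickMasterMachineType(region, defaultType string) string {
	if mt, ok := masterMachineTypeExceptions[region]; ok {
		return mt
	}
	return defaultType
}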
@@ -9,28 +9,55 @@ spec:
     selector:
       k8s-addon: core.addons.k8s.io
     version: 1.4.0
-  - manifest: kube-dns.addons.k8s.io/k8s-1.5.yaml
+  - id: pre-k8s-1.6
+    kubernetesVersion: <1.6.0
+    manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml
     name: kube-dns.addons.k8s.io
     selector:
       k8s-addon: kube-dns.addons.k8s.io
-    version: 1.5.1
+    version: 1.6.1-alpha.2
+  - id: k8s-1.6
+    kubernetesVersion: '>=1.6.0'
+    manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
+    name: kube-dns.addons.k8s.io
+    selector:
+      k8s-addon: kube-dns.addons.k8s.io
+    version: 1.6.1-alpha.2
   - manifest: limit-range.addons.k8s.io/v1.5.0.yaml
     name: limit-range.addons.k8s.io
     selector:
       k8s-addon: limit-range.addons.k8s.io
     version: 1.5.0
-  - manifest: dns-controller.addons.k8s.io/k8s-1.5.yaml
+  - id: pre-k8s-1.6
+    kubernetesVersion: <1.6.0
+    manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml
     name: dns-controller.addons.k8s.io
     selector:
       k8s-addon: dns-controller.addons.k8s.io
-    version: 1.5.3
+    version: 1.6.1-alpha.2
+  - id: k8s-1.6
+    kubernetesVersion: '>=1.6.0'
+    manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml
+    name: dns-controller.addons.k8s.io
+    selector:
+      k8s-addon: dns-controller.addons.k8s.io
+    version: 1.6.1-alpha.2
   - manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
     name: storage-aws.addons.k8s.io
     selector:
       k8s-addon: storage-aws.addons.k8s.io
     version: 1.6.0
-  - manifest: networking.kope.io/v1.0.20161116.yaml
+  - id: pre-k8s-1.6
+    kubernetesVersion: <1.6.0
+    manifest: networking.kope.io/pre-k8s-1.6.yaml
     name: networking.kope.io
     selector:
       role.kubernetes.io/networking: "1"
-    version: 1.0.20161116
+    version: 1.0.20170406
+  - id: k8s-1.6
+    kubernetesVersion: '>=1.6.0'
+    manifest: networking.kope.io/k8s-1.6.yaml
+    name: networking.kope.io
+    selector:
+      role.kubernetes.io/networking: "1"
+    version: 1.0.20170406
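A detail worth noting in the manifest above: kubernetesVersion: '>=1.6.0' must be quoted because a plain YAML scalar beginning with > would be read as a folded-block indicator, while <1.6.0 has no special meaning and is safe to leave unquoted.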
@@ -9,21 +9,39 @@ spec:
     selector:
       k8s-addon: core.addons.k8s.io
     version: 1.4.0
-  - manifest: kube-dns.addons.k8s.io/k8s-1.5.yaml
+  - id: pre-k8s-1.6
+    kubernetesVersion: <1.6.0
+    manifest: kube-dns.addons.k8s.io/pre-k8s-1.6.yaml
     name: kube-dns.addons.k8s.io
     selector:
       k8s-addon: kube-dns.addons.k8s.io
-    version: 1.5.1
+    version: 1.6.1-alpha.2
+  - id: k8s-1.6
+    kubernetesVersion: '>=1.6.0'
+    manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml
+    name: kube-dns.addons.k8s.io
+    selector:
+      k8s-addon: kube-dns.addons.k8s.io
+    version: 1.6.1-alpha.2
   - manifest: limit-range.addons.k8s.io/v1.5.0.yaml
     name: limit-range.addons.k8s.io
     selector:
       k8s-addon: limit-range.addons.k8s.io
     version: 1.5.0
-  - manifest: dns-controller.addons.k8s.io/k8s-1.5.yaml
+  - id: pre-k8s-1.6
+    kubernetesVersion: <1.6.0
+    manifest: dns-controller.addons.k8s.io/pre-k8s-1.6.yaml
     name: dns-controller.addons.k8s.io
     selector:
       k8s-addon: dns-controller.addons.k8s.io
-    version: 1.5.3
+    version: 1.6.1-alpha.2
+  - id: k8s-1.6
+    kubernetesVersion: '>=1.6.0'
+    manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml
+    name: dns-controller.addons.k8s.io
+    selector:
+      k8s-addon: dns-controller.addons.k8s.io
+    version: 1.6.1-alpha.2
   - manifest: storage-aws.addons.k8s.io/v1.6.0.yaml
     name: storage-aws.addons.k8s.io
     selector:
@@ -37,7 +37,6 @@ import (
 	"k8s.io/kops/upup/pkg/fi/nodeup/cloudinit"
 	"k8s.io/kops/upup/pkg/fi/nodeup/local"
 	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
-	"k8s.io/kops/upup/pkg/fi/nodeup/tags"
 	"k8s.io/kops/upup/pkg/fi/utils"
 	"k8s.io/kops/util/pkg/vfs"
 )
@@ -208,7 +207,6 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
 		Architecture:  model.ArchitectureAmd64,
 		InstanceGroup: c.instanceGroup,
 		IsMaster:      nodeTags.Has(TagMaster),
-		UsesCNI:       nodeTags.Has(tags.TagCNI),
 		Assets:        assets,
 		KeyStore:      tf.keyStore,
 		SecretStore:   tf.secretStore,
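Taken together, the last two hunks drop the tag-derived UsesCNI flag from the nodeup configuration along with the now-unused import of the nodeup/tags package; presumably nodeup now derives CNI usage from the cluster's networking spec (compare the usesCNI helper earlier in this diff) rather than from a TagCNI node tag.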