Switch all the final switches for release 1.4

Also apply the 1.4 schema changes.
Justin Santa Barbara 2016-10-01 13:50:19 -04:00
parent 9a507c0397
commit 655a61588e
22 changed files with 113 additions and 53 deletions

View File

@@ -14,7 +14,7 @@ GOVERSION=1.6
 # See http://stackoverflow.com/questions/18136918/how-to-get-current-relative-directory-of-your-makefile
 MAKEDIR:=$(strip $(shell dirname "$(realpath $(lastword $(MAKEFILE_LIST)))"))
-TAG=1.3
+TAG=1.4
 ifndef VERSION
 VERSION := git-$(shell git describe --always)

View File

@@ -2,9 +2,15 @@ package main
 import (
 	"fmt"
+	"k8s.io/kops/upup/pkg/fi/cloudup"
 	"os"
 )
+
+var (
+	// value overwritten during build. This can be used to resolve issues.
+	BuildVersion = cloudup.NodeUpVersion
+)
 func main() {
 	Execute()
 }

View File

@@ -6,11 +6,6 @@ import (
 	"github.com/spf13/cobra"
 )
-
-var (
-	// value overwritten during build. This can be used to resolve issues.
-	BuildVersion = "0.1"
-)
 type VersionCmd struct {
 	cobraCommand *cobra.Command
 }

View File

@@ -110,9 +110,9 @@ func TestBuildTags_UpdatePolicy_Nil(t *testing.T) {
 		},
 	}
-	tags, err := buildClusterTags(c)
+	tags, err := buildCloudupTags(c)
 	if err != nil {
-		t.Fatalf("buildTags error: %v", err)
+		t.Fatalf("buildCloudupTags error: %v", err)
 	}
 	nodeUpTags, err := buildNodeupTags(api.InstanceGroupRoleNode, c, tags)
@@ -132,9 +132,9 @@ func TestBuildTags_UpdatePolicy_External(t *testing.T) {
 		},
 	}
-	tags, err := buildClusterTags(c)
+	tags, err := buildCloudupTags(c)
 	if err != nil {
-		t.Fatalf("buildTags error: %v", err)
+		t.Fatalf("buildCloudupTags error: %v", err)
 	}
 	nodeUpTags, err := buildNodeupTags(api.InstanceGroupRoleNode, c, tags)

View File

@@ -184,9 +184,9 @@ and then push nodeup using:
 ```
 export S3_BUCKET_NAME=<yourbucketname>
-make upload S3_BUCKET=s3://${S3_BUCKET_NAME} VERSION=1.3
-export NODEUP_URL=https://${S3_BUCKET_NAME}.s3.amazonaws.com/kops/1.3/linux/amd64/nodeup
+make upload S3_BUCKET=s3://${S3_BUCKET_NAME} VERSION=1.4.0
+export NODEUP_URL=https://${S3_BUCKET_NAME}.s3.amazonaws.com/kops/1.4.0/linux/amd64/nodeup
 kops create cluster <clustername> --zones us-east-1b
 ...

View File

@@ -59,12 +59,16 @@ tar zxf kubernetes-server-linux-amd64.tar.gz
 rm kubernetes/server/bin/federation*
 rm kubernetes/server/bin/hyperkube
+rm kubernetes/server/bin/kubeadm
 rm kubernetes/server/bin/kube-apiserver
 rm kubernetes/server/bin/kube-controller-manager
+rm kubernetes/server/bin/kube-discovery
 rm kubernetes/server/bin/kube-dns
 rm kubernetes/server/bin/kubemark
 rm kubernetes/server/bin/kube-proxy
 rm kubernetes/server/bin/kube-scheduler
+rm kubernetes/kubernetes-src.tar.gz
 find kubernetes/server/bin -type f -name "*.tar" | xargs -I {} /bin/bash -c "sha1sum {} | cut -f1 -d ' ' > {}.sha1"
 find kubernetes/server/bin -type f -name "kube???" | xargs -I {} /bin/bash -c "sha1sum {} | cut -f1 -d ' ' > {}.sha1"

View File

@@ -31,7 +31,7 @@ cat <<'EOF' | ssh admin@${ip} 'sudo bash -s'
 #/bin/bash
 set -e
 set -x
-NODEUP_URL=https://kubeupv2.s3.amazonaws.com/kops/1.3/linux/amd64/nodeup
+NODEUP_URL=https://kubeupv2.s3.amazonaws.com/kops/1.4.0/linux/amd64/nodeup
 INSTALL_DIR="/var/cache/kubernetes-install"
 mkdir -p ${INSTALL_DIR}

View File

@@ -85,7 +85,7 @@ chmod +x /tmp/e2e.sh
 curl -fsS --retry 3 "https://storage.googleapis.com/kubernetes-release/release/v1.3.5/bin/linux/amd64/kubectl" > /usr/local/bin/kubectl
 chmod +x /usr/local/bin/kubectl
-curl -fsS --retry 3 "https://kubeupv2.s3.amazonaws.com/kops/1.3/linux/amd64/kops" > /tmp/kops
+curl -fsS --retry 3 "https://kubeupv2.s3.amazonaws.com/kops/1.4.0/linux/amd64/kops" > /tmp/kops
 cp /tmp/kops /usr/local/bin/kops
 chmod +x /usr/local/bin/kops

View File

@@ -0,0 +1,15 @@
+managedFile/{{ ClusterName }}-addons-bootstrap:
+  location: addons/bootstrap-channel.yaml
+  contents: resources/addons/bootstrap-channel.yaml
+
+managedFile/{{ ClusterName }}-addons-bootstrap-kube-dns:
+  location: addons/kube-dns/v1.4.0.yaml
+  contents: resources/addons/kube-dns/v1.4.0.yaml
+
+managedFile/{{ ClusterName }}-addons-bootstrap-core:
+  location: addons/core/v1.4.0.yaml
+  contents: resources/addons/core/v1.4.0.yaml
+
+managedFile/{{ ClusterName }}-addons-bootstrap-dns-controller:
+  location: addons/dns-controller/v1.4.0.yaml
+  contents: resources/addons/dns-controller/v1.4.0.yaml

View File

@@ -147,4 +147,4 @@ spec:
     protocol: UDP
   - name: dns-tcp
     port: 53
-    protocol: TCP
+    protocol: TCP

View File

@@ -0,0 +1,8 @@
+KubeAPIServer:
+  # If we include ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
+  AdmissionControl:
+  - NamespaceLifecycle
+  - LimitRanger
+  - ServiceAccount
+  - PersistentVolumeLabel
+  - ResourceQuota
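These new config snippets feed the `AdmissionControl` list (the corresponding schema change appears further down); kube-apiserver itself still expects a single ordered, comma-separated `--admission-control` value, so the list is ultimately joined in order when the flag is rendered. A minimal sketch of that join, not the actual kops flag builder:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Ordered list from the snippet above; ResourceQuota stays last so quota
	// usage is not incremented for requests a later plugin would reject.
	admissionControl := []string{
		"NamespaceLifecycle",
		"LimitRanger",
		"ServiceAccount",
		"PersistentVolumeLabel",
		"ResourceQuota",
	}

	// kube-apiserver takes the plugins as one ordered, comma-separated value.
	fmt.Println("--admission-control=" + strings.Join(admissionControl, ","))
}
```

The two snippets that follow add DefaultStorageClass, again keeping ResourceQuota last.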

View File

@@ -0,0 +1,9 @@
+KubeAPIServer:
+  # If we include ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
+  AdmissionControl:
+  - NamespaceLifecycle
+  - LimitRanger
+  - ServiceAccount
+  - PersistentVolumeLabel
+  - DefaultStorageClass
+  - ResourceQuota

View File

@@ -0,0 +1,9 @@
+KubeAPIServer:
+  # If we include ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
+  AdmissionControl:
+  - NamespaceLifecycle
+  - LimitRanger
+  - ServiceAccount
+  - PersistentVolumeLabel
+  - DefaultStorageClass
+  - ResourceQuota

View File

@@ -3,9 +3,10 @@ KubeAPIServer:
   PathSrvKubernetes: /srv/kubernetes
   PathSrvSshproxy: /srv/sshproxy
   Address: 127.0.0.1
-  EtcdServers: http://127.0.0.1:4001
-  EtcdServersOverrides: /events#http://127.0.0.1:4002
-  AdmissionControl: NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,PersistentVolumeLabel
+  EtcdServers:
+  - http://127.0.0.1:4001
+  EtcdServersOverrides:
+  - /events#http://127.0.0.1:4002
   ServiceClusterIPRange: {{ .ServiceClusterIPRange }}
   ClientCAFile: /srv/kubernetes/ca.crt
   BasicAuthFile: /srv/kubernetes/basic_auth.csv

View File

@@ -1,18 +0,0 @@
-# kope-aws podspec
-apiVersion: v1
-kind: Pod
-metadata:
-  name: kope-aws
-  namespace: kube-system
-spec:
-  hostNetwork: true
-  containers:
-  - name: kope-aws
-    image: kope/aws-controller:1.3
-    command:
-    - /usr/bin/aws-controller
-    - -healthz-port=10245
-    - -zone-name={{ .DNSZone }}
-    - -v=4
-    securityContext:
-      privileged: true

View File

@@ -1,6 +1,9 @@
 apiVersion: v1
 kind: Pod
 metadata:
+  annotations:
+    dns.alpha.kubernetes.io/external: {{ .MasterPublicName }}
+    dns.alpha.kubernetes.io/internal: {{ .MasterInternalName }}
   name: kube-apiserver
   namespace: kube-system
 spec:

View File

@@ -64,7 +64,7 @@ type KubeletConfigSpec struct {
 	// of the actual hostname.
 	// Note: We recognize some additional values:
 	// @aws uses the hostname from the AWS metadata service
-	HostnameOverride string `json:"hostnameOverride" flag:"hostname-override"`
+	HostnameOverride string `json:"hostnameOverride,omitempty" flag:"hostname-override"`
 	//// podInfraContainerImage is the image whose network/ipc namespaces
 	//// containers in each pod will use.
 	//PodInfraContainerImage string `json:"podInfraContainerImage"`
@@ -362,9 +362,9 @@ type KubeAPIServerConfig struct {
 	CloudProvider string `json:"cloudProvider,omitempty" flag:"cloud-provider"`
 	SecurePort int `json:"securePort,omitempty" flag:"secure-port"`
 	Address string `json:"address,omitempty" flag:"address"`
-	EtcdServers string `json:"etcdServers,omitempty" flag:"etcd-servers"`
-	EtcdServersOverrides string `json:"etcdServersOverrides,omitempty" flag:"etcd-servers-overrides"`
-	AdmissionControl string `json:"admissionControl,omitempty" flag:"admission-control"`
+	EtcdServers []string `json:"etcdServers,omitempty" flag:"etcd-servers"`
+	EtcdServersOverrides []string `json:"etcdServersOverrides,omitempty" flag:"etcd-servers-overrides"`
+	AdmissionControl []string `json:"admissionControl,omitempty" flag:"admission-control"`
 	ServiceClusterIPRange string `json:"serviceClusterIPRange,omitempty" flag:"service-cluster-ip-range"`
 	ClientCAFile string `json:"clientCAFile,omitempty" flag:"client-ca-file"`
 	BasicAuthFile string `json:"basicAuthFile,omitempty" flag:"basic-auth-file"`
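Two effects of this schema change are easy to see with plain encoding/json (kops's real serialization path may differ, so treat this as an illustration): an unset HostnameOverride is now omitted entirely, and the etcd/admission fields marshal as real arrays rather than the comma-packed strings used by the old config template.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for the changed fields above.
type kubeAPIServerConfig struct {
	EtcdServers          []string `json:"etcdServers,omitempty"`
	EtcdServersOverrides []string `json:"etcdServersOverrides,omitempty"`
	AdmissionControl     []string `json:"admissionControl,omitempty"`
}

type kubeletConfigSpec struct {
	HostnameOverride string `json:"hostnameOverride,omitempty"`
}

func main() {
	// With omitempty, an unset override no longer emits "hostnameOverride": "".
	k, _ := json.Marshal(kubeletConfigSpec{})
	fmt.Println(string(k)) // {}

	// List-valued fields serialize as arrays, matching the new YAML layout.
	a, _ := json.Marshal(kubeAPIServerConfig{
		EtcdServers:          []string{"http://127.0.0.1:4001"},
		EtcdServersOverrides: []string{"/events#http://127.0.0.1:4002"},
	})
	fmt.Println(string(a))
}
```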

View File

@@ -2,6 +2,9 @@ package api
 import "k8s.io/kops/upup/pkg/fi/utils"
+
+const RoleLabelName = "kubernetes.io/role"
+const RoleMasterLabelValue = "master"
 // NodeLabels are defined in the InstanceGroup, but set flags on the kubelet config.
 // We have a conflict here: on the one hand we want an easy to use abstract specification
 // for the cluster, on the other hand we don't want two fields that do the same thing.
@@ -22,6 +25,13 @@ func BuildKubeletConfigSpec(cluster *Cluster, instanceGroup *InstanceGroup) (*Ku
 		utils.JsonMergeStruct(c, cluster.Spec.Kubelet)
 	}
+
+	if instanceGroup.Spec.Role == InstanceGroupRoleMaster {
+		if c.NodeLabels == nil {
+			c.NodeLabels = make(map[string]string)
+		}
+		c.NodeLabels[RoleLabelName] = RoleMasterLabelValue
+	}
 	for k, v := range instanceGroup.Spec.NodeLabels {
 		if c.NodeLabels == nil {
 			c.NodeLabels = make(map[string]string)
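The net effect is that master kubelets get kubernetes.io/role=master merged into NodeLabels before any per-InstanceGroup labels, which can still override it. The sketch below shows how such a label map could end up on a kubelet-style --node-labels value; the helper and the example.com/tier label are hypothetical, and the real flag rendering lives elsewhere in kops:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// Hypothetical helper mirroring the merge order in BuildKubeletConfigSpec:
// the role label is applied first, then per-InstanceGroup labels.
func mergeNodeLabels(isMaster bool, igLabels map[string]string) map[string]string {
	labels := map[string]string{}
	if isMaster {
		labels["kubernetes.io/role"] = "master"
	}
	for k, v := range igLabels {
		labels[k] = v
	}
	return labels
}

func main() {
	labels := mergeNodeLabels(true, map[string]string{"example.com/tier": "control-plane"})

	// Illustrative rendering as a --node-labels value (sorted for stable output).
	var pairs []string
	for k, v := range labels {
		pairs = append(pairs, k+"="+v)
	}
	sort.Strings(pairs)
	fmt.Println("--node-labels=" + strings.Join(pairs, ","))
}
```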

View File

@@ -327,6 +327,9 @@ func (r *ClusterRegistry) DeleteAllClusterState(clusterName string) error {
 		if relativePath == "config" || relativePath == "cluster.spec" {
 			continue
 		}
+		if strings.HasPrefix(relativePath, "addons/") {
+			continue
+		}
 		if strings.HasPrefix(relativePath, "pki/") {
 			continue
 		}

View File

@@ -19,6 +19,10 @@ import (
 	"strings"
 )
+
+const (
+	NodeUpVersion = "1.4.0"
+)
 const MaxAttemptsWithNoProgress = 3
 type ApplyClusterCmd struct {
@@ -101,6 +105,10 @@ func (c *ApplyClusterCmd) Run() error {
 		return fmt.Errorf("error getting config base: %v", err)
 	}
+
+	channels := []string{
+		configBase.Join("addons", "bootstrap-channel.yaml").Path(),
+	}
 	// Normalize k8s version
 	versionWithoutV := strings.TrimSpace(cluster.Spec.KubernetesVersion)
 	if strings.HasPrefix(versionWithoutV, "v") {
@@ -159,7 +167,7 @@ func (c *ApplyClusterCmd) Run() error {
 	if c.NodeUpSource == "" {
 		location := os.Getenv("NODEUP_URL")
 		if location == "" {
-			location = "https://kubeupv2.s3.amazonaws.com/kops/1.3/linux/amd64/nodeup"
+			location = "https://kubeupv2.s3.amazonaws.com/kops/" + NodeUpVersion + "/linux/amd64/nodeup"
 			glog.V(2).Infof("Using default nodeup location: %q", location)
 		} else {
 			glog.Warningf("Using nodeup location from NODEUP_URL env var: %q", location)
@@ -388,6 +396,8 @@ func (c *ApplyClusterCmd) Run() error {
 		}
 	}
+
+	config.Channels = channels
 	yaml, err := api.ToYaml(config)
 	if err != nil {
 		return "", err

View File

@@ -101,6 +101,10 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap) {
 	dest["CloudTags"] = tf.CloudTags
 	dest["APIServerCount"] = tf.APIServerCount
+
+	dest["KubeDNS"] = func() *api.KubeDNSConfig {
+		return tf.cluster.Spec.KubeDNS
+	}
 }
 func (tf *TemplateFunctions) EtcdClusterMemberTags(etcd *api.EtcdClusterSpec, m *api.EtcdMemberSpec) map[string]string {
@@ -125,7 +129,7 @@ func (tf *TemplateFunctions) EtcdClusterMemberTags(etcd *api.EtcdClusterSpec, m
 // SharedVPC is a simple helper function which makes the templates for a shared VPC clearer
 func (tf *TemplateFunctions) SharedVPC() bool {
-	return tf.cluster.Spec.NetworkID != ""
+	return tf.cluster.SharedVPC()
 }
 // SharedZone is a simple helper function which makes the templates for a shared Zone clearer
@@ -220,11 +224,6 @@ func (tf *TemplateFunctions) CloudTags(ig *api.InstanceGroup) (map[string]string
 	if ig.Spec.Role == api.InstanceGroupRoleMaster {
 		labels["k8s.io/role/master"] = "1"
-
-		labels["k8s.io/dns/internal"] = "api.internal." + tf.cluster.Name
-		if !tf.HasTag("_master_lb") {
-			labels["k8s.io/dns/public"] = "api." + tf.cluster.Name
-		}
 	}
 	if ig.Spec.Role == api.InstanceGroupRoleNode {
@@ -244,7 +243,7 @@ func (tf *TemplateFunctions) GetInstanceGroup(name string) (*api.InstanceGroup,
 	return nil, fmt.Errorf("InstanceGroup %q not found", name)
 }
-// APIServerCount returns the value for the kubeapiserver --apiserver-count flag
+// APIServerCount returns the value for the apiserver --apiserver-count flag
 func (tf *TemplateFunctions) APIServerCount() int {
 	count := 0
 	for _, ig := range tf.instanceGroups {

View File

@@ -165,10 +165,6 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
 		tags[tag] = struct{}{}
 	}
-
-	// For the transition to 1.4, we will force the _kube-addons tag
-	// In 1.4, we will remove this tag and replace it with the protokube channels management
-	tags["_kube-addons"] = struct{}{}
 	glog.Infof("Config tags: %v", c.config.Tags)
 	glog.Infof("OS tags: %v", osTags)
@@ -255,10 +251,13 @@ func evaluateSpec(c *api.Cluster) error {
 }
 func evaluateHostnameOverride(hostnameOverride string) (string, error) {
+	if hostnameOverride == "" {
+		return "", nil
+	}
 	k := strings.TrimSpace(hostnameOverride)
 	k = strings.ToLower(k)
-	if hostnameOverride != "@aws" {
+	if k != "@aws" {
 		return hostnameOverride, nil
 	}
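evaluateHostnameOverride now short-circuits on an empty override and compares the trimmed, lowercased value against @aws. A self-contained sketch of that flow; the @aws branch below (an EC2 metadata lookup of local-hostname) is an assumption about the rest of the function, which the hunk does not show:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

// Sketch of the guard added in this commit plus an assumed @aws branch;
// the real nodeup implementation may differ in how it queries metadata.
func evaluateHostnameOverride(hostnameOverride string) (string, error) {
	if hostnameOverride == "" {
		return "", nil // nothing configured: leave the default hostname alone
	}
	k := strings.ToLower(strings.TrimSpace(hostnameOverride))
	if k != "@aws" {
		return hostnameOverride, nil // literal override passes through unchanged
	}
	// Assumed: resolve the node name from EC2 instance metadata.
	resp, err := http.Get("http://169.254.169.254/latest/meta-data/local-hostname")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(b)), nil
}

func main() {
	h, _ := evaluateHostnameOverride("node-1.example.com")
	fmt.Println(h) // literal value is returned as-is
}
```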

View File

@@ -13,6 +13,13 @@ func StringValue(s *string) string {
 	return *s
 }
+
+func IsNilOrEmpty(s *string) bool {
+	if s == nil {
+		return true
+	}
+	return *s == ""
+}
 func String(s string) *string {
 	return &s
 }
} }