Merge pull request #3120 from KashifSaadat/diff-on-component-config-changes

Automatic merge from submit-queue

Add cluster spec to node user data so component config changes are detected

Related to #3076 

Some cluster changes such as component config modifications are not picked up when performing updates (nodes are not marked as `NEEDUPDATE`). This change introduces the ability to:
1. Include certain cluster specs within the node user data file ~(`enableClusterSpecInUserData: true`)~
2. ~Encode the cluster spec string before placing within the user data file (`enableClusterSpecInUserData: true`)~

~The above flags default to false, so they shouldn't cause any changes to existing clusters.~

Following feedback I've removed the optional API flags, so component config is included by default within the user data. This WILL cause all nodes to have a required update to their bootstrap scripts.
This commit is contained in:
Kubernetes Submit Queue 2017-08-11 03:43:17 -07:00 committed by GitHub
commit b7efd3ba62
12 changed files with 583 additions and 31 deletions

View File

@ -22,11 +22,7 @@ import (
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"golang.org/x/crypto/ssh"
"io/ioutil"
"k8s.io/kops/cmd/kops/util"
"k8s.io/kops/pkg/diff"
"k8s.io/kops/pkg/testutils"
"os"
"path"
"reflect"
@ -34,6 +30,12 @@ import (
"strings"
"testing"
"time"
"golang.org/x/crypto/ssh"
"k8s.io/kops/cmd/kops/util"
"k8s.io/kops/pkg/diff"
"k8s.io/kops/pkg/testutils"
)
// TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
@ -322,8 +324,10 @@ func runTestCloudformation(t *testing.T, clusterName string, srcDir string, vers
t.Fatalf("unexpected error reading expected cloudformation output: %v", err)
}
if !bytes.Equal(actualCF, expectedCF) {
diffString := diff.FormatDiff(string(expectedCF), string(actualCF))
expectedCFTrimmed := strings.TrimSpace(string(expectedCF))
actualCFTrimmed := strings.TrimSpace(string(actualCF))
if actualCFTrimmed != expectedCFTrimmed {
diffString := diff.FormatDiff(expectedCFTrimmed, actualCFTrimmed)
t.Logf("diff:\n%s\n", diffString)
t.Fatalf("cloudformation output differed from expected")

View File

@ -18,7 +18,9 @@ package awsmodel
import (
"fmt"
"github.com/golang/glog"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/model"
"k8s.io/kops/upup/pkg/fi"
@ -117,7 +119,7 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
return err
}
if t.UserData, err = b.BootstrapScript.ResourceNodeUp(ig, b.Cluster.Spec.EgressProxy); err != nil {
if t.UserData, err = b.BootstrapScript.ResourceNodeUp(ig, &b.Cluster.Spec); err != nil {
return err
}

View File

@ -24,6 +24,8 @@ import (
"strings"
"text/template"
"github.com/ghodss/yaml"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/model/resources"
@ -37,7 +39,9 @@ type BootstrapScript struct {
NodeUpConfigBuilder func(ig *kops.InstanceGroup) (*nodeup.Config, error)
}
func (b *BootstrapScript) ResourceNodeUp(ig *kops.InstanceGroup, ps *kops.EgressProxySpec) (*fi.ResourceHolder, error) {
// ResourceNodeUp generates and returns a nodeup (bootstrap) script from a
// template file, substituting in specific env vars & cluster spec configuration
func (b *BootstrapScript) ResourceNodeUp(ig *kops.InstanceGroup, cs *kops.ClusterSpec) (*fi.ResourceHolder, error) {
if ig.Spec.Role == kops.InstanceGroupRoleBastion {
// Bastions are just bare machines (currently), used as SSH jump-hosts
return nil, nil
@ -77,7 +81,7 @@ func (b *BootstrapScript) ResourceNodeUp(ig *kops.InstanceGroup, ps *kops.Egress
},
"ProxyEnv": func() string {
return b.createProxyEnv(ps)
return b.createProxyEnv(cs.EgressProxy)
},
"AWS_REGION": func() string {
if os.Getenv("AWS_REGION") != "" {
@ -86,6 +90,40 @@ func (b *BootstrapScript) ResourceNodeUp(ig *kops.InstanceGroup, ps *kops.Egress
}
return ""
},
"ClusterSpec": func() (string, error) {
spec := make(map[string]interface{})
spec["cloudConfig"] = cs.CloudConfig
spec["docker"] = cs.Docker
spec["kubelet"] = cs.Kubelet
spec["kubeProxy"] = cs.KubeProxy
if ig.IsMaster() {
spec["kubeAPIServer"] = cs.KubeAPIServer
spec["kubeControllerManager"] = cs.KubeControllerManager
spec["kubeScheduler"] = cs.KubeScheduler
spec["masterKubelet"] = cs.MasterKubelet
}
content, err := yaml.Marshal(spec)
if err != nil {
return "", fmt.Errorf("error converting cluster spec to yaml for inclusion within bootstrap script: %v", err)
}
return string(content), nil
},
"IGSpec": func() (string, error) {
spec := make(map[string]interface{})
spec["kubelet"] = ig.Spec.Kubelet
spec["nodeLabels"] = ig.Spec.NodeLabels
spec["taints"] = ig.Spec.Taints
content, err := yaml.Marshal(spec)
if err != nil {
return "", fmt.Errorf("error converting instancegroup spec to yaml for inclusion within bootstrap script: %v", err)
}
return string(content), nil
},
}
templateResource, err := NewTemplateResource("nodeup", resources.AWSNodeUpTemplate, functions, nil)
@ -99,22 +137,22 @@ func (b *BootstrapScript) createProxyEnv(ps *kops.EgressProxySpec) string {
var buffer bytes.Buffer
if ps != nil && ps.HTTPProxy.Host != "" {
var httpProxyUrl string
var httpProxyURL string
// TODO double check that all the code does this
// TODO move this into a validate so we can enforce the string syntax
if !strings.HasPrefix(ps.HTTPProxy.Host, "http://") {
httpProxyUrl = "http://"
httpProxyURL = "http://"
}
if ps.HTTPProxy.Port != 0 {
httpProxyUrl += ps.HTTPProxy.Host + ":" + strconv.Itoa(ps.HTTPProxy.Port)
httpProxyURL += ps.HTTPProxy.Host + ":" + strconv.Itoa(ps.HTTPProxy.Port)
} else {
httpProxyUrl += ps.HTTPProxy.Host
httpProxyURL += ps.HTTPProxy.Host
}
// Set base env variables
buffer.WriteString("export http_proxy=" + httpProxyUrl + "\n")
buffer.WriteString("export http_proxy=" + httpProxyURL + "\n")
buffer.WriteString("export https_proxy=${http_proxy}\n")
buffer.WriteString("export no_proxy=" + ps.ProxyExcludes + "\n")
buffer.WriteString("export NO_PROXY=${no_proxy}\n")

View File

@ -17,10 +17,12 @@ limitations under the License.
package model
import (
"io/ioutil"
"strings"
"testing"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/nodeup"
)
func Test_ProxyFunc(t *testing.T) {
@ -49,5 +51,133 @@ func Test_ProxyFunc(t *testing.T) {
if !strings.Contains(script, "export no_proxy="+ps.ProxyExcludes) {
t.Fatalf("script not setting no_proxy properly")
}
}
// TestBootstrapUserData verifies that ResourceNodeUp renders the expected
// bootstrap user data for both master and node instance group roles, by
// comparing the generated script against golden files on disk.
func TestBootstrapUserData(t *testing.T) {
	cases := []struct {
		Role             kops.InstanceGroupRole
		ExpectedFilePath string
	}{
		{Role: "Master", ExpectedFilePath: "tests/data/bootstrapscript_0.txt"},
		{Role: "Node", ExpectedFilePath: "tests/data/bootstrapscript_1.txt"},
	}

	for i, c := range cases {
		spec := makeTestCluster().Spec
		ig := makeTestInstanceGroup(c.Role)

		// The nodeup config itself is not under test here; an empty config
		// keeps the rendered kube_env section minimal ("{}").
		configBuilder := func(group *kops.InstanceGroup) (*nodeup.Config, error) {
			return &nodeup.Config{}, nil
		}

		script := &BootstrapScript{
			NodeUpSource:        "NUSource",
			NodeUpSourceHash:    "NUSHash",
			NodeUpConfigBuilder: configBuilder,
		}

		resource, err := script.ResourceNodeUp(ig, &spec)
		if err != nil {
			t.Errorf("case %d failed to create nodeup resource. error: %s", i, err)
			continue
		}

		actual, err := resource.AsString()
		if err != nil {
			t.Errorf("case %d failed to render nodeup resource. error: %s", i, err)
			continue
		}

		expected, err := ioutil.ReadFile(c.ExpectedFilePath)
		if err != nil {
			t.Fatalf("unexpected error reading ExpectedFilePath %q: %v", c.ExpectedFilePath, err)
		}

		if actual != string(expected) {
			t.Errorf("case %d, expected: %s. got: %s", i, string(expected), actual)
		}
	}
}
// makeTestCluster returns a Cluster populated with representative component
// configuration (cloud config, docker, kubelet, kube-proxy, the master-only
// components, and an egress proxy) for exercising bootstrap script rendering
// in tests.
func makeTestCluster() *kops.Cluster {
	spec := kops.ClusterSpec{
		CloudProvider:     "aws",
		KubernetesVersion: "1.7.0",
		NonMasqueradeCIDR: "10.100.0.0/16",
		NetworkCIDR:       "10.79.0.0/24",
		Subnets: []kops.ClusterSubnetSpec{
			{Name: "test", Zone: "eu-west-1a"},
		},
		// A single etcd cluster backed by one member on ig-1.
		EtcdClusters: []*kops.EtcdClusterSpec{
			{
				Name: "main",
				Members: []*kops.EtcdMemberSpec{
					{Name: "test", InstanceGroup: s("ig-1")},
				},
			},
		},
		CloudConfig: &kops.CloudConfiguration{
			NodeTags: s("something"),
		},
		Docker: &kops.DockerConfig{
			LogLevel: s("INFO"),
		},
		// Master-only component configs; these should only appear in the
		// rendered cluster spec for master instance groups.
		KubeAPIServer: &kops.KubeAPIServerConfig{
			Image: "CoreOS",
		},
		KubeControllerManager: &kops.KubeControllerManagerConfig{
			CloudProvider: "aws",
		},
		KubeScheduler: &kops.KubeSchedulerConfig{
			Image: "SomeImage",
		},
		MasterKubelet: &kops.KubeletConfigSpec{
			KubeconfigPath: "/etc/kubernetes/config.cfg",
		},
		KubeProxy: &kops.KubeProxyConfig{
			CPURequest: "30m",
			FeatureGates: map[string]string{
				"AdvancedAuditing": "true",
			},
		},
		Kubelet: &kops.KubeletConfigSpec{
			KubeconfigPath: "/etc/kubernetes/config.txt",
		},
		// Egress proxy drives the proxy env var block in the script.
		EgressProxy: &kops.EgressProxySpec{
			HTTPProxy: kops.HTTPProxy{
				Host: "example.com",
				Port: 80,
			},
		},
	}
	return &kops.Cluster{Spec: spec}
}
// makeTestInstanceGroup returns an InstanceGroup for the given role with a
// kubelet override, node labels, and taints set, for use in bootstrap script
// rendering tests.
func makeTestInstanceGroup(role kops.InstanceGroupRole) *kops.InstanceGroup {
	ig := &kops.InstanceGroup{}
	ig.Spec = kops.InstanceGroupSpec{
		Role: role,
		Kubelet: &kops.KubeletConfigSpec{
			KubeconfigPath: "/etc/kubernetes/igconfig.txt",
		},
		NodeLabels: map[string]string{
			"label2":    "value2",
			"labelname": "labelvalue",
		},
		Taints: []string{
			"key1=value1:NoSchedule",
			"key2=value2:NoExecute",
		},
	}
	return ig
}

View File

@ -18,6 +18,7 @@ package gcemodel
import (
"fmt"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/model"
@ -44,7 +45,7 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
for _, ig := range b.InstanceGroups {
name := b.SafeObjectName(ig.ObjectMeta.Name)
startupScript, err := b.BootstrapScript.ResourceNodeUp(ig, b.Cluster.Spec.EgressProxy)
startupScript, err := b.BootstrapScript.ResourceNodeUp(ig, &b.Cluster.Spec)
if err != nil {
return err
}

View File

@ -141,6 +141,14 @@ function download-release() {
echo "== nodeup node config starting =="
ensure-install-dir
cat > cluster_spec.yaml << __EOF_CLUSTER_SPEC
{{ ClusterSpec }}
__EOF_CLUSTER_SPEC
cat > ig_spec.yaml << __EOF_IG_SPEC
{{ IGSpec }}
__EOF_IG_SPEC
cat > kube_env.yaml << __EOF_KUBE_ENV
{{ KubeEnv }}
__EOF_KUBE_ENV

View File

@ -0,0 +1,187 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL=NUSource
NODEUP_HASH=NUSHash
export http_proxy=http://example.com:80
export https_proxy=${http_proxy}
export no_proxy=
export NO_PROXY=${no_proxy}
echo "export http_proxy=${http_proxy}" >> /etc/default/docker
echo "export https_proxy=${http_proxy}" >> /etc/default/docker
echo "export no_proxy=${no_proxy}" >> /etc/default/docker
echo "export NO_PROXY=${no_proxy}" >> /etc/default/docker
echo "export http_proxy=${http_proxy}" >> /etc/environment
echo "export https_proxy=${http_proxy}" >> /etc/environment
echo "export no_proxy=${no_proxy}" >> /etc/environment
echo "export NO_PROXY=${no_proxy}" >> /etc/environment
echo DefaultEnvironment=\"http_proxy=${http_proxy}\" \"https_proxy=${http_proxy}\"echo DefaultEnvironment=\"http_proxy=${http_proxy}\" \"https_proxy=${http_proxy}\" \"NO_PROXY=${no_proxy}\" \"no_proxy=${no_proxy}\" >> /etc/systemd/system.conf
source /etc/environment
systemctl daemon-reload
systemctl daemon-reexec
if [ -f /etc/lsb-release ] || [ -f /etc/debian_version ]; then
echo "Acquire::http::Proxy \"${http_proxy}\";" > /etc/apt/apt.conf.d/30proxy
elif [ -f /etc/redhat-release ]; then
echo "http_proxy=${http_proxy}" >> /etc/yum.conf
fi
function ensure-install-dir() {
INSTALL_DIR="/var/cache/kubernetes-install"
# On ContainerOS, we install to /var/lib/toolbox install (because of noexec)
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kubernetes-install"
fi
mkdir -p ${INSTALL_DIR}
cd ${INSTALL_DIR}
}
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
download-or-bust() {
local -r hash="$1"
shift 1
urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10 "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
local -r nodeup_filename="${nodeup_urls[0]##*/}"
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha1 (not found in env)"
download-or-bust "" "${nodeup_urls[@]/%/.sha1}"
local -r nodeup_hash=$(cat "${nodeup_filename}.sha1")
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
# In case of failure checking integrity of release, retry.
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > cluster_spec.yaml << __EOF_CLUSTER_SPEC
cloudConfig:
nodeTags: something
docker:
logLevel: INFO
kubeAPIServer:
image: CoreOS
kubeControllerManager:
cloudProvider: aws
kubeProxy:
cpuRequest: 30m
featureGates:
AdvancedAuditing: "true"
kubeScheduler:
image: SomeImage
kubelet:
kubeconfigPath: /etc/kubernetes/config.txt
masterKubelet:
kubeconfigPath: /etc/kubernetes/config.cfg
__EOF_CLUSTER_SPEC
cat > ig_spec.yaml << __EOF_IG_SPEC
kubelet:
kubeconfigPath: /etc/kubernetes/igconfig.txt
nodeLabels:
label2: value2
labelname: labelvalue
taints:
- key1=value1:NoSchedule
- key2=value2:NoExecute
__EOF_IG_SPEC
cat > kube_env.yaml << __EOF_KUBE_ENV
{}
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -0,0 +1,179 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL=NUSource
NODEUP_HASH=NUSHash
export http_proxy=http://example.com:80
export https_proxy=${http_proxy}
export no_proxy=
export NO_PROXY=${no_proxy}
echo "export http_proxy=${http_proxy}" >> /etc/default/docker
echo "export https_proxy=${http_proxy}" >> /etc/default/docker
echo "export no_proxy=${no_proxy}" >> /etc/default/docker
echo "export NO_PROXY=${no_proxy}" >> /etc/default/docker
echo "export http_proxy=${http_proxy}" >> /etc/environment
echo "export https_proxy=${http_proxy}" >> /etc/environment
echo "export no_proxy=${no_proxy}" >> /etc/environment
echo "export NO_PROXY=${no_proxy}" >> /etc/environment
echo DefaultEnvironment=\"http_proxy=${http_proxy}\" \"https_proxy=${http_proxy}\"echo DefaultEnvironment=\"http_proxy=${http_proxy}\" \"https_proxy=${http_proxy}\" \"NO_PROXY=${no_proxy}\" \"no_proxy=${no_proxy}\" >> /etc/systemd/system.conf
source /etc/environment
systemctl daemon-reload
systemctl daemon-reexec
if [ -f /etc/lsb-release ] || [ -f /etc/debian_version ]; then
echo "Acquire::http::Proxy \"${http_proxy}\";" > /etc/apt/apt.conf.d/30proxy
elif [ -f /etc/redhat-release ]; then
echo "http_proxy=${http_proxy}" >> /etc/yum.conf
fi
function ensure-install-dir() {
INSTALL_DIR="/var/cache/kubernetes-install"
# On ContainerOS, we install to /var/lib/toolbox install (because of noexec)
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kubernetes-install"
fi
mkdir -p ${INSTALL_DIR}
cd ${INSTALL_DIR}
}
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
download-or-bust() {
local -r hash="$1"
shift 1
urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10 "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
local -r nodeup_filename="${nodeup_urls[0]##*/}"
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha1 (not found in env)"
download-or-bust "" "${nodeup_urls[@]/%/.sha1}"
local -r nodeup_hash=$(cat "${nodeup_filename}.sha1")
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
# In case of failure checking integrity of release, retry.
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > cluster_spec.yaml << __EOF_CLUSTER_SPEC
cloudConfig:
nodeTags: something
docker:
logLevel: INFO
kubeProxy:
cpuRequest: 30m
featureGates:
AdvancedAuditing: "true"
kubelet:
kubeconfigPath: /etc/kubernetes/config.txt
__EOF_CLUSTER_SPEC
cat > ig_spec.yaml << __EOF_IG_SPEC
kubelet:
kubeconfigPath: /etc/kubernetes/igconfig.txt
nodeLabels:
label2: value2
labelname: labelvalue
taints:
- key1=value1:NoSchedule
- key2=value2:NoExecute
__EOF_IG_SPEC
cat > kube_env.yaml << __EOF_KUBE_ENV
{}
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -19,11 +19,12 @@ package vspheremodel
// autoscalinggroup is a model for vSphere cloud. It's responsible for building tasks, necessary for kubernetes cluster deployment.
import (
"strconv"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/model"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks"
"strconv"
)
// AutoscalingGroupModelBuilder configures AutoscalingGroup objects
@ -61,7 +62,7 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
VM: createVmTask,
IG: ig,
BootstrapScript: b.BootstrapScript,
EtcdClusters: b.Cluster.Spec.EtcdClusters,
Spec: &b.Cluster.Spec,
}
c.AddTask(attachISOTask)

File diff suppressed because one or more lines are too long

View File

@ -24,6 +24,7 @@ import (
"text/template"
"github.com/golang/glog"
api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/apis/kops/util"

View File

@ -21,13 +21,7 @@ package vspheretasks
import (
"bytes"
"fmt"
"github.com/golang/glog"
"github.com/pborman/uuid"
"io/ioutil"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/model"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/vsphere"
"net"
"net/url"
"os"
@ -35,6 +29,14 @@ import (
"path/filepath"
"runtime"
"strings"
"github.com/golang/glog"
"github.com/pborman/uuid"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/model"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/vsphere"
)
// AttachISO represents the cloud-init ISO file attached to a VM on vSphere cloud.
@ -44,7 +46,7 @@ type AttachISO struct {
VM *VirtualMachine
IG *kops.InstanceGroup
BootstrapScript *model.BootstrapScript
EtcdClusters []*kops.EtcdClusterSpec
Spec *kops.ClusterSpec
}
var _ fi.HasName = &AttachISO{}
@ -91,8 +93,7 @@ func (_ *AttachISO) CheckChanges(a, e, changes *AttachISO) error {
// RenderVSphere executes the actual task logic, for vSphere cloud.
func (_ *AttachISO) RenderVSphere(t *vsphere.VSphereAPITarget, a, e, changes *AttachISO) error {
// TODO #3071 .. need to replace the nil for http proxy support
startupScript, err := changes.BootstrapScript.ResourceNodeUp(changes.IG, nil)
startupScript, err := changes.BootstrapScript.ResourceNodeUp(changes.IG, changes.Spec)
if err != nil {
return fmt.Errorf("error on resource nodeup: %v", err)
}
@ -196,7 +197,7 @@ func getVolMetadata(changes *AttachISO) (string, error) {
var volsMetadata []vsphere.VolumeMetadata
// Creating vsphere.VolumeMetadata using clusters EtcdClusterSpec
for i, etcd := range changes.EtcdClusters {
for i, etcd := range changes.Spec.EtcdClusters {
volMetadata := vsphere.VolumeMetadata{}
volMetadata.EtcdClusterName = etcd.Name
volMetadata.VolumeId = vsphere.GetVolumeId(i + 1)