Extract UserData from CloudFormation output during testing

This gives us some sanity: instead of diffing an opaque base64 blob, the test decodes each UserData value and compares the readable script against a .extracted.yaml fixture (sketched below).
Justin Santa Barbara 2017-08-28 11:12:51 -04:00
parent 327235a22c
commit e793562ee6
5 changed files with 691 additions and 4 deletions
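
A minimal sketch of the idea (illustrative only, not the test code; the script literal and output are placeholders): decode the base64 string found at a *.UserData path and work with the readable script rather than the encoded blob.

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Stand-in for a UserData value pulled out of the CloudFormation JSON.
	blob := base64.StdEncoding.EncodeToString([]byte("#!/bin/bash\necho hello"))

	decoded, err := base64.StdEncoding.DecodeString(blob)
	if err != nil {
		panic(err)
	}

	// Diff this readable text against a fixture instead of the opaque blob.
	fmt.Println(string(decoded))
}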

View File

@@ -21,7 +21,10 @@ import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"io"
"io/ioutil"
"os"
"path"
@@ -31,11 +34,13 @@ import (
"testing"
"time"
"golang.org/x/crypto/ssh"
"k8s.io/kops/cmd/kops/util"
"k8s.io/kops/pkg/diff"
"k8s.io/kops/pkg/jsonutils"
"k8s.io/kops/pkg/testutils"
"github.com/ghodss/yaml"
"golang.org/x/crypto/ssh"
)
// TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
@@ -335,6 +340,39 @@ func runTestCloudformation(t *testing.T, clusterName string, srcDir string, vers
t.Fatalf("unexpected error reading expected cloudformation output: %v", err)
}
// Expand out the UserData base64 blob, as otherwise testing is painful
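// Each *.UserData value is base64-decoded, recorded under its JSON path in
// extracted, and replaced with the placeholder string "extracted" so the main
// CloudFormation comparison stays readable; the decoded scripts are compared
// separately against the .extracted.yaml fixture below.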
extracted := make(map[string]string)
var buf bytes.Buffer
out := jsonutils.NewJSONStreamWriter(&buf)
in := json.NewDecoder(bytes.NewReader(actualCF))
for {
token, err := in.Token()
if err != nil {
if err == io.EOF {
break
} else {
t.Fatalf("unexpected error parsing cloudformation output: %v", err)
}
}
if strings.HasSuffix(out.Path(), ".UserData") {
if s, ok := token.(string); ok {
vBytes, err := base64.StdEncoding.DecodeString(s)
if err != nil {
t.Fatalf("error decoding UserData: %v", err)
} else {
extracted[out.Path()] = string(vBytes)
token = json.Token("extracted")
}
}
}
if err := out.WriteToken(token); err != nil {
t.Fatalf("error writing json: %v", err)
}
}
actualCF = buf.Bytes()
expectedCFTrimmed := strings.TrimSpace(string(expectedCF))
actualCFTrimmed := strings.TrimSpace(string(actualCF))
if actualCFTrimmed != expectedCFTrimmed {
@@ -349,6 +387,23 @@ func runTestCloudformation(t *testing.T, clusterName string, srcDir string, vers
t.Fatalf("cloudformation output differed from expected")
}
actualExtracted, err := yaml.Marshal(extracted)
if err != nil {
t.Fatalf("unexpected error serializing extracted values: %v", err)
}
expectedExtracted, err := ioutil.ReadFile(path.Join(srcDir, expectedCfPath+".extracted.yaml"))
if err != nil {
t.Fatalf("unexpected error reading expected extracted cloudformation output: %v", err)
}
actualExtractedTrimmed := strings.TrimSpace(string(actualExtracted))
expectedExtractedTrimmed := strings.TrimSpace(string(expectedExtracted))
if actualExtractedTrimmed != expectedExtractedTrimmed {
diffString := diff.FormatDiff(actualExtractedTrimmed, expectedExtractedTrimmed)
t.Logf("diff:\n%s\n", diffString)
t.Fatalf("cloudformation output differed from expected")
}
}
}

View File

@@ -62,6 +62,7 @@ k8s.io/kops/pkg/edit
k8s.io/kops/pkg/featureflag
k8s.io/kops/pkg/flagbuilder
k8s.io/kops/pkg/instancegroups
k8s.io/kops/pkg/jsonutils
k8s.io/kops/pkg/kubeconfig
k8s.io/kops/pkg/kubemanifest
k8s.io/kops/pkg/model

View File

@@ -0,0 +1,184 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonutils
import (
"encoding/json"
"fmt"
"io"
"strings"
)
// JSONStreamWriter writes tokens, as parsed by a json.Decoder, back out to an io.Writer with indentation
type JSONStreamWriter struct {
// out is the output destination
out io.Writer
// indent is the current indent level
indent string
// state stores a stack of the json state, comprised of [ { and F characters. F=field
state string
// deferred temporarily buffers output, so we can avoid emitting a trailing comma after the last field in an object
deferred string
// path is the current stack of fields, used to support the Path() function
path []string
}
// NewJSONStreamWriter is the constructor for a JSONStreamWriter
func NewJSONStreamWriter(out io.Writer) *JSONStreamWriter {
return &JSONStreamWriter{
out: out,
}
}
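// Illustrative usage (a sketch, assuming the usual "bytes", "encoding/json",
// "io" and "strings" imports on the caller's side): stream tokens from a
// json.Decoder straight back out, which is how the integration test above
// re-serializes the CloudFormation output.
//
//	var buf bytes.Buffer
//	w := NewJSONStreamWriter(&buf)
//	dec := json.NewDecoder(strings.NewReader(`{"a": ["b", 1]}`))
//	for {
//		token, err := dec.Token()
//		if err == io.EOF {
//			break
//		} else if err != nil {
//			panic(err)
//		}
//		if err := w.WriteToken(token); err != nil {
//			panic(err)
//		}
//	}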
// Path returns the path to the current position in the JSON tree
func (j *JSONStreamWriter) Path() string {
return strings.Join(j.path, ".")
}
// WriteToken writes the next token to the output
func (j *JSONStreamWriter) WriteToken(token json.Token) error {
state := byte(0)
if j.state != "" {
state = j.state[len(j.state)-1]
}
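// state is now the innermost open construct: '{' inside an object, '[' inside
// an array, 'F' when the previous token was an object key and this token is
// its value, or 0 at the top level.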
var v string
switch tt := token.(type) {
// Delim, for the four JSON delimiters [ ] { }
case json.Delim:
v = tt.String()
indent := j.indent
switch tt {
case json.Delim('{'):
j.indent += " "
j.state += "{"
case json.Delim('['):
j.indent += " "
j.state += "["
case json.Delim(']'), json.Delim('}'):
j.indent = j.indent[:len(j.indent)-2]
indent = j.indent
j.state = j.state[:len(j.state)-1]
if j.state != "" && j.state[len(j.state)-1] == 'F' {
j.state = j.state[:len(j.state)-1]
j.path = j.path[:len(j.path)-1]
}
// Don't put a comma on the last field in a block
if j.deferred == ",\n" {
j.deferred = "\n"
}
default:
return fmt.Errorf("unknown delim: %v", tt)
}
switch state {
case 0:
if err := j.writeRaw(indent + v); err != nil {
return err
}
case '{':
if err := j.writeRaw(indent + v); err != nil {
return err
}
case '[':
if err := j.writeRaw(indent + v); err != nil {
return err
}
case 'F':
if err := j.writeRaw(v); err != nil {
return err
}
default:
return fmt.Errorf("unhandled state for json delim serialization: %v %q", state, j.state)
}
switch tt {
case json.Delim('{'):
j.deferred = "\n"
case json.Delim('['):
j.deferred = "\n"
case json.Delim(']'), json.Delim('}'):
j.deferred = ",\n"
default:
return fmt.Errorf("unknown delim: %v", tt)
}
return nil
// bool, for JSON booleans
case bool:
v = fmt.Sprintf("%v", tt)
// string, for JSON string literals
case string:
v = "\"" + tt + "\""
// float64, for JSON numbers
case float64:
v = fmt.Sprintf("%g", tt)
// Number, for JSON numbers
case json.Number:
v = tt.String()
// nil, for JSON null
case nil:
v = "null"
default:
return fmt.Errorf("unhandled token type %T", tt)
}
switch state {
case '{':
j.state += "F"
j.path = append(j.path, fmt.Sprintf("%s", token))
return j.writeRaw(j.indent + v + ": ")
case '[':
if err := j.writeRaw(j.indent + v); err != nil {
return err
}
j.deferred = ",\n"
return nil
case 'F':
j.state = j.state[:len(j.state)-1]
j.path = j.path[:len(j.path)-1]
if err := j.writeRaw(v); err != nil {
return err
}
j.deferred = ",\n"
return nil
}
return fmt.Errorf("unhandled state for json value (%T %q) serialization: %v %q", token, v, state, j.state)
}
func (j *JSONStreamWriter) writeRaw(s string) error {
if j.deferred != "" {
if _, err := j.out.Write([]byte(j.deferred)); err != nil {
return err
}
j.deferred = ""
}
_, err := j.out.Write([]byte(s))
return err
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,447 @@
Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersminimalexamplecom.Properties.UserData: |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL=https://kubeupv2.s3.amazonaws.com/kops/1.5.0/linux/amd64/nodeup
NODEUP_HASH=
function ensure-install-dir() {
INSTALL_DIR="/var/cache/kubernetes-install"
# On ContainerOS, we install to /var/lib/toolbox install (because of noexec)
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kubernetes-install"
fi
mkdir -p ${INSTALL_DIR}
cd ${INSTALL_DIR}
}
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
download-or-bust() {
local -r hash="$1"
shift 1
urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10 "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
local -r nodeup_filename="${nodeup_urls[0]##*/}"
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha1 (not found in env)"
download-or-bust "" "${nodeup_urls[@]/%/.sha1}"
local -r nodeup_hash=$(cat "${nodeup_filename}.sha1")
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
# In case of failure checking integrity of release, retry.
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
docker:
bridge: ""
ipMasq: false
ipTables: false
logLevel: warn
storage: overlay,aufs
version: 1.11.2
kubeAPIServer:
address: 127.0.0.1
admissionControl:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- ResourceQuota
allowPrivileged: true
apiServerCount: 1
authorizationMode: AlwaysAllow
cloudProvider: aws
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: gcr.io/google_containers/kube-apiserver:v1.4.6
insecurePort: 8080
logLevel: 2
securePort: 443
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd2
kubeControllerManager:
allocateNodeCIDRs: true
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: true
image: gcr.io/google_containers/kube-controller-manager:v1.4.6
leaderElection:
leaderElect: true
logLevel: 2
master: 127.0.0.1:8080
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
featureGates: null
hostnameOverride: '@aws'
image: gcr.io/google_containers/kube-proxy:v1.4.6
logLevel: 2
kubeScheduler:
image: gcr.io/google_containers/kube-scheduler:v1.4.6
leaderElection:
leaderElect: true
logLevel: 2
master: http://127.0.0.1:8080
kubelet:
allowPrivileged: true
apiServers: https://api.internal.minimal.example.com
babysitDaemons: true
cgroupRoot: docker
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
reconcileCIDR: true
masterKubelet:
allowPrivileged: true
apiServers: http://127.0.0.1:8080
babysitDaemons: true
cgroupRoot: docker
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 10.123.45.0/28
podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
reconcileCIDR: true
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
- 7d70e090951486cae52d9a82b7aaf5056f84f8ed@https://storage.googleapis.com/kubernetes-release/release/v1.4.6/bin/linux/amd64/kubelet
- 9adcd120fdb7ad6e64c061e56a05fefc12e9618b@https://storage.googleapis.com/kubernetes-release/release/v1.4.6/bin/linux/amd64/kubectl
- 19d49f7b2b99cd2493d5ae0ace896c64e289ccbb@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
- cbba856746a441c7d1a9e95e141c982a1b8864e6@https://kubeupv2.s3.amazonaws.com/kops/1.5.0/linux/amd64/utils.tar.gz
ClusterName: minimal.example.com
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
Tags:
- _automatic_upgrades
- _aws
- _kubernetes_master
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
protokubeImage:
hash: 7c3a0ec0723fd350609b2958bc5b8ab02583851c
name: protokube:1.5.0
source: https://kubeupv2.s3.amazonaws.com/kops/1.5.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
Resources.AWSAutoScalingLaunchConfigurationnodesminimalexamplecom.Properties.UserData: |
#!/bin/bash
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL=https://kubeupv2.s3.amazonaws.com/kops/1.5.0/linux/amd64/nodeup
NODEUP_HASH=
function ensure-install-dir() {
INSTALL_DIR="/var/cache/kubernetes-install"
# On ContainerOS, we install to /var/lib/toolbox install (because of noexec)
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kubernetes-install"
fi
mkdir -p ${INSTALL_DIR}
cd ${INSTALL_DIR}
}
# Retry a download until we get it. Takes a hash and a set of URLs.
#
# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
# $2+ are the URLs to download.
download-or-bust() {
local -r hash="$1"
shift 1
urls=( $* )
while true; do
for url in "${urls[@]}"; do
local file="${url##*/}"
rm -f "${file}"
if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10 "${url}"; then
echo "== Failed to download ${url}. Retrying. =="
elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha1sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
# TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
# optimization.
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
local -r nodeup_filename="${nodeup_urls[0]##*/}"
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha1 (not found in env)"
download-or-bust "" "${nodeup_urls[@]/%/.sha1}"
local -r nodeup_hash=$(cat "${nodeup_filename}.sha1")
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
# In case of failure checking integrity of release, retry.
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig: null
docker:
bridge: ""
ipMasq: false
ipTables: false
logLevel: warn
storage: overlay,aufs
version: 1.11.2
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
featureGates: null
hostnameOverride: '@aws'
image: gcr.io/google_containers/kube-proxy:v1.4.6
logLevel: 2
kubelet:
allowPrivileged: true
apiServers: https://api.internal.minimal.example.com
babysitDaemons: true
cgroupRoot: docker
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hostnameOverride: '@aws'
logLevel: 2
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
reconcileCIDR: true
__EOF_CLUSTER_SPEC
cat > ig_spec.yaml << '__EOF_IG_SPEC'
kubelet: null
nodeLabels: null
taints: null
__EOF_IG_SPEC
cat > kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
- 7d70e090951486cae52d9a82b7aaf5056f84f8ed@https://storage.googleapis.com/kubernetes-release/release/v1.4.6/bin/linux/amd64/kubelet
- 9adcd120fdb7ad6e64c061e56a05fefc12e9618b@https://storage.googleapis.com/kubernetes-release/release/v1.4.6/bin/linux/amd64/kubectl
- 19d49f7b2b99cd2493d5ae0ace896c64e289ccbb@https://storage.googleapis.com/kubernetes-release/network-plugins/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz
- cbba856746a441c7d1a9e95e141c982a1b8864e6@https://kubeupv2.s3.amazonaws.com/kops/1.5.0/linux/amd64/utils.tar.gz
ClusterName: minimal.example.com
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: nodes
Tags:
- _automatic_upgrades
- _aws
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
protokubeImage:
hash: 7c3a0ec0723fd350609b2958bc5b8ab02583851c
name: protokube:1.5.0
source: https://kubeupv2.s3.amazonaws.com/kops/1.5.0/images/protokube.tar.gz
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="