Merge pull request #12 from jinzhejz/jinzhejz_integrationtest
add integration test cases
This commit is contained in:
commit 0d5c3582b9

Makefile
@@ -3,8 +3,15 @@ BIN_DIR=_output/bin

kube-arbitrator: init
	go build -o ${BIN_DIR}/kube-arbitrator cmd/main.go

test:
	echo "unit test script is not ready!"

init:
	mkdir -p ${BIN_DIR}

test-integration:
	hack/make-rules/test-integration.sh $(WHAT)

run-test:
	hack/make-rules/test.sh $(WHAT) $(TESTS)

clean:
	rm -rf _output/
	rm -f kube-arbitrator
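For illustration, a sketch of how the new targets might be driven from a shell; the WHAT and TESTS values below are placeholders, not taken from this commit:

    make test-integration WHAT=./test/integration/...
    make run-test WHAT=./pkg/scheduler TESTS=TestAllocate
    make clean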
@@ -0,0 +1,171 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Controls verbosity of the script output and logging.
KUBE_VERBOSE="${KUBE_VERBOSE:-5}"

# Handler for when we exit automatically on an error.
# Borrowed from https://gist.github.com/ahendrix/7030300
kube::log::errexit() {
  local err="${PIPESTATUS[@]}"

  # If the shell we are in doesn't have errexit set (common in subshells) then
  # don't dump stacks.
  set +o | grep -qe "-o errexit" || return

  set +o xtrace
  local code="${1:-1}"
  # Print out the stack trace described by $function_stack
  if [ ${#FUNCNAME[@]} -gt 2 ]
  then
    kube::log::error "Call tree:"
    for ((i=1;i<${#FUNCNAME[@]}-1;i++))
    do
      kube::log::error " $i: ${BASH_SOURCE[$i+1]}:${BASH_LINENO[$i]} ${FUNCNAME[$i]}(...)"
    done
  fi
  kube::log::error_exit "Error in ${BASH_SOURCE[1]}:${BASH_LINENO[0]}. '${BASH_COMMAND}' exited with status $err" "${1:-1}" 1
}

kube::log::install_errexit() {
  # trap ERR to provide an error handler whenever a command exits nonzero; this
  # is a more verbose version of set -o errexit
  trap 'kube::log::errexit' ERR

  # setting errtrace allows our ERR trap handler to be propagated to functions,
  # expansions and subshells
  set -o errtrace
}
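A minimal sketch (assumed usage, not part of this commit) of a script that installs the ERR trap so any failing command prints a call tree before exiting; the source path is an assumption, mirroring the init script later in this diff:

    #!/bin/bash
    set -o errexit
    set -o nounset
    set -o pipefail

    KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/.." && pwd -P)"
    source "${KUBE_ROOT}/cluster/lib/logging.sh"   # assumed location of this logging library
    kube::log::install_errexit

    kube::log::status "About to run a command that may fail"
    false   # triggers kube::log::errexit, which dumps the call tree and exits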
# Print out the stack trace
#
# Args:
#   $1 The number of stack frames to skip when printing.
kube::log::stack() {
  local stack_skip=${1:-0}
  stack_skip=$((stack_skip + 1))
  if [[ ${#FUNCNAME[@]} -gt $stack_skip ]]; then
    echo "Call stack:" >&2
    local i
    for ((i=1 ; i <= ${#FUNCNAME[@]} - $stack_skip ; i++))
    do
      local frame_no=$((i - 1 + stack_skip))
      local source_file=${BASH_SOURCE[$frame_no]}
      local source_lineno=${BASH_LINENO[$((frame_no - 1))]}
      local funcname=${FUNCNAME[$frame_no]}
      echo "  $i: ${source_file}:${source_lineno} ${funcname}(...)" >&2
    done
  fi
}

# Log an error and exit.
# Args:
#   $1 Message to log with the error
#   $2 The error code to return
#   $3 The number of stack frames to skip when printing.
kube::log::error_exit() {
  local message="${1:-}"
  local code="${2:-1}"
  local stack_skip="${3:-0}"
  stack_skip=$((stack_skip + 1))

  if [[ ${KUBE_VERBOSE} -ge 4 ]]; then
    local source_file=${BASH_SOURCE[$stack_skip]}
    local source_line=${BASH_LINENO[$((stack_skip - 1))]}
    echo "!!! Error in ${source_file}:${source_line}" >&2
    [[ -z ${1-} ]] || {
      echo "  ${1}" >&2
    }

    kube::log::stack $stack_skip

    echo "Exiting with status ${code}" >&2
  fi

  exit "${code}"
}

# Log an error but keep going. Don't dump the stack or exit.
kube::log::error() {
  timestamp=$(date +"[%m%d %H:%M:%S]")
  echo "!!! $timestamp ${1-}" >&2
  shift
  for message; do
    echo "    $message" >&2
  done
}

# Print a usage message to stderr. The arguments are printed directly.
kube::log::usage() {
  echo >&2
  local message
  for message; do
    echo "$message" >&2
  done
  echo >&2
}

kube::log::usage_from_stdin() {
  local messages=()
  while read -r line; do
    messages+=("$line")
  done

  kube::log::usage "${messages[@]}"
}

# Print out some info that isn't a top level status line
kube::log::info() {
  local V="${V:-0}"
  if [[ $KUBE_VERBOSE < $V ]]; then
    return
  fi

  for message; do
    echo "$message"
  done
}

# Just like kube::log::info, but no \n, so you can make a progress bar
kube::log::progress() {
  for message; do
    echo -e -n "$message"
  done
}

kube::log::info_from_stdin() {
  local messages=()
  while read -r line; do
    messages+=("$line")
  done

  kube::log::info "${messages[@]}"
}

# Print a status line. Formatted to show up in a stream of output.
kube::log::status() {
  local V="${V:-0}"
  if [[ $KUBE_VERBOSE < $V ]]; then
    return
  fi

  timestamp=$(date +"[%m%d %H:%M:%S]")
  echo "+++ $timestamp $1"
  shift
  for message; do
    echo "    $message"
  done
}
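A short sketch of the verbosity gating above (assumed usage): a message is printed only when KUBE_VERBOSE is at least the V level attached to the call.

    KUBE_VERBOSE=2
    V=2 kube::log::status "printed: KUBE_VERBOSE (2) is not less than V (2)"
    V=3 kube::log::info "suppressed: KUBE_VERBOSE (2) < V (3)"
    kube::log::error "errors always go to stderr, regardless of verbosity"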
@@ -0,0 +1,110 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# A set of helpers for starting/running etcd for tests

ETCD_VERSION=${ETCD_VERSION:-3.0.17}
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-2379}

kube::etcd::validate() {
  # validate if in path
  which etcd >/dev/null || {
    kube::log::usage "etcd must be in your PATH"
    exit 1
  }

  # validate it is not running
  if pgrep -x etcd >/dev/null 2>&1; then
    kube::log::usage "etcd appears to already be running on this machine (`pgrep -xl etcd`) (or it's a zombie and you need to kill its parent)."
    kube::log::usage "retry after you resolve this etcd error."
    exit 1
  fi

  # validate installed version is at least equal to minimum
  version=$(etcd --version | tail -n +1 | head -n 1 | cut -d " " -f 3)
  if [[ $(kube::etcd::version $ETCD_VERSION) -gt $(kube::etcd::version $version) ]]; then
    export PATH=$KUBE_ROOT/third_party/etcd:$PATH
    hash etcd
    echo $PATH
    version=$(etcd --version | head -n 1 | cut -d " " -f 3)
    if [[ $(kube::etcd::version $ETCD_VERSION) -gt $(kube::etcd::version $version) ]]; then
      kube::log::usage "etcd version ${ETCD_VERSION} or greater required."
      kube::log::info "You can use 'hack/install-etcd.sh' to install a copy in third_party/."
      exit 1
    fi
  fi
}

kube::etcd::version() {
  printf '%s\n' "${@}" | awk -F . '{ printf("%d%03d%03d\n", $1, $2, $3) }'
}
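A worked example of the version encoding above: each dotted component is packed into three decimal digits, so versions compare correctly as plain integers.

    kube::etcd::version 3.0.17   # prints 3000017
    kube::etcd::version 2.3.7    # prints 2003007
    # 3000017 -gt 2003007, so 3.0.17 is treated as newer than 2.3.7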
kube::etcd::start() {
  # validate before running
  kube::etcd::validate

  # Start etcd
  ETCD_DIR=${ETCD_DIR:-$(mktemp -d 2>/dev/null || mktemp -d -t test-etcd.XXXXXX)}
  if [[ -d "${ARTIFACTS_DIR:-}" ]]; then
    ETCD_LOGFILE="${ARTIFACTS_DIR}/etcd.$(uname -n).$(id -un).log.DEBUG.$(date +%Y%m%d-%H%M%S).$$"
  else
    ETCD_LOGFILE=/dev/null
  fi
  kube::log::info "etcd --advertise-client-urls http://${ETCD_HOST}:${ETCD_PORT} --data-dir ${ETCD_DIR} --listen-client-urls http://${ETCD_HOST}:${ETCD_PORT} --debug > \"${ETCD_LOGFILE}\" 2>/dev/null"
  etcd --advertise-client-urls http://${ETCD_HOST}:${ETCD_PORT} --data-dir ${ETCD_DIR} --listen-client-urls http://${ETCD_HOST}:${ETCD_PORT} --debug 2> "${ETCD_LOGFILE}" >/dev/null &
  ETCD_PID=$!

  echo "Waiting for etcd to come up."
  kube::util::wait_for_url "http://${ETCD_HOST}:${ETCD_PORT}/v2/machines" "etcd: " 0.25 80
  curl -fs -X PUT "http://${ETCD_HOST}:${ETCD_PORT}/v2/keys/_test"
}

kube::etcd::stop() {
  kill "${ETCD_PID-}" >/dev/null 2>&1 || :
  wait "${ETCD_PID-}" >/dev/null 2>&1 || :
}

kube::etcd::clean_etcd_dir() {
  rm -rf "${ETCD_DIR-}"
}

kube::etcd::cleanup() {
  kube::etcd::stop
  kube::etcd::clean_etcd_dir
}
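A minimal sketch (assumed usage, not part of this commit) of how an integration-test driver would use these helpers: start etcd, register cleanup, then run the tests against ${ETCD_HOST}:${ETCD_PORT}; the go test package path is illustrative only.

    kube::etcd::start
    kube::util::trap_add kube::etcd::cleanup EXIT
    go test ./test/integration/... -v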
kube::etcd::install() {
  (
    cd "${KUBE_ROOT}/third_party"
    if [[ $(uname) == "Darwin" ]]; then
      download_file="etcd-v${ETCD_VERSION}-darwin-amd64.zip"
      url="https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/${download_file}"
      kube::util::download_file "${url}" "${download_file}"
      unzip -o "${download_file}"
      ln -fns "etcd-v${ETCD_VERSION}-darwin-amd64" etcd
      rm "${download_file}"
    else
      url="https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz"
      download_file="etcd-v${ETCD_VERSION}-linux-amd64.tar.gz"
      kube::util::download_file "${url}" "${download_file}"
      tar xzf "${download_file}"
      ln -fns "etcd-v${ETCD_VERSION}-linux-amd64" etcd
    fi
    kube::log::info "etcd v${ETCD_VERSION} installed. To use:"
    kube::log::info "export PATH=$(pwd)/etcd:\${PATH}"
  )
}
@@ -0,0 +1,722 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The golang package that we are building.
readonly KUBE_GO_PACKAGE=github.com/kubernetes-incubator/kube-arbitrator
readonly KUBE_GOPATH="${GOPATH}"

# The set of server targets that we are only building for Linux
# If you update this list, please also update build/BUILD.
kube::golang::server_targets() {
  local targets=(
    cmd/kube-proxy
    cmd/kube-apiserver
    cmd/kube-controller-manager
    cmd/cloud-controller-manager
    cmd/kubelet
    cmd/kubeadm
    cmd/hyperkube
    vendor/k8s.io/kube-aggregator
    vendor/k8s.io/apiextensions-apiserver
    plugin/cmd/kube-scheduler
  )
  echo "${targets[@]}"
}

readonly KUBE_SERVER_TARGETS=($(kube::golang::server_targets))
readonly KUBE_SERVER_BINARIES=("${KUBE_SERVER_TARGETS[@]##*/}")

# The set of server targets that we are only building for Kubernetes nodes
# If you update this list, please also update build/BUILD.
kube::golang::node_targets() {
  local targets=(
    cmd/kube-proxy
    cmd/kubelet
  )
  echo "${targets[@]}"
}

readonly KUBE_NODE_TARGETS=($(kube::golang::node_targets))
readonly KUBE_NODE_BINARIES=("${KUBE_NODE_TARGETS[@]##*/}")
readonly KUBE_NODE_BINARIES_WIN=("${KUBE_NODE_BINARIES[@]/%/.exe}")

if [[ -n "${KUBE_BUILD_PLATFORMS:-}" ]]; then
  readonly KUBE_SERVER_PLATFORMS=(${KUBE_BUILD_PLATFORMS})
  readonly KUBE_NODE_PLATFORMS=(${KUBE_BUILD_PLATFORMS})
  readonly KUBE_TEST_PLATFORMS=(${KUBE_BUILD_PLATFORMS})
  readonly KUBE_CLIENT_PLATFORMS=(${KUBE_BUILD_PLATFORMS})
elif [[ "${KUBE_FASTBUILD:-}" == "true" ]]; then
  readonly KUBE_SERVER_PLATFORMS=(linux/amd64)
  readonly KUBE_NODE_PLATFORMS=(linux/amd64)
  if [[ "${KUBE_BUILDER_OS:-}" == "darwin"* ]]; then
    readonly KUBE_TEST_PLATFORMS=(
      darwin/amd64
      linux/amd64
    )
    readonly KUBE_CLIENT_PLATFORMS=(
      darwin/amd64
      linux/amd64
    )
  else
    readonly KUBE_TEST_PLATFORMS=(linux/amd64)
    readonly KUBE_CLIENT_PLATFORMS=(linux/amd64)
  fi
else

  # The server platform we are building on.
  readonly KUBE_SERVER_PLATFORMS=(
    linux/amd64
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
  )

  # The node platforms we build for
  readonly KUBE_NODE_PLATFORMS=(
    linux/amd64
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
    windows/amd64
  )

  # If we update this we should also update the set of platforms whose standard
  # library is precompiled in build/build-image/cross/Dockerfile
  readonly KUBE_CLIENT_PLATFORMS=(
    linux/amd64
    linux/386
    linux/arm
    linux/arm64
    linux/s390x
    linux/ppc64le
    darwin/amd64
    darwin/386
    windows/amd64
    windows/386
  )

  # Which platforms we should compile test targets for. Not all client platforms need these tests
  readonly KUBE_TEST_PLATFORMS=(
    linux/amd64
    darwin/amd64
    windows/amd64
  )
fi

# The set of client targets that we are building for all platforms
# If you update this list, please also update build/BUILD.
readonly KUBE_CLIENT_TARGETS=(
  cmd/kubectl
  federation/cmd/kubefed
)
readonly KUBE_CLIENT_BINARIES=("${KUBE_CLIENT_TARGETS[@]##*/}")
readonly KUBE_CLIENT_BINARIES_WIN=("${KUBE_CLIENT_BINARIES[@]/%/.exe}")

# The set of test targets that we are building for all platforms
# If you update this list, please also update build/BUILD.
kube::golang::test_targets() {
  local targets=(
    cmd/gendocs
    cmd/genkubedocs
    cmd/genman
    cmd/genyaml
    cmd/mungedocs
    cmd/genswaggertypedocs
    cmd/linkcheck
    federation/cmd/genfeddocs
    vendor/github.com/onsi/ginkgo/ginkgo
    test/e2e/e2e.test
  )
  echo "${targets[@]}"
}
readonly KUBE_TEST_TARGETS=($(kube::golang::test_targets))
readonly KUBE_TEST_BINARIES=("${KUBE_TEST_TARGETS[@]##*/}")
readonly KUBE_TEST_BINARIES_WIN=("${KUBE_TEST_BINARIES[@]/%/.exe}")
# If you update this list, please also update build/BUILD.
readonly KUBE_TEST_PORTABLE=(
  test/e2e/testing-manifests
  test/kubemark
  federation/develop
  hack/e2e.go
  hack/e2e-internal
  hack/get-build.sh
  hack/ginkgo-e2e.sh
  hack/federated-ginkgo-e2e.sh
  hack/lib
)

# Test targets which run on the Kubernetes clusters directly, so we only
# need to target server platforms.
# These binaries will be distributed in the kubernetes-test tarball.
# If you update this list, please also update build/BUILD.
kube::golang::server_test_targets() {
  local targets=(
    cmd/kubemark
    vendor/github.com/onsi/ginkgo/ginkgo
  )

  if [[ "${OSTYPE:-}" == "linux"* ]]; then
    targets+=( test/e2e_node/e2e_node.test )
  fi

  echo "${targets[@]}"
}

readonly KUBE_TEST_SERVER_TARGETS=($(kube::golang::server_test_targets))
readonly KUBE_TEST_SERVER_BINARIES=("${KUBE_TEST_SERVER_TARGETS[@]##*/}")
readonly KUBE_TEST_SERVER_PLATFORMS=("${KUBE_SERVER_PLATFORMS[@]}")

# Gigabytes desired for parallel platform builds. 11 is fairly
# arbitrary, but is a reasonable splitting point for 2015
# laptops-versus-not.
readonly KUBE_PARALLEL_BUILD_MEMORY=11

# TODO(pipejakob) gke-certificates-controller is included here to exercise its
# compilation, but it doesn't need to be distributed in any of our tars. Its
# code is only living in this repo temporarily until it finds a new home.
readonly KUBE_ALL_TARGETS=(
  "${KUBE_SERVER_TARGETS[@]}"
  "${KUBE_CLIENT_TARGETS[@]}"
  "${KUBE_TEST_TARGETS[@]}"
  "${KUBE_TEST_SERVER_TARGETS[@]}"
  cmd/gke-certificates-controller
)
readonly KUBE_ALL_BINARIES=("${KUBE_ALL_TARGETS[@]##*/}")

readonly KUBE_STATIC_LIBRARIES=(
  cloud-controller-manager
  kube-apiserver
  kube-controller-manager
  kube-scheduler
  kube-proxy
  kube-aggregator
  kubeadm
  kubectl
)

# Add any files with those //generate annotations in the array below.
readonly KUBE_BINDATAS=(
  test/e2e/generated/gobindata_util.go
)

kube::golang::is_statically_linked_library() {
  local e
  for e in "${KUBE_STATIC_LIBRARIES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
  # Allow individual overrides--e.g., so that you can get a static build of
  # kubectl for inclusion in a container.
  if [ -n "${KUBE_STATIC_OVERRIDES:+x}" ]; then
    for e in "${KUBE_STATIC_OVERRIDES[@]}"; do [[ "$1" == *"/$e" ]] && return 0; done;
  fi
  return 1;
}

# kube::golang::binaries_from_targets takes a list of build targets and returns
# the full go packages to be built
kube::golang::binaries_from_targets() {
  local target
  for target; do
    # If the target starts with what looks like a domain name, assume it has a
    # fully-qualified package name rather than one that needs the Kubernetes
    # package prepended.
    if [[ "${target}" =~ ^([[:alnum:]]+".")+[[:alnum:]]+"/" ]]; then
      echo "${target}"
    else
      echo "${KUBE_GO_PACKAGE}/${target}"
    fi
  done
}
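An example of the mapping performed above (the fully-qualified path is an illustrative placeholder):

    kube::golang::binaries_from_targets cmd/kube-proxy example.com/some/tool/cmd/foo
    # -> github.com/kubernetes-incubator/kube-arbitrator/cmd/kube-proxy
    # -> example.com/some/tool/cmd/foo   (already fully qualified, echoed unchanged)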
# Asks golang what it thinks the host platform is. The go tool chain does some
# slightly different things when the target platform matches the host platform.
kube::golang::host_platform() {
  echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)"
}

kube::golang::current_platform() {
  local os="${GOOS-}"
  if [[ -z $os ]]; then
    os=$(go env GOHOSTOS)
  fi

  local arch="${GOARCH-}"
  if [[ -z $arch ]]; then
    arch=$(go env GOHOSTARCH)
  fi

  echo "$os/$arch"
}

# Takes the platform name ($1) and sets the appropriate golang env variables
# for that platform.
kube::golang::set_platform_envs() {
  [[ -n ${1-} ]] || {
    kube::log::error_exit "!!! Internal error. No platform set in kube::golang::set_platform_envs"
  }

  export GOOS=${platform%/*}
  export GOARCH=${platform##*/}

  # Do not set CC when building natively on a platform, only if cross-compiling from linux/amd64
  if [[ $(kube::golang::host_platform) == "linux/amd64" ]]; then
    # Dynamic CGO linking for other server architectures than linux/amd64 goes here
    # If you want to include support for more server platforms than these, add arch-specific gcc names here
    case "${platform}" in
      "linux/arm")
        export CGO_ENABLED=1
        export CC=arm-linux-gnueabihf-gcc
        ;;
      "linux/arm64")
        export CGO_ENABLED=1
        export CC=aarch64-linux-gnu-gcc
        ;;
      "linux/ppc64le")
        export CGO_ENABLED=1
        export CC=powerpc64le-linux-gnu-gcc
        ;;
      "linux/s390x")
        export CGO_ENABLED=1
        export CC=s390x-linux-gnu-gcc
        ;;
    esac
  fi
}

kube::golang::unset_platform_envs() {
  unset GOOS
  unset GOARCH
  unset GOROOT
  unset CGO_ENABLED
  unset CC
}
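A small sketch of the intended use (an assumption based on the callers further down, which set the "platform" variable this function reads):

    platform="linux/arm"
    kube::golang::set_platform_envs "${platform}"
    echo "${GOOS} ${GOARCH} ${CGO_ENABLED:-} ${CC:-}"   # on a linux/amd64 host: linux arm 1 arm-linux-gnueabihf-gcc
    kube::golang::unset_platform_envs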
# Create the GOPATH tree under $KUBE_OUTPUT
kube::golang::create_gopath_tree() {
  local go_pkg_dir="${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}"
  local go_pkg_basedir=$(dirname "${go_pkg_dir}")

  mkdir -p "${go_pkg_basedir}"

  # TODO: This symlink should be relative.
  if [[ ! -e "${go_pkg_dir}" || "$(readlink ${go_pkg_dir})" != "${KUBE_ROOT}" ]]; then
    ln -snf "${KUBE_ROOT}" "${go_pkg_dir}"
  fi

  cat >"${KUBE_GOPATH}/BUILD" <<EOF
# This dummy BUILD file prevents Bazel from trying to descend through the
# infinite loop created by the symlink at
# ${go_pkg_dir}
EOF
}

# Ensure the go tool exists and is a viable version.
kube::golang::verify_go_version() {
  if [[ -z "$(which go)" ]]; then
    kube::log::usage_from_stdin <<EOF
Can't find 'go' in PATH, please fix and retry.
See http://golang.org/doc/install for installation instructions.
EOF
    return 2
  fi

  local go_version
  go_version=($(go version))
  local minimum_go_version
  minimum_go_version=go1.8.3
  if [[ "${go_version[2]}" < "${minimum_go_version}" && "${go_version[2]}" != "devel" ]]; then
    kube::log::usage_from_stdin <<EOF
Detected go version: ${go_version[*]}.
Kubernetes requires ${minimum_go_version} or greater.
Please install ${minimum_go_version} or later.
EOF
    return 2
  fi
}

# kube::golang::setup_env will check that the `go` command is available in
# ${PATH}. It will also check that the Go version is good enough for the
# Kubernetes build.
#
# Inputs:
#   KUBE_EXTRA_GOPATH - If set, this is included in created GOPATH
#
# Outputs:
#   env-var GOPATH points to our local output dir
#   env-var GOBIN is unset (we want binaries in a predictable place)
#   env-var GO15VENDOREXPERIMENT=1
#   current directory is within GOPATH
kube::golang::setup_env() {
  kube::golang::verify_go_version

  kube::golang::create_gopath_tree

  export GOPATH=${KUBE_GOPATH}

  # Append KUBE_EXTRA_GOPATH to the GOPATH if it is defined.
  if [[ -n ${KUBE_EXTRA_GOPATH:-} ]]; then
    GOPATH="${GOPATH}:${KUBE_EXTRA_GOPATH}"
  fi

  # Change directories so that we are within the GOPATH. Some tools get really
  # upset if this is not true. We use a whole fake GOPATH here to collect the
  # resultant binaries. Go will not let us use GOBIN with `go install` and
  # cross-compiling, and `go install -o <file>` only works for a single pkg.
  local subdir
  subdir=$(kube::realpath . | sed "s|$KUBE_ROOT||")
  cd "${KUBE_GOPATH}/src/${KUBE_GO_PACKAGE}/${subdir}"

  # Set GOROOT so binaries that parse code can work properly.
  export GOROOT=$(go env GOROOT)

  # Unset GOBIN in case it already exists in the current session.
  unset GOBIN

  # This seems to matter to some tools (godep, ugorji, ginkgo...)
  export GO15VENDOREXPERIMENT=1
}

# This will take binaries from $GOPATH/bin and copy them to the appropriate
# place in ${KUBE_OUTPUT_BINDIR}
#
# Ideally this wouldn't be necessary and we could just set GOBIN to
# KUBE_OUTPUT_BINDIR but that won't work in the face of cross compilation. 'go
# install' will place binaries that match the host platform directly in $GOBIN
# while placing cross compiled binaries into `platform_arch` subdirs. This
# complicates pretty much everything else we do around packaging and such.
kube::golang::place_bins() {
  local host_platform
  host_platform=$(kube::golang::host_platform)

  V=2 kube::log::status "Placing binaries"

  local platform
  for platform in "${KUBE_CLIENT_PLATFORMS[@]}"; do
    # The substitution on platform_src below will replace all slashes with
    # underscores. It'll transform darwin/amd64 -> darwin_amd64.
    local platform_src="/${platform//\//_}"
    if [[ $platform == $host_platform ]]; then
      platform_src=""
      rm -f "${THIS_PLATFORM_BIN}"
      ln -s "${KUBE_OUTPUT_BINPATH}/${platform}" "${THIS_PLATFORM_BIN}"
    fi

    local full_binpath_src="${KUBE_GOPATH}/bin${platform_src}"
    if [[ -d "${full_binpath_src}" ]]; then
      mkdir -p "${KUBE_OUTPUT_BINPATH}/${platform}"
      find "${full_binpath_src}" -maxdepth 1 -type f -exec \
        rsync -pc {} "${KUBE_OUTPUT_BINPATH}/${platform}" \;
    fi
  done
}

kube::golang::fallback_if_stdlib_not_installable() {
  local go_root_dir=$(go env GOROOT);
  local go_host_os=$(go env GOHOSTOS);
  local go_host_arch=$(go env GOHOSTARCH);
  local cgo_pkg_dir=${go_root_dir}/pkg/${go_host_os}_${go_host_arch}_cgo;

  if [ -e ${cgo_pkg_dir} ]; then
    return 0;
  fi

  if [ -w ${go_root_dir}/pkg ]; then
    return 0;
  fi

  kube::log::status "+++ Warning: stdlib pkg with cgo flag not found.";
  kube::log::status "+++ Warning: stdlib pkg cannot be rebuilt since ${go_root_dir}/pkg is not writable by `whoami`";
  kube::log::status "+++ Warning: Make ${go_root_dir}/pkg writable for `whoami` for a one-time stdlib install, Or"
  kube::log::status "+++ Warning: Rebuild stdlib using the command 'CGO_ENABLED=0 go install -a -installsuffix cgo std'";
  kube::log::status "+++ Falling back to go build, which is slower";

  use_go_build=true
}

# Builds the toolchain necessary for building kube. This needs to be
# built only on the host platform.
# TODO: Find this a proper home.
# Ideally, not a shell script because testing shell scripts is painful.
kube::golang::build_kube_toolchain() {
  local targets=(
    hack/cmd/teststale
    vendor/github.com/jteeuwen/go-bindata/go-bindata
  )

  local binaries
  binaries=($(kube::golang::binaries_from_targets "${targets[@]}"))

  kube::log::status "Building the toolchain targets:" "${binaries[@]}"
  go install "${goflags[@]:+${goflags[@]}}" \
    -gcflags "${gogcflags}" \
    -ldflags "${goldflags}" \
    "${binaries[@]:+${binaries[@]}}"
}

# Try and replicate the native binary placement of go install without
# calling go install.
kube::golang::output_filename_for_binary() {
  local binary=$1
  local platform=$2
  local output_path="${KUBE_GOPATH}/bin"
  if [[ $platform != $host_platform ]]; then
    output_path="${output_path}/${platform//\//_}"
  fi
  local bin=$(basename "${binary}")
  if [[ ${GOOS} == "windows" ]]; then
    bin="${bin}.exe"
  fi
  echo "${output_path}/${bin}"
}
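A worked example of the path computation above (values are illustrative; host_platform and GOOS are assumed to have been set by the callers below):

    # host_platform=linux/amd64, GOOS=windows
    kube::golang::output_filename_for_binary cmd/kubectl windows/amd64
    # -> ${KUBE_GOPATH}/bin/windows_amd64/kubectl.exe
    # For the host platform itself the binary lands directly in ${KUBE_GOPATH}/bin/kubectl.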
kube::golang::build_binaries_for_platform() {
  local platform=$1
  local use_go_build=${2-}

  local -a statics=()
  local -a nonstatics=()
  local -a tests=()

  V=2 kube::log::info "Env for ${platform}: GOOS=${GOOS-} GOARCH=${GOARCH-} GOROOT=${GOROOT-} CGO_ENABLED=${CGO_ENABLED-} CC=${CC-}"

  for binary in "${binaries[@]}"; do
    if [[ "${binary}" =~ ".test"$ ]]; then
      tests+=($binary)
    elif kube::golang::is_statically_linked_library "${binary}"; then
      statics+=($binary)
    else
      nonstatics+=($binary)
    fi
  done

  if [[ "${#statics[@]}" != 0 ]]; then
    kube::golang::fallback_if_stdlib_not_installable;
  fi

  if [[ -n ${use_go_build:-} ]]; then
    kube::log::progress " "
    for binary in "${statics[@]:+${statics[@]}}"; do
      local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}")
      CGO_ENABLED=0 go build -o "${outfile}" \
        "${goflags[@]:+${goflags[@]}}" \
        -gcflags "${gogcflags}" \
        -ldflags "${goldflags}" \
        "${binary}"
      kube::log::progress "*"
    done
    for binary in "${nonstatics[@]:+${nonstatics[@]}}"; do
      local outfile=$(kube::golang::output_filename_for_binary "${binary}" "${platform}")
      go build -o "${outfile}" \
        "${goflags[@]:+${goflags[@]}}" \
        -gcflags "${gogcflags}" \
        -ldflags "${goldflags}" \
        "${binary}"
      kube::log::progress "*"
    done
    kube::log::progress "\n"
  else
    # Use go install.
    if [[ "${#nonstatics[@]}" != 0 ]]; then
      go install "${goflags[@]:+${goflags[@]}}" \
        -gcflags "${gogcflags}" \
        -ldflags "${goldflags}" \
        "${nonstatics[@]:+${nonstatics[@]}}"
    fi
    if [[ "${#statics[@]}" != 0 ]]; then
      CGO_ENABLED=0 go install -installsuffix cgo "${goflags[@]:+${goflags[@]}}" \
        -gcflags "${gogcflags}" \
        -ldflags "${goldflags}" \
        "${statics[@]:+${statics[@]}}"
    fi
  fi

  for test in "${tests[@]:+${tests[@]}}"; do
    local outfile=$(kube::golang::output_filename_for_binary "${test}" \
      "${platform}")

    local testpkg="$(dirname ${test})"

    # Staleness check always happens on the host machine, so we don't
    # have to locate the `teststale` binaries for the other platforms.
    # Since we place the host binaries in `$KUBE_GOPATH/bin`, we can
    # assume that the binary exists there, if it exists at all.
    # Otherwise, something has gone wrong with building the `teststale`
    # binary and we should safely proceed building the test binaries
    # assuming that they are stale. There is no good reason to error
    # out.
    if test -x "${KUBE_GOPATH}/bin/teststale" && ! "${KUBE_GOPATH}/bin/teststale" -binary "${outfile}" -package "${testpkg}"
    then
      continue
    fi

    # `go test -c` below directly builds the binary. It builds the packages,
    # but it never installs them. `go test -i` only installs the dependencies
    # of the test, but not the test package itself. So neither `go test -c`
    # nor `go test -i` installs, for example, test/e2e.a. And without that,
    # doing a staleness check on k8s.io/kubernetes/test/e2e package always
    # returns true (always stale). And that's why we need to install the
    # test package.
    go install "${goflags[@]:+${goflags[@]}}" \
      -gcflags "${gogcflags}" \
      -ldflags "${goldflags}" \
      "${testpkg}"

    mkdir -p "$(dirname ${outfile})"
    go test -i -c \
      "${goflags[@]:+${goflags[@]}}" \
      -gcflags "${gogcflags}" \
      -ldflags "${goldflags}" \
      -o "${outfile}" \
      "${testpkg}"
  done
}

# Return approximate physical memory available in gigabytes.
kube::golang::get_physmem() {
  local mem

  # Linux kernel version >=3.14, in kb
  if mem=$(grep MemAvailable /proc/meminfo | awk '{ print $2 }'); then
    echo $(( ${mem} / 1048576 ))
    return
  fi

  # Linux, in kb
  if mem=$(grep MemTotal /proc/meminfo | awk '{ print $2 }'); then
    echo $(( ${mem} / 1048576 ))
    return
  fi

  # OS X, in bytes. Note that get_physmem, as used, should only ever
  # run in a Linux container (because it's only used in the multiple
  # platform case, which is a Dockerized build), but this is provided
  # for completeness.
  if mem=$(sysctl -n hw.memsize 2>/dev/null); then
    echo $(( ${mem} / 1073741824 ))
    return
  fi

  # If we can't infer it, just give up and assume a low memory system
  echo 1
}
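A quick worked example of how this value is consumed by kube::golang::build_binaries below (threshold KUBE_PARALLEL_BUILD_MEMORY=11):

    gigs=16  ->  16 >= 11  ->  platforms built in parallel
    gigs=8   ->   8 <  11  ->  platforms built serially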
# Build binaries targets specified
#
# Input:
#   $@ - targets and go flags. If no targets are set then all binaries targets
#     are built.
#   KUBE_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset
#     then just the host architecture is built.
kube::golang::build_binaries() {
  # Create a sub-shell so that we don't pollute the outer environment
  (
    # Check for `go` binary and set ${GOPATH}.
    kube::golang::setup_env
    V=2 kube::log::info "Go version: $(go version)"

    local host_platform
    host_platform=$(kube::golang::host_platform)

    # Use eval to preserve embedded quoted strings.
    local goflags goldflags gogcflags
    eval "goflags=(${GOFLAGS:-})"
    goldflags="${GOLDFLAGS:-} $(kube::version::ldflags)"
    gogcflags="${GOGCFLAGS:-}"

    local use_go_build
    local -a targets=()
    local arg

    for arg; do
      if [[ "${arg}" == "--use_go_build" ]]; then
        use_go_build=true
      elif [[ "${arg}" == -* ]]; then
        # Assume arguments starting with a dash are flags to pass to go.
        goflags+=("${arg}")
      else
        targets+=("${arg}")
      fi
    done

    if [[ ${#targets[@]} -eq 0 ]]; then
      targets=("${KUBE_ALL_TARGETS[@]}")
    fi

    local -a platforms=(${KUBE_BUILD_PLATFORMS:-})
    if [[ ${#platforms[@]} -eq 0 ]]; then
      platforms=("${host_platform}")
    fi

    local binaries
    binaries=($(kube::golang::binaries_from_targets "${targets[@]}"))

    local parallel=false
    if [[ ${#platforms[@]} -gt 1 ]]; then
      local gigs
      gigs=$(kube::golang::get_physmem)

      if [[ ${gigs} -ge ${KUBE_PARALLEL_BUILD_MEMORY} ]]; then
        kube::log::status "Multiple platforms requested and available ${gigs}G >= threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in parallel"
        parallel=true
      else
        kube::log::status "Multiple platforms requested, but available ${gigs}G < threshold ${KUBE_PARALLEL_BUILD_MEMORY}G, building platforms in serial"
        parallel=false
      fi
    fi

    # First build the toolchain before building any other targets
    kube::golang::build_kube_toolchain

    kube::log::status "Generating bindata:" "${KUBE_BINDATAS[@]}"
    for bindata in ${KUBE_BINDATAS[@]}; do
      # Only try to generate bindata if the file exists, since in some cases
      # one-off builds of individual directories may exclude some files.
      if [[ -f "${KUBE_ROOT}/${bindata}" ]]; then
        go generate "${goflags[@]:+${goflags[@]}}" "${KUBE_ROOT}/${bindata}"
      fi
    done

    if [[ "${parallel}" == "true" ]]; then
      kube::log::status "Building go targets for {${platforms[*]}} in parallel (output will appear in a burst when complete):" "${targets[@]}"
      local platform
      for platform in "${platforms[@]}"; do (
          kube::golang::set_platform_envs "${platform}"
          kube::log::status "${platform}: go build started"
          kube::golang::build_binaries_for_platform ${platform} ${use_go_build:-}
          kube::log::status "${platform}: go build finished"
        ) &> "/tmp//${platform//\//_}.build" &
      done

      local fails=0
      for job in $(jobs -p); do
        wait ${job} || let "fails+=1"
      done

      for platform in "${platforms[@]}"; do
        cat "/tmp//${platform//\//_}.build"
      done

      exit ${fails}
    else
      for platform in "${platforms[@]}"; do
        kube::log::status "Building go targets for ${platform}:" "${targets[@]}"
        (
          kube::golang::set_platform_envs "${platform}"
          kube::golang::build_binaries_for_platform ${platform} ${use_go_build:-}
        )
      done
    fi
  )
}
@@ -0,0 +1,171 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

# The root of the build/dist directory
KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/../.." && pwd -P)"

KUBE_OUTPUT_SUBPATH="${KUBE_OUTPUT_SUBPATH:-_output/local}"
KUBE_OUTPUT="${KUBE_ROOT}/${KUBE_OUTPUT_SUBPATH}"
KUBE_OUTPUT_BINPATH="${KUBE_OUTPUT}/bin"

# This controls rsync compression. Set to a value > 0 to enable rsync
# compression for build container
KUBE_RSYNC_COMPRESS="${KUBE_RSYNC_COMPRESS:-0}"

# Set no_proxy for localhost if behind a proxy; otherwise the connections to
# localhost in scripts will time out
export no_proxy=127.0.0.1,localhost

# This is a symlink to binaries for "this platform", e.g. build tools.
THIS_PLATFORM_BIN="${KUBE_ROOT}/_output/bin"

source "${KUBE_ROOT}/hack/lib/util.sh"
source "${KUBE_ROOT}/cluster/lib/logging.sh"

kube::log::install_errexit

source "${KUBE_ROOT}/hack/lib/version.sh"
source "${KUBE_ROOT}/hack/lib/golang.sh"
source "${KUBE_ROOT}/hack/lib/etcd.sh"

KUBE_OUTPUT_HOSTBIN="${KUBE_OUTPUT_BINPATH}/$(kube::util::host_platform)"

# List of all available group versions. This should be used when generating code
# or when starting an API server that you want to have everything.
# The most preferred version for a group should appear first.
KUBE_AVAILABLE_GROUP_VERSIONS="${KUBE_AVAILABLE_GROUP_VERSIONS:-\
v1 \
admissionregistration.k8s.io/v1alpha1 \
admission.k8s.io/v1alpha1 \
apps/v1beta1 \
apps/v1beta2 \
authentication.k8s.io/v1 \
authentication.k8s.io/v1beta1 \
authorization.k8s.io/v1 \
authorization.k8s.io/v1beta1 \
autoscaling/v1 \
autoscaling/v2alpha1 \
batch/v1 \
batch/v2alpha1 \
certificates.k8s.io/v1beta1 \
extensions/v1beta1 \
imagepolicy.k8s.io/v1alpha1 \
networking.k8s.io/v1 \
policy/v1beta1 \
rbac.authorization.k8s.io/v1 \
rbac.authorization.k8s.io/v1beta1 \
rbac.authorization.k8s.io/v1alpha1 \
scheduling.k8s.io/v1alpha1 \
settings.k8s.io/v1alpha1 \
storage.k8s.io/v1beta1 \
storage.k8s.io/v1 \
}"

# Not all group versions are exposed by the server. This list contains those
# which are not available so we don't generate clients or swagger for them
KUBE_NONSERVER_GROUP_VERSIONS="
abac.authorization.kubernetes.io/v0 \
abac.authorization.kubernetes.io/v1beta1 \
componentconfig/v1alpha1 \
imagepolicy.k8s.io/v1alpha1\
admission.k8s.io/v1alpha1\
"

# This emulates "readlink -f" which is not available on MacOS X.
# Test:
# T=/tmp/$$.$RANDOM
# mkdir $T
# touch $T/file
# mkdir $T/dir
# ln -s $T/file $T/linkfile
# ln -s $T/dir $T/linkdir
# function testone() {
#   X=$(readlink -f $1 2>&1)
#   Y=$(kube::readlinkdashf $1 2>&1)
#   if [ "$X" != "$Y" ]; then
#     echo readlinkdashf $1: expected "$X", got "$Y"
#   fi
# }
# testone /
# testone /tmp
# testone $T
# testone $T/file
# testone $T/dir
# testone $T/linkfile
# testone $T/linkdir
# testone $T/nonexistent
# testone $T/linkdir/file
# testone $T/linkdir/dir
# testone $T/linkdir/linkfile
# testone $T/linkdir/linkdir
function kube::readlinkdashf {
  # run in a subshell for simpler 'cd'
  (
    if [[ -d "$1" ]]; then # This also catches symlinks to dirs.
      cd "$1"
      pwd -P
    else
      cd $(dirname "$1")
      local f
      f=$(basename "$1")
      if [[ -L "$f" ]]; then
        readlink "$f"
      else
        echo "$(pwd -P)/${f}"
      fi
    fi
  )
}

# This emulates "realpath" which is not available on MacOS X
# Test:
# T=/tmp/$$.$RANDOM
# mkdir $T
# touch $T/file
# mkdir $T/dir
# ln -s $T/file $T/linkfile
# ln -s $T/dir $T/linkdir
# function testone() {
#   X=$(realpath $1 2>&1)
#   Y=$(kube::realpath $1 2>&1)
#   if [ "$X" != "$Y" ]; then
#     echo realpath $1: expected "$X", got "$Y"
#   fi
# }
# testone /
# testone /tmp
# testone $T
# testone $T/file
# testone $T/dir
# testone $T/linkfile
# testone $T/linkdir
# testone $T/nonexistent
# testone $T/linkdir/file
# testone $T/linkdir/dir
# testone $T/linkdir/linkfile
# testone $T/linkdir/linkdir
kube::realpath() {
  if [[ ! -e "$1" ]]; then
    echo "$1: No such file or directory" >&2
    return 1
  fi
  kube::readlinkdashf "$1"
}
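A small illustrative use of the helper above: kube::golang::setup_env (earlier in this diff) calls "kube::realpath ." to get a symlink-free path for the current directory before mapping it under KUBE_ROOT.

    kube::realpath .               # absolute, physical path of the current directory
    kube::realpath ./missing-file  # prints "./missing-file: No such file or directory" and returns 1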
@@ -0,0 +1,849 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

kube::util::sortable_date() {
  date "+%Y%m%d-%H%M%S"
}

kube::util::wait_for_url() {
  local url=$1
  local prefix=${2:-}
  local wait=${3:-1}
  local times=${4:-30}

  which curl >/dev/null || {
    kube::log::usage "curl must be installed"
    exit 1
  }

  local i
  for i in $(seq 1 $times); do
    local out
    if out=$(curl --max-time 1 -gkfs $url 2>/dev/null); then
      kube::log::status "On try ${i}, ${prefix}: ${out}"
      return 0
    fi
    sleep ${wait}
  done
  kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} waiting ${wait} between each"
  return 1
}

# returns a random port
kube::util::get_random_port() {
  awk -v min=1024 -v max=65535 'BEGIN{srand(); print int(min+rand()*(max-min+1))}'
}

# use netcat to check if the host($1):port($2) is free (return 0 means free, 1 means used)
kube::util::test_host_port_free() {
  local host=$1
  local port=$2
  local success=0
  local fail=1

  which nc >/dev/null || {
    kube::log::usage "netcat isn't installed, can't verify if ${host}:${port} is free, skipping the check..."
    return ${success}
  }

  if [ ! $(nc -vz "${host}" "${port}") ]; then
    kube::log::status "${host}:${port} is free, proceeding..."
    return ${success}
  else
    kube::log::status "${host}:${port} is already used"
    return ${fail}
  fi
}

# Example:  kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
kube::util::trap_add() {
  local trap_add_cmd
  trap_add_cmd=$1
  shift

  for trap_add_name in "$@"; do
    local existing_cmd
    local new_cmd

    # Grab the currently defined trap commands for this trap
    existing_cmd=`trap -p "${trap_add_name}" | awk -F"'" '{print $2}'`

    if [[ -z "${existing_cmd}" ]]; then
      new_cmd="${trap_add_cmd}"
    else
      new_cmd="${existing_cmd};${trap_add_cmd}"
    fi

    # Assign the trap
    trap "${new_cmd}" "${trap_add_name}"
  done
}
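A minimal sketch of the append behaviour above (the TMP_* variables are placeholders): successive calls for the same signal are chained rather than overwritten.

    kube::util::trap_add 'rm -rf "${TMP_A}"' EXIT
    kube::util::trap_add 'rm -rf "${TMP_B}"' EXIT
    trap -p EXIT   # shows: rm -rf "${TMP_A}";rm -rf "${TMP_B}"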
|
||||
|
||||
# Opposite of kube::util::ensure-temp-dir()
|
||||
kube::util::cleanup-temp-dir() {
|
||||
rm -rf "${KUBE_TEMP}"
|
||||
}
|
||||
|
||||
# Create a temp dir that'll be deleted at the end of this bash session.
|
||||
#
|
||||
# Vars set:
|
||||
# KUBE_TEMP
|
||||
kube::util::ensure-temp-dir() {
|
||||
if [[ -z ${KUBE_TEMP-} ]]; then
|
||||
KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX)
|
||||
kube::util::trap_add kube::util::cleanup-temp-dir EXIT
|
||||
fi
|
||||
}
|
||||
|
||||
# This figures out the host platform without relying on golang. We need this as
|
||||
# we don't want a golang install to be a prerequisite to building yet we need
|
||||
# this info to figure out where the final binaries are placed.
|
||||
kube::util::host_platform() {
|
||||
local host_os
|
||||
local host_arch
|
||||
case "$(uname -s)" in
|
||||
Darwin)
|
||||
host_os=darwin
|
||||
;;
|
||||
Linux)
|
||||
host_os=linux
|
||||
;;
|
||||
*)
|
||||
kube::log::error "Unsupported host OS. Must be Linux or Mac OS X."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
case "$(uname -m)" in
|
||||
x86_64*)
|
||||
host_arch=amd64
|
||||
;;
|
||||
i?86_64*)
|
||||
host_arch=amd64
|
||||
;;
|
||||
amd64*)
|
||||
host_arch=amd64
|
||||
;;
|
||||
aarch64*)
|
||||
host_arch=arm64
|
||||
;;
|
||||
arm64*)
|
||||
host_arch=arm64
|
||||
;;
|
||||
arm*)
|
||||
host_arch=arm
|
||||
;;
|
||||
i?86*)
|
||||
host_arch=x86
|
||||
;;
|
||||
s390x*)
|
||||
host_arch=s390x
|
||||
;;
|
||||
ppc64le*)
|
||||
host_arch=ppc64le
|
||||
;;
|
||||
*)
|
||||
kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
echo "${host_os}/${host_arch}"
|
||||
}
|
||||
|
||||
kube::util::find-binary-for-platform() {
|
||||
local -r lookfor="$1"
|
||||
local -r platform="$2"
|
||||
local locations=(
|
||||
"${KUBE_ROOT}/_output/bin/${lookfor}"
|
||||
"${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}"
|
||||
"${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}"
|
||||
"${KUBE_ROOT}/platforms/${platform}/${lookfor}"
|
||||
)
|
||||
# Also search for binary in bazel build tree.
|
||||
# In some cases we have to name the binary $BINARY_bin, since there was a
|
||||
# directory named $BINARY next to it.
|
||||
locations+=($(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \
|
||||
\( -name "${lookfor}" -o -name "${lookfor}_bin" \) 2>/dev/null || true) )
|
||||
|
||||
# List most recently-updated location.
|
||||
local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
|
||||
echo -n "${bin}"
|
||||
}
|
||||
|
||||
kube::util::find-binary() {
|
||||
kube::util::find-binary-for-platform "$1" "$(kube::util::host_platform)"
|
||||
}
|
||||
|
||||
# Run all known doc generators (today gendocs and genman for kubectl)
|
||||
# $1 is the directory to put those generated documents
|
||||
kube::util::gen-docs() {
|
||||
local dest="$1"
|
||||
|
||||
# Find binary
|
||||
gendocs=$(kube::util::find-binary "gendocs")
|
||||
genkubedocs=$(kube::util::find-binary "genkubedocs")
|
||||
genman=$(kube::util::find-binary "genman")
|
||||
genyaml=$(kube::util::find-binary "genyaml")
|
||||
genfeddocs=$(kube::util::find-binary "genfeddocs")
|
||||
|
||||
mkdir -p "${dest}/docs/user-guide/kubectl/"
|
||||
"${gendocs}" "${dest}/docs/user-guide/kubectl/"
|
||||
mkdir -p "${dest}/docs/admin/"
|
||||
"${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver"
|
||||
"${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager"
|
||||
"${genkubedocs}" "${dest}/docs/admin/" "cloud-controller-manager"
|
||||
"${genkubedocs}" "${dest}/docs/admin/" "kube-proxy"
|
||||
"${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler"
|
||||
"${genkubedocs}" "${dest}/docs/admin/" "kubelet"
|
||||
|
||||
# We don't really need federation-apiserver and federation-controller-manager
|
||||
# binaries to generate the docs. We just pass their names to decide which docs
|
||||
# to generate. The actual binary for running federation is hyperkube.
|
||||
"${genfeddocs}" "${dest}/docs/admin/" "federation-apiserver"
|
||||
"${genfeddocs}" "${dest}/docs/admin/" "federation-controller-manager"
|
||||
"${genfeddocs}" "${dest}/docs/admin/" "kubefed"
|
||||
|
||||
mkdir -p "${dest}/docs/man/man1/"
|
||||
"${genman}" "${dest}/docs/man/man1/" "kube-apiserver"
|
||||
"${genman}" "${dest}/docs/man/man1/" "kube-controller-manager"
|
||||
"${genman}" "${dest}/docs/man/man1/" "cloud-controller-manager"
|
||||
"${genman}" "${dest}/docs/man/man1/" "kube-proxy"
|
||||
"${genman}" "${dest}/docs/man/man1/" "kube-scheduler"
|
||||
"${genman}" "${dest}/docs/man/man1/" "kubelet"
|
||||
"${genman}" "${dest}/docs/man/man1/" "kubectl"
|
||||
|
||||
mkdir -p "${dest}/docs/yaml/kubectl/"
|
||||
"${genyaml}" "${dest}/docs/yaml/kubectl/"
|
||||
|
||||
# create the list of generated files
|
||||
pushd "${dest}" > /dev/null
|
||||
touch docs/.generated_docs
|
||||
find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs
|
||||
popd > /dev/null
|
||||
}
|
||||
|
||||
# Puts a placeholder for every generated doc. This makes the link checker work.
|
||||
kube::util::set-placeholder-gen-docs() {
|
||||
local list_file="${KUBE_ROOT}/docs/.generated_docs"
|
||||
if [ -e ${list_file} ]; then
|
||||
# remove all of the old docs; we don't want to check them in.
|
||||
while read file; do
|
||||
if [[ "${list_file}" != "${KUBE_ROOT}/${file}" ]]; then
|
||||
cp "${KUBE_ROOT}/hack/autogenerated_placeholder.txt" "${KUBE_ROOT}/${file}"
|
||||
fi
|
||||
done <"${list_file}"
|
||||
# The docs/.generated_docs file lists itself, so we don't need to explicitly
|
||||
# delete it.
|
||||
fi
|
||||
}
|
||||
|
||||
# Removes previously generated docs-- we don't want to check them in. $KUBE_ROOT
|
||||
# must be set.
|
||||
kube::util::remove-gen-docs() {
|
||||
if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then
|
||||
# remove all of the old docs; we don't want to check them in.
|
||||
while read file; do
|
||||
rm "${KUBE_ROOT}/${file}" 2>/dev/null || true
|
||||
done <"${KUBE_ROOT}/docs/.generated_docs"
|
||||
# The docs/.generated_docs file lists itself, so we don't need to explicitly
|
||||
# delete it.
|
||||
fi
|
||||
}
|
||||
|
||||
# Takes a path $1 to traverse for md files to append the ga-beacon tracking
|
||||
# link to, if needed. If $2 is set, just print files that are missing
|
||||
# the link.
|
||||
kube::util::gen-analytics() {
|
||||
local path="$1"
|
||||
local dryrun="${2:-}"
|
||||
local mdfiles dir link
|
||||
# find has some strange inconsistencies between darwin/linux. The
|
||||
# path to search must end in '/' for linux, but darwin will put an extra
|
||||
# slash in results if there is a trailing '/'.
|
||||
if [[ $( uname ) == 'Linux' ]]; then
|
||||
dir="${path}/"
|
||||
else
|
||||
dir="${path}"
|
||||
fi
|
||||
# We don't touch files in special dirs, and the kubectl docs are
|
||||
# autogenerated by gendocs.
|
||||
# Don't descend into .directories
|
||||
mdfiles=($( find "${dir}" -name "*.md" -type f \
|
||||
-not -path '*/\.*' \
|
||||
-not -path "${path}/vendor/*" \
|
||||
-not -path "${path}/staging/*" \
|
||||
-not -path "${path}/third_party/*" \
|
||||
-not -path "${path}/_gopath/*" \
|
||||
-not -path "${path}/_output/*" \
|
||||
-not -path "${path}/docs/user-guide/kubectl/kubectl*" ))
|
||||
for f in "${mdfiles[@]}"; do
|
||||
link=$(kube::util::analytics-link "${f#${path}/}")
|
||||
if grep -q -F -x "${link}" "${f}"; then
|
||||
continue
|
||||
elif [[ -z "${dryrun}" ]]; then
|
||||
echo -e "\n\n${link}" >> "${f}"
|
||||
else
|
||||
echo "$f"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Prints analytics link to append to a file at path $1.
|
||||
kube::util::analytics-link() {
|
||||
local path="$1"
|
||||
echo "[]()"
|
||||
}
|
||||
|
||||
# Takes a group/version and returns the path to its location on disk, sans
|
||||
# "pkg". E.g.:
|
||||
# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1
|
||||
# * default behavior for only a group: experimental -> apis/experimental
|
||||
# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned
|
||||
# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1
|
||||
# * Very special handling for when both group and version are "": / -> api
|
||||
kube::util::group-version-to-pkg-path() {
|
||||
staging_apis=(
|
||||
$(
|
||||
pushd ${KUBE_ROOT}/staging/src/k8s.io/api > /dev/null
|
||||
find . -name types.go | xargs -n1 dirname | sed "s|\./||g" | sort
|
||||
popd > /dev/null
|
||||
)
|
||||
)
|
||||
|
||||
local group_version="$1"
|
||||
|
||||
if [[ " ${staging_apis[@]} " =~ " ${group_version/.*k8s.io/} " ]]; then
|
||||
echo "vendor/k8s.io/api/${group_version/.*k8s.io/}"
|
||||
return
|
||||
fi
|
||||
|
||||
# "v1" is the API GroupVersion
|
||||
if [[ "${group_version}" == "v1" ]]; then
|
||||
echo "vendor/k8s.io/api/core/v1"
|
||||
return
|
||||
fi
|
||||
|
||||
# Special cases first.
|
||||
# TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
|
||||
# moving the results to pkg/apis/api.
|
||||
case "${group_version}" in
|
||||
# both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API.
|
||||
__internal)
|
||||
echo "pkg/api"
|
||||
;;
|
||||
federation/v1beta1)
|
||||
echo "federation/apis/federation/v1beta1"
|
||||
;;
|
||||
meta/v1)
|
||||
echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
;;
|
||||
meta/v1)
|
||||
echo "../vendor/k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
;;
|
||||
meta/v1alpha1)
|
||||
echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1"
|
||||
;;
|
||||
meta/v1alpha1)
|
||||
echo "../vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1"
|
||||
;;
|
||||
unversioned)
|
||||
echo "pkg/api/unversioned"
|
||||
;;
|
||||
*.k8s.io)
|
||||
echo "pkg/apis/${group_version%.*k8s.io}"
|
||||
;;
|
||||
*.k8s.io/*)
|
||||
echo "pkg/apis/${group_version/.*k8s.io/}"
|
||||
;;
|
||||
*)
|
||||
echo "pkg/apis/${group_version%__internal}"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Takes a group/version and returns the swagger-spec file name.
# default behavior: extensions/v1beta1 -> extensions_v1beta1
# special case for v1: v1 -> v1
kube::util::gv-to-swagger-name() {
  local group_version="$1"
  case "${group_version}" in
  v1)
    echo "v1"
    ;;
  *)
    echo "${group_version%/*}_${group_version#*/}"
    ;;
  esac
}


# Fetches swagger spec from apiserver.
# Assumed vars:
# SWAGGER_API_PATH: Base path for swaggerapi on apiserver. Ex:
#     http://localhost:8080/swaggerapi.
# SWAGGER_ROOT_DIR: Root dir where we want to save the fetched spec.
# VERSIONS: Array of group versions to include in swagger spec.
kube::util::fetch-swagger-spec() {
  for ver in ${VERSIONS}; do
    if [[ " ${KUBE_NONSERVER_GROUP_VERSIONS} " == *" ${ver} "* ]]; then
      continue
    fi
    # fetch the swagger spec for each group version.
    if [[ ${ver} == "v1" ]]; then
      SUBPATH="api"
    else
      SUBPATH="apis"
    fi
    SUBPATH="${SUBPATH}/${ver}"
    SWAGGER_JSON_NAME="$(kube::util::gv-to-swagger-name ${ver}).json"
    curl -w "\n" -fs "${SWAGGER_API_PATH}${SUBPATH}" > "${SWAGGER_ROOT_DIR}/${SWAGGER_JSON_NAME}"

    # fetch the swagger spec for the discovery mechanism at group level.
    if [[ ${ver} == "v1" ]]; then
      continue
    fi
    SUBPATH="apis/"${ver%/*}
    SWAGGER_JSON_NAME="${ver%/*}.json"
    curl -w "\n" -fs "${SWAGGER_API_PATH}${SUBPATH}" > "${SWAGGER_ROOT_DIR}/${SWAGGER_JSON_NAME}"
  done

  # fetch swagger specs for other discovery mechanisms.
  curl -w "\n" -fs "${SWAGGER_API_PATH}" > "${SWAGGER_ROOT_DIR}/resourceListing.json"
  curl -w "\n" -fs "${SWAGGER_API_PATH}version" > "${SWAGGER_ROOT_DIR}/version.json"
  curl -w "\n" -fs "${SWAGGER_API_PATH}api" > "${SWAGGER_ROOT_DIR}/api.json"
  curl -w "\n" -fs "${SWAGGER_API_PATH}apis" > "${SWAGGER_ROOT_DIR}/apis.json"
  curl -w "\n" -fs "${SWAGGER_API_PATH}logs" > "${SWAGGER_ROOT_DIR}/logs.json"
}

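# Minimal usage sketch for the fetcher above; the variable values are assumed
# for illustration and would normally be set by the calling script.
#
#   SWAGGER_API_PATH="http://localhost:8080/swaggerapi/"
#   SWAGGER_ROOT_DIR="_output/swagger"
#   VERSIONS="v1 extensions/v1beta1"
#   KUBE_NONSERVER_GROUP_VERSIONS=""
#   mkdir -p "${SWAGGER_ROOT_DIR}"
#   kube::util::fetch-swagger-spec
#
# Expected output files: v1.json, extensions_v1beta1.json, extensions.json,
# resourceListing.json, version.json, api.json, apis.json and logs.json.
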
# Returns the name of the upstream remote repository name for the local git
# repo, e.g. "upstream" or "origin".
kube::util::git_upstream_remote_name() {
  git remote -v | grep fetch |\
    grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |\
    head -n 1 | awk '{print $1}'
}

# Checks whether godep restore was run in the current GOPATH, i.e. that all referenced repos exist
# and are checked out to the referenced rev.
kube::util::godep_restored() {
  local -r godeps_json=${1:-Godeps/Godeps.json}
  local -r gopath=${2:-${GOPATH%:*}}
  if ! which jq &>/dev/null; then
    echo "jq not found. Please install." 1>&2
    return 1
  fi
  local root
  local old_rev=""
  while read path rev; do
    rev=$(echo "${rev}" | sed "s/['\"]//g") # remove quotes which are around revs sometimes

    if [[ "${rev}" == "${old_rev}" ]] && [[ "${path}" == "${root}"* ]]; then
      # avoid checking the same git/hg root again
      continue
    fi

    root="${path}"
    while [ "${root}" != "." -a ! -d "${gopath}/src/${root}/.git" -a ! -d "${gopath}/src/${root}/.hg" ]; do
      root=$(dirname "${root}")
    done
    if [ "${root}" == "." ]; then
      echo "No checkout of ${path} found in GOPATH \"${gopath}\"." 1>&2
      return 1
    fi
    local head
    if [ -d "${gopath}/src/${root}/.git" ]; then
      head="$(cd "${gopath}/src/${root}" && git rev-parse HEAD)"
    else
      head="$(cd "${gopath}/src/${root}" && hg parent --template '{node}')"
    fi
    if [ "${head}" != "${rev}" ]; then
      echo "Unexpected HEAD '${head}' at ${gopath}/src/${root}, expected '${rev}'." 1>&2
      return 1
    fi
    old_rev="${rev}"
  done < <(jq '.Deps|.[]|.ImportPath + " " + .Rev' -r < "${godeps_json}")
  return 0
}

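# For reference, the jq expression in the loop above turns a hypothetical
# Godeps/Godeps.json entry such as
#   {"Deps":[{"ImportPath":"github.com/spf13/pflag","Rev":"9ff6c6923cfffbcd502984b8e0c80539a94968b7"}]}
# into lines of the form
#   github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
# which are then compared against the checked-out HEAD of each repo root.
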
# Exits script if working directory is dirty. If it's run interactively in the terminal
# the user can commit changes in a second terminal. This script will wait.
kube::util::ensure_clean_working_dir() {
  while ! git diff HEAD --exit-code &>/dev/null; do
    echo -e "\nUnexpected dirty working directory:\n"
    git status -s | sed 's/^/  /'
    if ! tty -s; then
      exit 1
    fi
    echo -e "\nCommit your changes in another terminal and then continue here by pressing enter."
    read
  done 1>&2
}

# Ensure that the given godep version is installed and in the path
kube::util::ensure_godep_version() {
  GODEP_VERSION=${1:-"v79"}
  if [[ "$(godep version 2>/dev/null)" == *"godep ${GODEP_VERSION}"* ]]; then
    return
  fi

  kube::util::ensure-temp-dir
  mkdir -p "${KUBE_TEMP}/go/src"

  GOPATH="${KUBE_TEMP}/go" go get -d -u github.com/tools/godep 2>/dev/null
  pushd "${KUBE_TEMP}/go/src/github.com/tools/godep" >/dev/null
    git checkout -q "${GODEP_VERSION}"
    GOPATH="${KUBE_TEMP}/go" go install .
  popd >/dev/null

  PATH="${KUBE_TEMP}/go/bin:${PATH}"
  hash -r # force bash to clear PATH cache
  godep version
}

# Ensure that none of the staging repos is checked out in the GOPATH because this
# easily confuses godep.
kube::util::ensure_no_staging_repos_in_gopath() {
  kube::util::ensure_single_dir_gopath
  local error=0
  for repo in $(ls ${KUBE_ROOT}/staging/src/k8s.io); do
    if [ -e "${GOPATH}/src/k8s.io/${repo}" ]; then
      echo "k8s.io/${repo} exists in GOPATH. Remove before running godep-save.sh." 1>&2
      error=1
    fi
  done
  if [ "${error}" = "1" ]; then
    exit 1
  fi
}

# Installs the specified go package at a particular commit.
kube::util::go_install_from_commit() {
  local -r pkg=$1
  local -r commit=$2

  kube::util::ensure-temp-dir
  mkdir -p "${KUBE_TEMP}/go/src"
  GOPATH="${KUBE_TEMP}/go" go get -d -u "${pkg}" 2>/dev/null
  (
    cd "${KUBE_TEMP}/go/src/${pkg}"
    git checkout -q "${commit}"
    GOPATH="${KUBE_TEMP}/go" go install "${pkg}"
  )
  PATH="${KUBE_TEMP}/go/bin:${PATH}"
  hash -r # force bash to clear PATH cache
}

# Checks that the GOPATH is simple, i.e. consists only of one directory, not multiple.
kube::util::ensure_single_dir_gopath() {
  if [[ "${GOPATH}" == *:* ]]; then
    echo "GOPATH must consist of a single directory." 1>&2
    exit 1
  fi
}

# Checks whether there are any files matching pattern $2 changed between the
# current branch and upstream branch named by $1.
# Returns 1 (false) if there are no changes, 0 (true) if there are changes
# detected.
kube::util::has_changes_against_upstream_branch() {
  local -r git_branch=$1
  local -r pattern=$2
  local -r not_pattern=${3:-totallyimpossiblepattern}
  local full_branch

  full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}"
  echo "Checking for '${pattern}' changes against '${full_branch}'"
  # make sure the branch is valid, otherwise the check will pass erroneously.
  if ! git describe "${full_branch}" >/dev/null; then
    # abort!
    exit 1
  fi
  # notice this uses ... to find the first shared ancestor
  if git diff --name-only "${full_branch}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
    return 0
  fi
  # also check for pending changes
  if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
    echo "Detected '${pattern}' uncommitted changes."
    return 0
  fi
  echo "No '${pattern}' changes detected."
  return 1
}

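# Illustrative call: rebuild generated docs only when Markdown files differ
# from the upstream master branch (the branch and pattern values are examples).
#
#   if kube::util::has_changes_against_upstream_branch "master" "\.md$"; then
#     echo "Markdown changes detected, regenerating docs..."
#   fi
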
kube::util::download_file() {
  local -r url=$1
  local -r destination_file=$2

  rm "${destination_file}" &> /dev/null || true

  for i in $(seq 5)
  do
    if ! curl -fsSL --retry 3 --keepalive-time 2 ${url} -o ${destination_file}; then
      echo "Downloading ${url} failed. $((5-i)) retries left."
      sleep 1
    else
      echo "Downloading ${url} succeeded"
      return 0
    fi
  done
  return 1
}

# Test whether openssl is installed.
# Sets:
#  OPENSSL_BIN: The path to the openssl binary to use
function kube::util::test_openssl_installed {
  openssl version >& /dev/null
  if [ "$?" != "0" ]; then
    echo "Failed to run openssl. Please ensure openssl is installed"
    exit 1
  fi
  OPENSSL_BIN=$(command -v openssl)
}

# creates a client CA, args are sudo, dest-dir, ca-id, purpose
# purpose is dropped in after "key encipherment", you usually want
# '"client auth"'
# '"server auth"'
# '"client auth","server auth"'
function kube::util::create_signing_certkey {
  local sudo=$1
  local dest_dir=$2
  local id=$3
  local purpose=$4
  # Create client ca
  ${sudo} /bin/bash -e <<EOF
rm -f "${dest_dir}/${id}-ca.crt" "${dest_dir}/${id}-ca.key"
${OPENSSL_BIN} req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout "${dest_dir}/${id}-ca.key" -out "${dest_dir}/${id}-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment",${purpose}]}}}' > "${dest_dir}/${id}-ca-config.json"
EOF
}

# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
function kube::util::create_client_certkey {
  local sudo=$1
  local dest_dir=$2
  local ca=$3
  local id=$4
  local cn=${5:-$4}
  local groups=""
  local SEP=""
  shift 5
  while [ -n "${1:-}" ]; do
    groups+="${SEP}{\"O\":\"$1\"}"
    SEP=","
    shift 1
  done
  ${sudo} /bin/bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","names":[${groups}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare client-${id}
mv "client-${id}-key.pem" "client-${id}.key"
mv "client-${id}.pem" "client-${id}.crt"
rm -f "client-${id}.csr"
EOF
}

# signs a serving certificate: args are sudo, dest-dir, ca, filename (roughly), subject, hosts...
function kube::util::create_serving_certkey {
  local sudo=$1
  local dest_dir=$2
  local ca=$3
  local id=$4
  local cn=${5:-$4}
  local hosts=""
  local SEP=""
  shift 5
  while [ -n "${1:-}" ]; do
    hosts+="${SEP}\"$1\""
    SEP=","
    shift 1
  done
  ${sudo} /bin/bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","hosts":[${hosts}],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare serving-${id}
mv "serving-${id}-key.pem" "serving-${id}.key"
mv "serving-${id}.pem" "serving-${id}.crt"
rm -f "serving-${id}.csr"
EOF
}

# creates a self-contained kubeconfig: args are sudo, dest-dir, ca file, host, port, client id, token(optional)
function kube::util::write_client_kubeconfig {
  local sudo=$1
  local dest_dir=$2
  local ca_file=$3
  local api_host=$4
  local api_port=$5
  local client_id=$6
  local token=${7:-}
  cat <<EOF | ${sudo} tee "${dest_dir}"/${client_id}.kubeconfig > /dev/null
apiVersion: v1
kind: Config
clusters:
  - cluster:
      certificate-authority: ${ca_file}
      server: https://${api_host}:${api_port}/
    name: local-up-cluster
users:
  - user:
      token: ${token}
      client-certificate: ${dest_dir}/client-${client_id}.crt
      client-key: ${dest_dir}/client-${client_id}.key
    name: local-up-cluster
contexts:
  - context:
      cluster: local-up-cluster
      user: local-up-cluster
    name: local-up-cluster
current-context: local-up-cluster
EOF

  # flatten the kubeconfig files to make them self contained
  username=$(whoami)
  ${sudo} /bin/bash -e <<EOF
$(kube::util::find-binary kubectl) --kubeconfig="${dest_dir}/${client_id}.kubeconfig" config view --minify --flatten > "/tmp/${client_id}.kubeconfig"
mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig"
chown ${username} "${dest_dir}/${client_id}.kubeconfig"
EOF
}

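# Sketch of how the certificate helpers above are typically chained together;
# every path, id and port below is illustrative only.
#
#   kube::util::test_openssl_installed
#   kube::util::ensure-cfssl
#   CERT_DIR="/var/run/kubernetes"
#   kube::util::create_signing_certkey "sudo" "${CERT_DIR}" client '"client auth"'
#   kube::util::create_client_certkey  "sudo" "${CERT_DIR}" client-ca admin system:admin system:masters
#   kube::util::write_client_kubeconfig "sudo" "${CERT_DIR}" "${CERT_DIR}/client-ca.crt" localhost 6443 admin
#
# This yields ${CERT_DIR}/admin.kubeconfig, flattened so it can be copied
# around without the referenced certificate files.
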
# Determines if docker can be run, failures may simply require that the user be added to the docker group.
function kube::util::ensure_docker_daemon_connectivity {
  DOCKER=(docker ${DOCKER_OPTS})
  if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then
    cat <<'EOF' >&2
Can't connect to 'docker' daemon. Please fix and retry.

Possible causes:
  - Docker Daemon not started
    - Linux: confirm via your init system
    - macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start <name>`
    - macOS w/ Docker for Mac: Check the menu bar and start the Docker application
  - DOCKER_HOST hasn't been set or is set incorrectly
    - Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
    - macOS w/ docker-machine: run `eval "$(docker-machine env <name>)"`
    - macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
  - Other things to check:
    - Linux: User isn't in 'docker' group.  Add and relogin.
      - Something like 'sudo usermod -a -G docker ${USER}'
      - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8
EOF
    return 1
  fi
}

# Wait for background jobs to finish. Return with
# an error status if any of the jobs failed.
kube::util::wait-for-jobs() {
  local fail=0
  local job
  for job in $(jobs -p); do
    wait "${job}" || fail=$((fail + 1))
  done
  return ${fail}
}

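# Typical use (illustrative): fan work out to background jobs, then fail if
# any of them failed. The build commands below are hypothetical.
#
#   build_target_one &
#   build_target_two &
#   kube::util::wait-for-jobs || { echo "a background job failed" >&2; exit 1; }
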
# kube::util::join <delim> <list...>
# Concatenates the list elements with the delimiter passed as first parameter
#
# Ex: kube::util::join , a b c
#  -> a,b,c
function kube::util::join {
  local IFS="$1"
  shift
  echo "$*"
}

# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH
#
# Assumed vars:
#   $1 (cfssl directory) (optional)
#
# Sets:
#  CFSSL_BIN: The path of the installed cfssl binary
#  CFSSLJSON_BIN: The path of the installed cfssljson binary
#
function kube::util::ensure-cfssl {
  if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then
    CFSSL_BIN=$(command -v cfssl)
    CFSSLJSON_BIN=$(command -v cfssljson)
    return 0
  fi

  # Create a temp dir for cfssl if no directory was given
  local cfssldir=${1:-}
  if [[ -z "${cfssldir}" ]]; then
    kube::util::ensure-temp-dir
    cfssldir="${KUBE_TEMP}/cfssl"
  fi

  mkdir -p "${cfssldir}"
  pushd "${cfssldir}" > /dev/null

  echo "Unable to successfully run 'cfssl' from $PATH; downloading instead..."
  kernel=$(uname -s)
  case "${kernel}" in
    Linux)
      curl -s -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
      curl -s -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
      ;;
    Darwin)
      curl -s -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64
      curl -s -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64
      ;;
    *)
      echo "Unknown, unsupported platform: ${kernel}." >&2
      echo "Supported platforms: Linux, Darwin." >&2
      exit 2
  esac

  chmod +x cfssl || true
  chmod +x cfssljson || true

  CFSSL_BIN="${cfssldir}/cfssl"
  CFSSLJSON_BIN="${cfssldir}/cfssljson"
  if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then
    echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH."
    echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
    exit 1
  fi
  popd > /dev/null
}

# kube::util::ensure_dockerized
# Confirms that the script is being run inside a kube-build image
#
function kube::util::ensure_dockerized {
  if [[ -f /kube-build-image ]]; then
    return 0
  else
    echo "ERROR: This script is designed to be run inside a kube-build container"
    exit 1
  fi
}

# Some useful colors.
if [[ -z "${color_start-}" ]]; then
  declare -r color_start="\033["
  declare -r color_red="${color_start}0;31m"
  declare -r color_yellow="${color_start}0;33m"
  declare -r color_green="${color_start}0;32m"
  declare -r color_norm="${color_start}0m"
fi

# ex: ts=2 sw=2 et filetype=sh

@ -0,0 +1,154 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# -----------------------------------------------------------------------------
# Version management helpers. These functions help to set, save and load the
# following variables:
#
#    KUBE_GIT_COMMIT - The git commit id corresponding to this
#          source code.
#    KUBE_GIT_TREE_STATE - "clean" indicates no changes since the git commit id
#        "dirty" indicates source code changes after the git commit id
#    KUBE_GIT_VERSION - "vX.Y" used to indicate the last release version.
#    KUBE_GIT_MAJOR - The major part of the version
#    KUBE_GIT_MINOR - The minor component of the version

# Grovels through git to set a set of env variables.
#
# If KUBE_GIT_VERSION_FILE is set, this function will load from that file
# instead of querying git.
kube::version::get_version_vars() {
  if [[ -n ${KUBE_GIT_VERSION_FILE-} ]]; then
    kube::version::load_version_vars "${KUBE_GIT_VERSION_FILE}"
    return
  fi

  local git=(git --work-tree "${KUBE_ROOT}")

  if [[ -n ${KUBE_GIT_COMMIT-} ]] || KUBE_GIT_COMMIT=$("${git[@]}" rev-parse "HEAD^{commit}" 2>/dev/null); then
    if [[ -z ${KUBE_GIT_TREE_STATE-} ]]; then
      # Check if the tree is dirty. Default to dirty.
      if git_status=$("${git[@]}" status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
        KUBE_GIT_TREE_STATE="clean"
      else
        KUBE_GIT_TREE_STATE="dirty"
      fi
    fi

    # Use git describe to find the version based on annotated tags.
    if [[ -n ${KUBE_GIT_VERSION-} ]] || KUBE_GIT_VERSION=$("${git[@]}" describe --tags --abbrev=14 "${KUBE_GIT_COMMIT}^{commit}" 2>/dev/null); then
      # This translates the "git describe" to an actual semver.org
      # compatible semantic version that looks something like this:
      #   v1.1.0-alpha.0.6+84c76d1142ea4d
      #
      # TODO: We continue calling this "git version" because so many
      # downstream consumers are expecting it there.
      DASHES_IN_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/[^-]//g")
      if [[ "${DASHES_IN_VERSION}" == "---" ]] ; then
        # We have distance to subversion (v1.1.0-subversion-1-gCommitHash)
        KUBE_GIT_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\+\2/")
      elif [[ "${DASHES_IN_VERSION}" == "--" ]] ; then
        # We have distance to base tag (v1.1.0-1-gCommitHash)
        KUBE_GIT_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/-g\([0-9a-f]\{14\}\)$/+\1/")
      fi
      if [[ "${KUBE_GIT_TREE_STATE}" == "dirty" ]]; then
        # git describe --dirty only considers changes to existing files, but
        # that is problematic since new untracked .go files affect the build,
        # so use our idea of "dirty" from git status instead.
        KUBE_GIT_VERSION+="-dirty"
      fi


      # Try to match the "git describe" output to a regex to try to extract
      # the "major" and "minor" versions and whether this is the exact tagged
      # version or whether the tree is between two tagged versions.
      if [[ "${KUBE_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?$ ]]; then
        KUBE_GIT_MAJOR=${BASH_REMATCH[1]}
        KUBE_GIT_MINOR=${BASH_REMATCH[2]}
        if [[ -n "${BASH_REMATCH[4]}" ]]; then
          KUBE_GIT_MINOR+="+"
        fi
      fi
    fi
  fi
}

# Saves the environment flags to $1
kube::version::save_version_vars() {
  local version_file=${1-}
  [[ -n ${version_file} ]] || {
    echo "!!! Internal error. No file specified in kube::version::save_version_vars"
    return 1
  }

  cat <<EOF >"${version_file}"
KUBE_GIT_COMMIT='${KUBE_GIT_COMMIT-}'
KUBE_GIT_TREE_STATE='${KUBE_GIT_TREE_STATE-}'
KUBE_GIT_VERSION='${KUBE_GIT_VERSION-}'
KUBE_GIT_MAJOR='${KUBE_GIT_MAJOR-}'
KUBE_GIT_MINOR='${KUBE_GIT_MINOR-}'
EOF
}

# Loads up the version variables from file $1
kube::version::load_version_vars() {
  local version_file=${1-}
  [[ -n ${version_file} ]] || {
    echo "!!! Internal error. No file specified in kube::version::load_version_vars"
    return 1
  }

  source "${version_file}"
}

kube::version::ldflag() {
  local key=${1}
  local val=${2}

  # If you update these, also update the list pkg/version/def.bzl.
  echo "-X ${KUBE_GO_PACKAGE}/pkg/version.${key}=${val}"
  echo "-X ${KUBE_GO_PACKAGE}/vendor/k8s.io/client-go/pkg/version.${key}=${val}"
}

# Prints the value that needs to be passed to the -ldflags parameter of go build
# in order to set the Kubernetes version based on the git tree status.
# IMPORTANT: if you update any of these, also update the lists in
# pkg/version/def.bzl and hack/print-workspace-status.sh.
kube::version::ldflags() {
  kube::version::get_version_vars

  local buildDate=
  [[ -z ${SOURCE_DATE_EPOCH-} ]] || buildDate="--date=@${SOURCE_DATE_EPOCH}"
  local -a ldflags=($(kube::version::ldflag "buildDate" "$(date ${buildDate} -u +'%Y-%m-%dT%H:%M:%SZ')"))
  if [[ -n ${KUBE_GIT_COMMIT-} ]]; then
    ldflags+=($(kube::version::ldflag "gitCommit" "${KUBE_GIT_COMMIT}"))
    ldflags+=($(kube::version::ldflag "gitTreeState" "${KUBE_GIT_TREE_STATE}"))
  fi

  if [[ -n ${KUBE_GIT_VERSION-} ]]; then
    ldflags+=($(kube::version::ldflag "gitVersion" "${KUBE_GIT_VERSION}"))
  fi

  if [[ -n ${KUBE_GIT_MAJOR-} && -n ${KUBE_GIT_MINOR-} ]]; then
    ldflags+=(
      $(kube::version::ldflag "gitMajor" "${KUBE_GIT_MAJOR}")
      $(kube::version::ldflag "gitMinor" "${KUBE_GIT_MINOR}")
    )
  fi

  # The -ldflags parameter takes a single string, so join the output.
  echo "${ldflags[*]-}"
}

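# Illustrative use of the ldflags helper above; the version values shown are
# made up and the package prefix comes from KUBE_GO_PACKAGE.
#
#   ldflags="$(kube::version::ldflags)"
#   # e.g. -X <pkg>/pkg/version.buildDate=2017-08-01T00:00:00Z -X <pkg>/pkg/version.gitCommit=abc1234...
#   #      -X <pkg>/pkg/version.gitTreeState=clean -X <pkg>/pkg/version.gitVersion=v1.7.0-alpha.1.12+abc1234...
#   go build -ldflags "${ldflags}" -o _output/bin/mybinary ./cmd/mybinary   # hypothetical target
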
@ -0,0 +1,105 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
# Lists of API Versions of each group that should be tested, groups are
# separated by comma, lists are separated by semicolon. e.g.,
# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
# TODO: It's going to be:
# KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,extensions/v1beta1"}
# FIXME: due to current implementation of a test client (see: pkg/api/testapi/testapi.go)
# ONLY the last version is tested in each group.
ALL_VERSIONS_CSV=$(IFS=',';echo "${KUBE_AVAILABLE_GROUP_VERSIONS[*]// /,}";IFS=$' ')
KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS:-${ALL_VERSIONS_CSV}}"

# Give integration tests longer to run
# TODO: allow a larger value to be passed in
#KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 240s}
KUBE_TIMEOUT="-timeout 600s"
KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY:-"-1"}
LOG_LEVEL=${LOG_LEVEL:-2}
KUBE_TEST_ARGS=${KUBE_TEST_ARGS:-}
# Default glog module settings.
KUBE_TEST_VMODULE=${KUBE_TEST_VMODULE:-"garbagecollector*=6,graph_builder*=6"}

kube::test::find_integration_test_dirs() {
  (
    cd ${KUBE_ROOT}
    find test/integration/ -name '*_test.go' -print0 \
      | xargs -0n1 dirname | sed "s|^|${KUBE_GO_PACKAGE}/|" \
      | LC_ALL=C sort -u
  )
}

CLEANUP_REQUIRED=
cleanup() {
  if [[ -z "${CLEANUP_REQUIRED}" ]]; then
    return
  fi
  kube::log::status "Cleaning up etcd"
  kube::etcd::cleanup
  CLEANUP_REQUIRED=
  kube::log::status "Integration test cleanup complete"
}

runTests() {
  kube::log::status "Starting etcd instance"
  CLEANUP_REQUIRED=1
  kube::etcd::start
  kube::log::status "Running integration test cases"

  KUBE_RACE="-race"
  make -C "${KUBE_ROOT}" run-test \
      WHAT="${WHAT:-$(kube::test::find_integration_test_dirs | paste -sd' ' -)}" \
      GOFLAGS="${GOFLAGS:-}" \
      KUBE_TEST_ARGS="${KUBE_TEST_ARGS:-} ${SHORT:--short=true} --vmodule=${KUBE_TEST_VMODULE} --alsologtostderr=true" \
      KUBE_RACE="" \
      KUBE_TIMEOUT="${KUBE_TIMEOUT}" \
      KUBE_TEST_API_VERSIONS="$1"

  cleanup
}

checkEtcdOnPath() {
  kube::log::status "Checking etcd is on PATH"
  which etcd && return
  kube::log::status "Cannot find etcd, cannot run integration tests."
  kube::log::status "Please see https://github.com/kubernetes/community/blob/master/contributors/devel/testing.md#install-etcd-dependency for instructions."
  kube::log::usage "You can use 'hack/install-etcd.sh' to install a copy in third_party/."
  return 1
}

checkEtcdOnPath

# Run cleanup to stop etcd on interrupt or other kill signal.
trap cleanup EXIT

# If a test case is specified, just run once with v1 API version and exit
if [[ -n "${KUBE_TEST_ARGS}" ]]; then
  runTests v1
  exit 0
fi

# Convert the CSV to an array of API versions to test
IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
for apiVersion in "${apiVersions[@]}"; do
  runTests "${apiVersion}"
done

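# Illustrative invocations of this script (the package path and test name are
# hypothetical):
#
#   # Run every integration test package discovered under test/integration/:
#   hack/make-rules/test-integration.sh
#
#   # Run a single package, forwarding extra flags to 'go test':
#   WHAT="<go-package>/test/integration/defaulttolerationseconds" \
#     KUBE_TEST_ARGS="-run TestAdmission -v" \
#     hack/make-rules/test-integration.sh
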
@ -0,0 +1,370 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"

kube::golang::setup_env

# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
export KUBE_CACHE_MUTATION_DETECTOR

# panic the server on watch decode errors since they are considered coder mistakes
KUBE_PANIC_WATCH_DECODE_ERROR="${KUBE_PANIC_WATCH_DECODE_ERROR:-true}"
export KUBE_PANIC_WATCH_DECODE_ERROR

# Handle case where OS has sha#sum commands, instead of shasum.
if which shasum >/dev/null 2>&1; then
  SHA1SUM="shasum -a1"
elif which sha1sum >/dev/null 2>&1; then
  SHA1SUM="sha1sum"
else
  echo "Failed to find shasum or sha1sum utility." >&2
  exit 1
fi

kube::test::find_dirs() {
  (
    cd ${KUBE_ROOT}
    find -L . -not \( \
        \( \
          -path './_artifacts/*' \
          -o -path './bazel-*/*' \
          -o -path './_output/*' \
          -o -path './_gopath/*' \
          -o -path './cmd/kubeadm/test/*' \
          -o -path './contrib/podex/*' \
          -o -path './output/*' \
          -o -path './release/*' \
          -o -path './target/*' \
          -o -path './test/e2e/*' \
          -o -path './test/e2e_node/*' \
          -o -path './test/integration/*' \
          -o -path './third_party/*' \
          -o -path './staging/*' \
          -o -path './vendor/*' \
        \) -prune \
      \) -name '*_test.go' -print0 | xargs -0n1 dirname | sed "s|^\./|${KUBE_GO_PACKAGE}/|" | LC_ALL=C sort -u

    find -L . \
        -path './_output' -prune \
        -o -path './vendor/k8s.io/client-go/*' \
        -o -path './vendor/k8s.io/apiserver/*' \
        -o -path './test/e2e_node/system/*' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed "s|^\./|${KUBE_GO_PACKAGE}/|" | LC_ALL=C sort -u

    # run tests for client-go
    find ./staging/src/k8s.io/client-go -name '*_test.go' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u

    # run tests for apiserver
    find ./staging/src/k8s.io/apiserver -name '*_test.go' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u

    # run tests for apimachinery
    find ./staging/src/k8s.io/apimachinery -name '*_test.go' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u

    find ./staging/src/k8s.io/kube-aggregator -name '*_test.go' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u

    find ./staging/src/k8s.io/apiextensions-apiserver -not \( \
        \( \
          -path '*/test/integration/*' \
        \) -prune \
      \) -name '*_test.go' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u

    find ./staging/src/k8s.io/sample-apiserver -name '*_test.go' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
  )
}

KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 120s}
KUBE_COVER=${KUBE_COVER:-n} # set to 'y' to enable coverage collection
KUBE_COVERMODE=${KUBE_COVERMODE:-atomic}
# How many 'go test' instances to run simultaneously when running tests in
# coverage mode.
KUBE_COVERPROCS=${KUBE_COVERPROCS:-4}
KUBE_RACE=${KUBE_RACE:-}   # use KUBE_RACE="-race" to enable race testing
# Set to the goveralls binary path to report coverage results to Coveralls.io.
KUBE_GOVERALLS_BIN=${KUBE_GOVERALLS_BIN:-}
# Lists of API Versions of each group that should be tested, groups are
# separated by comma, lists are separated by semicolon. e.g.,
# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
# FIXME: due to current implementation of a test client (see: pkg/api/testapi/testapi.go)
# ONLY the last version is tested in each group.
ALL_VERSIONS_CSV=$(IFS=',';echo "${KUBE_AVAILABLE_GROUP_VERSIONS[*]// /,}";IFS=$' '),federation/v1beta1
KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS:-${ALL_VERSIONS_CSV}}"
# once we have multiple group supports
# Create a junit-style XML test report in this directory if set.
KUBE_JUNIT_REPORT_DIR=${KUBE_JUNIT_REPORT_DIR:-}
# Set to 'y' to keep the verbose stdout from tests when KUBE_JUNIT_REPORT_DIR is
# set.
KUBE_KEEP_VERBOSE_TEST_OUTPUT=${KUBE_KEEP_VERBOSE_TEST_OUTPUT:-n}

kube::test::usage() {
  kube::log::usage_from_stdin <<EOF
usage: $0 [OPTIONS] [TARGETS]

OPTIONS:
  -p <number>   : number of parallel workers, must be >= 1
EOF
}

isnum() {
  [[ "$1" =~ ^[0-9]+$ ]]
}

PARALLEL="${PARALLEL:-1}"
while getopts "hp:i:" opt ; do
  case $opt in
    h)
      kube::test::usage
      exit 0
      ;;
    p)
      PARALLEL="$OPTARG"
      if ! isnum "${PARALLEL}" || [[ "${PARALLEL}" -le 0 ]]; then
        kube::log::usage "'$0': argument to -p must be numeric and greater than 0"
        kube::test::usage
        exit 1
      fi
      ;;
    i)
      kube::log::usage "'$0': use GOFLAGS='-count <num-iterations>'"
      kube::test::usage
      exit 1
      ;;
    ?)
      kube::test::usage
      exit 1
      ;;
    :)
      kube::log::usage "Option -$OPTARG <value>"
      kube::test::usage
      exit 1
      ;;
  esac
done
shift $((OPTIND - 1))
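# Example invocations (illustrative; the package paths are hypothetical):
#
#   # Run one package with 4 parallel workers and the race detector enabled:
#   KUBE_RACE="-race" hack/make-rules/test.sh -p 4 ./pkg/mycomponent
#
#   # Adjust the timeout and pass extra flags straight through to 'go test':
#   KUBE_TIMEOUT="-timeout 300s" GOFLAGS="-v" hack/make-rules/test.sh ./pkg/mycomponent
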

# Use eval to preserve embedded quoted strings.
eval "goflags=(${GOFLAGS:-})"
eval "testargs=(${KUBE_TEST_ARGS:-})"

# Used to filter verbose test output.
go_test_grep_pattern=".*"

# The go-junit-report tool needs full test case information to produce a
# meaningful report.
if [[ -n "${KUBE_JUNIT_REPORT_DIR}" ]] ; then
  goflags+=(-v)
  # Show only summary lines by matching lines like "status package/test"
  go_test_grep_pattern="^[^[:space:]]\+[[:space:]]\+[^[:space:]]\+/[^[[:space:]]\+"
fi

# Filter out arguments that start with "-" and move them to goflags.
testcases=()
for arg; do
  if [[ "${arg}" == -* ]]; then
    goflags+=("${arg}")
  else
    testcases+=("${arg}")
  fi
done
if [[ ${#testcases[@]} -eq 0 ]]; then
  testcases=($(kube::test::find_dirs))
fi
set -- "${testcases[@]+${testcases[@]}}"

junitFilenamePrefix() {
  if [[ -z "${KUBE_JUNIT_REPORT_DIR}" ]]; then
    echo ""
    return
  fi
  mkdir -p "${KUBE_JUNIT_REPORT_DIR}"
  # This filename isn't parsed by anything, and we must avoid
  # exceeding 255 character filename limit. KUBE_TEST_API
  # barely fits there and in coverage mode test names are
  # appended to generated file names, easily exceeding
  # 255 chars in length. So let's just use a sha1 hash of it.
  local KUBE_TEST_API_HASH="$(echo -n "${KUBE_TEST_API//\//-}"| ${SHA1SUM} |awk '{print $1}')"
  echo "${KUBE_JUNIT_REPORT_DIR}/junit_${KUBE_TEST_API_HASH}_$(kube::util::sortable_date)"
}

produceJUnitXMLReport() {
  local -r junit_filename_prefix=$1
  if [[ -z "${junit_filename_prefix}" ]]; then
    return
  fi

  local test_stdout_filenames
  local junit_xml_filename
  test_stdout_filenames=$(ls ${junit_filename_prefix}*.stdout)
  junit_xml_filename="${junit_filename_prefix}.xml"
  if ! command -v go-junit-report >/dev/null 2>&1; then
    kube::log::error "go-junit-report not found; please install with " \
      "go get -u github.com/jstemmer/go-junit-report"
    return
  fi
  cat ${test_stdout_filenames} | go-junit-report > "${junit_xml_filename}"
  if [[ ! ${KUBE_KEEP_VERBOSE_TEST_OUTPUT} =~ ^[yY]$ ]]; then
    rm ${test_stdout_filenames}
  fi
  kube::log::status "Saved JUnit XML test report to ${junit_xml_filename}"
}
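# Illustrative: to get a JUnit-style XML report, point KUBE_JUNIT_REPORT_DIR at
# a writable directory before running the tests (go-junit-report must be on
# PATH); the package path below is hypothetical.
#
#   KUBE_JUNIT_REPORT_DIR=/tmp/junit hack/make-rules/test.sh ./pkg/mycomponent
#   ls /tmp/junit/junit_*.xml
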

runTests() {
  local junit_filename_prefix
  junit_filename_prefix=$(junitFilenamePrefix)

  # If we're not collecting coverage, run all requested tests with one 'go test'
  # command, which is much faster.
  if [[ ! ${KUBE_COVER} =~ ^[yY]$ ]]; then
    kube::log::status "Running tests without code coverage"
    # `go test` does not install the things it builds. `go test -i` installs
    # the build artifacts but doesn't run the tests. The two together provide
    # a large speedup for tests that do not need to be rebuilt.
    go test -i "${goflags[@]:+${goflags[@]}}" \
      ${KUBE_RACE} ${KUBE_TIMEOUT} "${@}" \
      "${testargs[@]:+${testargs[@]}}"
    go test "${goflags[@]:+${goflags[@]}}" \
      ${KUBE_RACE} ${KUBE_TIMEOUT} "${@}" \
      "${testargs[@]:+${testargs[@]}}" \
      | tee ${junit_filename_prefix:+"${junit_filename_prefix}.stdout"} \
      | grep "${go_test_grep_pattern}" && rc=$? || rc=$?
    produceJUnitXMLReport "${junit_filename_prefix}"
    return ${rc}
  fi

  # Create coverage report directories.
  KUBE_TEST_API_HASH="$(echo -n "${KUBE_TEST_API//\//-}"| ${SHA1SUM} |awk '{print $1}')"
  cover_report_dir="/tmp/k8s_coverage/${KUBE_TEST_API_HASH}/$(kube::util::sortable_date)"
  cover_profile="coverage.out"  # Name for each individual coverage profile
  kube::log::status "Saving coverage output in '${cover_report_dir}'"
  mkdir -p "${@+${@/#/${cover_report_dir}/}}"

  # Run all specified tests, collecting coverage results. Go currently doesn't
  # support collecting coverage across multiple packages at once, so we must issue
  # separate 'go test' commands for each package and then combine at the end.
  # To speed things up considerably, we can at least use xargs -P to run multiple
  # 'go test' commands at once.
  # To properly parse the test results if generating a JUnit test report, we
  # must make sure the output from PARALLEL runs is not mixed. To achieve this,
  # we spawn a subshell for each PARALLEL process, redirecting the output to
  # separate files.

  # ignore paths:
  # vendor/k8s.io/kube-gen/cmd/generator: is fragile when run under coverage, so ignore it for now.
  #                            https://github.com/kubernetes/kubernetes/issues/24967
  # vendor/k8s.io/client-go/1.4/rest: causes cover internal errors
  #                            https://github.com/golang/go/issues/16540
  cover_ignore_dirs="vendor/k8s.io/kube-gen/cmd/generator|vendor/k8s.io/client-go/1.4/rest"
  for path in $(echo $cover_ignore_dirs | sed 's/|/ /g'); do
      echo -e "skipped\tk8s.io/kubernetes/$path"
  done
  #
  # `go test` does not install the things it builds. `go test -i` installs
  # the build artifacts but doesn't run the tests. The two together provide
  # a large speedup for tests that do not need to be rebuilt.
  printf "%s\n" "${@}" \
    | grep -Ev $cover_ignore_dirs \
    | xargs -I{} -n 1 -P ${KUBE_COVERPROCS} \
    bash -c "set -o pipefail; _pkg=\"\$0\"; _pkg_out=\${_pkg//\//_}; \
      go test -i ${goflags[@]:+${goflags[@]}} \
        ${KUBE_RACE} \
        ${KUBE_TIMEOUT} \
        -cover -covermode=\"${KUBE_COVERMODE}\" \
        -coverprofile=\"${cover_report_dir}/\${_pkg}/${cover_profile}\" \
        \"\${_pkg}\" \
        ${testargs[@]:+${testargs[@]}}
      go test ${goflags[@]:+${goflags[@]}} \
        ${KUBE_RACE} \
        ${KUBE_TIMEOUT} \
        -cover -covermode=\"${KUBE_COVERMODE}\" \
        -coverprofile=\"${cover_report_dir}/\${_pkg}/${cover_profile}\" \
        \"\${_pkg}\" \
        ${testargs[@]:+${testargs[@]}} \
      | tee ${junit_filename_prefix:+\"${junit_filename_prefix}-\$_pkg_out.stdout\"} \
      | grep \"${go_test_grep_pattern}\"" \
    {} \
    && test_result=$? || test_result=$?

  produceJUnitXMLReport "${junit_filename_prefix}"

  COMBINED_COVER_PROFILE="${cover_report_dir}/combined-coverage.out"
  {
    # The combined coverage profile needs to start with a line indicating which
    # coverage mode was used (set, count, or atomic). This line is included in
    # each of the coverage profiles generated when running 'go test -cover', but
    # we strip these lines out when combining so that there's only one.
    echo "mode: ${KUBE_COVERMODE}"

    # Include all coverage reach data in the combined profile, but exclude the
    # 'mode' lines, as there should be only one.
    for x in `find "${cover_report_dir}" -name "${cover_profile}"`; do
      cat $x | grep -h -v "^mode:" || true
    done
  } >"${COMBINED_COVER_PROFILE}"

  coverage_html_file="${cover_report_dir}/combined-coverage.html"
  go tool cover -html="${COMBINED_COVER_PROFILE}" -o="${coverage_html_file}"
  kube::log::status "Combined coverage report: ${coverage_html_file}"

  return ${test_result}
}

reportCoverageToCoveralls() {
  if [[ ${KUBE_COVER} =~ ^[yY]$ ]] && [[ -x "${KUBE_GOVERALLS_BIN}" ]]; then
    kube::log::status "Reporting coverage results to Coveralls for service ${CI_NAME:-}"
    ${KUBE_GOVERALLS_BIN} -coverprofile="${COMBINED_COVER_PROFILE}" \
      ${CI_NAME:+"-service=${CI_NAME}"} \
      ${COVERALLS_REPO_TOKEN:+"-repotoken=${COVERALLS_REPO_TOKEN}"} \
      || true
  fi
}

checkFDs() {
  # several unittests panic when httptest cannot open more sockets
  # due to the low default files limit on OS X. Warn about low limit.
  local fileslimit="$(ulimit -n)"
  if [[ $fileslimit -lt 1000 ]]; then
    echo "WARNING: ulimit -n (files) should be at least 1000, is $fileslimit, may cause test failure";
  fi
}

checkFDs


# Convert the CSVs to arrays.
IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
apiVersionsCount=${#apiVersions[@]}
for (( i=0; i<${apiVersionsCount}; i++ )); do
  apiVersion=${apiVersions[i]}
  echo "Running tests for APIVersion: $apiVersion"
  # KUBE_TEST_API sets the version of each group to be tested.
  KUBE_TEST_API="${apiVersion}" runTests "$@"
done

# We might run the tests for multiple versions, but we want to report only
# one of them to coveralls. Here we report coverage from the last run.
reportCoverageToCoveralls

@ -0,0 +1,46 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_test",
)

go_test(
    name = "go_default_test",
    size = "large",
    srcs = [
        "defaulttolerationseconds_test.go",
        "main_test.go",
    ],
    tags = [
        "automanaged",
        "etcd",
        "integration",
    ],
    deps = [
        "//pkg/api/helper:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//plugin/pkg/admission/defaulttolerationseconds:go_default_library",
        "//plugin/pkg/scheduler/algorithm:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

@ -0,0 +1,27 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequotaallocator

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
)

func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}

@ -0,0 +1,25 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourcequotaallocator

import (
	"testing"
)

func TestFun(t *testing.T) {
	// Placeholder case; concrete resource quota allocator scenarios are still to be added.
}