Vendor the test-infra scripts (#353)

* shared scripts from test-infra live in //vendor/github.com/knative/test-infra/scripts;
* created `//hack/update-deps.sh` to properly update deps, including test-infra;
* all bash scripts were updated to use the vendored scripts;

Part of knative/test-infra#30.
Adriano Cunha 2018-08-23 21:27:59 -07:00 committed by Knative Prow Robot
parent 26a432a20f
commit 9f5fed536d
10 changed files with 909 additions and 10 deletions

Gopkg.lock (generated, 8 lines changed)

@@ -70,6 +70,14 @@
packages = ["pkg/event"]
revision = "2b0383b8e4d67ffac446b17a7922bf7e5d9f5362"
[[projects]]
branch = "master"
digest = "1:d6415e6b744ec877c21fe734067636b9ee149af77276b08a3d33dd8698abf947"
name = "github.com/knative/test-infra"
packages = ["."]
pruneopts = "T"
revision = "4a4a682ee1fd31f33e450406393c3553b9ec5c2a"
[[projects]]
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]

Gopkg.toml

@@ -1,6 +1,10 @@
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
required = [
"github.com/knative/test-infra",
]
ignored = [
"github.com/knative/docs/serving/samples/grpc-ping-go*",
]
@@ -9,3 +13,8 @@ ignored = [
go-tests = true
unused-packages = true
non-go = true
[[prune.project]]
name = "github.com/knative/test-infra"
unused-packages = false
non-go = false

hack/update-deps.sh (new file, 29 lines)

@@ -0,0 +1,29 @@
#!/bin/bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/library.sh
set -o errexit
set -o nounset
set -o pipefail
cd ${REPO_ROOT_DIR}
# Ensure we have everything we need under vendor/
dep ensure
# Keep only the dir in knative/test-infra that we're interested in
find vendor/github.com/knative/test-infra -mindepth 1 -maxdepth 1 ! -name scripts -exec rm -fr {} \;


@@ -23,11 +23,7 @@
# Calling this script without arguments will create a new cluster in
# project $PROJECT_ID, run the tests and delete the cluster.
# Load github.com/knative/test-infra/images/prow-tests/scripts/e2e-tests.sh
[ -f /workspace/e2e-tests.sh ] \
&& source /workspace/e2e-tests.sh \
|| eval "$(docker run --entrypoint sh gcr.io/knative-tests/test-infra/prow-tests -c 'cat e2e-tests.sh')"
[ -v KNATIVE_TEST_INFRA ] || exit 1
source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
# Script entry point.


@@ -18,11 +18,7 @@
# It is started by prow for each PR.
# For convenience, it can also be executed manually.
# Load github.com/knative/test-infra/images/prow-tests/scripts/presubmit-tests.sh
[ -f /workspace/presubmit-tests.sh ] \
&& source /workspace/presubmit-tests.sh \
|| eval "$(docker run --entrypoint sh gcr.io/knative-tests/test-infra/prow-tests -c 'cat presubmit-tests.sh')"
[ -v KNATIVE_TEST_INFRA ] || exit 1
source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
function build_tests() {
header "TODO(#67): Write build tests"

vendor/github.com/knative/test-infra/scripts/README.md (new file, 3 lines)

@@ -0,0 +1,3 @@
# Helper scripts
This directory contains helper scripts used by Prow test jobs, as well as by local development scripts.
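
As a minimal sketch (not part of this commit; the `$(dirname $0)/..` path and the `knative-serving` namespace are illustrative assumptions), a script in a consuming repository sources the helper it needs and then calls its functions:

```bash
# Hypothetical script in a consuming repository.
source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/library.sh

header "Waiting for the system to come up"
wait_until_pods_running knative-serving || exit 1
```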

vendor/github.com/knative/test-infra/scripts/e2e-tests.sh (generated, vendored, executable, 313 lines)

@@ -0,0 +1,313 @@
#!/bin/bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script for Knative E2E test scripts. To use it:
# 1. Source this script.
# 2. [optional] Write the teardown() function, which will tear down your test
# resources.
# 3. [optional] Write the dump_extra_cluster_state() function. It will be called
# when a test fails, and can dump extra information about the current state of
# the cluster (typically using kubectl).
# 4. Call the initialize() function passing $@ (without quotes).
# 5. Write logic for the end-to-end tests. Run all go tests using report_go_test(),
# then call fail_test() if any of them failed, or success() if they all passed. The
# environment variables DOCKER_REPO_OVERRIDE, K8S_CLUSTER_OVERRIDE and K8S_USER_OVERRIDE
# will be set to point at the test cluster. You can also use the following
# boolean (0 is false, 1 is true) environment variables for the logic:
# EMIT_METRICS: true if --emit-metrics is passed.
# USING_EXISTING_CLUSTER: true if the test cluster is an already existing one,
# and not a temporary cluster created by kubetest.
# All environment variables above are marked read-only.
# Notes:
# 1. Calling your script without arguments will create a new cluster in the GCP
# project $PROJECT_ID and run the tests against it.
# 2. Calling your script with --run-tests and the variables K8S_CLUSTER_OVERRIDE,
# K8S_USER_OVERRIDE and DOCKER_REPO_OVERRIDE set will immediately start the
# tests against the cluster.
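#
# As an illustrative sketch (not part of this commit; the teardown body and the
# ./test/e2e/... package path are assumptions), a caller script might look like:
#
#   source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
#
#   function teardown() {
#     echo "Cleaning up test resources"   # hypothetical cleanup commands go here
#   }
#
#   initialize $@
#
#   header "Running e2e tests"
#   report_go_test -tags=e2e ./test/e2e/... || fail_test "e2e tests failed"
#   success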
source $(dirname ${BASH_SOURCE})/library.sh
# Build a resource name based on $E2E_BASE_NAME, a suffix and $BUILD_NUMBER.
# Restricts the name length to 40 chars (the limit for resource names in GCP).
# Name will have the form $E2E_BASE_NAME-<SUFFIX>$BUILD_NUMBER.
# Parameters: $1 - name suffix
function build_resource_name() {
local prefix=${E2E_BASE_NAME}-$1
local suffix=${BUILD_NUMBER}
# Restrict suffix length to 20 chars
if [[ -n "${suffix}" ]]; then
suffix=${suffix:${#suffix}<20?0:-20}
fi
echo "${prefix:0:20}${suffix}"
}
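# For example (values are hypothetical): with E2E_BASE_NAME=kserving and
# BUILD_NUMBER=1234, `build_resource_name e2e-cls` yields "kserving-e2e-cls1234".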
# Test cluster parameters
readonly E2E_BASE_NAME=k$(basename ${REPO_ROOT_DIR})
readonly E2E_CLUSTER_NAME=$(build_resource_name e2e-cls)
readonly E2E_NETWORK_NAME=$(build_resource_name e2e-net)
readonly E2E_CLUSTER_REGION=us-central1
readonly E2E_CLUSTER_ZONE=${E2E_CLUSTER_REGION}-a
readonly E2E_CLUSTER_NODES=3
readonly E2E_CLUSTER_MACHINE=n1-standard-4
readonly TEST_RESULT_FILE=/tmp/${E2E_BASE_NAME}-e2e-result
# Tear down the test resources.
function teardown_test_resources() {
header "Tearing down test environment"
# Free resources in GCP project.
if (( ! USING_EXISTING_CLUSTER )) && [[ "$(type -t teardown)" == "function" ]]; then
teardown
fi
# Delete Knative Serving images when using prow.
if (( IS_PROW )); then
echo "Images in ${DOCKER_REPO_OVERRIDE}:"
gcloud container images list --repository=${DOCKER_REPO_OVERRIDE}
delete_gcr_images ${DOCKER_REPO_OVERRIDE}
else
# Delete the kubernetes source downloaded by kubetest
rm -fr kubernetes kubernetes.tar.gz
fi
}
# Exit test, dumping current state info.
# Parameters: $1 - error message (optional).
function fail_test() {
[[ -n $1 ]] && echo "ERROR: $1"
dump_cluster_state
exit 1
}
# Download the k8s binaries required by kubetest.
function download_k8s() {
local version=${SERVING_GKE_VERSION}
if [[ "${version}" == "latest" ]]; then
# Fetch latest valid version
local versions="$(gcloud container get-server-config \
--project=${GCP_PROJECT} \
--format='value(validMasterVersions)' \
--region=${E2E_CLUSTER_REGION})"
local gke_versions=(`echo -n ${versions//;/ /}`)
# Get first (latest) version, excluding the "-gke.#" suffix
version="${gke_versions[0]%-*}"
echo "Latest GKE is ${version}, from [${versions//;/, /}]"
elif [[ "${version}" == "default" ]]; then
echo "ERROR: `default` GKE version is not supported yet"
return 1
fi
# Download k8s to staging dir
version=v${version}
local staging_dir=${GOPATH}/src/k8s.io/kubernetes/_output/gcs-stage
rm -fr ${staging_dir}
staging_dir=${staging_dir}/${version}
mkdir -p ${staging_dir}
pushd ${staging_dir}
export KUBERNETES_PROVIDER=gke
export KUBERNETES_RELEASE=${version}
curl -fsSL https://get.k8s.io | bash
local result=$?
if [[ ${result} -eq 0 ]]; then
mv kubernetes/server/kubernetes-server-*.tar.gz .
mv kubernetes/client/kubernetes-client-*.tar.gz .
rm -fr kubernetes
# Create an empty kubernetes test tarball; we don't use it but kubetest will fetch it
tar -czf kubernetes-test.tar.gz -T /dev/null
fi
popd
return ${result}
}
# Dump info about the test cluster. If dump_extra_cluster_state() is defined, calls it too.
# This is intended to be called when a test fails to provide debugging information.
function dump_cluster_state() {
echo "***************************************"
echo "*** TEST FAILED ***"
echo "*** Start of information dump ***"
echo "***************************************"
echo ">>> All resources:"
kubectl get all --all-namespaces
echo ">>> Services:"
kubectl get services --all-namespaces
echo ">>> Events:"
kubectl get events --all-namespaces
[[ "$(type -t dump_extra_cluster_state)" == "function" ]] && dump_extra_cluster_state
echo "***************************************"
echo "*** TEST FAILED ***"
echo "*** End of information dump ***"
echo "***************************************"
}
# Create a test cluster with kubetest and call the current script again.
function create_test_cluster() {
header "Creating test cluster"
# Smallest cluster required to run the end-to-end-tests
local CLUSTER_CREATION_ARGS=(
--gke-create-args="--enable-autoscaling --min-nodes=1 --max-nodes=${E2E_CLUSTER_NODES} --scopes=cloud-platform"
--gke-shape={\"default\":{\"Nodes\":${E2E_CLUSTER_NODES}\,\"MachineType\":\"${E2E_CLUSTER_MACHINE}\"}}
--provider=gke
--deployment=gke
--cluster="${E2E_CLUSTER_NAME}"
--gcp-zone="${E2E_CLUSTER_ZONE}"
--gcp-network="${E2E_NETWORK_NAME}"
--gke-environment=prod
)
if (( ! IS_PROW )); then
CLUSTER_CREATION_ARGS+=(--gcp-project=${PROJECT_ID:?"PROJECT_ID must be set to the GCP project where the tests are run."})
else
CLUSTER_CREATION_ARGS+=(--gcp-service-account=/etc/service-account/service-account.json)
fi
# SSH keys are not used, but kubetest checks for their existence.
# Touch them so that, if they don't exist, empty files are created to satisfy the check.
touch $HOME/.ssh/google_compute_engine.pub
touch $HOME/.ssh/google_compute_engine
# Clear user and cluster variables, so they'll be set to the test cluster.
# DOCKER_REPO_OVERRIDE is not touched because when running locally it must
# be a writeable docker repo.
export K8S_USER_OVERRIDE=
export K8S_CLUSTER_OVERRIDE=
# Get the current GCP project
export GCP_PROJECT=${PROJECT_ID}
[[ -z ${GCP_PROJECT} ]] && export GCP_PROJECT=$(gcloud config get-value project)
# Assume test failed (see more details at the end of this script).
echo -n "1"> ${TEST_RESULT_FILE}
local test_cmd_args="--run-tests"
(( EMIT_METRICS )) && test_cmd_args+=" --emit-metrics"
echo "Test script is ${E2E_SCRIPT}"
download_k8s || return 1
kubetest "${CLUSTER_CREATION_ARGS[@]}" \
--up \
--down \
--extract local \
--gcp-node-image ${SERVING_GKE_IMAGE} \
--test-cmd "${E2E_SCRIPT}" \
--test-cmd-args "${test_cmd_args}"
echo "Test subprocess exited with code $?"
# Delete target pools and health checks that might have leaked.
# See https://github.com/knative/serving/issues/959 for details.
# TODO(adrcunha): Remove once the leak issue is resolved.
local http_health_checks="$(gcloud compute target-pools list \
--project=${GCP_PROJECT} --format='value(healthChecks)' --filter="instances~-${E2E_CLUSTER_NAME}-" | \
grep httpHealthChecks | tr '\n' ' ')"
local target_pools="$(gcloud compute target-pools list \
--project=${GCP_PROJECT} --format='value(name)' --filter="instances~-${E2E_CLUSTER_NAME}-" | \
tr '\n' ' ')"
if [[ -n "${target_pools}" ]]; then
echo "Found leaked target pools, deleting"
gcloud compute forwarding-rules delete -q --project=${GCP_PROJECT} --region=${E2E_CLUSTER_REGION} ${target_pools}
gcloud compute target-pools delete -q --project=${GCP_PROJECT} --region=${E2E_CLUSTER_REGION} ${target_pools}
fi
if [[ -n "${http_health_checks}" ]]; then
echo "Found leaked health checks, deleting"
gcloud compute http-health-checks delete -q --project=${GCP_PROJECT} ${http_health_checks}
fi
local result="$(cat ${TEST_RESULT_FILE})"
echo "Test result code is $result"
exit ${result}
}
# Setup the test cluster for running the tests.
function setup_test_cluster() {
# Fail fast during setup.
set -o errexit
set -o pipefail
# Set the required variables if necessary.
if [[ -z ${K8S_USER_OVERRIDE} ]]; then
export K8S_USER_OVERRIDE=$(gcloud config get-value core/account)
fi
if [[ -z ${K8S_CLUSTER_OVERRIDE} ]]; then
USING_EXISTING_CLUSTER=0
export K8S_CLUSTER_OVERRIDE=$(kubectl config current-context)
acquire_cluster_admin_role ${K8S_USER_OVERRIDE} ${E2E_CLUSTER_NAME} ${E2E_CLUSTER_ZONE}
# Make sure we're in the default namespace. Currently kubetest switches to
# test-pods namespace when creating the cluster.
kubectl config set-context $K8S_CLUSTER_OVERRIDE --namespace=default
fi
readonly USING_EXISTING_CLUSTER
if [[ -z ${DOCKER_REPO_OVERRIDE} ]]; then
export DOCKER_REPO_OVERRIDE=gcr.io/$(gcloud config get-value project)/${E2E_BASE_NAME}-e2e-img
fi
echo "- Cluster is ${K8S_CLUSTER_OVERRIDE}"
echo "- User is ${K8S_USER_OVERRIDE}"
echo "- Docker is ${DOCKER_REPO_OVERRIDE}"
trap teardown_test_resources EXIT
if (( USING_EXISTING_CLUSTER )) && [[ "$(type -t teardown)" == "function" ]]; then
echo "Deleting any previous SUT instance"
teardown
fi
readonly K8S_CLUSTER_OVERRIDE
readonly K8S_USER_OVERRIDE
readonly DOCKER_REPO_OVERRIDE
# Handle failures ourselves, so we can dump useful info.
set +o errexit
set +o pipefail
}
function success() {
# kubetest teardown might fail and thus incorrectly report failure of the
# script, even if the tests pass.
# We store the real test result to return it later, ignoring any teardown
# failure in kubetest.
# TODO(adrcunha): Get rid of this workaround.
echo -n "0"> ${TEST_RESULT_FILE}
echo "**************************************"
echo "*** ALL TESTS PASSED ***"
echo "**************************************"
exit 0
}
RUN_TESTS=0
EMIT_METRICS=0
USING_EXISTING_CLUSTER=1
E2E_SCRIPT=""
# Parse flags and initialize the test cluster.
function initialize() {
# Normalize calling script path; we can't use readlink because it's not available everywhere
E2E_SCRIPT=$0
[[ ${E2E_SCRIPT} =~ ^[\./].* ]] || E2E_SCRIPT="./$0"
E2E_SCRIPT="$(cd ${E2E_SCRIPT%/*} && echo $PWD/${E2E_SCRIPT##*/})"
readonly E2E_SCRIPT
cd ${REPO_ROOT_DIR}
for parameter in $@; do
case $parameter in
--run-tests) RUN_TESTS=1 ;;
--emit-metrics) EMIT_METRICS=1 ;;
*)
echo "error: unknown option ${parameter}"
echo "usage: $0 [--run-tests][--emit-metrics]"
exit 1
;;
esac
shift
done
readonly RUN_TESTS
readonly EMIT_METRICS
if (( ! RUN_TESTS )); then
create_test_cluster
else
setup_test_cluster
fi
}

vendor/github.com/knative/test-infra/scripts/library.sh (generated, vendored, executable, 309 lines)

@@ -0,0 +1,309 @@
#!/bin/bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a collection of useful bash functions and constants, intended
# to be used in test scripts and the like. It doesn't do anything when
# called from the command line.
# Default GKE version to be used with Knative Serving
readonly SERVING_GKE_VERSION=latest
readonly SERVING_GKE_IMAGE=cos
# Public images and yaml files.
readonly KNATIVE_ISTIO_YAML=https://storage.googleapis.com/knative-releases/serving/latest/istio.yaml
readonly KNATIVE_SERVING_RELEASE=https://storage.googleapis.com/knative-releases/serving/latest/release.yaml
readonly KNATIVE_BUILD_RELEASE=https://storage.googleapis.com/knative-releases/build/latest/release.yaml
readonly KNATIVE_EVENTING_RELEASE=https://storage.googleapis.com/knative-releases/eventing/latest/release.yaml
# Useful environment variables
[[ -n "${PROW_JOB_ID}" ]] && IS_PROW=1 || IS_PROW=0
readonly IS_PROW
readonly REPO_ROOT_DIR="$(git rev-parse --show-toplevel)"
# Display a box banner.
# Parameters: $1 - character to use for the box.
# $2 - banner message.
function make_banner() {
local msg="$1$1$1$1 $2 $1$1$1$1"
local border="${msg//[-0-9A-Za-z _.,]/$1}"
echo -e "${border}\n${msg}\n${border}"
}
# Simple header for logging purposes.
function header() {
local upper="$(echo $1 | tr a-z A-Z)"
make_banner "=" "${upper}"
}
# Simple subheader for logging purposes.
function subheader() {
make_banner "-" "$1"
}
# Simple warning banner for logging purposes.
function warning() {
make_banner "!" "$1"
}
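# For example, `header "Running tests"` prints roughly:
#   =======================
#   ==== RUNNING TESTS ====
#   =======================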
# Remove ALL images in the given GCR repository.
# Parameters: $1 - GCR repository.
function delete_gcr_images() {
for image in $(gcloud --format='value(name)' container images list --repository=$1); do
echo "Checking ${image} for removal"
delete_gcr_images ${image}
for digest in $(gcloud --format='get(digest)' container images list-tags ${image} --limit=99999); do
local full_image="${image}@${digest}"
echo "Removing ${full_image}"
gcloud container images delete -q --force-delete-tags ${full_image}
done
done
}
# Waits until the given object doesn't exist.
# Parameters: $1 - the kind of the object.
# $2 - object's name.
# $3 - namespace (optional).
function wait_until_object_does_not_exist() {
local KUBECTL_ARGS="get $1 $2"
local DESCRIPTION="$1 $2"
if [[ -n $3 ]]; then
KUBECTL_ARGS="get -n $3 $1 $2"
DESCRIPTION="$1 $3/$2"
fi
echo -n "Waiting until ${DESCRIPTION} does not exist"
for i in {1..150}; do # timeout after 5 minutes
kubectl ${KUBECTL_ARGS} 2>&1 > /dev/null || return 0
echo -n "."
sleep 2
done
echo -e "\n\nERROR: timeout waiting for ${DESCRIPTION} not to exist"
kubectl ${KUBECTL_ARGS}
return 1
}
# Waits until all pods are running in the given namespace.
# Parameters: $1 - namespace.
function wait_until_pods_running() {
echo -n "Waiting until all pods in namespace $1 are up"
for i in {1..150}; do # timeout after 5 minutes
local pods="$(kubectl get pods --no-headers -n $1 2>/dev/null)"
# All pods must be running
local not_running=$(echo "${pods}" | grep -v Running | grep -v Completed | wc -l)
if [[ -n "${pods}" && ${not_running} -eq 0 ]]; then
local all_ready=1
while read pod ; do
local status=(`echo -n ${pod} | cut -f2 -d' ' | tr '/' ' '`)
# All containers must be ready
[[ -z ${status[0]} ]] && all_ready=0 && break
[[ -z ${status[1]} ]] && all_ready=0 && break
[[ ${status[0]} -lt 1 ]] && all_ready=0 && break
[[ ${status[1]} -lt 1 ]] && all_ready=0 && break
[[ ${status[0]} -ne ${status[1]} ]] && all_ready=0 && break
done <<< $(echo "${pods}" | grep -v Completed)
if (( all_ready )); then
echo -e "\nAll pods are up:\n${pods}"
return 0
fi
fi
echo -n "."
sleep 2
done
echo -e "\n\nERROR: timeout waiting for pods to come up\n${pods}"
kubectl get pods -n $1
return 1
}
# Waits until the given service has an external IP address.
# Parameters: $1 - namespace.
# $2 - service name.
function wait_until_service_has_external_ip() {
echo -n "Waiting until service $2 in namespace $1 has an external IP"
for i in {1..150}; do # timeout after 15 minutes
local ip=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
if [[ -n "${ip}" ]]; then
echo -e "\nService $2.$1 has IP $ip"
return 0
fi
echo -n "."
sleep 6
done
echo -e "\n\nERROR: timeout waiting for service $svc.$ns to have an external IP"
kubectl get pods -n $1
return 1
}
# Returns the name of the pod of the given app.
# Parameters: $1 - app name.
# $2 - namespace (optional).
function get_app_pod() {
local namespace=""
[[ -n $2 ]] && namespace="-n $2"
kubectl get pods ${namespace} --selector=app=$1 --output=jsonpath="{.items[0].metadata.name}"
}
# Sets the given user as cluster admin.
# Parameters: $1 - user
# $2 - cluster name
# $3 - cluster zone
function acquire_cluster_admin_role() {
# Get the password of the admin and use it, as the service account (or the user)
# might not have the necessary permission.
local password=$(gcloud --format="value(masterAuth.password)" \
container clusters describe $2 --zone=$3)
kubectl config set-credentials cluster-admin \
--username=admin --password=${password}
kubectl config set-context $(kubectl config current-context) \
--user=cluster-admin
kubectl create clusterrolebinding cluster-admin-binding \
--clusterrole=cluster-admin \
--user=$1
# Reset back to the default account
gcloud container clusters get-credentials \
$2 --zone=$3 --project $(gcloud config get-value project)
}
# Runs go tests and generates a junit summary through bazel.
# Parameters: $1... - parameters to go test
function report_go_test() {
# Just run regular go tests if not on Prow.
if (( ! IS_PROW )); then
go test $@
return
fi
local report=$(mktemp)
local summary=$(mktemp)
local failed=0
# Run tests in verbose mode to capture details.
# go doesn't like repeating -v, so remove if passed.
local args=("${@/-v}")
go test -race -v ${args[@]} > ${report} || failed=$?
# Tests didn't run.
[[ ! -s ${report} ]] && return 1
# Create WORKSPACE file, required to use bazel
touch WORKSPACE
local targets=""
# Parse the report and generate fake tests for each passing/failing test.
while read line ; do
local fields=(`echo -n ${line}`)
local field0="${fields[0]}"
local field1="${fields[1]}"
local name=${fields[2]}
# Ignore subtests (those containing slashes)
if [[ -n "${name##*/*}" ]]; then
if [[ ${field1} == PASS: || ${field1} == FAIL: ]]; then
# Populate BUILD.bazel
local src="${name}.sh"
echo "exit 0" > ${src}
if [[ ${field1} == "FAIL:" ]]; then
read error
echo "cat <<ERROR-EOF" > ${src}
echo "${error}" >> ${src}
echo "ERROR-EOF" >> ${src}
echo "exit 1" >> ${src}
fi
chmod +x ${src}
echo "sh_test(name=\"${name}\", srcs=[\"${src}\"])" >> BUILD.bazel
elif [[ ${field0} == FAIL || ${field0} == ok ]]; then
# Update the summary with the result for the package
echo "${line}" >> ${summary}
# Create the package structure, move tests and BUILD file
local package=${field1/github.com\//}
mkdir -p ${package}
targets="${targets} //${package}/..."
mv *.sh BUILD.bazel ${package}
fi
fi
done < ${report}
# If any test failed, show the detailed report.
# Otherwise, just show the summary.
# Exception: when emitting metrics, dump the full report.
if (( failed )) || [[ "$@" == *" -emitmetrics"* ]]; then
cat ${report}
else
cat ${summary}
fi
# Always generate the junit summary.
bazel test ${targets} > /dev/null 2>&1
return ${failed}
}
# Install the latest stable Knative/serving in the current cluster.
function start_latest_knative_serving() {
header "Starting Knative Serving"
subheader "Installing Istio"
kubectl apply -f ${KNATIVE_ISTIO_YAML} || return 1
wait_until_pods_running istio-system || return 1
kubectl label namespace default istio-injection=enabled || return 1
subheader "Installing Knative Serving"
kubectl apply -f ${KNATIVE_SERVING_RELEASE} || return 1
wait_until_pods_running knative-serving || return 1
wait_until_pods_running knative-build || return 1
}
# Install the latest stable Knative/build in the current cluster.
function start_latest_knative_build() {
header "Starting Knative Build"
subheader "Installing Istio"
kubectl apply -f ${KNATIVE_ISTIO_YAML} || return 1
wait_until_pods_running istio-system || return 1
subheader "Installing Knative Build"
kubectl apply -f ${KNATIVE_BUILD_RELEASE} || return 1
wait_until_pods_running knative-build || return 1
}
# Run dep-collector, installing it first if necessary.
# Parameters: $1..$n - parameters passed to dep-collector.
function run_dep_collector() {
local local_dep_collector="$(which dep-collector)"
if [[ -z ${local_dep_collector} ]]; then
go get -u github.com/mattmoor/dep-collector
fi
dep-collector $@
}
# Run dep-collector to update licenses.
# Parameters: $1 - output file, relative to repo root dir.
# $2...$n - directories and files to inspect.
function update_licenses() {
cd ${REPO_ROOT_DIR} || return 1
local dst=$1
shift
run_dep_collector $@ > ./${dst}
}
# Run dep-collector to check for forbidden licenses.
# Parameters: $1...$n - directories and files to inspect.
function check_licenses() {
# Fetch the google/licenseclassifier for its license db
go get -u github.com/google/licenseclassifier
# Check that we don't have any forbidden licenses in our images.
run_dep_collector -check $@
}
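# Example usage (the output path and package patterns are illustrative only):
#   update_licenses third_party/VENDOR-LICENSES ./cmd/...
#   check_licenses ./cmd/...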
# Check links in all .md files in the repo.
function check_links_in_markdown() {
local checker="markdown-link-check"
if ! hash ${checker} 2>/dev/null; then
warning "${checker} not installed, not checking links in .md files"
return 0
fi
local failed=0
for md_file in $(find ${REPO_ROOT_DIR} -name \*.md); do
${checker} -q ${md_file} || failed=1
done
return ${failed}
}

vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh (generated, vendored, executable, 118 lines)

@@ -0,0 +1,118 @@
#!/bin/bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script to run the presubmit tests. To use it:
# 1. Source this script.
# 2. Define the functions build_tests(), unit_tests() and
# integration_tests(). They should run all tests (i.e., not fail
# fast), and return 0 if all passed, 1 if a failure occurred.
# The environment variables RUN_BUILD_TESTS, RUN_UNIT_TESTS and
# RUN_INTEGRATION_TESTS are set to 0 (false) or 1 (true) accordingly.
# If --emit-metrics is passed, EMIT_METRICS will be set to 1.
# 3. Call the main() function passing $@ (without quotes).
#
# Running the script without parameters, or with the --all-tests
# flag, causes all tests to be executed, in the right order.
# Use the flags --build-tests, --unit-tests and --integration-tests
# to run a specific set of tests. The flag --emit-metrics is used
# to emit metrics when running the tests.
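#
# As an illustrative sketch (not part of this commit; the test commands are
# assumptions), a repository's presubmit script might look like:
#
#   source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
#
#   function build_tests() {
#     header "Running build tests"
#     go build ./...
#   }
#
#   function unit_tests() {
#     header "Running unit tests"
#     report_go_test ./...
#   }
#
#   function integration_tests() {
#     ./test/e2e-tests.sh
#   }
#
#   main $@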
source $(dirname ${BASH_SOURCE})/library.sh
# Extensions or file patterns that don't require presubmit tests.
readonly NO_PRESUBMIT_FILES=(\.md \.png ^OWNERS)
# Options set by command-line flags.
RUN_BUILD_TESTS=0
RUN_UNIT_TESTS=0
RUN_INTEGRATION_TESTS=0
EMIT_METRICS=0
# Exit presubmit tests if only documentation files were changed.
function exit_if_presubmit_not_required() {
if [[ -n "${PULL_PULL_SHA}" ]]; then
# On a presubmit job
local changes="$(git diff --name-only ${PULL_PULL_SHA} ${PULL_BASE_SHA})"
local no_presubmit_pattern="${NO_PRESUBMIT_FILES[*]}"
local no_presubmit_pattern="\(${no_presubmit_pattern// /\\|}\)$"
echo -e "Changed files in commit ${PULL_PULL_SHA}:\n${changes}"
if [[ -z "$(echo "${changes}" | grep -v ${no_presubmit_pattern})" ]]; then
# Nothing changed other than files that don't require presubmit tests
header "Commit only contains changes that don't affect tests, skipping"
exit 0
fi
fi
}
# Process flags and run tests accordingly.
function main() {
exit_if_presubmit_not_required
local all_parameters=$@
[[ -z $1 ]] && all_parameters="--all-tests"
for parameter in ${all_parameters}; do
case ${parameter} in
--all-tests)
RUN_BUILD_TESTS=1
RUN_UNIT_TESTS=1
RUN_INTEGRATION_TESTS=1
shift
;;
--build-tests)
RUN_BUILD_TESTS=1
shift
;;
--unit-tests)
RUN_UNIT_TESTS=1
shift
;;
--integration-tests)
RUN_INTEGRATION_TESTS=1
shift
;;
--emit-metrics)
EMIT_METRICS=1
shift
;;
*)
echo "error: unknown option ${parameter}"
exit 1
;;
esac
done
readonly RUN_BUILD_TESTS
readonly RUN_UNIT_TESTS
readonly RUN_INTEGRATION_TESTS
readonly EMIT_METRICS
cd ${REPO_ROOT_DIR}
# Tests to be performed, in the right order if --all-tests is passed.
local result=0
if (( RUN_BUILD_TESTS )); then
build_tests || result=1
fi
if (( RUN_UNIT_TESTS )); then
unit_tests || result=1
fi
if (( RUN_INTEGRATION_TESTS )); then
integration_tests || result=1
fi
exit ${result}
}

vendor/github.com/knative/test-infra/scripts/release.sh (generated, vendored, executable, 118 lines)

@@ -0,0 +1,118 @@
#!/bin/bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script for Knative release scripts. To use it:
# 1. Source this script.
# 2. Call the parse_flags() function passing $@ (without quotes).
# 3. Call the run_validation_tests() passing the script or executable that
# runs the release validation tests.
# 4. Write logic for the release process. Use the following boolean (0 is
# false, 1 is true) environment variables for the logic:
# SKIP_TESTS: true if --skip-tests is passed. This is handled automatically
# by the run_validation_tests() function.
# TAG_RELEASE: true if --tag-release is passed. In this case, the
# environment variable TAG will contain the release tag in the
# form vYYYYMMDD-<commit_short_hash>.
# PUBLISH_RELEASE: true if --publish is passed. In this case, the -L (local)
# flag is removed from the KO_FLAGS environment variable so images are published.
# SKIP_TESTS, TAG_RELEASE and PUBLISH_RELEASE default to false for safety.
# All environment variables above, except KO_FLAGS, are marked read-only once
# parse_flags() is called.
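#
# As an illustrative sketch (not part of this commit; the config/ layout, the
# release.yaml name and the destination bucket are assumptions), a caller might
# look like:
#
#   source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/release.sh
#
#   parse_flags $@
#   run_validation_tests ./test/presubmit-tests.sh
#
#   # Build the release yaml with ko, honoring KO_FLAGS and TAG.
#   ko resolve ${KO_FLAGS} -f config/ > release.yaml
#   tag_images_in_yaml release.yaml ${KO_DOCKER_REPO} ${TAG}
#   (( PUBLISH_RELEASE )) && publish_yaml release.yaml knative-releases ${TAG}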
source $(dirname ${BASH_SOURCE})/library.sh
# Simple banner for logging purposes.
function banner() {
make_banner "@" "$1"
}
# Tag images in the yaml file with a tag. If no tag is passed, does nothing.
# Parameters: $1 - yaml file to parse for images.
# $2 - registry where the images are stored.
# $3 - tag to apply (optional).
function tag_images_in_yaml() {
[[ -z $3 ]] && return 0
echo "Tagging images with $3"
for image in $(grep -o "$2/[a-z\./-]\+@sha256:[0-9a-f]\+" $1); do
gcloud -q container images add-tag ${image} ${image%%@*}:$3
done
}
# Copy the given yaml file to a GCS bucket, under /latest/ and optionally also under /previous/<tag>/.
# Parameters: $1 - yaml file to copy.
# $2 - destination bucket name.
# $3 - release tag, used as the /previous/ subdirectory name (optional).
function publish_yaml() {
gsutil cp $1 gs://$2/latest/
[[ -n $3 ]] && gsutil cp $1 gs://$2/previous/$3/
}
SKIP_TESTS=0
TAG_RELEASE=0
PUBLISH_RELEASE=0
TAG=""
KO_FLAGS="-P -L"
# Parses flags and sets environment variables accordingly.
function parse_flags() {
cd ${REPO_ROOT_DIR}
for parameter in $@; do
case $parameter in
--skip-tests) SKIP_TESTS=1 ;;
--tag-release) TAG_RELEASE=1 ;;
--notag-release) TAG_RELEASE=0 ;;
--publish)
PUBLISH_RELEASE=1
# Remove -L from ko flags
KO_FLAGS="${KO_FLAGS/-L}"
;;
--nopublish)
PUBLISH_RELEASE=0
# Add -L to ko flags
KO_FLAGS="-L ${KO_FLAGS}"
shift
;;
*)
echo "error: unknown option ${parameter}"
exit 1
;;
esac
shift
done
TAG=""
if (( TAG_RELEASE )); then
# Currently we're not considering the tags in refs/tags namespace.
commit=$(git describe --always --dirty)
# Like kubernetes, image tag is vYYYYMMDD-commit
TAG="v$(date +%Y%m%d)-${commit}"
fi
readonly SKIP_TESTS
readonly TAG_RELEASE
readonly PUBLISH_RELEASE
readonly TAG
}
# Run the release validation tests (unless --skip-tests was passed), displaying a banner while doing so.
# Parameters: $1 - executable that runs the tests.
function run_validation_tests() {
if (( ! SKIP_TESTS )); then
banner "Running release validation tests"
# Run tests.
$1
fi
}