Use test-infra direct insertion method (#840)

Now available from knative/test-infra repo directly.
This commit is contained in:
coryrc 2020-05-25 06:57:47 -07:00 committed by GitHub
parent 9803cd6efd
commit 2df459577e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 2925 additions and 6 deletions

View File

@ -17,7 +17,7 @@
# Documentation about this script and how to use it can be found
# at https://github.com/knative/test-infra/tree/master/ci
source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/release.sh
source $(dirname $0)/../scripts/test-infra/release.sh
source $(dirname $0)/build-flags.sh
function build_release() {

View File

@ -15,7 +15,7 @@
# limitations under the License.
readonly ROOT_DIR=$(dirname $0)/..
source ${ROOT_DIR}/vendor/knative.dev/test-infra/scripts/library.sh
source ${ROOT_DIR}/scripts/test-infra/library.sh
set -o errexit
set -o nounset
@ -28,7 +28,6 @@ VERSION="master"
# The list of dependencies that we track at HEAD and periodically
# float forward in this repository.
FLOATING_DEPS=(
"knative.dev/test-infra"
"knative.dev/pkg@${VERSION}"
"knative.dev/serving@${VERSION}"
"knative.dev/eventing@${VERSION}"
@ -48,6 +47,7 @@ readonly GO_GET
if (( GO_GET )); then
go get -d ${FLOATING_DEPS[@]}
"${ROOT_DIR}/scripts/test-infra/update-test-infra.sh" --update --ref "${VERSION}"
fi

View File

@ -18,7 +18,7 @@ set -o errexit
set -o nounset
set -o pipefail
source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/library.sh
source $(dirname $0)/../scripts/test-infra/library.sh
# Needed later
go install golang.org/x/tools/cmd/goimports

View File

@ -0,0 +1 @@
21faa1b1d5d571df6b8f67af72f6c7a269646f21

View File

@ -0,0 +1,356 @@
# Helper scripts
This directory contains helper scripts used by Prow test jobs, as well as local
development scripts.
## Using the `presubmit-tests.sh` helper script
This is a helper script to run the presubmit tests. To use it:
1. Source this script.
1. [optional] Define the function `build_tests()`. If you don't define this
function, the default action for running the build tests is to:
- check markdown files
- run `go build` on the entire repo
- run `/hack/verify-codegen.sh` (if it exists)
- check licenses in all go packages
The markdown link checker tool doesn't check `localhost` links by default.
Its configuration file, `markdown-link-check-config.json`, lives in the
`test-infra/scripts` directory. To override it, create a file with the same
name, containing the custom config in the `/test` directory.
The markdown lint tool ignores long lines by default. Its configuration file,
`markdown-lint-config.rc`, lives in the `test-infra/scripts` directory. To
override it, create a file with the same name, containing the custom config
in the `/test` directory.
1. [optional] Customize the default build test runner, if you're using it. Set
the following environment variables if the default values don't fit your
needs:
- `DISABLE_MD_LINTING`: Disable linting markdown files, defaults to 0
(false).
- `DISABLE_MD_LINK_CHECK`: Disable checking links in markdown files, defaults
to 0 (false).
- `PRESUBMIT_TEST_FAIL_FAST`: Fail the presubmit test immediately if a test
fails, defaults to 0 (false).
1. [optional] Define the functions `pre_build_tests()` and/or
`post_build_tests()`. These functions will be called before or after the
build tests (either your custom one or the default action) and will cause the
test to fail if they don't return success.
1. [optional] Define the function `unit_tests()`. If you don't define this
function, the default action for running the unit tests is to run all go
tests in the repo.
1. [optional] Define the functions `pre_unit_tests()` and/or
`post_unit_tests()`. These functions will be called before or after the unit
tests (either your custom one or the default action) and will cause the test
to fail if they don't return success.
1. [optional] Define the function `integration_tests()`. If you don't define
this function, the default action for running the integration tests is to
run all `./test/e2e-*tests.sh` scripts, in sequence.
1. [optional] Define the functions `pre_integration_tests()` and/or
`post_integration_tests()`. These functions will be called before or after
the integration tests (either your custom one or the default action) and will
cause the test to fail if they don't return success.
1. Call the `main()` function passing `"$@"` (with quotes).
Running the script without parameters, or with the `--all-tests` flag causes all
tests to be executed, in the right order (i.e., build, then unit, then
integration tests).
Use the flags `--build-tests`, `--unit-tests` and `--integration-tests` to run a
specific set of tests.
To run specific programs as a test, use the `--run-test` flag, and provide the
program as the argument. If arguments are required for the program, pass
everything as a single quotes argument. For example,
`./presubmit-tests.sh --run-test "test/my/test data"`. This flag can be used
repeatedly, and each one will be run in sequential order.
The script will automatically skip all presubmit tests for PRs where all changed
files are exempt from tests (e.g., a PR changing only the `OWNERS` file).
Also, for PRs touching only markdown files, the unit and integration tests are
skipped.
### Sample presubmit test script
```bash
source vendor/knative.dev/test-infra/scripts/presubmit-tests.sh
function post_build_tests() {
echo "Cleaning up after build tests"
rm -fr ./build-cache
}
function unit_tests() {
make -C tests test
}
function pre_integration_tests() {
echo "Cleaning up before integration tests"
rm -fr ./staging-area
}
# We use the default integration test runner.
main "$@"
```
## Using the `e2e-tests.sh` helper script
This is a helper script for Knative E2E test scripts. To use it:
1. [optional] Customize the test cluster. Set the following environment
variables if the default values don't fit your needs:
- `E2E_CLUSTER_REGION`: Cluster region, defaults to `us-central1`.
- `E2E_CLUSTER_BACKUP_REGIONS`: Space-separated list of regions to retry test
cluster creation in case of stockout. Defaults to `us-west1 us-east1`.
- `E2E_CLUSTER_ZONE`: Cluster zone (e.g., `a`), defaults to none (i.e. use a
regional cluster).
- `E2E_CLUSTER_BACKUP_ZONES`: Space-separated list of zones to retry test
cluster creation in case of stockout. If defined,
`E2E_CLUSTER_BACKUP_REGIONS` will be ignored thus it defaults to none.
- `E2E_CLUSTER_MACHINE`: Cluster node machine type, defaults to
`e2-standard-4`.
- `E2E_MIN_CLUSTER_NODES`: Minimum number of nodes in the cluster when
autoscaling, defaults to 1.
- `E2E_MAX_CLUSTER_NODES`: Maximum number of nodes in the cluster when
autoscaling, defaults to 3.
1. Source the script.
1. [optional] Write the `knative_setup()` function, which will set up your
system under test (e.g., Knative Serving). This function won't be called if
you use the `--skip-knative-setup` flag.
1. [optional] Write the `knative_teardown()` function, which will tear down your
system under test (e.g., Knative Serving). This function won't be called if
you use the `--skip-knative-setup` flag.
1. [optional] Write the `test_setup()` function, which will set up the test
resources.
1. [optional] Write the `test_teardown()` function, which will tear down the
test resources.
1. [optional] Write the `cluster_setup()` function, which will set up any
resources before the test cluster is created.
1. [optional] Write the `cluster_teardown()` function, which will tear down any
resources after the test cluster is destroyed.
1. [optional] Write the `dump_extra_cluster_state()` function. It will be called
when a test fails, and can dump extra information about the current state of
the cluster (typically using `kubectl`).
1. [optional] Write the `parse_flags()` function. It will be called whenever an
unrecognized flag is passed to the script, allowing you to define your own
flags. The function must return 0 if the flag is unrecognized, or the number
of items to skip in the command line if the flag was parsed successfully. For
example, return 1 for a simple flag, and 2 for a flag with a parameter.
1. Call the `initialize()` function passing `$@` (without quotes).
1. Write logic for the end-to-end tests. Run all go tests using `go_test_e2e()`
(or `report_go_test()` if you need a more fine-grained control) and call
`fail_test()` or `success()` if any of them failed. The environment variable
`KO_DOCKER_REPO` and `E2E_PROJECT_ID` will be set according to the test
cluster.
**Notes:**
1. Calling your script without arguments will create a new cluster in the GCP
project `$PROJECT_ID` and run the tests against it.
1. Calling your script with `--run-tests` and the variable `KO_DOCKER_REPO` set
will immediately start the tests against the cluster currently configured for
`kubectl`.
1. By default `knative_teardown()` and `test_teardown()` will be called after
the tests finish, use `--skip-teardowns` if you don't want them to be called.
1. By default Istio is installed on the cluster via Addon, use
`--skip-istio-addon` if you choose not to have it preinstalled.
1. You can force running the tests against a specific GKE cluster version by
using the `--cluster-version` flag and passing a full version as the flag
value.
### Sample end-to-end test script
This script will test that the latest Knative Serving nightly release works. It
defines a special flag (`--no-knative-wait`) that causes the script not to wait
for Knative Serving to be up before running the tests. It also requires that the
test cluster is created in a specific region, `us-west2`.
```bash
# This test requires a cluster in LA
E2E_CLUSTER_REGION=us-west2
source vendor/knative.dev/test-infra/scripts/e2e-tests.sh
function knative_setup() {
start_latest_knative_serving
if (( WAIT_FOR_KNATIVE )); then
wait_until_pods_running knative-serving || fail_test "Knative Serving not up"
fi
}
function parse_flags() {
if [[ "$1" == "--no-knative-wait" ]]; then
WAIT_FOR_KNATIVE=0
return 1
fi
return 0
}
WAIT_FOR_KNATIVE=1
initialize $@
# TODO: use go_test_e2e to run the tests.
kubectl get pods || fail_test
success
```
## Using the `performance-tests.sh` helper script
This is a helper script for Knative performance test scripts. In combination
with specific Prow jobs, it can automatically manage the environment for running
benchmarking jobs for each repo. To use it:
1. Source the script.
1. [optional] Customize GCP project settings for the benchmarks. Set the
following environment variables if the default value doesn't fit your needs:
- `PROJECT_NAME`: GCP project name for keeping the clusters that run the
benchmarks. Defaults to `knative-performance`.
- `SERVICE_ACCOUNT_NAME`: Service account name for controlling GKE clusters
and interacting with [Mako](https://github.com/google/mako) server. It MUST
have `Kubernetes Engine Admin` and `Storage Admin` role, and be
[whitelisted](https://github.com/google/mako/blob/master/docs/ACCESS.md) by
Mako admin. Defaults to `mako-job`.
1. [optional] Customize root path of the benchmarks. This root folder should
contain and only contain all benchmarks you want to run continuously. Set the
following environment variable if the default value doesn't fit your needs:
- `BENCHMARK_ROOT_PATH`: Benchmark root path, defaults to
`test/performance/benchmarks`. Each repo can decide which folder to put its
benchmarks in, and override this environment variable to be the path of
that folder.
1. [optional] Write the `update_knative` function, which will update your system
under test (e.g. Knative Serving).
1. [optional] Write the `update_benchmark` function, which will update the
underlying resources for the benchmark (usually Knative resources and
Kubernetes cronjobs for benchmarking). This function accepts a parameter,
which is the benchmark name in the current repo.
1. Call the `main()` function with all parameters (e.g. `$@`).
### Sample performance test script
This script will update `Knative serving` and the given benchmark.
```bash
source vendor/knative.dev/test-infra/scripts/performance-tests.sh
function update_knative() {
echo ">> Updating serving"
ko apply -f config/ || abort "failed to apply serving"
}
function update_benchmark() {
echo ">> Updating benchmark $1"
ko apply -f ${BENCHMARK_ROOT_PATH}/$1 || abort "failed to apply benchmark $1"
}
main $@
```
## Using the `release.sh` helper script
This is a helper script for Knative release scripts. To use it:
1. Source the script.
1. [optional] By default, the release script will run
`./test/presubmit-tests.sh` as the release validation tests. If you need to
run something else, set the environment variable `VALIDATION_TESTS` to the
executable to run.
1. Write logic for building the release in a function named `build_release()`.
Set the environment variable `ARTIFACTS_TO_PUBLISH` to the list of files
created, space separated. Use the following boolean (0 is false, 1 is true)
and string environment variables for the logic:
- `RELEASE_VERSION`: contains the release version if `--version` was passed.
This also overrides the value of the `TAG` variable as `v<version>`.
- `RELEASE_BRANCH`: contains the release branch if `--branch` was passed.
Otherwise it's empty and `master` HEAD will be considered the release
branch.
- `RELEASE_NOTES`: contains the filename with the release notes if
`--release-notes` was passed. The release notes is a simple markdown file.
- `RELEASE_GCS_BUCKET`: contains the GCS bucket name to store the manifests
if `--release-gcs` was passed, otherwise the default value
`knative-nightly/<repo>` will be used. It is empty if `--publish` was not
passed.
- `RELEASE_DIR`: contains the directory to store the manifests if
`--release-dir` was passed. Defaults to empty value, but if `--nopublish`
was passed then points to the repository root directory.
- `BUILD_COMMIT_HASH`: the commit short hash for the current repo. If the
current git tree is dirty, it will have `-dirty` appended to it.
- `BUILD_YYYYMMDD`: current UTC date in `YYYYMMDD` format.
- `BUILD_TIMESTAMP`: human-readable UTC timestamp in `YYYY-MM-DD HH:MM:SS`
format.
- `BUILD_TAG`: a tag in the form `v$BUILD_YYYYMMDD-$BUILD_COMMIT_HASH`.
- `KO_DOCKER_REPO`: contains the GCR to store the images if `--release-gcr`
was passed, otherwise the default value `gcr.io/knative-nightly` will be
used. It is set to `ko.local` if `--publish` was not passed.
- `SKIP_TESTS`: true if `--skip-tests` was passed. This is handled
automatically.
- `TAG_RELEASE`: true if `--tag-release` was passed. In this case, the
environment variable `TAG` will contain the release tag in the form
`v$BUILD_TAG`.
- `PUBLISH_RELEASE`: true if `--publish` was passed. In this case, the
environment variable `KO_FLAGS` will be updated with the `-L` option and
`TAG` will contain the release tag in the form `v$RELEASE_VERSION`.
- `PUBLISH_TO_GITHUB`: true if `--version`, `--branch` and
`--publish-release` were passed.
All boolean environment variables default to false for safety.
All environment variables above, except `KO_FLAGS`, are marked read-only once
`main()` is called (see below).
1. Call the `main()` function passing `"$@"` (with quotes).
### Sample release script
```bash
source vendor/knative.dev/test-infra/scripts/release.sh
function build_release() {
# config/ contains the manifests
ko resolve ${KO_FLAGS} -f config/ > release.yaml
ARTIFACTS_TO_PUBLISH="release.yaml"
}
main "$@"
```

View File

@ -0,0 +1,23 @@
/*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scripts
import (
"fmt"
)
func main() {
fmt.Println("This is a dummy go file so `go dep` can be used with knative/test-infra/scripts")
fmt.Println("This file can be safely removed if one day this directory contains real, useful go code")
}

504
scripts/test-infra/e2e-tests.sh Executable file
View File

@ -0,0 +1,504 @@
#!/usr/bin/env bash
# Copyright 2019 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script for Knative E2E test scripts.
# See README.md for instructions on how to use it.
source $(dirname ${BASH_SOURCE})/library.sh
# Build a resource name based on $E2E_BASE_NAME, a suffix and $BUILD_NUMBER.
# Restricts the name length to 40 chars (the limit for resource names in GCP).
# Name will have the form $E2E_BASE_NAME-<PREFIX>$BUILD_NUMBER.
# Parameters: $1 - name suffix
function build_resource_name() {
local prefix=${E2E_BASE_NAME}-$1
local suffix=${BUILD_NUMBER}
# Restrict suffix length to 20 chars
if [[ -n "${suffix}" ]]; then
suffix=${suffix:${#suffix}<20?0:-20}
fi
local name="${prefix:0:20}${suffix}"
# Ensure name doesn't end with "-"
echo "${name%-}"
}
# Test cluster parameters
# Configurable parameters
# export E2E_CLUSTER_REGION and E2E_CLUSTER_ZONE as they're used in the cluster setup subprocess
export E2E_CLUSTER_REGION=${E2E_CLUSTER_REGION:-us-central1}
# By default we use regional clusters.
export E2E_CLUSTER_ZONE=${E2E_CLUSTER_ZONE:-}
# Default backup regions in case of stockouts; by default we don't fall back to a different zone in the same region
readonly E2E_CLUSTER_BACKUP_REGIONS=${E2E_CLUSTER_BACKUP_REGIONS:-us-west1 us-east1}
readonly E2E_CLUSTER_BACKUP_ZONES=${E2E_CLUSTER_BACKUP_ZONES:-}
readonly E2E_CLUSTER_MACHINE=${E2E_CLUSTER_MACHINE:-e2-standard-4}
readonly E2E_GKE_ENVIRONMENT=${E2E_GKE_ENVIRONMENT:-prod}
readonly E2E_GKE_COMMAND_GROUP=${E2E_GKE_COMMAND_GROUP:-beta}
# Each knative repository may have a different cluster size requirement here,
# so we allow calling code to set these parameters. If they are not set we
# use some sane defaults.
readonly E2E_MIN_CLUSTER_NODES=${E2E_MIN_CLUSTER_NODES:-1}
readonly E2E_MAX_CLUSTER_NODES=${E2E_MAX_CLUSTER_NODES:-3}
readonly E2E_BASE_NAME="k${REPO_NAME}"
readonly E2E_CLUSTER_NAME=$(build_resource_name e2e-cls)
readonly E2E_NETWORK_NAME=$(build_resource_name e2e-net)
readonly TEST_RESULT_FILE=/tmp/${E2E_BASE_NAME}-e2e-result
# Flag whether test is using a boskos GCP project
IS_BOSKOS=0
# Tear down the test resources.
function teardown_test_resources() {
# On boskos, save time and don't teardown as the cluster will be destroyed anyway.
(( IS_BOSKOS )) && return
header "Tearing down test environment"
function_exists test_teardown && test_teardown
(( ! SKIP_KNATIVE_SETUP )) && function_exists knative_teardown && knative_teardown
# Delete the kubernetes source downloaded by kubetest
rm -fr kubernetes kubernetes.tar.gz
}
# Run the given E2E tests. Assume tests are tagged e2e, unless `-tags=XXX` is passed.
# Parameters: $1..$n - any go test flags, then directories containing the tests to run.
function go_test_e2e() {
local test_options=""
local go_options=""
[[ ! " $@" == *" -tags="* ]] && go_options="-tags=e2e"
report_go_test -v -race -count=1 ${go_options} $@ ${test_options}
}
# Dump info about the test cluster. If dump_extra_cluster_info() is defined, calls it too.
# This is intended to be called when a test fails to provide debugging information.
function dump_cluster_state() {
echo "***************************************"
echo "*** E2E TEST FAILED ***"
echo "*** Start of information dump ***"
echo "***************************************"
local output="${ARTIFACTS}/k8s.dump-$(basename ${E2E_SCRIPT}).txt"
echo ">>> The dump is located at ${output}"
for crd in $(kubectl api-resources --verbs=list -o name | sort); do
local count="$(kubectl get $crd --all-namespaces --no-headers 2>/dev/null | wc -l)"
echo ">>> ${crd} (${count} objects)"
if [[ "${count}" > "0" ]]; then
echo ">>> ${crd} (${count} objects)" >> ${output}
echo ">>> Listing" >> ${output}
kubectl get ${crd} --all-namespaces >> ${output}
echo ">>> Details" >> ${output}
if [[ "${crd}" == "secrets" ]]; then
echo "Secrets are ignored for security reasons" >> ${output}
else
kubectl get ${crd} --all-namespaces -o yaml >> ${output}
fi
fi
done
if function_exists dump_extra_cluster_state; then
echo ">>> Extra dump" >> ${output}
dump_extra_cluster_state >> ${output}
fi
echo "***************************************"
echo "*** E2E TEST FAILED ***"
echo "*** End of information dump ***"
echo "***************************************"
}
# On a Prow job, save some metadata about the test for Testgrid.
function save_metadata() {
(( ! IS_PROW )) && return
local geo_key="Region"
local geo_value="${E2E_CLUSTER_REGION}"
if [[ -n "${E2E_CLUSTER_ZONE}" ]]; then
geo_key="Zone"
geo_value="${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}"
fi
local cluster_version="$(gcloud container clusters list --project=${E2E_PROJECT_ID} --format='value(currentMasterVersion)')"
cat << EOF > ${ARTIFACTS}/metadata.json
{
"E2E:${geo_key}": "${geo_value}",
"E2E:Machine": "${E2E_CLUSTER_MACHINE}",
"E2E:Version": "${cluster_version}",
"E2E:MinNodes": "${E2E_MIN_CLUSTER_NODES}",
"E2E:MaxNodes": "${E2E_MAX_CLUSTER_NODES}"
}
EOF
}
# Set E2E_CLUSTER_VERSION to a specific GKE version.
# Parameters: $1 - target GKE version (X.Y, X.Y.Z, X.Y.Z-gke.W, default or gke-latest).
# $2 - region[-zone] where the clusteer will be created.
function resolve_k8s_version() {
local target_version="$1"
if [[ "${target_version}" == "default" ]]; then
local version="$(gcloud container get-server-config \
--format='value(defaultClusterVersion)' \
--zone=$2)"
[[ -z "${version}" ]] && return 1
E2E_CLUSTER_VERSION="${version}"
echo "Using default version, ${E2E_CLUSTER_VERSION}"
return 0
fi
# Fetch valid versions
local versions="$(gcloud container get-server-config \
--format='value(validMasterVersions)' \
--zone=$2)"
[[ -z "${versions}" ]] && return 1
local gke_versions=($(echo -n "${versions//;/ }"))
echo "Available GKE versions in $2 are [${versions//;/, }]"
if [[ "${target_version}" == "gke-latest" ]]; then
# Get first (latest) version
E2E_CLUSTER_VERSION="${gke_versions[0]}"
echo "Using latest version, ${E2E_CLUSTER_VERSION}"
else
local latest="$(echo "${gke_versions[@]}" | tr ' ' '\n' | grep -E ^${target_version} | sort -V | tail -1)"
if [[ -z "${latest}" ]]; then
echo "ERROR: version ${target_version} is not available"
return 1
fi
E2E_CLUSTER_VERSION="${latest}"
echo "Using ${E2E_CLUSTER_VERSION} for supplied version ${target_version}"
fi
return 0
}
# Create a test cluster with kubetest and call the current script again.
function create_test_cluster() {
# Fail fast during setup.
set -o errexit
set -o pipefail
if function_exists cluster_setup; then
cluster_setup || fail_test "cluster setup failed"
fi
echo "Cluster will have a minimum of ${E2E_MIN_CLUSTER_NODES} and a maximum of ${E2E_MAX_CLUSTER_NODES} nodes."
# Smallest cluster required to run the end-to-end-tests
local CLUSTER_CREATION_ARGS=(
--gke-create-command="container clusters create --quiet --enable-autoscaling --min-nodes=${E2E_MIN_CLUSTER_NODES} --max-nodes=${E2E_MAX_CLUSTER_NODES} --scopes=cloud-platform --enable-basic-auth --no-issue-client-certificate ${GKE_ADDONS} ${EXTRA_CLUSTER_CREATION_FLAGS[@]}"
--gke-shape={\"default\":{\"Nodes\":${E2E_MIN_CLUSTER_NODES}\,\"MachineType\":\"${E2E_CLUSTER_MACHINE}\"}}
--provider=gke
--deployment=gke
--cluster="${E2E_CLUSTER_NAME}"
--gcp-network="${E2E_NETWORK_NAME}"
--gcp-node-image="${SERVING_GKE_IMAGE}"
--gke-environment="${E2E_GKE_ENVIRONMENT}"
--gke-command-group="${E2E_GKE_COMMAND_GROUP}"
--test=false
--up
)
if (( ! IS_BOSKOS )); then
CLUSTER_CREATION_ARGS+=(--gcp-project=${GCP_PROJECT})
fi
# SSH keys are not used, but kubetest checks for their existence.
# Touch them so if they don't exist, empty files are create to satisfy the check.
mkdir -p $HOME/.ssh
touch $HOME/.ssh/google_compute_engine.pub
touch $HOME/.ssh/google_compute_engine
# Assume test failed (see details in set_test_return_code()).
set_test_return_code 1
local gcloud_project="${GCP_PROJECT}"
[[ -z "${gcloud_project}" ]] && gcloud_project="$(gcloud config get-value project)"
echo "gcloud project is ${gcloud_project}"
echo "gcloud user is $(gcloud config get-value core/account)"
(( IS_BOSKOS )) && echo "Using boskos for the test cluster"
[[ -n "${GCP_PROJECT}" ]] && echo "GCP project for test cluster is ${GCP_PROJECT}"
echo "Test script is ${E2E_SCRIPT}"
# Set arguments for this script again
local test_cmd_args="--run-tests"
(( SKIP_KNATIVE_SETUP )) && test_cmd_args+=" --skip-knative-setup"
[[ -n "${GCP_PROJECT}" ]] && test_cmd_args+=" --gcp-project ${GCP_PROJECT}"
[[ -n "${E2E_SCRIPT_CUSTOM_FLAGS[@]}" ]] && test_cmd_args+=" ${E2E_SCRIPT_CUSTOM_FLAGS[@]}"
local extra_flags=()
if (( IS_BOSKOS )); then
# Add arbitrary duration, wait for Boskos projects acquisition before error out
extra_flags+=(--boskos-wait-duration=20m)
elif (( ! SKIP_TEARDOWNS )); then
# Only let kubetest tear down the cluster if not using Boskos and teardowns are not expected to be skipped,
# it's done by Janitor if using Boskos
extra_flags+=(--down)
fi
# Set a minimal kubernetes environment that satisfies kubetest
# TODO(adrcunha): Remove once https://github.com/kubernetes/test-infra/issues/13029 is fixed.
local kubedir="$(mktemp -d -t kubernetes.XXXXXXXXXX)"
local test_wrapper="${kubedir}/e2e-test.sh"
mkdir ${kubedir}/cluster
ln -s "$(which kubectl)" ${kubedir}/cluster/kubectl.sh
echo "#!/usr/bin/env bash" > ${test_wrapper}
echo "cd $(pwd) && set -x" >> ${test_wrapper}
echo "${E2E_SCRIPT} ${test_cmd_args}" >> ${test_wrapper}
chmod +x ${test_wrapper}
cd ${kubedir}
# Create cluster and run the tests
create_test_cluster_with_retries "${CLUSTER_CREATION_ARGS[@]}" \
--test-cmd "${test_wrapper}" \
${extra_flags[@]} \
${EXTRA_KUBETEST_FLAGS[@]}
echo "Test subprocess exited with code $?"
# Ignore any errors below, this is a best-effort cleanup and shouldn't affect the test result.
set +o errexit
function_exists cluster_teardown && cluster_teardown
local result=$(get_test_return_code)
echo "Artifacts were written to ${ARTIFACTS}"
echo "Test result code is ${result}"
exit ${result}
}
# Retry backup regions/zones if cluster creations failed due to stockout.
# Parameters: $1..$n - any kubetest flags other than geo flag.
function create_test_cluster_with_retries() {
local cluster_creation_log=/tmp/${E2E_BASE_NAME}-cluster_creation-log
# zone_not_provided is a placeholder for e2e_cluster_zone to make for loop below work
local zone_not_provided="zone_not_provided"
local e2e_cluster_regions=(${E2E_CLUSTER_REGION})
local e2e_cluster_zones=(${E2E_CLUSTER_ZONE})
if [[ -n "${E2E_CLUSTER_BACKUP_ZONES}" ]]; then
e2e_cluster_zones+=(${E2E_CLUSTER_BACKUP_ZONES})
elif [[ -n "${E2E_CLUSTER_BACKUP_REGIONS}" ]]; then
e2e_cluster_regions+=(${E2E_CLUSTER_BACKUP_REGIONS})
e2e_cluster_zones=(${zone_not_provided})
else
echo "No backup region/zone set, cluster creation will fail in case of stockout"
fi
local e2e_cluster_target_version="${E2E_CLUSTER_VERSION}"
for e2e_cluster_region in "${e2e_cluster_regions[@]}"; do
for e2e_cluster_zone in "${e2e_cluster_zones[@]}"; do
E2E_CLUSTER_REGION=${e2e_cluster_region}
E2E_CLUSTER_ZONE=${e2e_cluster_zone}
[[ "${E2E_CLUSTER_ZONE}" == "${zone_not_provided}" ]] && E2E_CLUSTER_ZONE=""
local cluster_creation_zone="${E2E_CLUSTER_REGION}"
[[ -n "${E2E_CLUSTER_ZONE}" ]] && cluster_creation_zone="${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}"
resolve_k8s_version ${e2e_cluster_target_version} ${cluster_creation_zone} || return 1
header "Creating test cluster ${E2E_CLUSTER_VERSION} in ${cluster_creation_zone}"
# Don't fail test for kubetest, as it might incorrectly report test failure
# if teardown fails (for details, see success() below)
set +o errexit
export CLUSTER_API_VERSION=${E2E_CLUSTER_VERSION}
run_go_tool k8s.io/test-infra/kubetest \
kubetest "$@" --gcp-region=${cluster_creation_zone} 2>&1 | tee ${cluster_creation_log}
# Exit if test succeeded
[[ "$(get_test_return_code)" == "0" ]] && return 0
# Retry if cluster creation failed because of:
# - stockout (https://github.com/knative/test-infra/issues/592)
# - latest GKE not available in this region/zone yet (https://github.com/knative/test-infra/issues/694)
[[ -z "$(grep -Fo 'does not have enough resources available to fulfill' ${cluster_creation_log})" \
&& -z "$(grep -Fo 'ResponseError: code=400, message=No valid versions with the prefix' ${cluster_creation_log})" \
&& -z "$(grep -Po 'ResponseError: code=400, message=Master version "[0-9a-z\-\.]+" is unsupported' ${cluster_creation_log})" \
&& -z "$(grep -Po 'only \d+ nodes out of \d+ have registered; this is likely due to Nodes failing to start correctly' ${cluster_creation_log})" ]] \
&& return 1
done
done
echo "No more region/zones to try, quitting"
return 1
}
# Setup the test cluster for running the tests.
function setup_test_cluster() {
# Fail fast during setup.
set -o errexit
set -o pipefail
header "Test cluster setup"
kubectl get nodes
header "Setting up test cluster"
# Set the actual project the test cluster resides in
# It will be a project assigned by Boskos if test is running on Prow,
# otherwise will be ${GCP_PROJECT} set up by user.
export E2E_PROJECT_ID="$(gcloud config get-value project)"
readonly E2E_PROJECT_ID
# Save some metadata about cluster creation for using in prow and testgrid
save_metadata
local k8s_user=$(gcloud config get-value core/account)
local k8s_cluster=$(kubectl config current-context)
is_protected_cluster ${k8s_cluster} && \
abort "kubeconfig context set to ${k8s_cluster}, which is forbidden"
# If cluster admin role isn't set, this is a brand new cluster
# Setup the admin role and also KO_DOCKER_REPO if it is a GKE cluster
if [[ -z "$(kubectl get clusterrolebinding cluster-admin-binding 2> /dev/null)" && "${k8s_cluster}" =~ ^gke_.* ]]; then
acquire_cluster_admin_role ${k8s_user} ${E2E_CLUSTER_NAME} ${E2E_CLUSTER_REGION} ${E2E_CLUSTER_ZONE}
# Incorporate an element of randomness to ensure that each run properly publishes images.
export KO_DOCKER_REPO=gcr.io/${E2E_PROJECT_ID}/${E2E_BASE_NAME}-e2e-img/${RANDOM}
fi
# Safety checks
is_protected_gcr ${KO_DOCKER_REPO} && \
abort "\$KO_DOCKER_REPO set to ${KO_DOCKER_REPO}, which is forbidden"
# Use default namespace for all subsequent kubectl commands in this context
kubectl config set-context ${k8s_cluster} --namespace=default
echo "- gcloud project is ${E2E_PROJECT_ID}"
echo "- gcloud user is ${k8s_user}"
echo "- Cluster is ${k8s_cluster}"
echo "- Docker is ${KO_DOCKER_REPO}"
export KO_DATA_PATH="${REPO_ROOT_DIR}/.git"
# Do not run teardowns if we explicitly want to skip them.
(( ! SKIP_TEARDOWNS )) && trap teardown_test_resources EXIT
# Handle failures ourselves, so we can dump useful info.
set +o errexit
set +o pipefail
if (( ! SKIP_KNATIVE_SETUP )) && function_exists knative_setup; then
# Wait for Istio installation to complete, if necessary, before calling knative_setup.
(( ! SKIP_ISTIO_ADDON )) && (wait_until_batch_job_complete istio-system || return 1)
knative_setup || fail_test "Knative setup failed"
fi
if function_exists test_setup; then
test_setup || fail_test "test setup failed"
fi
}
# Gets the exit of the test script.
# For more details, see set_test_return_code().
function get_test_return_code() {
echo $(cat ${TEST_RESULT_FILE})
}
# Persist the return code that the test script will eventually report.
# Parameters: $1 - return code (0-255)
function set_test_return_code() {
  # kubetest teardown might fail and thus incorrectly report failure of the
  # script, even if the tests pass. We store the real result in a file and
  # read it back later, ignoring any teardown failure in kubetest.
  # TODO(adrcunha): Get rid of this workaround.
  printf '%s' "$1" > "${TEST_RESULT_FILE}"
}
# Signal (as return code and in the logs) that all E2E tests passed,
# then terminate the script with status 0.
function success() {
  set_test_return_code 0
  local stars="**************************************"
  echo "${stars}"
  echo "*** E2E TESTS PASSED ***"
  echo "${stars}"
  exit 0
}
# Abort the test run, dumping current cluster state for debugging.
# Parameters: $1 - error message (optional).
function fail_test() {
  set_test_return_code 1
  if [[ -n $1 ]]; then
    echo "ERROR: $1"
  fi
  dump_cluster_state
  exit 1
}
# Defaults for the flags parsed by initialize(); made readonly there.
RUN_TESTS=0
SKIP_KNATIVE_SETUP=0
SKIP_ISTIO_ADDON=0
SKIP_TEARDOWNS=0
GCP_PROJECT=""
E2E_SCRIPT=""
E2E_CLUSTER_VERSION=""
GKE_ADDONS=""
EXTRA_CLUSTER_CREATION_FLAGS=()
EXTRA_KUBETEST_FLAGS=()
# Custom flags recognized by the caller's parse_flags(), forwarded verbatim
# to the test script when it is re-invoked inside the test cluster.
E2E_SCRIPT_CUSTOM_FLAGS=()
# Parse flags and initialize the test cluster.
# Arguments: all command-line flags given to the test script.
# Side effects: sets and freezes (readonly) RUN_TESTS, GCP_PROJECT, IS_BOSKOS,
# EXTRA_CLUSTER_CREATION_FLAGS, EXTRA_KUBETEST_FLAGS, SKIP_KNATIVE_SETUP,
# SKIP_TEARDOWNS and GKE_ADDONS, then either creates a new test cluster or
# sets up the current one, depending on --run-tests.
function initialize() {
  E2E_SCRIPT="$(get_canonical_path $0)"
  E2E_CLUSTER_VERSION="${SERVING_GKE_VERSION}"
  cd ${REPO_ROOT_DIR}
  while [[ $# -ne 0 ]]; do
    local parameter=$1
    # Try parsing flag as a custom one.
    if function_exists parse_flags; then
      # parse_flags returns the number of positional args it consumed (0 if
      # the flag was not recognized).
      parse_flags $@
      local skip=$?
      if [[ ${skip} -ne 0 ]]; then
        # Skip parsed flag (and possibly argument) and continue
        # Also save it to it's passed through to the test script
        for ((i=1;i<=skip;i++)); do
          E2E_SCRIPT_CUSTOM_FLAGS+=("$1")
          shift
        done
        continue
      fi
    fi
    # Try parsing flag as a standard one.
    case ${parameter} in
      --run-tests) RUN_TESTS=1 ;;
      --skip-knative-setup) SKIP_KNATIVE_SETUP=1 ;;
      --skip-teardowns) SKIP_TEARDOWNS=1 ;;
      --skip-istio-addon) SKIP_ISTIO_ADDON=1 ;;
      *)
        # Remaining flags all take a value; require it before consuming.
        [[ $# -ge 2 ]] || abort "missing parameter after $1"
        shift
        case ${parameter} in
          --gcp-project) GCP_PROJECT=$1 ;;
          --cluster-version) E2E_CLUSTER_VERSION=$1 ;;
          --cluster-creation-flag) EXTRA_CLUSTER_CREATION_FLAGS+=($1) ;;
          --kubetest-flag) EXTRA_KUBETEST_FLAGS+=($1) ;;
          *) abort "unknown option ${parameter}" ;;
        esac
    esac
    shift
  done
  # Use PROJECT_ID if set, unless --gcp-project was used.
  if [[ -n "${PROJECT_ID:-}" && -z "${GCP_PROJECT}" ]]; then
    echo "\$PROJECT_ID is set to '${PROJECT_ID}', using it to run the tests"
    GCP_PROJECT="${PROJECT_ID}"
  fi
  if (( ! IS_PROW )) && (( ! RUN_TESTS )) && [[ -z "${GCP_PROJECT}" ]]; then
    abort "set \$PROJECT_ID or use --gcp-project to select the GCP project where the tests are run"
  fi
  # On Prow with no explicit project, a project is leased (Boskos).
  (( IS_PROW )) && [[ -z "${GCP_PROJECT}" ]] && IS_BOSKOS=1
  (( SKIP_ISTIO_ADDON )) || GKE_ADDONS="--addons=Istio"
  readonly RUN_TESTS
  readonly GCP_PROJECT
  readonly IS_BOSKOS
  readonly EXTRA_CLUSTER_CREATION_FLAGS
  readonly EXTRA_KUBETEST_FLAGS
  readonly SKIP_KNATIVE_SETUP
  readonly SKIP_TEARDOWNS
  readonly GKE_ADDONS
  if (( ! RUN_TESTS )); then
    create_test_cluster
  else
    setup_test_cluster
  fi
}

747
scripts/test-infra/library.sh Executable file
View File

@ -0,0 +1,747 @@
#!/usr/bin/env bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a collection of useful bash functions and constants, intended
# to be used in test scripts and the like. It doesn't do anything when
# called from command line.
# GCP project where all tests related resources live
readonly KNATIVE_TESTS_PROJECT=knative-tests
# Default GKE version to be used with Knative Serving
readonly SERVING_GKE_VERSION=gke-latest
# Default GKE node image.
readonly SERVING_GKE_IMAGE=cos
# Conveniently set GOPATH if unset
if [[ ! -v GOPATH ]]; then
  export GOPATH="$(go env GOPATH)"
  if [[ -z "${GOPATH}" ]]; then
    echo "WARNING: GOPATH not set and go binary unable to provide it"
  fi
fi
# Useful environment variables
# PROW_JOB_ID is set by Prow on CI jobs; use it to detect that environment.
[[ -v PROW_JOB_ID ]] && IS_PROW=1 || IS_PROW=0
readonly IS_PROW
# Allow callers to override REPO_ROOT_DIR; otherwise derive it from git.
[[ ! -v REPO_ROOT_DIR ]] && REPO_ROOT_DIR="$(git rev-parse --show-toplevel)"
readonly REPO_ROOT_DIR
readonly REPO_NAME="$(basename ${REPO_ROOT_DIR})"
# Useful flags about the current OS
IS_LINUX=0
IS_OSX=0
IS_WINDOWS=0
case "${OSTYPE}" in
  darwin*) IS_OSX=1 ;;
  linux*) IS_LINUX=1 ;;
  msys*) IS_WINDOWS=1 ;;
  *) echo "** Internal error in library.sh, unknown OS '${OSTYPE}'" ; exit 1 ;;
esac
readonly IS_LINUX
readonly IS_OSX
readonly IS_WINDOWS
# Set ARTIFACTS to an empty temp dir if unset
if [[ -z "${ARTIFACTS:-}" ]]; then
  export ARTIFACTS="$(mktemp -d)"
fi
# On a Prow job, redirect stderr to stdout so it's synchronously added to log
(( IS_PROW )) && exec 2>&1
# Print an error message and terminate the script with status 1.
# Parameters: $1..$n - error message to be displayed
function abort() {
  # Stdout on purpose: all logging helpers in this file write to stdout.
  printf 'error: %s\n' "$*"
  exit 1
}
# Display a box banner.
# Parameters: $1 - character to use for the box.
#             $2 - banner message.
function make_banner() {
  local ch="$1"
  local msg="${ch}${ch}${ch}${ch} $2 ${ch}${ch}${ch}${ch}"
  # Build the border by masking every banner-safe character with the box char.
  local border="${msg//[-0-9A-Za-z _.,\/()\']/${ch}}"
  printf '%s\n%s\n%s\n' "${border}" "${msg}" "${border}"
  # TODO(adrcunha): Remove once logs have timestamps on Prow
  # For details, see https://github.com/kubernetes/test-infra/issues/10100
  printf '%s %s\n%s\n' "${ch}${ch}${ch}${ch}" "$(TZ='America/Los_Angeles' date)" "${border}"
}
# Simple header for logging purposes: uppercases the message in a '=' banner.
function header() {
  make_banner "=" "$(echo $1 | tr a-z A-Z)"
}
# Simple subheader for logging purposes: message in a '-' banner.
function subheader() {
  local message="$1"
  make_banner "-" "${message}"
}
# Simple warning banner for logging purposes: message in a '!' banner.
function warning() {
  local message="$1"
  make_banner "!" "${message}"
}
# Checks whether the given name is defined as a shell function.
function function_exists() {
  declare -F "$1" > /dev/null
}
# Waits until the given object doesn't exist.
# Parameters: $1 - the kind of the object.
#             $2 - object's name.
#             $3 - namespace (optional).
# Returns: 0 once the object is gone, 1 after a ~5 minute timeout.
function wait_until_object_does_not_exist() {
  local KUBECTL_ARGS="get $1 $2"
  local DESCRIPTION="$1 $2"
  if [[ -n $3 ]]; then
    KUBECTL_ARGS="get -n $3 $1 $2"
    DESCRIPTION="$1 $3/$2"
  fi
  echo -n "Waiting until ${DESCRIPTION} does not exist"
  for i in {1..150}; do  # timeout after 5 minutes
    # A failing "kubectl get" means the object no longer exists.
    if ! kubectl ${KUBECTL_ARGS} > /dev/null 2>&1; then
      echo -e "\n${DESCRIPTION} does not exist"
      return 0
    fi
    echo -n "."
    sleep 2
  done
  echo -e "\n\nERROR: timeout waiting for ${DESCRIPTION} not to exist"
  # Run the query once more, visibly, to aid debugging.
  kubectl ${KUBECTL_ARGS}
  return 1
}
# Waits until all pods are running in the given namespace.
# Parameters: $1 - namespace.
# Returns: 0 when every pod is Running/Completed with all containers ready,
#          1 after a ~5 minute timeout (dumping the first failing pod).
function wait_until_pods_running() {
  echo -n "Waiting until all pods in namespace $1 are up"
  local failed_pod=""
  for i in {1..150}; do  # timeout after 5 minutes
    local pods="$(kubectl get pods --no-headers -n $1 2>/dev/null)"
    # All pods must be running (ignore ImagePull error to allow the pod to retry)
    local not_running_pods=$(echo "${pods}" | grep -v Running | grep -v Completed | grep -v ErrImagePull | grep -v ImagePullBackOff)
    if [[ -n "${pods}" ]] && [[ -z "${not_running_pods}" ]]; then
      # All Pods are running or completed. Verify the containers on each Pod.
      local all_ready=1
      while read pod ; do
        # status holds the READY column split into (ready, total) counts.
        local status=(`echo -n ${pod} | cut -f2 -d' ' | tr '/' ' '`)
        # Set this Pod as the failed_pod. If nothing is wrong with it, then after the checks, set
        # failed_pod to the empty string.
        failed_pod=$(echo -n "${pod}" | cut -f1 -d' ')
        # All containers must be ready
        [[ -z ${status[0]} ]] && all_ready=0 && break
        [[ -z ${status[1]} ]] && all_ready=0 && break
        [[ ${status[0]} -lt 1 ]] && all_ready=0 && break
        [[ ${status[1]} -lt 1 ]] && all_ready=0 && break
        [[ ${status[0]} -ne ${status[1]} ]] && all_ready=0 && break
        # All the tests passed, this is not a failed pod.
        failed_pod=""
      done <<< "$(echo "${pods}" | grep -v Completed)"
      if (( all_ready )); then
        echo -e "\nAll pods are up:\n${pods}"
        return 0
      fi
    elif [[ -n "${not_running_pods}" ]]; then
      # At least one Pod is not running, just save the first one's name as the failed_pod.
      failed_pod="$(echo "${not_running_pods}" | head -n 1 | cut -f1 -d' ')"
    fi
    echo -n "."
    sleep 2
  done
  echo -e "\n\nERROR: timeout waiting for pods to come up\n${pods}"
  if [[ -n "${failed_pod}" ]]; then
    echo -e "\n\nFailed Pod (data in YAML format) - ${failed_pod}\n"
    kubectl -n $1 get pods "${failed_pod}" -oyaml
    echo -e "\n\nPod Logs\n"
    kubectl -n $1 logs "${failed_pod}" --all-containers
  fi
  return 1
}
# Waits until all batch jobs complete in the given namespace.
# Parameters: $1 - namespace.
# Returns: 0 when every job's succeeded count equals its completions,
#          1 after a ~5 minute timeout.
function wait_until_batch_job_complete() {
  echo -n "Waiting until all batch jobs in namespace $1 run to completion."
  for i in {1..150}; do  # timeout after 5 minutes
    local jobs=$(kubectl get jobs -n $1 --no-headers \
                 -ocustom-columns='n:{.metadata.name},c:{.spec.completions},s:{.status.succeeded}')
    # All jobs must be complete: compare the completions ($2) and succeeded
    # ($3) columns of every row.
    local not_complete=$(echo "${jobs}" | awk '{if ($2!=$3) print $0}' | wc -l)
    if [[ ${not_complete} -eq 0 ]]; then
      echo -e "\nAll jobs are complete:\n${jobs}"
      return 0
    fi
    echo -n "."
    sleep 2
  done
  echo -e "\n\nERROR: timeout waiting for jobs to complete\n${jobs}"
  return 1
}
# Waits until the given service has an external address (IP/hostname).
# Parameters: $1 - namespace.
#             $2 - service name.
# Returns: 0 when the LoadBalancer ingress has an IP or hostname,
#          1 after a ~15 minute timeout.
function wait_until_service_has_external_ip() {
  echo -n "Waiting until service $2 in namespace $1 has an external address (IP/hostname)"
  for i in {1..150}; do  # timeout after 15 minutes
    local ip=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
    if [[ -n "${ip}" ]]; then
      echo -e "\nService $2.$1 has IP $ip"
      return 0
    fi
    # Some platforms expose a hostname instead of an IP.
    local hostname=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
    if [[ -n "${hostname}" ]]; then
      echo -e "\nService $2.$1 has hostname $hostname"
      return 0
    fi
    echo -n "."
    sleep 6
  done
  echo -e "\n\nERROR: timeout waiting for service $2.$1 to have an external address"
  kubectl get pods -n $1
  return 1
}
# Waits until the given service has an external address (IP/hostname) that allow HTTP connections.
# Parameters: $1 - namespace.
#             $2 - service name.
# Returns: 0 once an HTTP probe against the address gets any real status code,
#          1 after a ~15 minute timeout.
function wait_until_service_has_external_http_address() {
  local ns=$1
  local svc=$2
  local sleep_seconds=6
  local attempts=150
  echo -n "Waiting until service $ns/$svc has an external address (IP/hostname)"
  for attempt in $(seq 1 $attempts); do  # timeout after 15 minutes
    local address=$(kubectl get svc $svc -n $ns -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
    if [[ -n "${address}" ]]; then
      echo -e "Service $ns/$svc has IP $address"
    else
      # Fall back to a hostname when no IP is exposed.
      address=$(kubectl get svc $svc -n $ns -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
      if [[ -n "${address}" ]]; then
        echo -e "Service $ns/$svc has hostname $address"
      fi
    fi
    if [[ -n "${address}" ]]; then
      # curl writes "000" when no HTTP response was received at all.
      local status=$(curl -s -o /dev/null -w "%{http_code}" http://"${address}")
      if [[ $status != "" && $status != "000" ]]; then
        echo -e "$address is ready: prober observed HTTP $status"
        return 0
      else
        echo -e "$address is not ready: prober observed HTTP $status"
      fi
    fi
    echo -n "."
    sleep $sleep_seconds
  done
  echo -e "\n\nERROR: timeout waiting for service $ns/$svc to have an external HTTP address"
  return 1
}
# Waits for the endpoint to be routable.
# Parameters: $1 - External ingress IP address.
#             $2 - cluster hostname.
# Returns: 0 once an HTTP request with the Host header returns any body,
#          1 after a ~5 minute timeout.
function wait_until_routable() {
  echo -n "Waiting until cluster $2 at $1 has a routable endpoint"
  for i in {1..150}; do  # timeout after 5 minutes
    local val=$(curl -H "Host: $2" "http://$1" 2>/dev/null)
    if [[ -n "$val" ]]; then
      echo -e "\nEndpoint is now routable"
      return 0
    fi
    echo -n "."
    sleep 2
  done
  echo -e "\n\nERROR: Timed out waiting for endpoint to be routable"
  return 1
}
# Returns the name of the first pod of the given app.
# Parameters: $1 - app name.
#             $2 - namespace (optional).
function get_app_pod() {
  local all_pods=($(get_app_pods $1 $2))
  echo "${all_pods[0]}"
}
# Returns the names of all pods of the given app (space-separated).
# Parameters: $1 - app name.
#             $2 - namespace (optional).
function get_app_pods() {
  local ns_flag=""
  [[ -n $2 ]] && ns_flag="-n $2"
  kubectl get pods ${ns_flag} --selector=app=$1 --output=jsonpath="{.items[*].metadata.name}"
}
# Capitalize the first letter of each word.
# Parameters: $1..$n - words to capitalize.
function capitalize() {
  local result=()
  local word
  for word in $@; do
    local first="$(echo ${word:0:1} | tr 'a-z' 'A-Z')"
    result+=("${first}${word:1}")
  done
  echo "${result[@]}"
}
# Dumps pod logs for the given app.
# Parameters: $1 - app name.
#             $2 - namespace.
# Outputs: every pod's logs (all containers) to stdout.
function dump_app_logs() {
  echo ">>> ${REPO_NAME_FORMATTED} $1 logs:"
  for pod in $(get_app_pods "$1" "$2")
  do
    echo ">>> Pod: $pod"
    kubectl -n "$2" logs "$pod" --all-containers
  done
}
# Sets the given user as cluster admin.
# Parameters: $1 - user
#             $2 - cluster name
#             $3 - cluster region
#             $4 - cluster zone, optional
# Side effects: rewrites the kubeconfig credentials/context and creates the
# "cluster-admin-binding" clusterrolebinding.
function acquire_cluster_admin_role() {
  echo "Acquiring cluster-admin role for user '$1'"
  local geoflag="--region=$3"
  [[ -n $4 ]] && geoflag="--zone=$3-$4"
  # Get the password of the admin and use it, as the service account (or the user)
  # might not have the necessary permission.
  local password=$(gcloud --format="value(masterAuth.password)" \
      container clusters describe $2 ${geoflag})
  if [[ -n "${password}" ]]; then
    # Cluster created with basic authentication
    kubectl config set-credentials cluster-admin \
        --username=admin --password=${password}
  else
    # No basic auth: fall back to the cluster's client certificate/key.
    local cert=$(mktemp)
    local key=$(mktemp)
    echo "Certificate in ${cert}, key in ${key}"
    gcloud --format="value(masterAuth.clientCertificate)" \
      container clusters describe $2 ${geoflag} | base64 --decode > ${cert}
    gcloud --format="value(masterAuth.clientKey)" \
      container clusters describe $2 ${geoflag} | base64 --decode > ${key}
    kubectl config set-credentials cluster-admin \
      --client-certificate=${cert} --client-key=${key}
  fi
  kubectl config set-context $(kubectl config current-context) \
      --user=cluster-admin
  kubectl create clusterrolebinding cluster-admin-binding \
      --clusterrole=cluster-admin \
      --user=$1
  # Reset back to the default account
  gcloud container clusters get-credentials \
      $2 ${geoflag} --project $(gcloud config get-value project)
}
# Run a command through tee and capture its output.
# Parameters: $1 - file where the output will be stored.
#             $2... - command to run.
# Returns: the command's exit code, unless the command succeeded and tee
# failed, in which case tee's code is returned.
function capture_output() {
  local report="$1"
  shift
  "$@" 2>&1 | tee "${report}"
  local statuses=( "${PIPESTATUS[@]}" )
  local failed=${statuses[0]}
  [[ ${failed} -eq 0 ]] && failed=${statuses[1]}
  return ${failed}
}
# Print failed step, which could be highlighted by spyglass.
# Parameters: $1...n - description of step that failed
function step_failed() {
  printf 'Step failed: %s\n' "$*"
}
# Create a temporary file with the given extension in a way that works on both Linux and macOS.
# Parameters: $1 - file name without extension (e.g. 'myfile_XXXX')
#             $2 - file extension (e.g. 'xml')
# Outputs: the full path of the created file.
function mktemp_with_extension() {
  local base
  base="$(mktemp $1)"
  # mktemp can't produce a templated suffix portably, so rename afterwards.
  mv "${base}" "${base}.$2"
  echo "${base}.$2"
}
# Create a JUnit XML for a test.
# Parameters: $1 - check class name as an identifier (e.g. BuildTests)
#             $2 - check name as an identifier (e.g., GoBuild)
#             $3 - failure message (can contain newlines), optional (means success)
# Outputs: writes a junit_XXXXXXXX.xml file into ${ARTIFACTS}.
function create_junit_xml() {
  local xml="$(mktemp_with_extension ${ARTIFACTS}/junit_XXXXXXXX xml)"
  local failure=""
  if [[ "$3" != "" ]]; then
    # Transform newlines into HTML code.
    # Also escape `<` and `>` as here: https://github.com/golang/go/blob/50bd1c4d4eb4fac8ddeb5f063c099daccfb71b26/src/encoding/json/encode.go#L48,
    # this is temporary solution for fixing https://github.com/knative/test-infra/issues/1204,
    # which should be obsolete once Test-infra 2.0 is in place
    # NOTE(review): the `<`, `>` and `&` substitutions lack the /g flag, so only
    # the first occurrence on each line is escaped — confirm this is intended.
    local msg="$(echo -n "$3" | sed 's/$/\&#xA;/g' | sed 's/</\\u003c/' | sed 's/>/\\u003e/' | sed 's/&/\\u0026/' | tr -d '\n')"
    failure="<failure message=\"Failed\" type=\"\">${msg}</failure>"
  fi
  cat << EOF > "${xml}"
<testsuites>
	<testsuite tests="1" failures="1" time="0.000" name="$1">
		<testcase classname="" name="$2" time="0.0">
			${failure}
		</testcase>
	</testsuite>
</testsuites>
EOF
}
# Runs a go test and generate a junit summary.
# Parameters: $1... - parameters to go test
# Returns: the exit code of `go test` itself.
function report_go_test() {
  # Run tests in verbose mode to capture details.
  # go doesn't like repeating -v, so remove if passed.
  local args=" $@ "
  local go_test="go test -v ${args/ -v / }"
  # Just run regular go tests if not on Prow.
  echo "Running tests with '${go_test}'"
  local report="$(mktemp)"
  capture_output "${report}" ${go_test}
  local failed=$?
  echo "Finished run, return code is ${failed}"
  # Install go-junit-report if necessary.
  run_go_tool github.com/jstemmer/go-junit-report go-junit-report --help > /dev/null 2>&1
  local xml="$(mktemp_with_extension ${ARTIFACTS}/junit_XXXXXXXX xml)"
  # Strip the repo's package prefix so test names are repo-relative.
  cat ${report} \
      | go-junit-report \
      | sed -e "s#\"\(github\.com/knative\|knative\.dev\)/${REPO_NAME}/#\"#g" \
      > ${xml}
  echo "XML report written to ${xml}"
  if [[ -n "$(grep '<testsuites></testsuites>' ${xml})" ]]; then
    # XML report is empty, something's wrong; use the output as failure reason
    create_junit_xml _go_tests "GoTests" "$(cat ${report})"
  fi
  # Capture and report any race condition errors
  local race_errors="$(sed -n '/^WARNING: DATA RACE$/,/^==================$/p' ${report})"
  create_junit_xml _go_tests "DataRaceAnalysis" "${race_errors}"
  if (( ! IS_PROW )); then
    # Keep the suffix, so files are related.
    local logfile=${xml/junit_/go_test_}
    logfile=${logfile/.xml/.log}
    cp ${report} ${logfile}
    echo "Test log written to ${logfile}"
  fi
  return ${failed}
}
# Install Knative Serving in the current cluster.
# Parameters: $1 - Knative Serving manifest.
# Applies the CRDs first (selector knative.dev/crd-install=true), then the
# rest of the manifest, and waits for the knative-serving pods.
function start_knative_serving() {
  header "Starting Knative Serving"
  subheader "Installing Knative Serving"
  echo "Installing Serving CRDs from $1"
  kubectl apply --selector knative.dev/crd-install=true -f "$1"
  echo "Installing the rest of serving components from $1"
  kubectl apply -f "$1"
  wait_until_pods_running knative-serving || return 1
}
# Install Knative Monitoring in the current cluster.
# Parameters: $1 - Knative Monitoring manifest.
function start_knative_monitoring() {
  header "Starting Knative Monitoring"
  subheader "Installing Knative Monitoring"
  # namespace istio-system needs to be created first, due to the comment
  # mentioned in
  # https://github.com/knative/serving/blob/4202efc0dc12052edc0630515b101cbf8068a609/config/monitoring/tracing/zipkin/100-zipkin.yaml#L21
  # Errors are ignored: the namespace may already exist.
  kubectl create namespace istio-system 2>/dev/null
  echo "Installing Monitoring from $1"
  kubectl apply -f "$1" || return 1
  wait_until_pods_running knative-monitoring || return 1
  wait_until_pods_running istio-system || return 1
}
# Install the stable release Knative/serving in the current cluster.
# Parameters: $1 - Knative Serving version number, e.g. 0.6.0.
function start_release_knative_serving() {
  start_knative_serving "https://storage.googleapis.com/knative-releases/serving/previous/v$1/serving.yaml"
}
# Install the latest stable Knative Serving in the current cluster.
# Uses KNATIVE_SERVING_RELEASE, resolved at source time by
# get_latest_knative_yaml_source().
function start_latest_knative_serving() {
  start_knative_serving "${KNATIVE_SERVING_RELEASE}"
}
# Install Knative Eventing in the current cluster.
# Parameters: $1 - Knative Eventing manifest.
# Applies the CRDs first, then the rest, and waits for knative-eventing pods.
function start_knative_eventing() {
  header "Starting Knative Eventing"
  subheader "Installing Knative Eventing"
  echo "Installing Eventing CRDs from $1"
  kubectl apply --selector knative.dev/crd-install=true -f "$1"
  echo "Installing the rest of eventing components from $1"
  kubectl apply -f "$1"
  wait_until_pods_running knative-eventing || return 1
}
# Install the stable release Knative/eventing in the current cluster.
# Parameters: $1 - Knative Eventing version number, e.g. 0.6.0.
function start_release_knative_eventing() {
  start_knative_eventing "https://storage.googleapis.com/knative-releases/eventing/previous/v$1/eventing.yaml"
}
# Install the latest stable Knative Eventing in the current cluster.
# Uses KNATIVE_EVENTING_RELEASE, resolved at source time.
function start_latest_knative_eventing() {
  start_knative_eventing "${KNATIVE_EVENTING_RELEASE}"
}
# Run a go tool, installing it first if necessary.
# Parameters: $1 - tool package/dir for go get/install.
#             $2 - tool to run.
#             $3..$n - parameters passed to the tool.
# Returns: non-zero if the install failed, otherwise the tool's exit code.
function run_go_tool() {
  local tool=$2
  local install_failed=0
  if [[ -z "$(which ${tool})" ]]; then
    # A leading '.' or '/' means a local directory -> `go install`.
    local action=get
    [[ $1 =~ ^[\./].* ]] && action=install
    # Avoid running `go get` from root dir of the repository, as it can change go.sum and go.mod files.
    # See discussions in https://github.com/golang/go/issues/27643.
    if [[ ${action} == "get" && $(pwd) == "${REPO_ROOT_DIR}" ]]; then
      local temp_dir="$(mktemp -d)"
      # Swallow the output as we are returning the stdout in the end.
      pushd "${temp_dir}" > /dev/null 2>&1
      # GOFLAGS is cleared so e.g. -mod=vendor doesn't break the install.
      GOFLAGS="" go ${action} "$1" || install_failed=1
      popd > /dev/null 2>&1
    else
      GOFLAGS="" go ${action} "$1" || install_failed=1
    fi
  fi
  (( install_failed )) && return ${install_failed}
  shift 2
  ${tool} "$@"
}
# Run go-licenses to update licenses.
# Parameters: $1 - output file, relative to repo root dir.
#             $2 - directory to inspect.
function update_licenses() {
  cd "${REPO_ROOT_DIR}" || return 1
  local dst=$1
  local dir=$2
  # NOTE(review): this single shift is a leftover; $dst/$dir are already
  # captured above so it has no effect — confirm before removing.
  shift
  run_go_tool github.com/google/go-licenses go-licenses save "${dir}" --save_path="${dst}" --force || \
    { echo "--- FAIL: go-licenses failed to update licenses"; return 1; }
  # Hack to make sure directories retain write permissions after save. This
  # can happen if the directory being copied is a Go module.
  # See https://github.com/google/go-licenses/issues/11
  chmod -R +w "${dst}"
}
# Run go-licenses to check for forbidden licenses across the whole repo.
function check_licenses() {
  if ! run_go_tool github.com/google/go-licenses go-licenses check "${REPO_ROOT_DIR}/..."; then
    echo "--- FAIL: go-licenses failed the license check"
    return 1
  fi
}
# Run the given linter on the given files, checking it exists first.
# Parameters: $1 - tool
#             $2 - tool purpose (for error message if tool not installed)
#             $3 - tool parameters (quote if multiple parameters used)
#             $4..$n - files to run linter on
# Returns: 127 if the tool is missing, 1 if any file failed, 0 otherwise.
function run_lint_tool() {
  local checker=$1
  local params=$3
  if ! hash ${checker} 2>/dev/null; then
    warning "${checker} not installed, not $2"
    return 127
  fi
  shift 3
  local failed=0
  local file
  # Quote "$@" and "${file}" so file names with spaces survive; ${params}
  # stays unquoted on purpose so multiple flags split into separate words.
  for file in "$@"; do
    ${checker} ${params} "${file}" || failed=1
  done
  return ${failed}
}
# Check links in the given markdown files.
# Parameters: $1...$n - files to inspect
# Uses the repo's test/markdown-link-check-config.rc when present, otherwise
# the default config shipped next to these scripts.
function check_links_in_markdown() {
  # https://github.com/raviqqe/liche
  local config="${REPO_ROOT_DIR}/test/markdown-link-check-config.rc"
  [[ ! -e ${config} ]] && config="${_TEST_INFRA_SCRIPTS_DIR}/markdown-link-check-config.rc"
  # Only lines starting with '-' in the config are liche options.
  local options="$(grep '^-' ${config} | tr \"\n\" ' ')"
  run_lint_tool liche "checking links in markdown files" "-d ${REPO_ROOT_DIR} ${options}" $@
}
# Check format of the given markdown files.
# Parameters: $1..$n - files to inspect
# Uses the repo's test/markdown-lint-config.rc when present, otherwise the
# default config shipped next to these scripts.
function lint_markdown() {
  # https://github.com/markdownlint/markdownlint
  local config="${REPO_ROOT_DIR}/test/markdown-lint-config.rc"
  [[ ! -e ${config} ]] && config="${_TEST_INFRA_SCRIPTS_DIR}/markdown-lint-config.rc"
  run_lint_tool mdl "linting markdown files" "-c ${config}" $@
}
# Return whether the given parameter is a non-negative integer.
# Parameters: $1 - value to check
function is_int() {
  [[ -n $1 ]] && [[ $1 =~ ^[0-9]+$ ]]
}
# Return whether the given parameter is the knative release/nightly GCR.
# Parameters: $1 - full GCR name, e.g. gcr.io/knative-foo-bar
function is_protected_gcr() {
  [[ -n $1 ]] && [[ $1 =~ ^gcr.io/knative-(releases|nightly)/?$ ]]
}
# Return whether the given parameter is any cluster under ${KNATIVE_TESTS_PROJECT}.
# Parameters: $1 - Kubernetes cluster context (output of kubectl config current-context)
function is_protected_cluster() {
  # Example: gke_knative-tests_us-central1-f_prow
  [[ -n $1 ]] && [[ $1 =~ ^gke_${KNATIVE_TESTS_PROJECT}_us\-[a-zA-Z0-9]+\-[a-z]+_[a-z0-9\-]+$ ]]
}
# Return whether the given parameter is ${KNATIVE_TESTS_PROJECT}.
# Parameters: $1 - project name
function is_protected_project() {
  [[ -n $1 ]] && [[ "$1" == "${KNATIVE_TESTS_PROJECT}" ]]
}
# Remove symlinks in a path that are broken or lead outside the repo.
# Parameters: $1 - path name, e.g. vendor
# Note: iterates via `while read < <(find)` instead of `for x in $(find)`,
# so paths containing spaces are handled correctly (SC2044/SC2086).
function remove_broken_symlinks() {
  local link
  while IFS= read -r link; do
    # Remove broken symlinks
    if [[ ! -e ${link} ]]; then
      unlink "${link}"
      continue
    fi
    # Get canonical path to target, remove if outside the repo
    local target="$(ls -l "${link}")"
    target="${target##* -> }"
    [[ ${target} == /* ]] || target="./${target}"
    target="$(cd "$(dirname "${link}")" && cd "${target%/*}" && echo "$PWD/${target##*/}")"
    if [[ ${target} != *github.com/knative/* && ${target} != *knative.dev/* ]]; then
      unlink "${link}"
      continue
    fi
  done < <(find "$1" -type l)
}
# Returns the canonical path of a filesystem object.
# Parameters: $1 - path to return in canonical form
#             $2 - base dir for relative links; optional, defaults to current
function get_canonical_path() {
  # We don't use readlink because it's not available on every platform.
  local target=$1
  local base=${2:-.}
  [[ ${target} == /* ]] || target="${base}/${target}"
  echo "$(cd "${target%/*}" && echo "$PWD/${target##*/}")"
}
# List changed files in the current PR, one per line.
# This is implemented as a function so it can be mocked in unit tests.
# It will fail if a file name ever contained a newline character (which is bad practice anyway)
function list_changed_files() {
  if [[ ! -v PULL_BASE_SHA || ! -v PULL_PULL_SHA ]]; then
    # Do our best if not running in Prow
    git diff --name-only HEAD^
    return
  fi
  # Avoid warning when there are more than 1085 files renamed:
  # https://stackoverflow.com/questions/7830728/warning-on-diff-renamelimit-variable-when-doing-git-push
  git config diff.renames 0
  git --no-pager diff --name-only ${PULL_BASE_SHA}..${PULL_PULL_SHA}
}
# Returns the current branch.
# On Prow, prefer PULL_BASE_REF (see https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md);
# otherwise fall back to asking git.
function current_branch() {
  local name=""
  (( IS_PROW )) && name="${PULL_BASE_REF:-}"
  if [[ -z "${name}" ]]; then
    name="$(git rev-parse --abbrev-ref HEAD)"
  fi
  echo "${name}"
}
# Returns whether the current branch is a release branch (release-X.Y).
function is_release_branch() {
  [[ "$(current_branch)" =~ ^release-[0-9\.]+$ ]]
}
# Returns the URL to the latest manifest for the given Knative project.
# Parameters: $1 - repository name of the given project
#             $2 - name of the yaml file, without extension
# On a release branch, points at the newest release with the branch's
# major.minor version; otherwise (or if none found) points at nightly.
function get_latest_knative_yaml_source() {
  local repo_name="$1"
  local yaml_name="$2"
  # If it's a release branch, the yaml source URL should point to a specific version.
  if is_release_branch; then
    # Extract the release major&minor version from the branch name.
    local branch_name="$(current_branch)"
    local major_minor="${branch_name##release-}"
    # Find the latest release manifest with the same major&minor version.
    # cut -b6- strips the "gs://" scheme prefix from the gsutil listing.
    local yaml_source_path="$(
      gsutil ls gs://knative-releases/${repo_name}/previous/v${major_minor}.*/${yaml_name}.yaml 2> /dev/null \
      | sort \
      | tail -n 1 \
      | cut -b6-)"
    # The version does exist, return it.
    if [[ -n "${yaml_source_path}" ]]; then
      echo "https://storage.googleapis.com/${yaml_source_path}"
      return
    fi
    # Otherwise, fall back to nightly.
  fi
  echo "https://storage.googleapis.com/knative-nightly/${repo_name}/latest/${yaml_name}.yaml"
}
# Run shellcheck on every changed shell script in the current PR, skipping
# anything under vendor/. Fails the script if any file has findings.
function shellcheck_new_files() {
  declare -a array_of_files
  local failed=0
  local filename
  # readarray with its default newline delimiter yields one file per element.
  # (The previous `-d '\n'` form used only '\' as delimiter, leaving the whole
  # listing in a single element.)
  readarray -t array_of_files < <(list_changed_files)
  for filename in "${array_of_files[@]}"; do
    if echo "${filename}" | grep -q "^vendor/"; then
      continue
    fi
    if file "${filename}" | grep -q "shell script"; then
      # SC1090 is "Can't follow non-constant source"; we will scan files individually
      if shellcheck -e SC1090 "${filename}"; then
        echo "--- PASS: shellcheck on ${filename}"
      else
        echo "--- FAIL: shellcheck on ${filename}"
        failed=1
      fi
    fi
  done
  if [[ ${failed} -eq 1 ]]; then
    # NOTE(review): fail_script is not defined in this file — confirm it is
    # provided by the sourcing script.
    fail_script "shellcheck failures"
  fi
}
# Initializations that depend on previous functions.
# These MUST come last.
# Directory containing these scripts, used to locate the bundled lint configs.
readonly _TEST_INFRA_SCRIPTS_DIR="$(dirname $(get_canonical_path ${BASH_SOURCE[0]}))"
# Human-readable repo name, e.g. "Knative Test Infra" for "test-infra".
readonly REPO_NAME_FORMATTED="Knative $(capitalize ${REPO_NAME//-/ })"
# Public latest nightly or release yaml files.
readonly KNATIVE_SERVING_RELEASE="$(get_latest_knative_yaml_source "serving" "serving")"
readonly KNATIVE_EVENTING_RELEASE="$(get_latest_knative_yaml_source "eventing" "eventing")"
readonly KNATIVE_MONITORING_RELEASE="$(get_latest_knative_yaml_source "serving" "monitoring")"

View File

@ -0,0 +1,5 @@
# For help, see
# https://github.com/raviqqe/liche/blob/master/README.md
# Don't check localhost links and don't check templated links
-x "(^https?://localhost($|[:/].*))|(^https://.*{{.*$)"

View File

@ -0,0 +1,5 @@
# For help, see
# https://github.com/markdownlint/markdownlint/blob/master/docs/configuration.md
# Ignore long lines
rules "~MD013"

View File

@ -0,0 +1,156 @@
#!/bin/bash
# Copyright 2019 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script for Knative performance test scripts.
# See README.md for instructions on how to use it.
source $(dirname ${BASH_SOURCE})/library.sh
# Configurable parameters.
# If not provided, they will fall back to the default values.
readonly BENCHMARK_ROOT_PATH=${BENCHMARK_ROOT_PATH:-test/performance/benchmarks}
readonly PROJECT_NAME=${PROJECT_NAME:-knative-performance}
readonly SERVICE_ACCOUNT_NAME=${SERVICE_ACCOUNT_NAME:-mako-job@knative-performance.iam.gserviceaccount.com}
# Setup env vars.
export KO_DOCKER_REPO="gcr.io/${PROJECT_NAME}"
# Constants
# Credential/token file paths — presumably mounted into the job container
# from a Kubernetes secret; confirm against the Prow job config.
readonly GOOGLE_APPLICATION_CREDENTIALS="/etc/performance-test/service-account.json"
readonly GITHUB_TOKEN="/etc/performance-test/github-token"
readonly SLACK_READ_TOKEN="/etc/performance-test/slack-read-token"
readonly SLACK_WRITE_TOKEN="/etc/performance-test/slack-write-token"
# Set up the user for cluster operations.
# Switches the active gcloud account and project to the performance
# service account / project defined above.
function setup_user() {
  echo ">> Setting up user"
  echo "Using gcloud user ${SERVICE_ACCOUNT_NAME}"
  gcloud config set core/account ${SERVICE_ACCOUNT_NAME}
  echo "Using gcloud project ${PROJECT_NAME}"
  gcloud config set core/project ${PROJECT_NAME}
}
# Update resources installed on the cluster.
# Parameters: $1 - cluster name
#             $2 - cluster region/zone
# Side effects: rewrites the config-mako ConfigMap and mako-secrets Secret,
# deletes all jobs/cronjobs, then runs the repo's update hooks if defined.
function update_cluster() {
  # --zone option can work with both region and zone, (e.g. us-central1 and
  # us-central1-a), so we don't need to add extra check here.
  gcloud container clusters get-credentials $1 --zone=$2 --project=${PROJECT_NAME} || abort "failed to get cluster creds"
  # Set up the configmap to run benchmarks in production
  echo ">> Setting up 'prod' config-mako on cluster $1 in zone $2"
  cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: config-mako
data:
  # This should only be used by our performance automation.
  environment: prod
EOF
  # Create secrets required for running benchmarks on the cluster
  echo ">> Creating secrets on cluster $1 in zone $2"
  kubectl create secret generic mako-secrets \
    --from-file=robot.json=${GOOGLE_APPLICATION_CREDENTIALS} \
    --from-file=github-token=${GITHUB_TOKEN} \
    --from-file=slack-read-token=${SLACK_READ_TOKEN} \
    --from-file=slack-write-token=${SLACK_WRITE_TOKEN}
  # Delete all benchmark jobs to avoid noise in the update process
  echo ">> Deleting all cronjobs and jobs on cluster $1 in zone $2"
  kubectl delete cronjob --all
  kubectl delete job --all
  # Repo-specific hooks, only run when the sourcing script defines them.
  if function_exists update_knative; then
    update_knative || abort "failed to update knative"
  fi
  # get benchmark name from the cluster name
  local benchmark_name=$(get_benchmark_name $1)
  if function_exists update_benchmark; then
    update_benchmark ${benchmark_name} || abort "failed to update benchmark"
  fi
}
# Get benchmark name from the cluster name.
# Parameters: $1 - cluster name
function get_benchmark_name() {
  # Cluster names look like "<repo>--<benchmark>"; strip the repo prefix.
  echo "${1#${REPO_NAME}--}"
}
# Update the clusters related to the current repo.
# Lists every GKE cluster in ${PROJECT_NAME} and runs update_cluster on each
# one whose name starts with "${REPO_NAME}--"; other clusters are skipped.
function update_clusters() {
  header "Updating all clusters for ${REPO_NAME}"
  local all_clusters=$(gcloud container clusters list --project="${PROJECT_NAME}" --format="csv[no-heading](name,zone)")
  echo ">> Project contains clusters:" ${all_clusters}
  # Each ${cluster} entry is a "name,zone" CSV pair.
  for cluster in ${all_clusters}; do
    local name=$(echo "${cluster}" | cut -f1 -d",")
    # the cluster name is prefixed with "${REPO_NAME}--", here we should only handle clusters belonged to the current repo
    [[ ! ${name} =~ ^${REPO_NAME}-- ]] && continue
    local zone=$(echo "${cluster}" | cut -f2 -d",")
    # Update all resources installed on the cluster
    update_cluster ${name} ${zone}
  done
  header "Done updating all clusters"
}
# Run the perf-tests tool
# Parameters: $1..$n - parameters passed to the tool
function run_perf_cluster_tool() {
  # Quote "$@" so arguments containing spaces are forwarded intact.
  go run "${REPO_ROOT_DIR}/vendor/knative.dev/pkg/testutils/clustermanager/perf-tests" "$@"
}
# Delete the old clusters belonged to the current repo, and recreate them with the same configuration.
# Globals read: PROJECT_NAME, REPO_NAME, BENCHMARK_ROOT_PATH.
function recreate_clusters() {
  header "Recreating clusters for ${REPO_NAME}"
  run_perf_cluster_tool --recreate \
    --gcp-project=${PROJECT_NAME} --repository=${REPO_NAME} --benchmark-root=${BENCHMARK_ROOT_PATH} \
    || abort "failed recreating clusters for ${REPO_NAME}"
  header "Done recreating clusters"
  # Update all clusters after they are recreated
  update_clusters
}
# Try to reconcile clusters for benchmarks in the current repo.
# This function will be run as postsubmit jobs.
# Globals read: PROJECT_NAME, REPO_NAME, BENCHMARK_ROOT_PATH.
function reconcile_benchmark_clusters() {
  header "Reconciling clusters for ${REPO_NAME}"
  run_perf_cluster_tool --reconcile \
    --gcp-project=${PROJECT_NAME} --repository=${REPO_NAME} --benchmark-root=${BENCHMARK_ROOT_PATH} \
    || abort "failed reconciling clusters for ${REPO_NAME}"
  header "Done reconciling clusters"
  # For now, do nothing after reconciling the clusters, and the next update_clusters job will automatically
  # update them. So there will be a period that the newly created clusters are being idle, and the duration
  # can be as long as <update_clusters interval>.
}
# Parse flags and execute the command.
# Accepts exactly one of: --recreate-clusters, --update-clusters,
# --reconcile-benchmark-clusters. Must run on Prow (needs cluster secrets).
function main() {
  if (( ! IS_PROW )); then
    abort "this script should only be run by Prow since it needs secrets created on Prow cluster"
  fi
  # Set up the user credential for cluster operations
  setup_user || abort "failed to set up user"
  # Try parsing the first flag as a command.
  case $1 in
    --recreate-clusters) recreate_clusters ;;
    --update-clusters) update_clusters ;;
    --reconcile-benchmark-clusters) reconcile_benchmark_clusters ;;
    # Bug fix: the message previously advertised "--reconcile_benchmark_clusters"
    # (underscores), which the case above never accepts.
    *) abort "unknown command $1, must be --recreate-clusters, --update-clusters or --reconcile-benchmark-clusters"
  esac
  shift
}

View File

@ -0,0 +1,391 @@
#!/usr/bin/env bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script for Knative presubmit test scripts.
# See README.md for instructions on how to use it.
source $(dirname ${BASH_SOURCE})/library.sh
# Custom configuration of presubmit tests
readonly DISABLE_MD_LINTING=${DISABLE_MD_LINTING:-0}
readonly DISABLE_MD_LINK_CHECK=${DISABLE_MD_LINK_CHECK:-0}
readonly PRESUBMIT_TEST_FAIL_FAST=${PRESUBMIT_TEST_FAIL_FAST:-0}
# Extensions or file patterns that don't require presubmit tests.
readonly NO_PRESUBMIT_FILES=(\.png \.gitignore \.gitattributes ^OWNERS ^OWNERS_ALIASES ^AUTHORS)
# Flag if this is a presubmit run or not.
# PULL_PULL_SHA is only populated for PR-triggered (presubmit) Prow jobs.
(( IS_PROW )) && [[ -n "${PULL_PULL_SHA}" ]] && IS_PRESUBMIT=1 || IS_PRESUBMIT=0
readonly IS_PRESUBMIT
# List of changed files on presubmit, LF separated.
CHANGED_FILES=""
# Flags that this PR is exempt of presubmit tests.
IS_PRESUBMIT_EXEMPT_PR=0
# Flags that this PR contains only changes to documentation.
IS_DOCUMENTATION_PR=0
# Returns true if PR only contains the given file regexes.
# Parameters: $1 - file regexes, space separated.
# Globals read: CHANGED_FILES.
function pr_only_contains() {
  # Turn the space-separated patterns into a single alternation anchored at
  # end-of-line, then check that no changed file fails to match it.
  local pattern="\(${1// /\\|}\)$"
  local leftover
  leftover="$(echo "${CHANGED_FILES}" | grep -v "${pattern}")"
  [[ -z "${leftover}" ]]
}
# Initialize flags and context for presubmit tests:
# CHANGED_FILES, IS_PRESUBMIT_EXEMPT_PR and IS_DOCUMENTATION_PR.
# Must be called before any test phase; the three globals become readonly.
function initialize_environment() {
  CHANGED_FILES=""
  IS_PRESUBMIT_EXEMPT_PR=0
  IS_DOCUMENTATION_PR=0
  # Postsubmit/periodic runs have no PR diff to inspect.
  (( ! IS_PRESUBMIT )) && return
  CHANGED_FILES="$(list_changed_files)"
  if [[ -n "${CHANGED_FILES}" ]]; then
    echo -e "Changed files in commit ${PULL_PULL_SHA}:\n${CHANGED_FILES}"
    local no_presubmit_files="${NO_PRESUBMIT_FILES[*]}"
    pr_only_contains "${no_presubmit_files}" && IS_PRESUBMIT_EXEMPT_PR=1
    # A documentation PR must contain markdown files
    if pr_only_contains "\.md ${no_presubmit_files}"; then
      [[ -n "$(echo "${CHANGED_FILES}" | grep '\.md')" ]] && IS_DOCUMENTATION_PR=1
    fi
  else
    header "NO CHANGED FILES REPORTED, ASSUMING IT'S AN ERROR AND RUNNING TESTS ANYWAY"
  fi
  readonly CHANGED_FILES
  readonly IS_DOCUMENTATION_PR
  readonly IS_PRESUBMIT_EXEMPT_PR
}
# Display a pass/fail banner for a test group.
# Parameters: $1 - test group name (e.g., build)
#             $2 - result (0=passed, 1=failed)
function results_banner() {
  local outcome
  if [[ $2 -eq 0 ]]; then
    outcome="PASSED"
  else
    outcome="FAILED"
  fi
  header "$1 tests ${outcome}"
}
# Run build tests. If there's no `build_tests` function, run the default
# build test runner.
# Optional repo-provided hooks: pre_build_tests, build_tests, post_build_tests.
# Returns: 0 iff every executed phase succeeded.
function run_build_tests() {
  (( ! RUN_BUILD_TESTS )) && return 0
  header "Running build tests"
  local failed=0
  # Run pre-build tests, if any
  if function_exists pre_build_tests; then
    pre_build_tests || { failed=1; step_failed "pre_build_tests"; }
  fi
  # Don't run build tests if pre-build tests failed
  if (( ! failed )); then
    if function_exists build_tests; then
      build_tests || { failed=1; step_failed "build_tests"; }
    else
      default_build_test_runner || { failed=1; step_failed "default_build_test_runner"; }
    fi
  fi
  # Don't run post-build tests if pre/build tests failed
  if (( ! failed )) && function_exists post_build_tests; then
    post_build_tests || { failed=1; step_failed "post_build_tests"; }
  fi
  results_banner "Build" ${failed}
  return ${failed}
}
# Run a build test and report its output as the failure if it fails.
# Parameters: $1 - report name.
#             $2... - command (test) to run.
# Returns: 0 iff the command produced no captured errors.
function report_build_test() {
  local report="$(mktemp)"
  local report_name="$1"
  shift
  local errors=""
  capture_output "${report}" "$@" || errors="$(cat ${report})"
  # Always emit a JUnit XML entry so Prow can surface the result.
  create_junit_xml _build_tests "${report_name}" "${errors}"
  [[ -z "${errors}" ]]
}
# Perform markdown build tests if necessary, unless disabled.
# Lints and/or link-checks the markdown files changed by the PR.
# Returns: 0 iff all enabled checks passed (or nothing to check).
function markdown_build_tests() {
  (( DISABLE_MD_LINTING && DISABLE_MD_LINK_CHECK )) && return 0
  # Get changed markdown files (ignore /vendor, github templates, and deleted files)
  local mdfiles=""
  for file in $(echo "${CHANGED_FILES}" | grep \\.md$ | grep -v ^vendor/ | grep -v ^.github/); do
    # Only keep files that still exist on disk (skips files deleted by the PR).
    [[ -f "${file}" ]] && mdfiles="${mdfiles} ${file}"
  done
  [[ -z "${mdfiles}" ]] && return 0
  local failed=0
  if (( ! DISABLE_MD_LINTING )); then
    subheader "Linting the markdown files"
    report_build_test Markdown_Lint lint_markdown ${mdfiles} || failed=1
  fi
  if (( ! DISABLE_MD_LINK_CHECK )); then
    subheader "Checking links in the markdown files"
    report_build_test Markdown_Link check_links_in_markdown ${mdfiles} || failed=1
  fi
  return ${failed}
}
# Default build test runner that:
# * check markdown files
# * run `/hack/verify-codegen.sh` (if it exists)
# * `go build` on the entire repo
# * check licenses in all go packages
# Returns: 0 iff all checks passed.
function default_build_test_runner() {
  local failed=0
  # Perform markdown build checks
  markdown_build_tests || failed=1
  # Run verify-codegen check
  if [[ -f ./hack/verify-codegen.sh ]]; then
    subheader "Checking autogenerated code is up-to-date"
    report_build_test Verify_CodeGen ./hack/verify-codegen.sh || failed=1
  fi
  # For documentation PRs, just check the md files and run
  # verify-codegen (as md files can be auto-generated in some repos).
  (( IS_DOCUMENTATION_PR )) && return ${failed}
  # Don't merge these two lines, or return code will always be 0.
  local go_pkg_dirs
  go_pkg_dirs="$(go list ./...)" || return 1
  # Skip build test if there is no go code
  [[ -z "${go_pkg_dirs}" ]] && return ${failed}
  # Ensure all the code builds
  subheader "Checking that go code builds"
  local report="$(mktemp)"
  local errors_go1=""
  local errors_go2=""
  if ! capture_output "${report}" go build -v ./... ; then
    failed=1
    # Consider an error message everything that's not a package name.
    errors_go1="$(grep -v '^\(github\.com\|knative\.dev\)/' "${report}" | sort | uniq)"
  fi
  # Get all build tags in go code (ignore /vendor, /hack and /third_party)
  local tags="$(grep -r '// +build' . \
      | grep -v '^./vendor/' | grep -v '^./hack/' | grep -v '^./third_party' \
      | cut -f3 -d' ' | sort | uniq | tr '\n' ' ')"
  # Packages containing build-tagged files; compiled separately below since
  # plain `go build ./...` skips files excluded by tags.
  local tagged_pkgs="$(grep -r '// +build' . \
      | grep -v '^./vendor/' | grep -v '^./hack/' | grep -v '^./third_party' \
      | grep ":// +build " | cut -f1 -d: | xargs dirname \
      | sort | uniq | tr '\n' ' ')"
  for pkg in ${tagged_pkgs}; do
    # `go test -c` lets us compile the tests but do not run them.
    if ! capture_output "${report}" go test -c -tags="${tags}" ${pkg} ; then
      failed=1
      # Consider an error message everything that's not a successful test result.
      errors_go2+="$(grep -v '^\(ok\|\?\)\s\+\(github\.com\|knative\.dev\)/' "${report}")"
    fi
    # Remove unused generated binary, if any.
    rm -f e2e.test
  done
  local errors_go="$(echo -e "${errors_go1}\n${errors_go2}" | uniq)"
  create_junit_xml _build_tests Build_Go "${errors_go}"
  # Check that we don't have any forbidden licenses in our images.
  subheader "Checking for forbidden licenses"
  report_build_test Check_Licenses check_licenses || failed=1
  return ${failed}
}
# Run unit tests. If there's no `unit_tests` function, run the default
# unit test runner.
# Optional repo-provided hooks: pre_unit_tests, unit_tests, post_unit_tests.
# Skipped entirely for documentation-only PRs.
function run_unit_tests() {
  (( ! RUN_UNIT_TESTS )) && return 0
  if (( IS_DOCUMENTATION_PR )); then
    header "Documentation only PR, skipping unit tests"
    return 0
  fi
  header "Running unit tests"
  local failed=0
  # Run pre-unit tests, if any
  if function_exists pre_unit_tests; then
    pre_unit_tests || { failed=1; step_failed "pre_unit_tests"; }
  fi
  # Don't run unit tests if pre-unit tests failed
  if (( ! failed )); then
    if function_exists unit_tests; then
      unit_tests || { failed=1; step_failed "unit_tests"; }
    else
      default_unit_test_runner || { failed=1; step_failed "default_unit_test_runner"; }
    fi
  fi
  # Don't run post-unit tests if pre/unit tests failed
  if (( ! failed )) && function_exists post_unit_tests; then
    post_unit_tests || { failed=1; step_failed "post_unit_tests"; }
  fi
  results_banner "Unit" ${failed}
  return ${failed}
}
# Default unit test runner that runs all go tests in the repo.
# Delegates to report_go_test (library.sh) with the race detector enabled.
function default_unit_test_runner() {
  report_go_test -race ./...
}
# Run integration tests. If there's no `integration_tests` function, run the
# default integration test runner.
# Optional hooks: pre_integration_tests, integration_tests, post_integration_tests.
# Skipped entirely for documentation-only PRs.
function run_integration_tests() {
  # Don't run integration tests if not requested OR on documentation PRs
  (( ! RUN_INTEGRATION_TESTS )) && return 0
  if (( IS_DOCUMENTATION_PR )); then
    header "Documentation only PR, skipping integration tests"
    return 0
  fi
  header "Running integration tests"
  local failed=0
  # Run pre-integration tests, if any
  if function_exists pre_integration_tests; then
    pre_integration_tests || { failed=1; step_failed "pre_integration_tests"; }
  fi
  # Don't run integration tests if pre-integration tests failed
  if (( ! failed )); then
    if function_exists integration_tests; then
      integration_tests || { failed=1; step_failed "integration_tests"; }
    else
      default_integration_test_runner || { failed=1; step_failed "default_integration_test_runner"; }
    fi
  fi
  # Don't run integration tests if pre/integration tests failed
  if (( ! failed )) && function_exists post_integration_tests; then
    post_integration_tests || { failed=1; step_failed "post_integration_tests"; }
  fi
  results_banner "Integration" ${failed}
  return ${failed}
}
# Default integration test runner that runs all `test/e2e-*tests.sh`.
# Returns: 0 iff every e2e script succeeded.
function default_integration_test_runner() {
  local options=""
  local failed=0
  # Quote the -name pattern so the shell cannot glob-expand it against files
  # in the current directory before find ever sees it.
  for e2e_test in $(find test/ -name "e2e-*tests.sh"); do
    echo "Running integration test ${e2e_test}"
    if ! ${e2e_test} ${options}; then
      failed=1
      step_failed "${e2e_test} ${options}"
    fi
  done
  return ${failed}
}
# Options set by command-line flags.
# Defaults to "run nothing"; main() enables phases from flags and then marks
# these readonly.
RUN_BUILD_TESTS=0
RUN_UNIT_TESTS=0
RUN_INTEGRATION_TESTS=0
# Process flags and run tests accordingly.
# Flags: --build-tests, --unit-tests, --integration-tests, --all-tests
#        (default), or --run-test <executable> (repeatable, must be alone).
# Exits with 0 on success, 1 if any selected phase failed.
function main() {
  initialize_environment
  if (( IS_PRESUBMIT_EXEMPT_PR )) && (( ! IS_DOCUMENTATION_PR )); then
    header "Commit only contains changes that don't require tests, skipping"
    exit 0
  fi
  # Show the version of the tools we're using
  if (( IS_PROW )); then
    # Disable gcloud update notifications
    gcloud config set component_manager/disable_update_check true
    header "Current test setup"
    echo ">> gcloud SDK version"
    gcloud version
    echo ">> kubectl version"
    kubectl version --client
    echo ">> go version"
    go version
    echo ">> go env"
    go env
    echo ">> python3 version"
    python3 --version
    echo ">> git version"
    git version
    echo ">> ko version"
    [[ -f /ko_version ]] && cat /ko_version || echo "unknown"
    echo ">> bazel version"
    [[ -f /bazel_version ]] && cat /bazel_version || echo "unknown"
    if [[ "${DOCKER_IN_DOCKER_ENABLED}" == "true" ]]; then
      echo ">> docker version"
      docker version
    fi
    # node/pod names are important for debugging purposes, but they are missing
    # after migrating from bootstrap to podutil.
    # Report it here with the same logic as in bootstrap until it is fixed.
    # (https://github.com/kubernetes/test-infra/blob/09bd4c6709dc64308406443f8996f90cf3b40ed1/jenkins/bootstrap.py#L588)
    # TODO(chaodaiG): follow up on https://github.com/kubernetes/test-infra/blob/0fabd2ea816daa8c15d410c77a0c93c0550b283f/prow/initupload/run.go#L49
    echo ">> node name"
    echo "$(curl -H "Metadata-Flavor: Google" 'http://169.254.169.254/computeMetadata/v1/instance/name' 2> /dev/null)"
    echo ">> pod name"
    echo ${HOSTNAME}
  fi
  # No flags means run everything.
  [[ -z $1 ]] && set -- "--all-tests"
  local TESTS_TO_RUN=()
  while [[ $# -ne 0 ]]; do
    local parameter=$1
    case ${parameter} in
      --build-tests) RUN_BUILD_TESTS=1 ;;
      --unit-tests) RUN_UNIT_TESTS=1 ;;
      --integration-tests) RUN_INTEGRATION_TESTS=1 ;;
      --all-tests)
        RUN_BUILD_TESTS=1
        RUN_UNIT_TESTS=1
        RUN_INTEGRATION_TESTS=1
        ;;
      --run-test)
        shift
        [[ $# -ge 1 ]] || abort "missing executable after --run-test"
        TESTS_TO_RUN+=("$1")
        ;;
      *) abort "error: unknown option ${parameter}" ;;
    esac
    shift
  done
  readonly RUN_BUILD_TESTS
  readonly RUN_UNIT_TESTS
  readonly RUN_INTEGRATION_TESTS
  readonly TESTS_TO_RUN
  cd ${REPO_ROOT_DIR}
  # Tests to be performed, in the right order if --all-tests is passed.
  local failed=0
  if [[ ${#TESTS_TO_RUN[@]} -gt 0 ]]; then
    if (( RUN_BUILD_TESTS || RUN_UNIT_TESTS || RUN_INTEGRATION_TESTS )); then
      abort "--run-test must be used alone"
    fi
    # If this is a presubmit run, but a documentation-only PR, don't run the test
    if (( IS_PRESUBMIT && IS_DOCUMENTATION_PR )); then
      header "Documentation only PR, skipping running custom test"
      exit 0
    fi
    for test_to_run in "${TESTS_TO_RUN[@]}"; do
      ${test_to_run} || { failed=1; step_failed "${test_to_run}"; }
    done
  fi
  run_build_tests || { failed=1; step_failed "run_build_tests"; }
  # If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run unit tests if build tests failed
  if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then
    run_unit_tests || { failed=1; step_failed "run_unit_tests"; }
  fi
  # If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run integration tests if build/unit tests failed
  if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then
    run_integration_tests || { failed=1; step_failed "run_integration_tests"; }
  fi
  exit ${failed}
}

608
scripts/test-infra/release.sh Executable file
View File

@ -0,0 +1,608 @@
#!/usr/bin/env bash
# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a helper script for Knative release scripts.
# See README.md for instructions on how to use it.
source $(dirname ${BASH_SOURCE})/library.sh
# Organization name in GitHub; defaults to Knative.
readonly ORG_NAME="${ORG_NAME:-knative}"
# GitHub upstream. REPO_NAME is provided by library.sh.
readonly REPO_UPSTREAM="https://github.com/${ORG_NAME}/${REPO_NAME}"
# GCRs for Knative releases.
readonly NIGHTLY_GCR="gcr.io/knative-nightly/github.com/${ORG_NAME}/${REPO_NAME}"
readonly RELEASE_GCR="gcr.io/knative-releases/github.com/${ORG_NAME}/${REPO_NAME}"
# Georeplicate images to {us,eu,asia}.gcr.io
readonly GEO_REPLICATION=(us eu asia)
# Simple banner for logging purposes.
# Parameters: $1 - message to display.
# Delegates to make_banner (library.sh) using "@" as the border character.
function banner() {
    make_banner "@" "$1"
}
# Tag images in the yaml files if $TAG is not empty.
# $KO_DOCKER_REPO is the registry containing the images to tag with $TAG.
# Parameters: $1..$n - files to parse for images (non .yaml files are ignored).
function tag_images_in_yamls() {
  [[ -z ${TAG} ]] && return 0
  local SRC_DIR="${GOPATH}/src/"
  # The repo's image path in the registry mirrors its location under GOPATH.
  local DOCKER_BASE="${KO_DOCKER_REPO}/${REPO_ROOT_DIR/$SRC_DIR}"
  local GEO_REGIONS="${GEO_REPLICATION[@]} "
  echo "Tagging any images under '${DOCKER_BASE}' with ${TAG}"
  for file in $@; do
    [[ "${file##*.}" != "yaml" ]] && continue
    echo "Inspecting ${file}"
    # Match sha256-pinned image references produced by ko.
    for image in $(grep -o "${DOCKER_BASE}/[a-z\./-]\+@sha256:[0-9a-f]\+" ${file}); do
      # Tag in the main registry ("" region) and each geo-replicated mirror.
      for region in "" ${GEO_REGIONS// /. }; do
        gcloud -q container images add-tag ${image} ${region}${image%%@*}:${TAG}
      done
    done
  done
}
# Copy the given files to the $RELEASE_GCS_BUCKET bucket's "latest" directory.
# If $TAG is not empty, also copy them to $RELEASE_GCS_BUCKET bucket's "previous" directory.
# Parameters: $1..$n - files to copy.
function publish_to_gcs() {
  # Helper: copy $2.. into gs://${RELEASE_GCS_BUCKET}/$1/ with logging.
  function verbose_gsutil_cp {
    local DEST="gs://${RELEASE_GCS_BUCKET}/$1/"
    shift
    echo "Publishing [$@] to ${DEST}"
    gsutil -m cp $@ ${DEST}
  }
  # Before publishing the files, cleanup the `latest` dir if it exists.
  local latest_dir="gs://${RELEASE_GCS_BUCKET}/latest"
  if [[ -n "$(gsutil ls ${latest_dir} 2> /dev/null)" ]]; then
    echo "Cleaning up '${latest_dir}' first"
    gsutil -m rm ${latest_dir}/**
  fi
  verbose_gsutil_cp latest $@
  [[ -n ${TAG} ]] && verbose_gsutil_cp previous/${TAG} $@
}
# These are global environment variables.
# Defaults below describe a nightly, unpublished build; parse_flags()
# overrides them based on command-line flags and then marks most readonly.
SKIP_TESTS=0
PRESUBMIT_TEST_FAIL_FAST=1
TAG_RELEASE=0
PUBLISH_RELEASE=0
PUBLISH_TO_GITHUB=0
TAG=""
BUILD_COMMIT_HASH=""
BUILD_YYYYMMDD=""
BUILD_TIMESTAMP=""
BUILD_TAG=""
RELEASE_VERSION=""
RELEASE_NOTES=""
RELEASE_BRANCH=""
RELEASE_GCS_BUCKET="knative-nightly/${REPO_NAME}"
RELEASE_DIR=""
KO_FLAGS="-P"
VALIDATION_TESTS="./test/presubmit-tests.sh"
ARTIFACTS_TO_PUBLISH=""
FROM_NIGHTLY_RELEASE=""
FROM_NIGHTLY_RELEASE_GCS=""
export KO_DOCKER_REPO="gcr.io/knative-nightly"
export GITHUB_TOKEN=""
# Convenience function to run the hub tool.
# Parameters: $1..$n - arguments to hub.
function hub_tool() {
  # Quote "$@" so arguments containing spaces (e.g. release titles/notes)
  # are forwarded to hub as single arguments instead of being word-split.
  run_go_tool github.com/github/hub hub "$@"
}
# Shortcut to "git push" that handles authentication.
# Parameters: $1..$n - arguments to "git push <repo>".
function git_push() {
  local repo_url="${REPO_UPSTREAM}"
  # Bug fix: this previously tested "${GITHUB_TOKEN}}" (stray brace), which is
  # never empty, so the token was injected into the URL even when unset.
  [[ -n "${GITHUB_TOKEN}" ]] && repo_url="${repo_url/:\/\//:\/\/${GITHUB_TOKEN}@}"
  git push "${repo_url}" "$@"
}
# Return the master version of a release.
# For example, "v0.2.1" returns "0.2".
# Parameters: $1 - release version label.
function master_version() {
  # Drop the "v" prefix, then keep only the first two dot-separated fields.
  local release="${1//v/}"
  local major="${release%%.*}"
  local rest="${release#*.}"
  echo "${major}.${rest%%.*}"
}
# Return the release build number of a release.
# For example, "v0.2.1" returns "1".
# Parameters: $1 - release version label.
function release_build_number() {
  local patch
  # Split on '.' and emit the third field (the patch/build number).
  IFS='.' read -r _ _ patch _ <<< "$1"
  echo "${patch}"
}
# Return the short commit SHA from a release tag.
# For example, "v20010101-deadbeef" returns "deadbeef".
# Parameters: $1 - nightly tag ("vYYYYMMDD-hash").
function hash_from_tag() {
  local hash
  # The hash is the second '-'-separated field of the tag.
  IFS='-' read -r _ hash _ <<< "$1"
  echo "${hash}"
}
# Setup the repository upstream, if not set.
# Adds an "upstream" remote pointing at ${REPO_UPSTREAM} when missing.
function setup_upstream() {
  # hub and checkout need the upstream URL to be set
  # TODO(adrcunha): Use "git remote get-url" once available on Prow.
  local upstream="$(git config --get remote.upstream.url)"
  echo "Remote upstream URL is '${upstream}'"
  if [[ -z "${upstream}" ]]; then
    echo "Setting remote upstream URL to '${REPO_UPSTREAM}'"
    git remote add upstream ${REPO_UPSTREAM}
  fi
}
# Fetch the release branch, so we can check it out.
# No-op unless RELEASE_BRANCH is set; creates the local ref
# "upstream/<branch>" referenced elsewhere in this script.
function setup_branch() {
  [[ -z "${RELEASE_BRANCH}" ]] && return
  git fetch ${REPO_UPSTREAM} ${RELEASE_BRANCH}:upstream/${RELEASE_BRANCH}
}
# Setup version, branch and release notes for a auto release.
# A new release X.Y.0 is cut when a release-X.Y branch exists without a
# corresponding vX.Y.* tag; exits 0 early if there is nothing to release.
function prepare_auto_release() {
  echo "Auto release requested"
  TAG_RELEASE=1
  PUBLISH_RELEASE=1
  git fetch --all || abort "error fetching branches/tags from remote"
  local tags="$(git tag | cut -d 'v' -f2 | cut -d '.' -f1-2 | sort -V | uniq)"
  local branches="$( { (git branch -r | grep upstream/release-) ; (git branch | grep release-); } | cut -d '-' -f2 | sort -V | uniq)"
  echo "Versions released (from tags): [" ${tags} "]"
  echo "Versions released (from branches): [" ${branches} "]"
  # release_number ends up non-empty only when the newest (last-sorted)
  # branch has no matching tag yet.
  local release_number=""
  for i in ${branches}; do
    release_number="${i}"
    for j in ${tags}; do
      if [[ "${i}" == "${j}" ]]; then
        release_number=""
      fi
    done
  done
  if [[ -z "${release_number}" ]]; then
    echo "*** No new release will be generated, as no new branches exist"
    exit 0
  fi
  RELEASE_VERSION="${release_number}.0"
  RELEASE_BRANCH="release-${release_number}"
  echo "Will create release ${RELEASE_VERSION} from branch ${RELEASE_BRANCH}"
  # If --release-notes not used, add a placeholder
  if [[ -z "${RELEASE_NOTES}" ]]; then
    RELEASE_NOTES="$(mktemp)"
    echo "[add release notes here]" > ${RELEASE_NOTES}
  fi
}
# Setup version, branch and release notes for a "dot" release.
# Finds the latest published release (optionally restricted to
# --release-branch), verifies the branch has new commits since it, and bumps
# the patch number; exits 0 early when there is nothing new to release.
function prepare_dot_release() {
  echo "Dot release requested"
  TAG_RELEASE=1
  PUBLISH_RELEASE=1
  git fetch --all || abort "error fetching branches/tags from remote"
  # List latest release
  local releases # don't combine with the line below, or $? will be 0
  releases="$(hub_tool release)"
  [[ $? -eq 0 ]] || abort "cannot list releases"
  # If --release-branch passed, restrict to that release
  if [[ -n "${RELEASE_BRANCH}" ]]; then
    local version_filter="v${RELEASE_BRANCH##release-}"
    echo "Dot release will be generated for ${version_filter}"
    releases="$(echo "${releases}" | grep ^${version_filter})"
  fi
  local last_version="$(echo "${releases}" | grep '^v[0-9]\+\.[0-9]\+\.[0-9]\+$' | sort -r -V | head -1)"
  [[ -n "${last_version}" ]] || abort "no previous release exist"
  local major_minor_version=""
  if [[ -z "${RELEASE_BRANCH}" ]]; then
    echo "Last release is ${last_version}"
    # Determine branch
    major_minor_version="$(master_version ${last_version})"
    RELEASE_BRANCH="release-${major_minor_version}"
    echo "Last release branch is ${RELEASE_BRANCH}"
  else
    major_minor_version="${RELEASE_BRANCH##release-}"
  fi
  [[ -n "${major_minor_version}" ]] || abort "cannot get release major/minor version"
  # Ensure there are new commits in the branch, otherwise we don't create a new release
  setup_branch
  local last_release_commit="$(git rev-list -n 1 ${last_version})"
  local release_branch_commit="$(git rev-list -n 1 upstream/${RELEASE_BRANCH})"
  [[ -n "${last_release_commit}" ]] || abort "cannot get last release commit"
  [[ -n "${release_branch_commit}" ]] || abort "cannot get release branch last commit"
  echo "Version ${last_version} is at commit ${last_release_commit}"
  echo "Branch ${RELEASE_BRANCH} is at commit ${release_branch_commit}"
  if [[ "${last_release_commit}" == "${release_branch_commit}" ]]; then
    echo "*** Branch ${RELEASE_BRANCH} has no new cherry-picks since release ${last_version}"
    echo "*** No dot release will be generated, as no changes exist"
    exit 0
  fi
  # Create new release version number
  local last_build="$(release_build_number ${last_version})"
  RELEASE_VERSION="${major_minor_version}.$(( last_build + 1 ))"
  echo "Will create release ${RELEASE_VERSION} at commit ${release_branch_commit}"
  # If --release-notes not used, copy from the latest release
  if [[ -z "${RELEASE_NOTES}" ]]; then
    RELEASE_NOTES="$(mktemp)"
    hub_tool release show -f "%b" ${last_version} > ${RELEASE_NOTES}
    echo "Release notes from ${last_version} copied to ${RELEASE_NOTES}"
  fi
}
# Setup source nightly image for a release.
# Globals written: SKIP_TESTS, FROM_NIGHTLY_RELEASE, FROM_NIGHTLY_RELEASE_GCS.
# Aborts if the requested nightly release does not exist in GCS.
function prepare_from_nightly_release() {
  echo "Release from nightly requested"
  # Validation tests are skipped for releases built from an existing nightly.
  SKIP_TESTS=1
  if [[ "${FROM_NIGHTLY_RELEASE}" == "latest" ]]; then
    echo "Finding the latest nightly release"
    find_latest_nightly "${NIGHTLY_GCR}" || abort "cannot find the latest nightly release"
    echo "Latest nightly is ${FROM_NIGHTLY_RELEASE}"
  fi
  readonly FROM_NIGHTLY_RELEASE_GCS="gs://knative-nightly/${REPO_NAME}/previous/${FROM_NIGHTLY_RELEASE}"
  gsutil ls -d "${FROM_NIGHTLY_RELEASE_GCS}" > /dev/null \
      || abort "nightly release ${FROM_NIGHTLY_RELEASE} doesn't exist"
}
# Build a release from an existing nightly one.
# Fetches the nightly manifests, rewrites image references to the release
# GCR, copies the images, and creates/pushes the release branch at the
# nightly tag's commit.
function build_from_nightly_release() {
  banner "Building the release"
  echo "Fetching manifests from nightly"
  local yamls_dir="$(mktemp -d)"
  gsutil -m cp -r "${FROM_NIGHTLY_RELEASE_GCS}/*" "${yamls_dir}" || abort "error fetching manifests"
  # Update references to release GCR
  for yaml in ${yamls_dir}/*.yaml; do
    sed -i -e "s#${NIGHTLY_GCR}#${RELEASE_GCR}#" "${yaml}"
  done
  ARTIFACTS_TO_PUBLISH="$(find ${yamls_dir} -name '*.yaml' -printf '%p ')"
  echo "Copying nightly images"
  copy_nightly_images_to_release_gcr "${NIGHTLY_GCR}" "${FROM_NIGHTLY_RELEASE}"
  # Create a release branch from the nightly release tag.
  local commit="$(hash_from_tag ${FROM_NIGHTLY_RELEASE})"
  echo "Creating release branch ${RELEASE_BRANCH} at commit ${commit}"
  git checkout -b ${RELEASE_BRANCH} ${commit} || abort "cannot create branch"
  git_push upstream ${RELEASE_BRANCH} || abort "cannot push branch"
}
# Build a release from source.
# Runs the validation tests (unless skipped) and then the repo-provided
# build_release() function, aborting on failure.
function build_from_source() {
  run_validation_tests ${VALIDATION_TESTS}
  banner "Building the release"
  build_release
  # Do not use `||` above or any error will be swallowed.
  if [[ $? -ne 0 ]]; then
    abort "error building the release"
  fi
}
# Copy tagged images from the nightly GCR to the release GCR, tagging them 'latest'.
# This is a recursive function, first call must pass $NIGHTLY_GCR as first parameter.
# Parameters: $1 - GCR to recurse into.
#             $2 - tag to be used to select images to copy.
function copy_nightly_images_to_release_gcr() {
  for entry in $(gcloud --format="value(name)" container images list --repository="$1"); do
    # Depth-first: descend into nested repositories before copying.
    copy_nightly_images_to_release_gcr "${entry}" "$2"
    # Copy each image with the given nightly tag
    for x in $(gcloud --format="value(tags)" container images list-tags "${entry}" --filter="tags=$2" --limit=1); do
      local path="${entry/${NIGHTLY_GCR}}" # Image "path" (remove GCR part)
      local dst="${RELEASE_GCR}${path}:latest"
      gcloud container images add-tag "${entry}:$2" "${dst}" || abort "error copying image"
    done
  done
}
# Recurse into GCR and find the nightly tag of the first `latest` image found.
# Parameters: $1 - GCR to recurse into.
# Globals written: FROM_NIGHTLY_RELEASE (on success).
# Returns: 0 when a nightly tag was found, 1 otherwise.
function find_latest_nightly() {
  for entry in $(gcloud --format="value(name)" container images list --repository="$1"); do
    find_latest_nightly "${entry}" && return 0
    for tag in $(gcloud --format="value(tags)" container images list-tags "${entry}" \
        --filter="tags=latest" --limit=1); do
      # ${tag} is a comma-separated list of all tags on that image.
      local tags=( ${tag//,/ } )
      # Skip if more than one nightly tag, as we don't know what's the latest.
      if [[ ${#tags[@]} -eq 2 ]]; then
        local nightly_tag="${tags[@]/latest}" # Remove 'latest' tag
        FROM_NIGHTLY_RELEASE="${nightly_tag// /}" # Remove spaces
        return 0
      fi
    done
  done
  return 1
}
# Parses flags and sets environment variables accordingly.
# Validates mutually-exclusive options, prepares auto/dot/nightly-sourced
# releases, computes the build tag, and marks the release globals readonly.
function parse_flags() {
  local has_gcr_flag=0
  local has_gcs_flag=0
  local has_dir_flag=0
  local is_dot_release=0
  local is_auto_release=0
  cd ${REPO_ROOT_DIR}
  while [[ $# -ne 0 ]]; do
    local parameter=$1
    case ${parameter} in
      --skip-tests) SKIP_TESTS=1 ;;
      --tag-release) TAG_RELEASE=1 ;;
      --notag-release) TAG_RELEASE=0 ;;
      --publish) PUBLISH_RELEASE=1 ;;
      --nopublish) PUBLISH_RELEASE=0 ;;
      --dot-release) is_dot_release=1 ;;
      --auto-release) is_auto_release=1 ;;
      --from-latest-nightly) FROM_NIGHTLY_RELEASE=latest ;;
      # Remaining options all require a value argument.
      *)
        [[ $# -ge 2 ]] || abort "missing parameter after $1"
        shift
        case ${parameter} in
          --github-token)
            [[ ! -f "$1" ]] && abort "file $1 doesn't exist"
            # Remove any trailing newline/space from token
            GITHUB_TOKEN="$(echo -n $(cat $1))"
            [[ -n "${GITHUB_TOKEN}" ]] || abort "file $1 is empty"
            ;;
          --release-gcr)
            KO_DOCKER_REPO=$1
            has_gcr_flag=1
            ;;
          --release-gcs)
            RELEASE_GCS_BUCKET=$1
            RELEASE_DIR=""
            has_gcs_flag=1
            ;;
          --release-dir)
            RELEASE_DIR=$1
            RELEASE_GCS_BUCKET=""
            has_dir_flag=1
            ;;
          --version)
            [[ $1 =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]] || abort "version format must be '[0-9].[0-9].[0-9]'"
            RELEASE_VERSION=$1
            ;;
          --branch)
            [[ $1 =~ ^release-[0-9]+\.[0-9]+$ ]] || abort "branch name must be 'release-[0-9].[0-9]'"
            RELEASE_BRANCH=$1
            ;;
          --release-notes)
            [[ ! -f "$1" ]] && abort "file $1 doesn't exist"
            RELEASE_NOTES=$1
            ;;
          --from-nightly)
            [[ $1 =~ ^v[0-9]+-[0-9a-f]+$ ]] || abort "nightly tag must be 'vYYYYMMDD-commithash'"
            FROM_NIGHTLY_RELEASE=$1
            ;;
          *) abort "unknown option ${parameter}" ;;
        esac
    esac
    shift
  done
  (( has_gcs_flag )) && (( has_dir_flag )) && abort "cannot have both --release-gcs and --release-dir set simultaneously"
  [[ -n "${RELEASE_GCS_BUCKET}" && -n "${RELEASE_DIR}" ]] && abort "cannot have both GCS and release directory set"
  # Do auto release unless release is forced
  if (( is_auto_release )); then
    (( is_dot_release )) && abort "cannot have both --dot-release and --auto-release set simultaneously"
    [[ -n "${RELEASE_VERSION}" ]] && abort "cannot have both --version and --auto-release set simultaneously"
    [[ -n "${RELEASE_BRANCH}" ]] && abort "cannot have both --branch and --auto-release set simultaneously"
    [[ -n "${FROM_NIGHTLY_RELEASE}" ]] && abort "cannot have --auto-release with a nightly source"
    setup_upstream
    prepare_auto_release
  fi
  # Setup source nightly image
  if [[ -n "${FROM_NIGHTLY_RELEASE}" ]]; then
    (( is_dot_release )) && abort "dot releases are built from source"
    [[ -z "${RELEASE_VERSION}" ]] && abort "release version must be specified with --version"
    # TODO(adrcunha): "dot" releases from release branches require releasing nightlies
    # for such branches, which we don't do yet.
    [[ "${RELEASE_VERSION}" =~ ^[0-9]+\.[0-9]+\.0$ ]] || abort "version format must be 'X.Y.0'"
    RELEASE_BRANCH="release-$(master_version ${RELEASE_VERSION})"
    prepare_from_nightly_release
    setup_upstream
  fi
  # Setup dot releases
  if (( is_dot_release )); then
    setup_upstream
    prepare_dot_release
  fi
  # Update KO_DOCKER_REPO and KO_FLAGS if we're not publishing.
  if (( ! PUBLISH_RELEASE )); then
    (( has_gcr_flag )) && echo "Not publishing the release, GCR flag is ignored"
    (( has_gcs_flag )) && echo "Not publishing the release, GCS flag is ignored"
    KO_DOCKER_REPO="ko.local"
    RELEASE_GCS_BUCKET=""
    [[ -z "${RELEASE_DIR}" ]] && RELEASE_DIR="${REPO_ROOT_DIR}"
  fi
  [[ -z "${RELEASE_GCS_BUCKET}" && -z "${RELEASE_DIR}" ]] && abort "--release-gcs or --release-dir must be used"
  if [[ -n "${RELEASE_DIR}" ]]; then
    mkdir -p "${RELEASE_DIR}" || abort "cannot create release dir '${RELEASE_DIR}'"
  fi
  # Get the commit, excluding any tags but keeping the "dirty" flag
  BUILD_COMMIT_HASH="$(git describe --always --dirty --match '^$')"
  [[ -n "${BUILD_COMMIT_HASH}" ]] || abort "error getting the current commit"
  BUILD_YYYYMMDD="$(date -u +%Y%m%d)"
  BUILD_TIMESTAMP="$(date -u '+%Y-%m-%d %H:%M:%S')"
  BUILD_TAG="v${BUILD_YYYYMMDD}-${BUILD_COMMIT_HASH}"
  (( TAG_RELEASE )) && TAG="${BUILD_TAG}"
  # A versioned release overrides the date-based nightly tag.
  [[ -n "${RELEASE_VERSION}" ]] && TAG="v${RELEASE_VERSION}"
  [[ -n "${RELEASE_VERSION}" && -n "${RELEASE_BRANCH}" ]] && (( PUBLISH_RELEASE )) && PUBLISH_TO_GITHUB=1
  readonly BUILD_COMMIT_HASH
  readonly BUILD_YYYYMMDD
  readonly BUILD_TIMESTAMP
  readonly BUILD_TAG
  readonly SKIP_TESTS
  readonly TAG_RELEASE
  readonly PUBLISH_RELEASE
  readonly PUBLISH_TO_GITHUB
  readonly TAG
  readonly RELEASE_VERSION
  readonly RELEASE_NOTES
  readonly RELEASE_BRANCH
  readonly RELEASE_GCS_BUCKET
  readonly RELEASE_DIR
  readonly KO_DOCKER_REPO
  readonly VALIDATION_TESTS
  readonly FROM_NIGHTLY_RELEASE
}
# Run tests (unless --skip-tests was passed). Conveniently displays a banner indicating so.
# Parameters: $1 - executable that runs the tests.
# Aborts the release if the tests fail.
function run_validation_tests() {
  (( SKIP_TESTS )) && return
  banner "Running release validation tests"
  # Run tests.
  if ! $1; then
    banner "Release validation tests failed, aborting"
    abort "release validation tests failed"
  fi
}
# Publishes the generated artifacts to directory, GCS, GitHub, etc.
# NOTE(review): despite the documented parameters, the body reads the global
# ARTIFACTS_TO_PUBLISH (a space-separated file list) rather than $1..$n.
# Parameters: $1..$n - files to add to the release.
# Globals (read): PUBLISH_RELEASE, ARTIFACTS_TO_PUBLISH, RELEASE_DIR,
#   RELEASE_GCS_BUCKET.
function publish_artifacts() {
  (( PUBLISH_RELEASE )) || return 0
  # ARTIFACTS_TO_PUBLISH is deliberately unquoted: it word-splits into the
  # individual artifact paths.
  tag_images_in_yamls ${ARTIFACTS_TO_PUBLISH}
  if [[ -n "${RELEASE_DIR}" ]]; then
    cp ${ARTIFACTS_TO_PUBLISH} ${RELEASE_DIR} || abort "cannot copy release to '${RELEASE_DIR}'"
  fi
  if [[ -n "${RELEASE_GCS_BUCKET}" ]]; then
    publish_to_gcs ${ARTIFACTS_TO_PUBLISH}
  fi
  publish_to_github ${ARTIFACTS_TO_PUBLISH}
  banner "New release published successfully"
}
# Entry point for a release script.
# Parses the flags, re-executes itself from the release branch when needed,
# prints the release configuration, builds the release and publishes it.
# Parameters: $1..$n - flags accepted by parse_flags().
# Globals (read): RELEASE_BRANCH, FROM_NIGHTLY_RELEASE, IS_PROW,
#   VALIDATION_TESTS, SKIP_TESTS, TAG_RELEASE, TAG, PUBLISH_RELEASE,
#   PUBLISH_TO_GITHUB, RELEASE_DIR, RELEASE_GCS_BUCKET, RELEASE_NOTES,
#   KO_DOCKER_REPO, GOPATH, REPO_ROOT_DIR, ARTIFACTS_TO_PUBLISH.
function main() {
  parse_flags "$@"
  # Checkout specific branch, if necessary
  local current_branch
  current_branch="$(git rev-parse --abbrev-ref HEAD)"
  if [[ -n "${RELEASE_BRANCH}" && -z "${FROM_NIGHTLY_RELEASE}" && "${current_branch}" != "${RELEASE_BRANCH}" ]]; then
    setup_upstream
    setup_branch
    # When it runs in Prow, the origin is identical with upstream, and previous
    # fetch already fetched release-* branches, so no need to `checkout -b`
    if (( IS_PROW )); then
      git checkout "${RELEASE_BRANCH}" || abort "cannot checkout branch ${RELEASE_BRANCH}"
    else
      git checkout -b "${RELEASE_BRANCH}" upstream/"${RELEASE_BRANCH}" || abort "cannot checkout branch ${RELEASE_BRANCH}"
    fi
    # HACK HACK HACK
    # Rerun the release script from the release branch. Fixes https://github.com/knative/test-infra/issues/1262
    # NOTE(review): this assumes the entry script lives at ./hack/release.sh
    # relative to the current working directory — confirm for each repo.
    ./hack/release.sh "$@"
    exit "$?"
  fi
  # The repo must define build_release() and ship an executable test script.
  function_exists build_release || abort "function 'build_release()' not defined"
  [[ -x ${VALIDATION_TESTS} ]] || abort "test script '${VALIDATION_TESTS}' doesn't exist"
  # Log what will be done and where.
  banner "Release configuration"
  if which gcloud &>/dev/null ; then
    echo "- gcloud user: $(gcloud config get-value core/account)"
  fi
  echo "- Go path: ${GOPATH}"
  echo "- Repository root: ${REPO_ROOT_DIR}"
  echo "- Destination GCR: ${KO_DOCKER_REPO}"
  (( SKIP_TESTS )) && echo "- Tests will NOT be run" || echo "- Tests will be run"
  if (( TAG_RELEASE )); then
    echo "- Artifacts will be tagged '${TAG}'"
  else
    echo "- Artifacts WILL NOT be tagged"
  fi
  if (( PUBLISH_RELEASE )); then
    # The local release dir takes precedence over the GCS bucket when set.
    local dst="${RELEASE_DIR}"
    [[ -z "${dst}" ]] && dst="${RELEASE_GCS_BUCKET}"
    echo "- Release WILL BE published to '${dst}'"
  else
    echo "- Release will not be published"
  fi
  if (( PUBLISH_TO_GITHUB )); then
    echo "- Release WILL BE published to GitHub"
  fi
  if [[ -n "${FROM_NIGHTLY_RELEASE}" ]]; then
    echo "- Release will be A COPY OF '${FROM_NIGHTLY_RELEASE}' nightly"
  else
    echo "- Release will be BUILT FROM SOURCE"
    [[ -n "${RELEASE_BRANCH}" ]] && echo "- Release will be built from branch '${RELEASE_BRANCH}'"
  fi
  [[ -n "${RELEASE_NOTES}" ]] && echo "- Release notes are generated from '${RELEASE_NOTES}'"
  # Build the release, either by copying an existing nightly or from source.
  if [[ -n "${FROM_NIGHTLY_RELEASE}" ]]; then
    build_from_nightly_release
  else
    # Fail fast while building from source; restore lenient mode afterwards.
    set -e -o pipefail
    build_from_source
    set +e +o pipefail
  fi
  [[ -z "${ARTIFACTS_TO_PUBLISH}" ]] && abort "no artifacts were generated"
  # Ensure no empty file will be published.
  for artifact in ${ARTIFACTS_TO_PUBLISH}; do
    [[ -s ${artifact} ]] || abort "Artifact ${artifact} is empty"
  done
  echo "New release built successfully"
  publish_artifacts
}
# Publishes a new release on GitHub, also git tagging it (unless this is not a versioned release).
# Parameters: $1..$n - files to add to the release.
# Globals (read): PUBLISH_TO_GITHUB, REPO_NAME_FORMATTED, TAG, RELEASE_NOTES,
#   RELEASE_BRANCH.
function publish_to_github() {
  (( PUBLISH_TO_GITHUB )) || return 0
  local title="${REPO_NAME_FORMATTED} release ${TAG}"
  local attachments=()
  # NOTE(review): 'local x="$(cmd)"' masks the command's exit status ('local'
  # itself always succeeds), so a mktemp failure goes unnoticed here.
  local description="$(mktemp)"
  local attachments_dir="$(mktemp -d)"
  local commitish=""
  # Copy files to a separate dir
  # NOTE(review): the copies placed in ${attachments_dir} are never referenced
  # again — --attach points at the original paths. Presumably a leftover;
  # confirm before removing.
  for artifact in $@; do
    cp ${artifact} ${attachments_dir}/
    attachments+=("--attach=${artifact}#$(basename ${artifact})")
  done
  # The release description is the title followed by the optional notes file.
  echo -e "${title}\n" > ${description}
  if [[ -n "${RELEASE_NOTES}" ]]; then
    cat ${RELEASE_NOTES} >> ${description}
  fi
  # Create an annotated tag and push it upstream.
  git tag -a ${TAG} -m "${title}"
  git_push tag ${TAG}
  [[ -n "${RELEASE_BRANCH}" ]] && commitish="--commitish=${RELEASE_BRANCH}"
  # Try to create the GitHub release up to 3 times (i = retries remaining),
  # waiting 15s between attempts.
  for i in {2..0}; do
    hub_tool release create \
      --prerelease \
      ${attachments[@]} \
      --file=${description} \
      ${commitish} \
      ${TAG} && return 0
    if [[ "${i}" -gt 0 ]]; then
      echo "Error publishing the release, retrying in 15s..."
      sleep 15
    fi
  done
  abort "Cannot publish release to GitHub"
}

View File

@ -0,0 +1,32 @@
#!/usr/bin/env bash

# Copyright 2020 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Runs shellcheck over recently-changed shell files via shellcheck_new_files
# (provided by library.sh).
set -e

source "$(dirname "${BASH_SOURCE[0]}")/library.sh"

# Outside Prow, warn that a local run only approximates the GitHub presubmit.
if (( ! IS_PROW )); then
  cat <<'EOF'
Local run of shellcheck-presubmit detected
This notably DOES NOT ACT LIKE THE GITHUB PRESUBMIT
The Github presubmit job only runs shellcheck on files you touch
There's no way to locally determine which files you touched:
 as git is a distributed VCS, there is no notion of parent until merge
 is attempted.
So it checks the current content of all files changed in the previous commit
 and/or currently staged.
EOF
fi

shellcheck_new_files

View File

@ -0,0 +1,91 @@
#!/usr/bin/env bash
# Copyright 2020 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
# This script updates test-infra scripts in-repo.
# Run it to update (usually from hack/update-deps.sh) the current scripts.
# Scripts are installed to REPO_ROOT/scripts/test-infra
# The following arguments are accepted:
# --update
# Do the update
# --ref X
# Defines which ref (branch, tag, commit) of test-infra to get scripts from; defaults to master
# --first-time
# Run this script from your repo root directory to install scripts for the first time
# Will also sed -i non-vendor scripts in the current repo to point to new path
# TODO: --verify
# Verify the contents of scripts/test-infra match the contents from commit sha in scripts/test-infra/COMMIT
# One can verify manually by running the script with '--ref $(cat scripts/test-infra/COMMIT)' and ensuring no files are staged
# Flag state, with defaults (see the header comment for flag meanings).
declare -i FIRST_TIME_SETUP=0
declare -i DO_UPDATE=0
declare SCRIPTS_REF=master

# Consume the command-line flags.
while (( $# > 0 )); do
  case "$1" in
    --ref)
      # Next argument is the test-infra ref (branch, tag or commit) to use.
      shift
      SCRIPTS_REF="$1"
      ;;
    --first-time)
      FIRST_TIME_SETUP=1
      ;;
    --update)
      DO_UPDATE=1
      ;;
    *)
      echo "unknown option $1"
      exit 1
      ;;
  esac
  shift
done
# Imports the scripts/ subtree of the fetched test-infra ref into
# scripts/test-infra (both index and working tree) and records the exact
# test-infra commit it came from in scripts/test-infra/COMMIT.
# Expects: the "test-infra" remote exists and ${SCRIPTS_REF} has been fetched.
# Globals (read): SCRIPTS_REF.
function do_read_tree() {
  mkdir -p scripts/test-infra
  # Graft test-infra's scripts/ directory under the scripts/test-infra prefix.
  git read-tree --prefix=scripts/test-infra -u "test-infra/${SCRIPTS_REF}:scripts"
  # Record the source commit SHA so the copy can be verified later.
  git show-ref -s -- "refs/remotes/test-infra/${SCRIPTS_REF}" > scripts/test-infra/COMMIT
  git add scripts/test-infra/COMMIT
  echo "test-infra scripts installed to scripts/test-infra from branch ${SCRIPTS_REF}"
}
# Dispatches on the parsed flags:
# --first-time: from the repo root, wires up the test-infra remote, imports
#   the scripts, and rewrites existing files to source the new path.
# --update: refreshes a previously-installed scripts/test-infra copy.
# Globals (read): FIRST_TIME_SETUP, DO_UPDATE, SCRIPTS_REF.
function run() {
  if (( FIRST_TIME_SETUP )); then
    # Sanity check that we're at a repo root.
    # NOTE(review): .git is a *file* (not a directory) in worktrees and
    # submodules, so this check would wrongly fail there — confirm acceptable.
    if [[ ! -d .git ]]; then
      echo "I don't believe you are in a repo root; exiting"
      exit 5
    fi
    git remote add test-infra https://github.com/knative/test-infra.git || echo "test-infra remote already set; not changing"
    git fetch test-infra "${SCRIPTS_REF}"
    do_read_tree
    echo "Attempting to point all scripts to use this new path"
    # Rewrite every tracked text file (outside vendor/ and scripts/test-infra/)
    # that still references the vendored test-infra scripts path.
    grep -RiIl vendor/knative.dev/test-infra | grep -v ^vendor | grep -v ^scripts/test-infra | xargs sed -i 's+vendor/knative.dev/test-infra/scripts+scripts/test-infra+'
  elif (( DO_UPDATE )); then
    # Move to the repo root (this script lives in scripts/test-infra/),
    # returning to the original directory on exit.
    pushd "$(dirname "${BASH_SOURCE[0]}")/../.."
    trap popd EXIT
    git remote add test-infra https://github.com/knative/test-infra.git || true
    git fetch test-infra "${SCRIPTS_REF}"
    # Remove the old copy (index and working tree) before re-importing.
    git rm -fr scripts/test-infra
    rm -fR scripts/test-infra
    do_read_tree
  fi
}
run

View File

@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/e2e-tests.sh
source $(dirname $0)/../scripts/test-infra/e2e-tests.sh
function cluster_setup() {
header "Building client"

View File

@ -37,7 +37,7 @@ export PRESUBMIT_TEST_FAIL_FAST=1
export GO111MODULE=on
export KNATIVE_SERVING_VERSION=${KNATIVE_SERVING_VERSION:-latest}
export KNATIVE_EVENTING_VERSION=${KNATIVE_EVENTING_VERSION:-latest}
source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/presubmit-tests.sh
source $(dirname $0)/../scripts/test-infra/presubmit-tests.sh
# Run cross platform build to ensure kn compiles for Linux, macOS and Windows
function post_build_tests() {