#!/usr/bin/env bash

# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is a helper script for Knative presubmit test scripts.
# See README.md for instructions on how to use it.
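#
# Typical usage (a sketch, not taken from this file): a repo's own
# test/presubmit-tests.sh sources this script and then delegates to main(), e.g.
#
#   source "$(dirname "$0")"/../vendor/knative.dev/hack/presubmit-tests.sh
#   main "$@"
#
# The exact path is an assumption here and depends on how the repo vendors or
# checks out this script; see README.md for the authoritative instructions.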

source "$(dirname "${BASH_SOURCE[0]}")"/library.sh

# Custom configuration of presubmit tests
readonly PRESUBMIT_TEST_FAIL_FAST=${PRESUBMIT_TEST_FAIL_FAST:-0}
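# For example, a caller can enable fail-fast behavior for a single run
# (hypothetical invocation, shown only for illustration):
#
#   PRESUBMIT_TEST_FAIL_FAST=1 ./test/presubmit-tests.sh --all-tests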

# Extensions or file patterns that don't require presubmit tests.
readonly NO_PRESUBMIT_FILES=(\.png \.gitignore \.gitattributes ^OWNERS ^OWNERS_ALIASES ^AUTHORS \.github/.*)

# Flag if this is a presubmit run or not.
(( IS_PROW )) && [[ ${JOB_TYPE} == "presubmit" ]] && IS_PRESUBMIT=1 || IS_PRESUBMIT=0
readonly IS_PRESUBMIT

# List of changed files on presubmit, LF separated.
CHANGED_FILES=""
# Flags that this PR is exempt from presubmit tests.
IS_PRESUBMIT_EXEMPT_PR=0

# Flags that this PR contains only changes to documentation.
IS_DOCUMENTATION_PR=0

# Returns true if PR only contains the given file regexes.
# Parameters: $1 - file regexes, space separated.
function pr_only_contains() {
  [[ -z "$(echo "${CHANGED_FILES}" | grep -v "\(${1// /\\|}\)$")" ]]
}
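# For example (illustration only, not part of the original script):
# pr_only_contains "\.md \.png" turns the space-separated regexes into the
# grep pattern "\(\.md\|\.png\)$", so it returns true only when every changed
# file ends in .md or .png.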

# Initialize flags and context for presubmit tests:
# CHANGED_FILES, IS_PRESUBMIT_EXEMPT_PR and IS_DOCUMENTATION_PR.
function initialize_environment() {
  CHANGED_FILES=""
  IS_PRESUBMIT_EXEMPT_PR=0
  IS_DOCUMENTATION_PR=0
  (( ! IS_PRESUBMIT )) && return
  CHANGED_FILES="$(list_changed_files)"
  if [[ -n "${CHANGED_FILES}" ]]; then
    echo -e "Changed files in commit ${PULL_PULL_SHA}:\n${CHANGED_FILES}"
    local no_presubmit_files="${NO_PRESUBMIT_FILES[*]}"
    pr_only_contains "${no_presubmit_files}" && IS_PRESUBMIT_EXEMPT_PR=1
    # A documentation PR must contain markdown files
    if pr_only_contains "\.md ${no_presubmit_files}"; then
      [[ -n "$(echo "${CHANGED_FILES}" | grep '\.md')" ]] && IS_DOCUMENTATION_PR=1
    fi
  else
    header "NO CHANGED FILES REPORTED, ASSUMING IT'S AN ERROR AND RUNNING TESTS ANYWAY"
  fi
  readonly CHANGED_FILES
  readonly IS_DOCUMENTATION_PR
  readonly IS_PRESUBMIT_EXEMPT_PR
}

# Display a pass/fail banner for a test group.
# Parameters: $1 - test group name (e.g., build)
#             $2 - result (0=passed, 1=failed)
function results_banner() {
  if [[ $2 -eq 0 ]]; then
    header "$1 tests PASSED"
  else
    error "$1 tests FAILED"
  fi
}

# Run build tests. If there's no `build_tests` function, run the default
# build test runner.
function run_build_tests() {
  (( ! RUN_BUILD_TESTS )) && return 0
  header "Running build tests"
  local failed=0
  # Run pre-build tests, if any
  if function_exists pre_build_tests; then
    pre_build_tests || { failed=1; step_failed "pre_build_tests"; }
  fi
  # Don't run build tests if pre-build tests failed
  if (( ! failed )); then
    if function_exists build_tests; then
      build_tests || { failed=1; step_failed "build_tests"; }
    else
      default_build_test_runner || { failed=1; step_failed "default_build_test_runner"; }
    fi
  fi
  # Don't run post-build tests if pre/build tests failed
  if (( ! failed )) && function_exists post_build_tests; then
    post_build_tests || { failed=1; step_failed "post_build_tests"; }
  fi
  results_banner "Build" ${failed}
  return ${failed}
}
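# A repo can customize this flow by defining any of the optional hooks above
# (pre_build_tests, build_tests, post_build_tests) in its own presubmit-tests.sh
# before calling main(). A minimal sketch; the hook bodies and the
# ./hack/verify-docs.sh helper are hypothetical examples, not part of this library:
#
#   function pre_build_tests() {
#     subheader "Checking generated docs are up-to-date"
#     ./hack/verify-docs.sh   # hypothetical helper
#   }
#
#   function build_tests() {
#     go build ./...
#   }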

# Run a build test and report its output as the failure if it fails.
# Parameters: $1 - report name.
#             $2... - command (test) to run.
function report_build_test() {
  local report
  report="$(mktemp)"
  local report_name="$1"
  shift
  local errors=""
  capture_output "${report}" "$@" || errors="$(cat "${report}")"
  create_junit_xml _build_tests "${report_name}" "${errors}"
  [[ -z "${errors}" ]]
}
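# Example (illustrative only; the lint script is a hypothetical name): run a
# check and record it as a JUnit test case named "Lint_Docs" in the
# _build_tests suite:
#
#   report_build_test Lint_Docs ./hack/lint-docs.sh --strict
#
# The command's output is captured to a temp file; if the command fails, that
# output becomes the failure message in the generated JUnit XML.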

# Default build test runner that, for each go module in the repo:
# * runs `./hack/verify-codegen.sh` (if it exists)
# * checks that all the go code builds
# * checks licenses in all go packages
function default_build_test_runner() {
  foreach_go_module __build_test_runner_for_module
}

function __build_test_runner_for_module() {
  local failed=0
  subheader "Build tests for $(go_mod_module_name)"
  # Run verify-codegen check
  if [[ -f ./hack/verify-codegen.sh ]]; then
    subheader "Checking autogenerated code is up-to-date"
    report_build_test Verify_CodeGen ./hack/verify-codegen.sh || failed=1
  fi
  # For documentation PRs, just check the md files and run
  # verify-codegen (as md files can be auto-generated in some repos).
  (( IS_DOCUMENTATION_PR )) && return ${failed}
  # Get all build tags in go code (ignore /vendor, /hack and /third_party)
  local tags
  tags="$(go run knative.dev/toolbox/go-ls-tags@latest --joiner=,)"
  # Declare and assign on separate lines; merging them would make `local` mask
  # the exit code of `go list`, so the return code would always be 0.
  local go_pkg_dirs
  go_pkg_dirs="$(go list -tags "${tags}" ./...)" || return $?
  if [[ -z "${go_pkg_dirs}" ]]; then
    subheader "No golang code found, skipping build tests"
    return 0
  fi
  # Ensure all the code builds
  subheader "Checking that go code builds"

  report_build_test Build_Go \
    go test -vet=off -tags "${tags}" -exec echo ./... || failed=2

  # Check that we don't have any forbidden licenses in our images.
  subheader "Checking for forbidden licenses"
  report_build_test Check_Licenses check_licenses || failed=3
  return ${failed}
}

# Run unit tests. If there's no `unit_tests` function, run the default
# unit test runner.
function run_unit_tests() {
  (( ! RUN_UNIT_TESTS )) && return 0
  if (( IS_DOCUMENTATION_PR )); then
    header "Documentation only PR, skipping unit tests"
    return 0
  fi
  header "Running unit tests"
  local failed=0
  # Run pre-unit tests, if any
  if function_exists pre_unit_tests; then
    pre_unit_tests || { failed=1; step_failed "pre_unit_tests"; }
  fi
  # Don't run unit tests if pre-unit tests failed
  if (( ! failed )); then
    if function_exists unit_tests; then
      unit_tests || { failed=1; step_failed "unit_tests"; }
    else
      default_unit_test_runner || { failed=1; step_failed "default_unit_test_runner"; }
    fi
  fi
  # Don't run post-unit tests if pre/unit tests failed
  if (( ! failed )) && function_exists post_unit_tests; then
    post_unit_tests || { failed=1; step_failed "post_unit_tests"; }
  fi
  results_banner "Unit" ${failed}
  return ${failed}
}

# Default unit test runner that runs all go tests in the repo.
function default_unit_test_runner() {
  foreach_go_module __unit_test_runner_for_module
}

function __unit_test_runner_for_module() {
  subheader "Unit tests for $(go_mod_module_name)"
  report_go_test -short -race -count 1 ./...
}
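# A repo that needs different unit-test behavior can define its own unit_tests()
# hook instead of relying on the default runner above. A sketch; the flags and
# package path are examples, not requirements:
#
#   function unit_tests() {
#     report_go_test -race -count 1 -timeout 10m ./pkg/...
#   }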

# Run integration tests. If there's no `integration_tests` function, run the
# default integration test runner.
function run_integration_tests() {
  # Don't run integration tests if not requested OR on documentation PRs
  (( ! RUN_INTEGRATION_TESTS )) && return 0
  if (( IS_DOCUMENTATION_PR )); then
    header "Documentation only PR, skipping integration tests"
    return 0
  fi
  header "Running integration tests"
  local failed=0
  # Run pre-integration tests, if any
  if function_exists pre_integration_tests; then
    pre_integration_tests || { failed=1; step_failed "pre_integration_tests"; }
  fi
  # Don't run integration tests if pre-integration tests failed
  if (( ! failed )); then
    if function_exists integration_tests; then
      integration_tests || { failed=1; step_failed "integration_tests"; }
    else
      default_integration_test_runner || { failed=1; step_failed "default_integration_test_runner"; }
    fi
  fi
  # Don't run post-integration tests if pre/integration tests failed
  if (( ! failed )) && function_exists post_integration_tests; then
    post_integration_tests || { failed=1; step_failed "post_integration_tests"; }
  fi
  results_banner "Integration" ${failed}
  return ${failed}
}

# Default integration test runner that runs all `test/e2e-*tests.sh`.
function default_integration_test_runner() {
  local failed=0

  while read -r e2e_test; do
    subheader "Running integration test ${e2e_test}"
    "${e2e_test}" || failed=$?
    if (( failed )); then
      echo "${e2e_test} failed: $failed" >&2
      return $failed
    fi
  done < <(find test/ -maxdepth 1 ! -name "$(printf "*\n*")" -name "e2e-*tests.sh")
  return ${failed}
}
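# The find invocation above discovers scripts directly under test/ whose names
# match e2e-*tests.sh (skipping any name containing a newline), so files such
# as test/e2e-tests.sh or test/e2e-upgrade-tests.sh (hypothetical names) would
# each be run in turn, stopping at the first failure.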

# Options set by command-line flags.
RUN_BUILD_TESTS=0
RUN_UNIT_TESTS=0
RUN_INTEGRATION_TESTS=0

# Process flags and run tests accordingly.
function main() {
  initialize_environment
  if (( IS_PRESUBMIT_EXEMPT_PR )) && (( ! IS_DOCUMENTATION_PR )); then
    header "Commit only contains changes that don't require tests, skipping"
    exit 0
  fi

  # Show the version of the tools we're using
  if (( IS_PROW )); then
    # Disable gcloud update notifications
    gcloud config set component_manager/disable_update_check true
    header "Current test setup"
    echo ">> gcloud SDK version"
    gcloud version
    echo ">> kubectl version"
    kubectl version --client
    echo ">> go version"
    go version
    echo ">> go env"
    go env
    echo ">> python3 version"
    python3 --version
    echo ">> git version"
    git version
    echo ">> ko version"
    [[ -f /ko_version ]] && cat /ko_version || echo "unknown"
    if [[ "${DOCKER_IN_DOCKER_ENABLED:-}" == "true" ]]; then
      echo ">> docker version"
      docker version
    fi
    if type java > /dev/null; then
      echo ">> java version"
      java -version
      echo "JAVA_HOME: ${JAVA_HOME:-}"
    fi
    if command -v mvn > /dev/null; then
      echo ">> maven version"
      mvn --version
    fi
    if command -v cosign > /dev/null; then
      echo ">> cosign version"
      cosign version
    fi
    echo ">> prow-tests image version"
    [[ -f /commit_hash ]] && echo "Prow test image was built from commit $(cat /commit_hash), viewable at https://github.com/knative/test-infra/tree/$(cat /commit_hash)" || echo "unknown"
  fi

  [[ -z ${1:-} ]] && set -- "--all-tests"

  local TESTS_TO_RUN=()

  while [[ $# -ne 0 ]]; do
    local parameter=$1
    case ${parameter} in
      --help|-h)
        echo "Usage: ./presubmit-tests.sh [options...]"
        echo "  --build-tests: run build tests."
        echo "  --unit-tests: run unit tests."
        echo "  --integration-tests: run integration tests, basically all the e2e-*tests.sh."
        echo "  --all-tests: run build tests, unit tests and integration tests in sequence."
        echo "  --run-test: run a custom test. Can be used multiple times to run tests that need different args."
        echo "              For example, ./presubmit-tests.sh --run-test \"e2e-tests1.sh arg1\" --run-test \"e2e-tests2.sh arg2\"."
        ;;
      --build-tests) RUN_BUILD_TESTS=1 ;;
      --unit-tests) RUN_UNIT_TESTS=1 ;;
      --integration-tests) RUN_INTEGRATION_TESTS=1 ;;
      --all-tests)
        RUN_BUILD_TESTS=1
        RUN_UNIT_TESTS=1
        RUN_INTEGRATION_TESTS=1
        ;;
      --run-test)
        shift
        [[ $# -ge 1 ]] || abort "missing executable after --run-test"
        TESTS_TO_RUN+=("$1")
        ;;
      *) abort "error: unknown option ${parameter}" ;;
    esac
    shift
  done

  readonly RUN_BUILD_TESTS
  readonly RUN_UNIT_TESTS
  readonly RUN_INTEGRATION_TESTS
  readonly TESTS_TO_RUN

  cd "${REPO_ROOT_DIR}" || exit

  # Tests to be performed, in the right order if --all-tests is passed.

  local failed=0

  if [[ ${#TESTS_TO_RUN[@]} -gt 0 ]]; then
    if (( RUN_BUILD_TESTS || RUN_UNIT_TESTS || RUN_INTEGRATION_TESTS )); then
      abort "--run-test must be used alone"
    fi
    # If this is a presubmit run, but a documentation-only PR, don't run the custom tests
    if (( IS_PRESUBMIT && IS_DOCUMENTATION_PR )); then
      header "Documentation only PR, skipping running custom test"
      exit 0
    fi
    for test_to_run in "${TESTS_TO_RUN[@]}"; do
      ${test_to_run} || { failed=$?; step_failed "${test_to_run}"; }
    done
  fi

  run_build_tests || { failed=$?; step_failed "run_build_tests"; }
  # If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run unit tests if build tests failed
  if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then
    run_unit_tests || { failed=$?; step_failed "run_unit_tests"; }
  fi
  # If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run integration tests if build/unit tests failed
  if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then
    run_integration_tests || { failed=$?; step_failed "run_integration_tests"; }
  fi

  exit ${failed}
}