#!/usr/bin/env bash
#
# Usage: test-apiv2 [testglob...]
#
# DEVELOPER NOTE: you almost certainly don't need to play in here. See README.
#
ME=$(basename $0)
###############################################################################
# BEGIN stuff you can but probably shouldn't customize
PODMAN_TEST_IMAGE_REGISTRY=${PODMAN_TEST_IMAGE_REGISTRY:-"quay.io"}
PODMAN_TEST_IMAGE_USER=${PODMAN_TEST_IMAGE_USER:-"libpod"}
PODMAN_TEST_IMAGE_NAME=${PODMAN_TEST_IMAGE_NAME:-"testimage"}
PODMAN_TEST_IMAGE_TAG=${PODMAN_TEST_IMAGE_TAG:-"20241011"}
PODMAN_TEST_IMAGE_FQN="$PODMAN_TEST_IMAGE_REGISTRY/$PODMAN_TEST_IMAGE_USER/$PODMAN_TEST_IMAGE_NAME:$PODMAN_TEST_IMAGE_TAG"
IMAGE=$PODMAN_TEST_IMAGE_FQN
REGISTRY_IMAGE="${PODMAN_TEST_IMAGE_REGISTRY}/${PODMAN_TEST_IMAGE_USER}/registry:2.8.2"
# END stuff you can but probably shouldn't customize
###############################################################################
# BEGIN setup
TMPDIR=${TMPDIR:-/tmp}
WORKDIR=$(mktemp --tmpdir -d $ME.tmp.XXXXXX)
# Log of all HTTP requests and responses; always make '.log' point to latest
LOGBASE=${TMPDIR}/$ME.log
LOG=${LOGBASE}.$(date +'%Y%m%dT%H%M%S')
ln -sf $LOG $LOGBASE
HOST=localhost
PORT=${PODMAN_SERVICE_PORT:-8081}
# Keep track of test count and failures in files, not variables, because
# variables don't carry back up from subshells.
testcounter_file=$WORKDIR/.testcounter
failures_file=$WORKDIR/.failures
echo 0 >$testcounter_file
echo 0 >$failures_file
# Where the tests live
TESTS_DIR=$(realpath $(dirname $0))
# As of 2021-11 podman has one external helper binary, rootlessport, needed
# for rootless networking.
if [[ -z "$CONTAINERS_HELPER_BINARY_DIR" ]]; then
export CONTAINERS_HELPER_BINARY_DIR=$(realpath ${TESTS_DIR}/../../bin)
fi
# Path to podman binary
PODMAN_BIN=${PODMAN:-${CONTAINERS_HELPER_BINARY_DIR}/podman}
# Path to registries.conf file. CI uses a locally-running cache registry,
# but we can't expect developers to have that in their local environment.
registries_conf_file=registries.conf
if [[ -n "$CI_USE_REGISTRY_CACHE" ]]; then
registries_conf_file=registries-cached.conf
fi
registries_conf_path=$TESTS_DIR/../$registries_conf_file
# Cleanup handlers
clean_up_server() {
if [ -n "$service_pid" ]; then
# Remove any containers and images; this prevents the following warning:
# 'rm: cannot remove '/.../overlay': Device or resource busy
podman rm -a
podman rmi -af
stop_registry --cleanup
stop_service
fi
}
# Any non-test-related error, be it syntax or podman-command, fails here.
err_handler() {
echo "Fatal error in ${BASH_SOURCE[1]}:${BASH_LINENO[0]}"
echo "Log:"
sed -e 's/^/ >/' <$WORKDIR/output.log
echo "Bailing."
clean_up_server
}
trap err_handler ERR
# END setup
###############################################################################
# BEGIN infrastructure code - the helper functions used in tests themselves
#########
# die # Exit error with a message to stderr
#########
function die() {
echo "$ME: $*" >&2
clean_up_server
exit 1
}
########
# is # Simple comparison
########
function is() {
local actual=$1
local expect=$2
local testname=$3
if [ "$actual" = "$expect" ]; then
# On success, include expected value; this helps readers understand
_show_ok 1 "$testname=$expect"
return
fi
_show_ok 0 "$testname" "$expect" "$actual"
}
############
# is_not # Simple disequality
############
function is_not() {
local actual=$1
local expect_not=$2
local testname=$3
if [ "$actual" != "$expect_not" ]; then
# On success, include expected value; this helps readers understand
_show_ok 1 "$testname!=$expect"
return
fi
_show_ok 0 "$testname" "!= $expect" "$actual"
}
##########
# like # Compare, but allowing patterns
##########
function like() {
local actual=$1
local expect=$2
local testname=$3
if expr "$actual" : "$expect" &>/dev/null; then
# On success, include expected value; this helps readers understand
# (but don't show enormous multi-line output like 'generate kube')
blurb=$(head -n1 <<<"$actual")
_show_ok 1 "$testname ('$blurb') ~ $expect"
return
fi
_show_ok 0 "$testname" "~ $expect" "$actual"
}
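# NOTE: like() matches via expr(1), so the pattern is a basic regular
# expression anchored at the start of the string. Illustrative example
# (hypothetical values, not from the original tests):
#    like "podman version 4.9.0" "podman version" "version string"   # passes
#    like "podman version 4.9.0" "4\.9"           "version number"   # fails: pattern not at start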
##############
# _show_ok # Helper for is() and like(): displays 'ok' or 'not ok'
##############
function _show_ok() {
local ok=$1
# Exec tests include control characters; filter them out
# Also filter out timestamps, to get consistent test names in logs
local testname=$(tr -d \\012 <<<"$2"|sed -e 's/since=[0-9.]\+/since=(T)/'|cat -vT)
# If output is a tty, colorize pass/fail
local red=
local green=
local reset=
local bold=
if [ -t 1 ]; then
red='\e[31m'
green='\e[32m'
reset='\e[0m'
bold='\e[1m'
fi
_bump $testcounter_file
count=$(<$testcounter_file)
# "skip" is a special case of "ok". Assume that our caller has included
# the magical '# skip - reason' comment string.
if [[ $ok == "skip" ]]; then
# colon-plus: replace green with yellow, but only if green is non-null
green="${green:+\e[33m}"
ok=1
fi
if [ $ok -eq 1 ]; then
echo -e "${green}ok $count ${TEST_CONTEXT} $testname${reset}"
echo "ok $count ${TEST_CONTEXT} $testname" >>$LOG
return
fi
# Failed
local expect=$3
local actual=$4
echo -e "${red}not ok $count ${TEST_CONTEXT} $testname${reset}"
echo -e "${red}# expected: $expect${reset}"
echo -e "${red}# actual: ${bold}$actual${reset}"
echo "not ok $count ${TEST_CONTEXT} $testname" >>$LOG
echo " expected: $expect" >>$LOG
_bump $failures_file
}
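# _show_ok emits TAP-style result lines, e.g. (illustrative):
#    ok 12 [01-basic] GET /_ping : status=200
#    not ok 13 [01-basic] GET libpod/info : status
# followed by the "1..N" plan printed at the very end of the run.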
###########
# _bump # Increment a counter in a file
###########
function _bump() {
local file=$1
count=$(<$file)
echo $(( $count + 1 )) >| $file
}
#############
# jsonify # convert 'foo=bar' 'x=y' args to json {"foo":"bar","x":"y"}
#############
function jsonify() {
# convert each to double-quoted form
local -a settings_out
for i in "$@"; do
# Each argument is of the form foo=bar. Separate into left and right.
local lhs
local rhs
IFS='=' read lhs rhs <<<"$i"
if [[ $rhs =~ \" || $rhs == true || $rhs == false || $rhs == "[]" || $rhs =~ ^-?[0-9]+$ ]]; then
# rhs has been pre-formatted for JSON or a non-string, do not change it
:
elif [[ $rhs == False ]]; then
# JSON boolean is lowercase only
rhs=false
elif [[ $rhs == True ]]; then
# JSON boolean is lowercase only
rhs=true
else
rhs="\"${rhs}\""
fi
settings_out+=("\"${lhs}\":${rhs}")
done
# ...and wrap inside braces, with comma separator if multiple fields
(IFS=','; echo "{${settings_out[*]}}")
}
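# Illustrative example (hypothetical values): strings are quoted, while
# booleans and integers pass through unchanged:
#    jsonify name=foo terminal=true pid=1
# produces:
#    {"name":"foo","terminal":true,"pid":1}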
#######
# t # Main test helper
#######
function t() {
local method=$1; shift
local path=$1; shift
local -a curl_args form_args
local content_type="application/json"
local testname="$method $path"
# POST, PUT, and DELETE requests may be followed by one or more key=value pairs.
# Slurp the command line until we see a 3-digit status code.
if [[ $method = "POST" || $method == "PUT" || $method == "DELETE" ]]; then
local -a post_args
if [[ $method == "POST" ]]; then
function _add_curl_args() { curl_args+=(--data-binary @$1); }
else
function _add_curl_args() { curl_args+=(--upload-file $1); }
fi
for arg; do
case "$arg" in
# This is just some hack to avoid adding `-d {}` to curl for endpoints where we really need an empty body.
# --disable makes curl not lookup the curlrc file, it shouldn't affect the tests in any way.
--form=*) form_args+=(--form);
form_args+=("${arg#--form=}");
content_type="multipart/form-data";
shift;;
*=*) post_args+=("$arg");
shift;;
*.json) _add_curl_args $arg;
content_type="application/json";
shift;;
*.tar) _add_curl_args $arg;
content_type="application/x-tar";
shift;;
*.yaml) _add_curl_args $arg;
shift;;
application/*) content_type="$arg";
shift;;
[1-9][0-9][0-9]) break;;
*) die "Internal error: invalid POST arg '$arg'" ;;
esac
done
if [[ -z "${curl_args[*]}" && -z "${form_args[*]}" ]]; then
curl_args=(-d $(jsonify ${post_args[*]}))
testname="$testname [${curl_args[*]}]"
elif [[ -z "${curl_args[*]}" ]]; then
curl_args=(--form request.json=$(jsonify ${post_args[*]}) "${form_args[@]}")
testname="$testname [${curl_args[*]} ${form_args[*]}]"
fi
fi
# entrypoint path can include a descriptive comment; strip it off
path=${path%% *}
local url=$path
if ! [[ $path =~ ^'http://' ]]; then
# path may include JSONish params that curl will barf on; url-encode them
path="${path//'['/%5B}"
path="${path//']'/%5D}"
path="${path//'{'/%7B}"
path="${path//'}'/%7D}"
path="${path//':'/%3A}"
# If given path begins with /, use it as-is; otherwise prepend a version
# prefix: /v5.0.0/ for libpod endpoints, /v1.41/ for compat endpoints
url=http://$HOST:$PORT
case "$path" in
/*) url="$url$path" ;;
libpod/*) url="$url/v5.0.0/$path" ;;
*) url="$url/v1.41/$path" ;;
esac
fi
# curl -X HEAD but without --head seems to wait for output anyway
if [[ $method == "HEAD" ]]; then
curl_args+=("--head")
fi
# If this is set, we're *expecting* curl to time out
if [[ -n "$APIV2_TEST_EXPECT_TIMEOUT" ]]; then
curl_args+=("-m" $APIV2_TEST_EXPECT_TIMEOUT)
fi
local expected_code=$1; shift
if [[ "$expected_code" == "101" ]]; then
curl_args+=("-H" "Connection: upgrade" "-H" "Upgrade: tcp")
fi
# Log every action we do
echo "-------------------------------------------------------------" >>$LOG
echo "\$ $testname" >>$LOG
rm -f $WORKDIR/curl.*
# The --write-out 'format' gives us important response data.
# The hairy "{ ...;rc=$?; } || :" lets us capture curl's exit code and
# give a helpful diagnostic if it fails.
: > $WORKDIR/curl.result.out
: > $WORKDIR/curl.result.err
{ response=$(curl -X $method "${curl_args[@]}" \
-H "Content-type: $content_type" \
--dump-header $WORKDIR/curl.headers.out \
-v --stderr $WORKDIR/curl.result.err \
--write-out '%{http_code}^%{content_type}^%{time_total}' \
-o $WORKDIR/curl.result.out "$url"); rc=$?; } || :
if [ -n "$PODMAN_TESTS_DUMP_TRACES" ]; then
# Dump the results we got back, exactly as we got them back.
echo "\$ begin stdout" >>$LOG
test -s $WORKDIR/curl.result.out && od -t x1c $WORKDIR/curl.result.out 2>&1 >>$LOG
echo "\$ end stdout" >>$LOG
echo "\$ begin stderr" >>$LOG
test -s $WORKDIR/curl.result.err && cat $WORKDIR/curl.result.err >>$LOG
echo "\$ end stderr" >>$LOG
echo "\$ begin response code^content_type^time_total" >>$LOG
od -t x1c <<< "$response" >>$LOG
echo "\$ end response" >>$LOG
fi
# Special case: this means we *expect and want* a timeout
if [[ -n "$APIV2_TEST_EXPECT_TIMEOUT" ]]; then
# Hardcoded. See curl(1) for list of exit codes
if [[ $rc -eq 28 ]]; then
_show_ok 1 "$testname: curl timed out (expected)"
else
_show_ok 0 "$testname: expected curl to time out; it did not"
fi
return
fi
# Any error from curl is instant bad news, from which we can't recover
if [[ $rc -ne 0 ]]; then
die "curl failure ($rc) on $url - cannot continue. args=${curl_args[*]}"
fi
# Show returned headers (without trailing ^M or empty lines) in log file.
# Sometimes -- I can't remember why! -- we don't get headers.
if [[ -e $WORKDIR/curl.headers.out ]]; then
tr -d '\015' < $WORKDIR/curl.headers.out | grep -E '.' >>$LOG
fi
IFS='^' read actual_code content_type time_total <<<"$response"
printf "X-Response-Time: ${time_total}s\n\n" >>$LOG
# Log results, if text. If JSON, filter through jq for readability.
if [[ $content_type =~ /octet ]]; then
output="[$(file --brief $WORKDIR/curl.result.out)]"
echo "$output" >>$LOG
elif [[ -e $WORKDIR/curl.result.out ]]; then
# Output from /logs sometimes includes NULs. Strip them.
output=$(tr -d '\0' < $WORKDIR/curl.result.out)
if [[ $content_type =~ application/json ]] && [[ $method != "HEAD" ]]; then
jq . <<<"$output" >>$LOG
else
echo "$output" >>$LOG
fi
else
output=
echo "[no output]" >>$LOG
fi
# Test return code
is "$actual_code" "$expected_code" "$testname : status"
# Special case: 204/304, by definition, MUST NOT return content (rfc2616)
if [[ $expected_code = 204 || $expected_code = 304 ]]; then
if [ -n "$*" ]; then
die "Internal error: ${expected_code} status returns no output; fix your test."
fi
if [ -n "$output" ]; then
_show_ok 0 "$testname: ${expected_code} status returns no output" "''" "$output"
fi
return
fi
local i
# Special case: if response code does not match, dump the response body
# and skip all further subtests.
if [[ "$actual_code" != "$expected_code" ]]; then
echo -e "# response: $output"
for i; do
_show_ok skip "$testname: $i # skip - wrong return code"
done
return
fi
for i; do
if expr "$i" : '[^\!]\+\!=.\+' >/dev/null; then
# Disequality on json field
json_field=$(expr "$i" : '\([^!]*\)!')
expect_not=$(expr "$i" : '[^\!]*\!=\(.*\)')
actual=$(jq -r "$json_field" <<<"$output")
is_not "$actual" "$expect_not" "$testname : $json_field"
elif expr "$i" : "[^=~]\+=.*" >/dev/null; then
# Exact match on json field
json_field=$(expr "$i" : "\([^=]*\)=")
expect=$(expr "$i" : '[^=]*=\(.*\)')
actual=$(jq -r "$json_field" <<<"$output")
is "$actual" "$expect" "$testname : $json_field"
elif expr "$i" : "[^=~]\+~.*" >/dev/null; then
# regex match on json field
json_field=$(expr "$i" : "\([^~]*\)~")
expect=$(expr "$i" : '[^~]*~\(.*\)')
actual=$(jq -r "$json_field" <<<"$output")
like "$actual" "$expect" "$testname : $json_field"
else
# Direct string comparison
is "$output" "$i" "$testname : output"
fi
done
}
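# Examples (illustrative, adapted from typical *.at usage; not a complete
# reference -- see README.md). The 3-digit status code ends the request
# arguments; everything after it is a subtest on the response body:
#    t GET  libpod/info 200 .host.os=linux
#    t GET  libpod/images/json 200 length=1 .[0].Id~[0-9a-f]\{64\}
#    t POST containers/create Image=$IMAGE 201 .Id~[0-9a-f]\{64\}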
###################
# start_service # Run the socket listener
###################
service_pid=
function start_service() {
# If there's a listener on the port, nothing for us to do
{ exec 3<> /dev/tcp/$HOST/$PORT; } &>/dev/null && return
test -x $PODMAN_BIN || die "Not found: $PODMAN_BIN"
if [ "$HOST" != "localhost" ]; then
die "Cannot start service on non-localhost ($HOST)"
fi
# FIXME: EXPERIMENTAL: 2022-06-13: podman rootless needs a namespace. If
# system-service is the first podman command run (as is the case in CI)
# this will happen as a fork-exec, where the parent podman creates the
# namespace and the child is the server. Then, when stop_service() kills
# the parent, the child (server) happily stays alive and ruins subsequent
# tests that try to restart service with different settings.
# Workaround: run an unshare to get namespaces initialized.
if [[ $(id -u) != 0 ]]; then
$PODMAN_BIN unshare true
fi
CONTAINERS_REGISTRIES_CONF=$registries_conf_path \
$PODMAN_BIN \
--root $WORKDIR/server_root --syslog=true \
system service \
--time 0 \
tcp:127.0.0.1:$PORT \
&> $WORKDIR/server.log &
service_pid=$!
echo "# started service, pid $service_pid"
wait_for_port $HOST $PORT
}
function stop_service() {
# Stop the server
if [[ -n $service_pid ]]; then
kill $service_pid || :
wait $service_pid || :
echo "# stopped service, pid $service_pid"
fi
service_pid=
if { exec 3<> /dev/tcp/$HOST/$PORT; } &>/dev/null; then
echo "# WARNING: stop_service: Service still running on port $PORT"
fi
}
####################
# start_registry # Run a local registry
####################
REGISTRY_PORT=
REGISTRY_USERNAME=
REGISTRY_PASSWORD=
function start_registry() {
# We can be called multiple times, but each time should start a new
# registry container with (possibly) different configuration. That
# means that all callers must be responsible for invoking stop_registry.
if [[ -n "$REGISTRY_PORT" ]]; then
die "start_registry invoked twice in succession, without stop_registry"
fi
# First arg is auth type (default: "none", but can also be "htpasswd")
local auth="${1:-none}"
REGISTRY_PORT=$(random_port)
local REGDIR=$WORKDIR/registry
local AUTHDIR=$REGDIR/auth
mkdir -p $AUTHDIR
mkdir -p ${REGDIR}/{root,runroot}
local PODMAN_REGISTRY_ARGS="--root ${REGDIR}/root --runroot ${REGDIR}/runroot --tmpdir ${REGDIR}/tmp"
# Give it three tries, to compensate for network flakes
podman ${PODMAN_REGISTRY_ARGS} pull $REGISTRY_IMAGE ||
podman ${PODMAN_REGISTRY_ARGS} pull $REGISTRY_IMAGE ||
podman ${PODMAN_REGISTRY_ARGS} pull $REGISTRY_IMAGE
# Create a local cert (no need to do this more than once)
if [[ ! -e $AUTHDIR/domain.key ]]; then
# FIXME: is there a hidden "--quiet" flag? This is too noisy.
openssl req -newkey rsa:4096 -nodes -sha256 \
-keyout $AUTHDIR/domain.key -x509 -days 2 \
-out $AUTHDIR/domain.crt \
-subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=registry host certificate" \
-addext subjectAltName=DNS:localhost
fi
# If invoked with auth=htpasswd, create credentials
REGISTRY_USERNAME=
REGISTRY_PASSWORD=
declare -a registry_auth_params=(-e "REGISTRY_AUTH=$auth")
if [[ "$auth" = "htpasswd" ]]; then
REGISTRY_USERNAME=u$(random_string 7)
REGISTRY_PASSWORD=p$(random_string 7)
htpasswd -Bbn ${REGISTRY_USERNAME} ${REGISTRY_PASSWORD} \
> $AUTHDIR/htpasswd
registry_auth_params+=(
-e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm"
-e "REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd"
)
fi
# Run the registry, and wait for it to come up
podman ${PODMAN_REGISTRY_ARGS} run -d \
-p ${REGISTRY_PORT}:5000 \
--name registry \
-v $AUTHDIR:/auth:Z \
"${registry_auth_params[@]}" \
-e REGISTRY_HTTP_TLS_CERTIFICATE=/auth/domain.crt \
-e REGISTRY_HTTP_TLS_KEY=/auth/domain.key \
${REGISTRY_IMAGE}
wait_for_port localhost $REGISTRY_PORT 10
echo "# started registry (auth=$auth) on port $PORT"
}
function stop_registry() {
local REGDIR=${WORKDIR}/registry
if [[ -d $REGDIR ]]; then
local OPTS="--root ${REGDIR}/root --runroot ${REGDIR}/runroot"
podman $OPTS stop -i -t 0 registry
# rm/rmi are important when running rootless: without them we
# get EPERMS in tmpdir cleanup because files are owned by subuids.
podman $OPTS rm -f -i registry
if [[ "$1" = "--cleanup" ]]; then
podman $OPTS rmi -f -a
fi
echo "# stopped registry on port $PORT"
fi
REGISTRY_PORT=
REGISTRY_USERNAME=
REGISTRY_PASSWORD=
}
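# Illustrative usage (hypothetical snippet, not from the original tests):
#    start_registry htpasswd   # sets REGISTRY_PORT, REGISTRY_USERNAME, REGISTRY_PASSWORD
#    ...push/pull tests against localhost:$REGISTRY_PORT using those credentials...
#    stop_registry
# Every caller of start_registry is responsible for calling stop_registry.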
#################
# random_port # Random open port; arg is range (min-max), default 5001-5999
#################
function random_port() {
# Note port 5001 is chosen because 5000 is trusted per test/registries.conf.
# The tests try to push without SSL and that must fail so we cannot use that port.
local range=${1:-5001-5999}
local port
for port in $(shuf -i ${range}); do
if ! { exec 5<> /dev/tcp/127.0.0.1/$port; } &>/dev/null; then
echo $port
return
fi
done
die "Could not find open port in range $range"
}
###################
# random_string # Pseudorandom alphanumeric string of given length
###################
function random_string() {
local length=${1:-10}
head /dev/urandom | tr -dc a-zA-Z0-9 | head -c$length
}
###################
# wait_for_port # Returns once port is available on host
###################
function wait_for_port() {
local host=$1 # Probably "localhost"
local port=$2 # Numeric port
local _timeout=${3:-5} # Optional; default to 5 seconds
local path=/dev/tcp/$host/$port
# Wait
local i=$_timeout
while [ $i -gt 0 ]; do
{ exec 3<> /dev/tcp/$host/$port; } &>/dev/null && return
sleep 1
i=$(( $i - 1 ))
done
die "Timed out (${_timeout}s) waiting for service ($path)"
}
############
# podman # Needed by some test scripts to invoke the actual podman binary
############
function podman() {
echo "\$ $PODMAN_BIN $*" >>$WORKDIR/output.log
env CONTAINERS_REGISTRIES_CONF=$registries_conf_path \
$PODMAN_BIN --root $WORKDIR/server_root "$@" >>$WORKDIR/output.log 2>&1
}
####################
# root, rootless # Is server rootless?
####################
ROOTLESS=
function root() {
! rootless
}
function rootless() {
if [[ -z $ROOTLESS ]]; then
ROOTLESS=$(curl -s http://$HOST:$PORT/v1.40/info | jq .Rootless)
fi
test "$ROOTLESS" = "true"
}
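# Illustrative usage (hypothetical guard in a test file):
#    if rootless; then
#        _show_ok skip "my-privileged-test # skip - not applicable when rootless"
#    fi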
# True if cgroups v2 are enabled
function have_cgroupsv2() {
cgroup_type=$(stat -f -c %T /sys/fs/cgroup)
test "$cgroup_type" = "cgroup2fs"
}
# END infrastructure code
###############################################################################
# BEGIN sanity checks
for tool in curl jq podman; do
type $tool &>/dev/null || die "Required tool '$tool' not found"
done
# END sanity checks
###############################################################################
# BEGIN entry handler (subtest invoker)
echo '============================= test session starts =============================='
echo "podman client -- $(curl --version)"
# Identify the tests to run. If called with args, use those as globs.
tests_to_run=()
if [ -n "$*" ]; then
shopt -s nullglob
for i; do
match=(${TESTS_DIR}/*${i}*.at)
if [ ${#match[@]} -eq 0 ]; then
die "No match for $TESTS_DIR/*$i*.at"
fi
tests_to_run+=("${match[@]}")
done
shopt -u nullglob
else
tests_to_run=($TESTS_DIR/*.at)
fi
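# Example (illustrative): './test-apiv2 20' matches ${TESTS_DIR}/*20*.at
# (e.g. 20-containers.at); with no arguments, every *.at file is run.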
echo -e "collected ${#tests_to_run[@]} items\n"
start_service
for i in "${tests_to_run[@]}"; do
TEST_CONTEXT="[$(basename $i .at)]"
# Clear output from 'podman' helper
truncate --size=0 $WORKDIR/output.log
source $i
done
# END entry handler
###############################################################################
clean_up_server
test_count=$(<$testcounter_file)
failure_count=$(<$failures_file)
if [ -z "$PODMAN_TESTS_KEEP_WORKDIR" ]; then
rm -rf $WORKDIR
fi
echo "1..${test_count}"
exit $failure_count