Compare commits

..

No commits in common. "main" and "20230320t154110z-f37f36d12" have entirely different histories.

89 changed files with 1889 additions and 1729 deletions

View File

@ -6,6 +6,7 @@ load("cirrus", "fs")
def main():
return {
"env": {
"IMG_SFX": fs.read("IMG_SFX").strip()
"IMG_SFX": fs.read("IMG_SFX").strip(),
"IMPORT_IMG_SFX": fs.read("IMPORT_IMG_SFX").strip()
},
}

View File

@ -10,8 +10,6 @@ env:
CIRRUS_CLONE_DEPTH: 50
# Version of packer to use when building images
PACKER_VERSION: &PACKER_VERSION "1.8.3"
# Registry/namespace prefix where container images live
REGPFX: "quay.io/libpod"
#IMG_SFX = <See IMG_SFX file and .cirrus.star script>
#IMPORT_IMG_SFX = <See IMPORT_IMG_SFX file and .cirrus.star script>
@ -47,7 +45,7 @@ image_builder_task:
# Packer needs time to clean up partially created VM images
auto_cancellation: $CI != "true"
stateful: true
timeout_in: 50m
timeout_in: 40m
container:
dockerfile: "image_builder/Containerfile"
cpu: 2
@ -71,7 +69,7 @@ container_images_task: &container_images
skip: *ci_docs_tooling
depends_on:
- image_builder
timeout_in: &cntr_timeout 40m
timeout_in: 30m
gce_instance: &ibi_vm
image_project: "libpod-218412"
# Trust whatever was built most recently is functional
@ -83,7 +81,7 @@ container_images_task: &container_images
env:
TARGET_NAME: 'fedora_podman'
# Add a 'c' to the tag for consistency with VM Image names
DEST_FQIN: &fqin '${REGPFX}/${TARGET_NAME}:c$IMG_SFX'
DEST_FQIN: &fqin 'quay.io/libpod/${TARGET_NAME}:c$IMG_SFX'
- name: *name
env:
TARGET_NAME: 'prior-fedora_podman'
@ -99,59 +97,36 @@ container_images_task: &container_images
# TARGET_NAME: 'debian'
# DEST_FQIN: *fqin
env: &image_env
# For $REGPFX namespace, select FQINs only.
REG_USERNAME: ENCRYPTED[df4efe530b9a6a731cfea19233e395a5206d24dfac25e84329de035393d191e94ead8c39b373a0391fa025cab15470f8]
REG_PASSWORD: ENCRYPTED[255ec05057707c20237a6c7d15b213422779c534f74fe019b8ca565f635dba0e11035a034e533a6f39e146e7435d87b5]
# For quay.io/libpod namespace
REG_USERNAME: ENCRYPTED[de755aef351c501ee480231c24eae25b15e2b2a2b7c629f477c1d427fc5269e360bb358a53bd8914605bae588e99b52a]
REG_PASSWORD: ENCRYPTED[52268944bb0d6642c33efb1c5d7fb82d0c40f9e6988448de35827f9be2cc547c1383db13e8b21516dbd7a0a69a7ae536]
script: ci/make_container_images.sh;
package_cache: &package_cache
folder: "/var/tmp/automation_images_tmp/.cache/**"
folder: "/tmp/automation_images_tmp/.cache/**"
fingerprint_key: "${TARGET_NAME}-cache-version-1"
# Most other tooling images depend on this one, build it first so the others
# may build in parallel.
imgts_build_task:
alias: imgts_build
name: 'Build IMGTS image'
only_if: *is_pr
skip: &ci_docs $CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*'
depends_on:
- image_builder
timeout_in: *cntr_timeout
gce_instance: *ibi_vm
env: *image_env
script: |
export TARGET_NAME=imgts
export DEST_FQIN="${REGPFX}/${TARGET_NAME}:c${IMG_SFX}";
ci/make_container_images.sh;
tooling_images_task:
alias: tooling_images
name: 'Build Tooling image ${TARGET_NAME}'
only_if: *is_pr
skip: *ci_docs
name: 'Build Tooling images'
only_if: $CIRRUS_CRON == ''
skip: &ci_docs $CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*'
depends_on:
- imgts_build
timeout_in: *cntr_timeout
- validate
# TODO: This should not take this long, but it can :(
timeout_in: 40m
gce_instance: *ibi_vm
env: *image_env
matrix:
- env:
TARGET_NAME: imgobsolete
- env:
TARGET_NAME: imgprune
- env:
TARGET_NAME: gcsupld
- env:
TARGET_NAME: get_ci_vm
- env:
TARGET_NAME: orphanvms
- env:
TARGET_NAME: ccia
env:
<<: *image_env
TARGET_NAMES: imgts imgobsolete imgprune gcsupld get_ci_vm orphanvms ccia bench_stuff
PUSH_LATEST: 1 # scripts force to 0 if $CIRRUS_PR
script: |
export DEST_FQIN="${REGPFX}/${TARGET_NAME}:c${IMG_SFX}";
ci/make_container_images.sh;
for TARGET_NAME in $TARGET_NAMES; do
export TARGET_NAME
export DEST_FQIN="quay.io/libpod/${TARGET_NAME}:c${IMG_SFX}";
ci/make_container_images.sh;
done
base_images_task:
name: "Build VM Base-images"
@ -164,21 +139,20 @@ base_images_task:
# Packer needs time to clean up partially created VM images
auto_cancellation: $CI != "true"
stateful: true
timeout_in: 70m
gce_instance: *ibi_vm
timeout_in: 45m
# Cannot use a container for this task, virt required for fedora image conversion
gce_instance:
<<: *ibi_vm
# Nested-virt is required, need Intel Haswell or better CPU
enable_nested_virtualization: true
type: "n2-standard-2"
scopes: ["cloud-platform"]
matrix:
- &base_image
name: "${PACKER_BUILDS} Base Image"
gce_instance: &nested_virt_vm
<<: *ibi_vm
# Nested-virt is required, need Intel Haswell or better CPU
enable_nested_virtualization: true
type: "n2-standard-16"
scopes: ["cloud-platform"]
env:
PACKER_BUILDS: "fedora"
- <<: *base_image
gce_instance: *nested_virt_vm
env:
PACKER_BUILDS: "prior-fedora"
- <<: *base_image
@ -193,8 +167,6 @@ base_images_task:
env:
GAC_JSON: &gac_json ENCRYPTED[7fba7fb26ab568ae39f799ab58a476123206576b0135b3d1019117c6d682391370c801e149f29324ff4b50133012aed9]
AWS_INI: &aws_ini ENCRYPTED[4cd69097cd29a9899e51acf3bbacceeb83cb5c907d272ca1e2a8ccd515b03f2368a0680870c0d120fc32bc578bb0a930]
AWS_MAX_ATTEMPTS: 300
AWS_TIMEOUT_SECONDS: 3000
script: "ci/make.sh base_images"
manifest_artifacts:
path: base_images/manifest.json
@ -212,7 +184,7 @@ cache_images_task:
# Packer needs time to clean up partially created VM images
auto_cancellation: $CI != "true"
stateful: true
timeout_in: 90m
timeout_in: 45m
container:
dockerfile: "image_builder/Containerfile"
cpu: 2
@ -229,10 +201,10 @@ cache_images_task:
PACKER_BUILDS: "prior-fedora"
- <<: *cache_image
env:
PACKER_BUILDS: "rawhide"
PACKER_BUILDS: "fedora-netavark"
- <<: *cache_image
env:
PACKER_BUILDS: "fedora-netavark"
PACKER_BUILDS: "fedora-podman-py"
- <<: *cache_image
env:
PACKER_BUILDS: "fedora-aws"
@ -251,8 +223,6 @@ cache_images_task:
env:
GAC_JSON: *gac_json
AWS_INI: *aws_ini
AWS_MAX_ATTEMPTS: 300
AWS_TIMEOUT_SECONDS: 3000
script: "ci/make.sh cache_images"
manifest_artifacts:
path: cache_images/manifest.json
@ -271,6 +241,7 @@ win_images_task:
# Packer needs time to clean up partially created VM images
auto_cancellation: $CI != "true"
stateful: true
timeout_in: 45m
# Packer WinRM communicator is not reliable on container tasks
gce_instance:
<<: *ibi_vm
@ -283,39 +254,18 @@ win_images_task:
path: win_images/manifest.json
type: application/json
# These targets are intended for humans, make sure they builds and function on a basic level
test_debug_task:
name: "Test ${TARGET} make target"
alias: test_debug
only_if: *is_pr
skip: *ci_docs
depends_on:
- validate
gce_instance: *nested_virt_vm
matrix:
- env:
TARGET: ci_debug
- env:
TARGET: image_builder_debug
env:
HOME: "/root"
GAC_FILEPATH: "/dev/null"
AWS_SHARED_CREDENTIALS_FILE: "/dev/null"
DBG_TEST_CMD: "true"
script: make ${TARGET}
# Test metadata addition to images (built or not) to ensure container functions
# TODO: Requires manually examining the output log to confirm operation.
test_imgts_task: &imgts
name: "Test image timestamp/metadata updates"
alias: test_imgts
only_if: *is_pr
only_if: $CIRRUS_CRON == ''
skip: *ci_docs
depends_on: &imgts_deps
- base_images
- cache_images
- imgts_build
depends_on:
- tooling_images
container:
image: '${REGPFX}/imgts:c$IMG_SFX'
image: 'quay.io/libpod/imgts:c$IMG_SFX'
cpu: 2
memory: '2G'
env: &imgts_env
@ -337,14 +287,13 @@ test_imgts_task: &imgts
fedora-c${IMG_SFX}
prior-fedora-c${IMG_SFX}
fedora-netavark-c${IMG_SFX}
rawhide-c${IMG_SFX}
fedora-podman-py-c${IMG_SFX}
debian-c${IMG_SFX}
build-push-c${IMG_SFX}
EC2IMGNAMES: |
fedora-aws-i${IMPORT_IMG_SFX}
fedora-aws-b${IMG_SFX}
fedora-aws-c${IMG_SFX}
fedora-aws-arm64-i${IMPORT_IMG_SFX}
fedora-aws-arm64-b${IMG_SFX}
fedora-podman-aws-arm64-c${IMG_SFX}
fedora-netavark-aws-arm64-c${IMG_SFX}
@ -359,7 +308,9 @@ imgts_task:
alias: imgts
only_if: *is_pr
skip: *ci_docs_tooling
depends_on: *imgts_deps
depends_on:
- base_images
- cache_images
env:
<<: *imgts_env
DRY_RUN: 0
@ -376,13 +327,13 @@ imgts_task:
test_imgobsolete_task: &lifecycle_test
name: "Test obsolete image detection"
alias: test_imgobsolete
only_if: *is_pr
only_if: &only_prs $CIRRUS_PR != ''
skip: *ci_docs
depends_on:
- tooling_images
- imgts
container:
image: '${REGPFX}/imgobsolete:c$IMG_SFX'
image: 'quay.io/libpod/imgobsolete:c$IMG_SFX'
cpu: 2
memory: '2G'
env: &lifecycle_env
@ -401,8 +352,9 @@ test_orphanvms_task:
<<: *lifecycle_test
name: "Test orphan VMs detection"
alias: test_orphanvms
skip: *ci_docs
container:
image: '$REGPFX/orphanvms:c$IMG_SFX'
image: 'quay.io/libpod/orphanvms:c$IMG_SFX'
cpu: 2
memory: '2G'
env:
@ -411,7 +363,6 @@ test_orphanvms_task:
GCPPROJECT: 'libpod-218412'
GCPPROJECTS: 'libpod-218412' # value for testing, otherwise see gcpprojects.txt
AWSINI: ENCRYPTED[1ab89ff7bc1515dc964efe7ef6e094e01164ba8dd2e11c9a01259c6af3b3968ab841dbe473fe4ab5b573f2f5fa3653e8]
DRY_RUN: 1
EVERYTHING: 1 # Alter age-limit from 3-days -> 3 seconds for a test-run.
script: /usr/local/bin/entrypoint.sh
@ -420,23 +371,24 @@ test_imgprune_task:
<<: *lifecycle_test
name: "Test obsolete image removal"
alias: test_imgprune
depends_on:
- tooling_images
- imgts
container:
image: '$REGPFX/imgprune:c$IMG_SFX'
test_gcsupld_task:
name: "Test uploading to GCS"
alias: test_gcsupld
only_if: *is_pr
skip: *ci_docs
depends_on:
- tooling_images
- imgts
container:
image: '$REGPFX/gcsupld:c$IMG_SFX'
image: 'quay.io/libpod/imgprune:c$IMG_SFX'
test_gcsupld_task:
name: "Test uploading to GCS"
alias: test_gcsupld
only_if: *only_prs
skip: *ci_docs
depends_on:
- tooling_images
- imgts
container:
image: 'quay.io/libpod/gcsupld:c$IMG_SFX'
cpu: 2
memory: '2G'
env:
@ -449,13 +401,13 @@ test_gcsupld_task:
test_get_ci_vm_task:
name: "Test get_ci_vm entrypoint"
alias: test_get_ci_vm
only_if: *is_pr
only_if: *only_prs
skip: *ci_docs
depends_on:
- tooling_images
- imgts
container:
image: '$REGPFX/get_ci_vm:c$IMG_SFX'
image: 'quay.io/libpod/get_ci_vm:c$IMG_SFX'
cpu: 2
memory: '2G'
env:
@ -466,59 +418,55 @@ test_get_ci_vm_task:
test_ccia_task:
name: "Test ccia entrypoint"
alias: test_ccia
only_if: *is_pr
only_if: *only_prs
skip: *ci_docs
depends_on:
- tooling_images
container:
image: '$REGPFX/ccia:c$IMG_SFX'
image: 'quay.io/libpod/ccia:c$IMG_SFX'
cpu: 2
memory: '2G'
test_script: ./ccia/test.sh
test_bench_stuff_task:
name: "Test bench_stuff entrypoint"
alias: test_bench_stuff
only_if: *only_prs
skip: *ci_docs
depends_on:
- tooling_images
container:
image: 'quay.io/libpod/bench_stuff:c$IMG_SFX'
cpu: 2
memory: '2G'
test_script: ./bench_stuff/test.sh
test_build-push_task:
name: "Test build-push VM functions"
alias: test_build-push
only_if: |
$CIRRUS_PR != '' &&
$CIRRUS_PR_LABELS !=~ ".*no_build-push.*"
only_if: *only_prs
skip: *ci_docs_tooling
depends_on:
- cache_images
gce_instance:
image_project: "libpod-218412"
image_name: build-push-c${IMG_SFX}
image_family: 'build-push-cache'
zone: "us-central1-a"
disk: 200
# More muscle to emulate multi-arch
type: "n2-standard-4"
script: |
mkdir /tmp/context
echo -e "FROM scratch\nENV foo=bar\n" > /tmp/context/Containerfile
source /etc/automation_environment
A_DEBUG=1 build-push.sh --nopush --arches=amd64,arm64,s390x,ppc64le example.com/foo/bar /tmp/context
tag_latest_images_task:
alias: tag_latest_images
name: "Tag latest built container images."
only_if: |
$CIRRUS_CRON == '' &&
$CIRRUS_BRANCH == $CIRRUS_DEFAULT_BRANCH
skip: *ci_docs
gce_instance: *ibi_vm
env: *image_env
script: ci/tag_latest.sh
script: bash ./build-push/test.sh
# N/B: "latest" image produced after PR-merge (branch-push)
cron_imgobsolete_task: &lifecycle_cron
name: "Periodicly mark old images obsolete"
alias: cron_imgobsolete
only_if: $CIRRUS_CRON == 'lifecycle'
only_if: $CIRRUS_PR == '' && $CIRRUS_CRON != ''
container:
image: '$REGPFX/imgobsolete:latest'
image: 'quay.io/libpod/imgobsolete:latest'
cpu: 2
memory: '2G'
env:
@ -534,7 +482,7 @@ cron_imgprune_task:
depends_on:
- cron_imgobsolete
container:
image: '$REGPFX/imgprune:latest'
image: 'quay.io/libpod/imgprune:latest'
success_task:
@ -548,7 +496,6 @@ success_task:
- base_images
- cache_images
- win_images
- test_debug
- test_imgts
- imgts
- test_imgobsolete
@ -559,6 +506,7 @@ success_task:
- test_gcsupld
- test_get_ci_vm
- test_ccia
- test_bench_stuff
- test_build-push
container:
<<: *ci_container

View File

@ -1,2 +0,0 @@
IMGSFX,IMG-SFX->IMG_SFX
Dockerfile->Containerfile

View File

View File

@ -1,4 +0,0 @@
[codespell]
ignore-words = .codespellignore
dictionary = .codespelldict
quiet-level = 3

View File

@ -13,9 +13,9 @@ import sys
def msg(msg, newline=True):
"""Print msg to stderr with optional newline."""
nl = ""
nl = ''
if newline:
nl = "\n"
nl = '\n'
sys.stderr.write(f"{msg}{nl}")
sys.stderr.flush()
@ -23,13 +23,13 @@ def msg(msg, newline=True):
def stage_sort(item):
"""Return sorting-key for build-image-json item."""
if item["stage"] == "import":
return str("0010" + item["name"])
return str("0010"+item["name"])
elif item["stage"] == "base":
return str("0020" + item["name"])
return str("0020"+item["name"])
elif item["stage"] == "cache":
return str("0030" + item["name"])
return str("0030"+item["name"])
else:
return str("0100" + item["name"])
return str("0100"+item["name"])
if "GITHUB_ENV" not in os.environ:
@ -40,58 +40,46 @@ github_workspace = os.environ.get("GITHUB_WORKSPACE", ".")
# File written by a previous workflow step
with open(f"{github_workspace}/built_images.json") as bij:
msg(f"Reading image build data from {bij.name}:")
data = []
for build in json.load(bij): # list of build data maps
stage = build.get("stage", False)
name = build.get("name", False)
sfx = build.get("sfx", False)
task = build.get("task", False)
if bool(stage) and bool(name) and bool(sfx) and bool(task):
image_suffix = f"{stage[0]}{sfx}"
data.append(
dict(stage=stage, name=name, image_suffix=image_suffix, task=task)
)
if cirrus_ci_build_id is None:
cirrus_ci_build_id = sfx
msg(f"Including '{stage}' stage build '{name}' for task '{task}'.")
else:
msg(f"Skipping '{stage}' stage build '{name}' for task '{task}'.")
msg(f"Reading image build data from {bij.name}:")
data = []
for build in json.load(bij): # list of build data maps
stage = build.get("stage", False)
name = build.get("name", False)
sfx = build.get("sfx", False)
task = build.get("task", False)
if bool(stage) and bool(name) and bool(sfx) and bool(task):
image_suffix = f'{stage[0]}{sfx}'
data.append(dict(stage=stage, name=name,
image_suffix=image_suffix, task=task))
if cirrus_ci_build_id is None:
cirrus_ci_build_id = sfx
msg(f"Including '{stage}' stage build '{name}' for task '{task}'.")
else:
msg(f"Skipping '{stage}' stage build '{name}' for task '{task}'.")
url = "https://cirrus-ci.com/task"
url = 'https://cirrus-ci.com/task'
lines = []
data.sort(key=stage_sort)
for item in data:
image_suffix = item["image_suffix"]
# Base-images should never actually be used, but it may be helpful
# to have them in the list in case some debugging is needed.
if item["stage"] != "cache":
image_suffix = "do-not-use"
lines.append(
"|*{0}*|[{1}]({2})|`{3}`|\n".format(
item["stage"],
item["name"],
"{0}/{1}".format(url, item["task"]),
image_suffix,
)
)
lines.append('|*{0}*|[{1}]({2})|`{3}`|\n'.format(item['stage'],
item['name'], '{0}/{1}'.format(url, item['task']),
item['image_suffix']))
# This is the mechanism required to set an multi-line env. var.
# value to be consumed by future workflow steps.
with open(os.environ["GITHUB_ENV"], "a") as ghenv, open(
f"{github_workspace}/images.md", "w"
) as mdfile, open(f"{github_workspace}/images.json", "w") as images_json:
with open(os.environ["GITHUB_ENV"], "a") as ghenv, \
open(f'{github_workspace}/images.md', "w") as mdfile, \
open(f'{github_workspace}/images.json', "w") as images_json:
env_header = "IMAGE_TABLE<<EOF\n"
header = (
f"[Cirrus CI build](https://cirrus-ci.com/build/{cirrus_ci_build_id})"
" successful. [Found built image names and"
f' IDs](https://github.com/{os.environ["GITHUB_REPOSITORY"]}'
f'/actions/runs/{os.environ["GITHUB_RUN_ID"]}):\n'
"\n"
)
c_head = "|*Stage*|**Image Name**|`IMAGE_SUFFIX`|\n" "|---|---|---|\n"
env_header = ("IMAGE_TABLE<<EOF\n")
header = (f"[Cirrus CI build](https://cirrus-ci.com/build/{cirrus_ci_build_id})"
" successful. [Found built image names and"
f' IDs](https://github.com/{os.environ["GITHUB_REPOSITORY"]}'
f'/actions/runs/{os.environ["GITHUB_RUN_ID"]}):\n'
"\n")
c_head = ("|*Stage*|**Image Name**|`IMAGE_SUFFIX`|\n"
"|---|---|---|\n")
# Different output destinations get slightly different content
for dst in [ghenv, mdfile, sys.stderr]:
if dst == ghenv:
@ -104,7 +92,5 @@ with open(os.environ["GITHUB_ENV"], "a") as ghenv, open(
dst.write("EOF\n\n")
json.dump(data, images_json, indent=4, sort_keys=True)
msg(
f"Wrote github env file '{ghenv.name}', md-file '{mdfile.name}',"
f" and json-file '{images_json.name}'"
)
msg(f"Wrote github env file '{ghenv.name}', md-file '{mdfile.name}',"
f" and json-file '{images_json.name}'")

View File

@ -1,12 +1,20 @@
/*
Renovate is a service similar to GitHub Dependabot.
Renovate is a service similar to GitHub Dependabot, but with
(fantastically) more configuration options. So many options
in fact, if you're new I recommend glossing over this cheat-sheet
prior to the official documentation:
Please Manually validate any changes to this file with:
https://www.augmentedmind.de/2021/07/25/renovate-bot-cheat-sheet
Configuration Update/Change Procedure:
1. Make changes
2. Manually validate changes (from repo-root):
podman run -it \
-v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \
ghcr.io/renovatebot/renovate:latest \
docker.io/renovate/renovate:latest \
renovate-config-validator
3. Commit.
Configuration Reference:
https://docs.renovatebot.com/configuration-options/
@ -14,9 +22,11 @@
Monitoring Dashboard:
https://app.renovatebot.com/dashboard#github/containers
Note: The Renovate bot will create/manage its business on
branches named 'renovate/*'. The only copy of this
file that matters is the one on the `main` branch.
Note: The Renovate bot will create/manage it's business on
branches named 'renovate/*'. Otherwise, and by
default, the only the copy of this file that matters
is the one on the `main` branch. No other branches
will be monitored or touched in any way.
*/
{
@ -34,45 +44,12 @@
// This repo builds images, don't try to manage them.
"docker:disable"
],
/*************************************************
*** Repository-specific configuration options ***
*************************************************/
// Don't leave dep. update. PRs "hanging", assign them to people.
"assignees": ["cevich"],
// Don't build CI VM images for dep. update PRs (by default)
"commitMessagePrefix": "[CI:DOCS]",
"customManagers": [
// Manage updates to the common automation library version
{
"customType": "regex",
"fileMatch": "^lib.sh$",
"matchStrings": ["INSTALL_AUTOMATION_VERSION=\"(?<currentValue>.+)\""],
"depNameTemplate": "containers/automation",
"datasourceTemplate": "github-tags",
"versioningTemplate": "semver-coerced",
// "v" included in tag, but should not be used in lib.sh
"extractVersionTemplate": "^v(?<version>.+)$"
}
],
// N/B: LAST MATCHING RULE WINS, match statems are ANDed together.
"packageRules": [
// When automation library version updated, full CI VM image build
// is needed, along with some other overrides not required in
// (for example) github-action updates.
{
"matchManagers": ["custom.regex"],
"matchFileNames": ["lib.sh"],
"schedule": ["at any time"],
"commitMessagePrefix": null,
"draftPR": true,
"prBodyNotes": [
"\
{{#if isMajor}}\
:warning: Changes are **likely** required for build-scripts and/or downstream CI VM \
image users. Please check very carefully. :warning:\
{{else}}\
:warning: Changes may be required for build-scripts and/or downstream CI VM \
image users. Please double-check. :warning:\
{{/if}}"
]
}
]
// Don't build CI VM images for dep. update PRs
commitMessagePrefix: "[CI:DOCS]",
}

View File

@ -13,10 +13,5 @@ on:
jobs:
# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
call_cron_failures:
uses: containers/podman/.github/workflows/check_cirrus_cron.yml@main
secrets:
SECRET_CIRRUS_API_KEY: ${{secrets.SECRET_CIRRUS_API_KEY}}
ACTION_MAIL_SERVER: ${{secrets.ACTION_MAIL_SERVER}}
ACTION_MAIL_USERNAME: ${{secrets.ACTION_MAIL_USERNAME}}
ACTION_MAIL_PASSWORD: ${{secrets.ACTION_MAIL_PASSWORD}}
ACTION_MAIL_SENDER: ${{secrets.ACTION_MAIL_SENDER}}
uses: containers/buildah/.github/workflows/check_cirrus_cron.yml@main
secrets: inherit

View File

@ -25,12 +25,12 @@ jobs:
orphan_vms:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
persist-credentials: false
# Avoid duplicating cron-fail_addrs.csv
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
repository: containers/podman
path: '_podman'
@ -44,14 +44,14 @@ jobs:
GCPPROJECT: 'libpod-218412'
run: |
export GCPNAME GCPJSON AWSINI GCPPROJECT
export GCPPROJECTS=$(grep -E -vx '^#+.*$' $GITHUB_WORKSPACE/gcpprojects.txt | tr -s '[:space:]' ' ')
export GCPPROJECTS=$(egrep -vx '^#+.*$' $GITHUB_WORKSPACE/gcpprojects.txt | tr -s '[:space:]' ' ')
podman run --rm \
-e GCPNAME -e GCPJSON -e AWSINI -e GCPPROJECT -e GCPPROJECTS \
quay.io/libpod/orphanvms:latest \
> /tmp/orphanvms_output.txt
- if: always()
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v3
with:
name: orphanvms_output
path: /tmp/orphanvms_output.txt
@ -59,7 +59,7 @@ jobs:
- name: Count number of orphaned VMs
id: orphans
run: |
count=$(grep -E -x '\* VM .+' /tmp/orphanvms_output.txt | wc -l)
count=$(egrep -x '\* VM .+' /tmp/orphanvms_output.txt | wc -l)
# Assist with debugging job (step-outputs are otherwise hidden)
printf "Orphan VMs count:%d\n" $count
if [[ "$count" =~ ^[0-9]+$ ]]; then
@ -86,20 +86,20 @@ jobs:
- if: steps.orphans.outputs.count > 0
name: Send orphan notification e-mail
# Ref: https://github.com/dawidd6/action-send-mail
uses: dawidd6/action-send-mail@v3.12.0
uses: dawidd6/action-send-mail@v3.7.1
with:
server_address: ${{ secrets.ACTION_MAIL_SERVER }}
server_port: 465
username: ${{ secrets.ACTION_MAIL_USERNAME }}
password: ${{ secrets.ACTION_MAIL_PASSWORD }}
subject: Orphaned CI VMs detected
subject: Orphaned GCP VMs
to: ${{env.RCPTCSV}}
from: ${{ secrets.ACTION_MAIL_SENDER }}
body: file:///tmp/email_body.txt
- if: failure()
name: Send error notification e-mail
uses: dawidd6/action-send-mail@v3.12.0
uses: dawidd6/action-send-mail@v3.7.1
with:
server_address: ${{secrets.ACTION_MAIL_SERVER}}
server_port: 465
@ -108,4 +108,4 @@ jobs:
subject: Github workflow error on ${{github.repository}}
to: ${{env.RCPTCSV}}
from: ${{secrets.ACTION_MAIL_SENDER}}
body: "Job failed: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}"
body: "Job failed: https://github.com/${{github.repository}}/runs/${{github.job}}?check_suite_focus=true"

View File

@ -58,7 +58,7 @@ jobs:
fi
- if: steps.retro.outputs.is_pr == 'true'
uses: actions/checkout@v4
uses: actions/checkout@v3
with:
persist-credentials: false
@ -132,10 +132,12 @@ jobs:
- if: steps.manifests.outputs.count > 0
name: Post PR comment with image name/id table
uses: thollander/actions-comment-pull-request@v3
uses: jungwinter/comment@v1.1.0
with:
pr-number: '${{ steps.retro.outputs.prn }}'
message: |
issue_number: '${{ steps.retro.outputs.prn }}'
type: 'create'
token: '${{ secrets.GITHUB_TOKEN }}'
body: |
${{ env.IMAGE_TABLE }}
# Ref: https://github.com/marketplace/actions/deploy-to-gist

1
.gitignore vendored
View File

@ -1,3 +1,2 @@
*/*.json
/.cache
.pre-commit-config.yaml

View File

@ -1,20 +0,0 @@
---
# Ref: https://pre-commit.com/#creating-new-hooks
- id: check-imgsfx
name: Check IMG_SFX for accidental reuse.
description: |
Every PR intended to produce CI VM or container images must update
the `IMG_SFX` file via `make IMG_SFX`. The exact value will be
validated against global suffix usage (encoded as tags on the
`imgts` container image). This pre-commit hook verifies on every
push, the IMG_SFX file's value has not been pushed previously.
It's intended as a simple/imperfect way to save developers time
by avoiding force-pushes that will most certainly fail validation.
entry: ./check-imgsfx.sh
language: system
exclude: '.*' # Not examining any specific file/dir/link
always_run: true # ignore no matching files
fail_fast: true
pass_filenames: false
stages: ["pre-push"]

View File

@ -1 +1 @@
20250812t173301z-f42f41d13
20230320t154110z-f37f36d12

1
IMPORT_IMG_SFX Normal file
View File

@ -0,0 +1 @@
20230302t154757z-f37f36d12

270
Makefile
View File

@ -1,7 +1,4 @@
# Default is sh, which has scripting limitations
SHELL := $(shell command -v bash;)
##### Functions #####
# Evaluates to $(1) if $(1) non-empty, otherwise evaluates to $(2)
@ -18,20 +15,18 @@ if_ci_else = $(if $(findstring true,$(CI)),$(1),$(2))
##### Important image release and source details #####
export CENTOS_STREAM_RELEASE = 9
export CENTOS_STREAM_RELEASE = 8
# Warning: Beta Fedora releases are not supported. Verifiy EC2 AMI availability
# here: https://fedoraproject.org/cloud/download
export FEDORA_RELEASE = 42
export PRIOR_FEDORA_RELEASE = 41
export FEDORA_RELEASE = 37
export PRIOR_FEDORA_RELEASE = 36
# This should always be one-greater than $FEDORA_RELEASE (assuming it's actually the latest)
export RAWHIDE_RELEASE = 43
# See import_images/README.md
export FEDORA_IMPORT_IMG_SFX = $(_IMPORT_IMG_SFX)
# Automation assumes the actual release number (after SID upgrade)
# is always one-greater than the latest DEBIAN_BASE_FAMILY (GCE image).
export DEBIAN_RELEASE = 13
export DEBIAN_BASE_FAMILY = debian-12
export DEBIAN_RELEASE = 12
export DEBIAN_BASE_FAMILY = debian-11
IMPORT_FORMAT = vhdx
@ -103,6 +98,7 @@ override _HLPFMT = "%-20s %s\n"
# Suffix value for any images built from this make execution
_IMG_SFX ?= $(file <IMG_SFX)
_IMPORT_IMG_SFX ?= $(file <IMPORT_IMG_SFX)
# Env. vars needed by packer
export CHECKPOINT_DISABLE = 1 # Disable hashicorp phone-home
@ -111,12 +107,6 @@ export PACKER_CACHE_DIR = $(call err_if_empty,_TEMPDIR)
# AWS CLI default, in case caller needs to override
export AWS := aws --output json --region us-east-1
# Needed for container-image builds
GIT_HEAD = $(shell git rev-parse HEAD)
# Save some typing
_IMGTS_FQIN := quay.io/libpod/imgts:c$(_IMG_SFX)
##### Targets #####
# N/B: The double-# after targets is gawk'd out as the target description
@ -131,39 +121,16 @@ help: ## Default target, parses special in-line comments as documentation.
# There are length/character limitations (a-z, 0-9, -) in GCE for image
# names and a max-length of 63.
.PHONY: IMG_SFX
IMG_SFX: timebomb-check ## Generate a new date-based image suffix, store in the file IMG_SFX
@echo "$$(date -u +%Y%m%dt%H%M%Sz)-f$(FEDORA_RELEASE)f$(PRIOR_FEDORA_RELEASE)d$(subst .,,$(DEBIAN_RELEASE))" > "$@"
@cat IMG_SFX
IMG_SFX: ## Generate a new date-based image suffix, store in the file IMG_SFX
$(file >$@,$(shell date --utc +%Y%m%dt%H%M%Sz)-f$(FEDORA_RELEASE)f$(PRIOR_FEDORA_RELEASE)d$(subst .,,$(DEBIAN_RELEASE)))
@echo "$(file <IMG_SFX)"
# Prevent us from wasting CI time when we have expired timebombs
.PHONY: timebomb-check
timebomb-check:
@now=$$(date -u +%Y%m%d); \
found=; \
while read -r bomb; do \
when=$$(echo "$$bomb" | sed -E -e 's/^.*timebomb ([0-9]+).*/\1/'); \
if [ "$$when" -le "$$now" ]; then \
echo "$$bomb"; \
found=found; \
fi; \
done < <(git grep --line-number '^[ ]*timebomb '); \
if [[ -n "$$found" ]]; then \
echo ""; \
echo "****** FATAL: Please check/fix expired timebomb(s) ^^^^^^"; \
false; \
fi
IMPORT_IMG_SFX: ## Generate a new date-based import-image suffix, store in the file IMPORT_IMG_SFX
$(file >$@,$(shell date --utc +%Y%m%dt%H%M%Sz)-f$(FEDORA_RELEASE)f$(PRIOR_FEDORA_RELEASE)d$(subst .,,$(DEBIAN_RELEASE)))
@echo "$(file <IMPORT_IMG_SFX)"
# Given the path to a file containing 'sha256:<image id>' return <image id>
# or throw error if empty.
define imageid
$(if $(file < $(1)),$(subst sha256:,,$(file < $(1))),$(error Container IID file $(1) doesn't exist or is empty))
endef
# This is intended for use by humans, to debug the image_builder_task in .cirrus.yml
# as well as the scripts under the ci subdirectory. See the 'image_builder_debug`
# target if debugging of the packer builds is necessary.
.PHONY: ci_debug
ci_debug: $(_TEMPDIR)/ci_debug.iid ## Build and enter container for local development/debugging of container-based Cirrus-CI tasks
ci_debug: $(_TEMPDIR)/ci_debug.tar ## Build and enter container for local development/debugging of container-based Cirrus-CI tasks
/usr/bin/podman run -it --rm \
--security-opt label=disable \
-v $(_MKFILE_DIR):$(_MKFILE_DIR) -w $(_MKFILE_DIR) \
@ -175,18 +142,19 @@ ci_debug: $(_TEMPDIR)/ci_debug.iid ## Build and enter container for local develo
-e GAC_FILEPATH=$(GAC_FILEPATH) \
-e AWS_SHARED_CREDENTIALS_FILE=$(AWS_SHARED_CREDENTIALS_FILE) \
-e TEMPDIR=$(_TEMPDIR) \
$(call imageid,$<) $(if $(DBG_TEST_CMD),$(DBG_TEST_CMD),)
docker-archive:$<
# Takes 3 arguments: IID filepath, FQIN, context dir
# Takes 4 arguments: export filepath, FQIN, context dir
define podman_build
podman build -t $(2) \
--iidfile=$(1) \
--build-arg CENTOS_STREAM_RELEASE=$(CENTOS_STREAM_RELEASE) \
--build-arg PACKER_VERSION=$(call err_if_empty,PACKER_VERSION) \
-f $(3)/Containerfile .
rm -f $(1)
podman save --quiet -o $(1) $(2)
endef
$(_TEMPDIR)/ci_debug.iid: $(_TEMPDIR) $(wildcard ci/*)
$(_TEMPDIR)/ci_debug.tar: $(_TEMPDIR) $(wildcard ci/*)
$(call podman_build,$@,ci_debug,ci)
$(_TEMPDIR):
@ -229,7 +197,7 @@ $(_TEMPDIR)/user-data: $(_TEMPDIR) $(_TEMPDIR)/cidata.ssh.pub $(_TEMPDIR)/cidata
cidata: $(_TEMPDIR)/user-data $(_TEMPDIR)/meta-data
define build_podman_container
$(MAKE) $(_TEMPDIR)/$(1).iid BASE_TAG=$(2)
$(MAKE) $(_TEMPDIR)/$(1).tar BASE_TAG=$(2)
endef
# First argument is the path to the template JSON
@ -257,17 +225,14 @@ image_builder: image_builder/manifest.json ## Create image-building image and im
image_builder/manifest.json: image_builder/gce.json image_builder/setup.sh lib.sh systemd_banish.sh $(PACKER_INSTALL_DIR)/packer
$(call packer_build,image_builder/gce.json)
# Note: It's assumed there are important files in the callers $HOME
# needed for debugging (.gitconfig, .ssh keys, etc.). It's unsafe
# to assume $(_MKFILE_DIR) is also under $HOME. Both are mounted
# for good measure.
# Note: We assume this repo is checked out somewhere under the caller's
# home-dir for bind-mounting purposes. Otherwise possibly necessary
# files/directories like $HOME/.gitconfig or $HOME/.ssh/ won't be available
# from inside the debugging container.
.PHONY: image_builder_debug
image_builder_debug: $(_TEMPDIR)/image_builder_debug.iid ## Build and enter container for local development/debugging of targets requiring packer + virtualization
image_builder_debug: $(_TEMPDIR)/image_builder_debug.tar ## Build and enter container for local development/debugging of targets requiring packer + virtualization
/usr/bin/podman run -it --rm \
--security-opt label=disable \
-v $$HOME:$$HOME \
-v $(_MKFILE_DIR):$(_MKFILE_DIR) \
-w $(_MKFILE_DIR) \
--security-opt label=disable -v $$HOME:$$HOME -w $(_MKFILE_DIR) \
-v $(_TEMPDIR):$(_TEMPDIR) \
-v $(call err_if_empty,GAC_FILEPATH):$(GAC_FILEPATH) \
-v $(call err_if_empty,AWS_SHARED_CREDENTIALS_FILE):$(AWS_SHARED_CREDENTIALS_FILE) \
@ -275,13 +240,119 @@ image_builder_debug: $(_TEMPDIR)/image_builder_debug.iid ## Build and enter cont
-e PACKER_INSTALL_DIR=/usr/local/bin \
-e PACKER_VERSION=$(call err_if_empty,PACKER_VERSION) \
-e IMG_SFX=$(call err_if_empty,_IMG_SFX) \
-e IMPORT_IMG_SFX=$(call err_if_empty,_IMPORT_IMG_SFX) \
-e GAC_FILEPATH=$(GAC_FILEPATH) \
-e AWS_SHARED_CREDENTIALS_FILE=$(AWS_SHARED_CREDENTIALS_FILE) \
$(call imageid,$<) $(if $(DBG_TEST_CMD),$(DBG_TEST_CMD))
docker-archive:$<
$(_TEMPDIR)/image_builder_debug.iid: $(_TEMPDIR) $(wildcard image_builder/*)
$(_TEMPDIR)/image_builder_debug.tar: $(_TEMPDIR) $(wildcard image_builder/*)
$(call podman_build,$@,image_builder_debug,image_builder)
# Avoid re-downloading unnecessarily
# Ref: https://www.gnu.org/software/make/manual/html_node/Special-Targets.html#Special-Targets
.PRECIOUS: $(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).$(IMPORT_FORMAT)
$(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).$(IMPORT_FORMAT): $(_TEMPDIR)
bash import_images/handle_image.sh \
$@ \
$(call err_if_empty,FEDORA_IMAGE_URL) \
$(call err_if_empty,FEDORA_CSUM_URL)
$(_TEMPDIR)/fedora-aws-arm64-$(_IMPORT_IMG_SFX).$(IMPORT_FORMAT): $(_TEMPDIR)
bash import_images/handle_image.sh \
$@ \
$(call err_if_empty,FEDORA_ARM64_IMAGE_URL) \
$(call err_if_empty,FEDORA_ARM64_CSUM_URL)
$(_TEMPDIR)/%.md5: $(_TEMPDIR)/%.$(IMPORT_FORMAT)
openssl md5 -binary $< | base64 > $@.tmp
mv $@.tmp $@
# MD5 metadata value checked by AWS after upload + 5 retries.
# Cache disabled to avoid sync. issues w/ vmimport service if
# image re-uploaded.
# TODO: Use sha256 from ..._CSUM_URL file instead of recalculating
# https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
# Avoid re-uploading unnecessarily
.SECONDARY: $(_TEMPDIR)/%.uploaded
# Upload the image file to S3 ahead of snapshot import.  The pre-computed
# MD5 is passed as Content-MD5 so AWS verifies upload integrity server-side.
# Any stale object is removed first ('-' prefix ignores a missing-key error)
# to avoid vmimport-service sync issues when an image is re-uploaded.
# N/B: '%' is NOT expanded inside a pattern-rule recipe; the stem must be
# referenced via the automatic variable '$*'.
$(_TEMPDIR)/%.uploaded: $(_TEMPDIR)/%.$(IMPORT_FORMAT) $(_TEMPDIR)/%.md5
	-$(AWS) s3 rm --quiet s3://packer-image-import/$*.$(IMPORT_FORMAT)
	$(AWS) s3api put-object \
		--content-md5 "$(file < $(_TEMPDIR)/$*.md5)" \
		--content-encoding binary/octet-stream \
		--cache-control no-cache \
		--bucket packer-image-import \
		--key $*.$(IMPORT_FORMAT) \
		--body $(_TEMPDIR)/$*.$(IMPORT_FORMAT) > $@.tmp
	mv $@.tmp $@
# For whatever reason, the 'Format' value must be all upper-case.
# Avoid creating unnecessary/duplicate import tasks
.SECONDARY: $(_TEMPDIR)/%.import_task_id
$(_TEMPDIR)/%.import_task_id: $(_TEMPDIR)/%.uploaded
$(AWS) ec2 import-snapshot \
--disk-container Format=$(shell tr '[:lower:]' '[:upper:]'<<<"$(IMPORT_FORMAT)"),UserBucket="{S3Bucket=packer-image-import,S3Key=$*.$(IMPORT_FORMAT)}" > $@.tmp.json
@cat $@.tmp.json
jq -r -e .ImportTaskId $@.tmp.json > $@.tmp
mv $@.tmp $@
# Avoid importing multiple snapshots for the same image
.PRECIOUS: $(_TEMPDIR)/%.snapshot_id
$(_TEMPDIR)/%.snapshot_id: $(_TEMPDIR)/%.import_task_id
bash import_images/wait_import_task.sh "$<" > $@.tmp
mv $@.tmp $@
define _register_sed
sed -r \
-e 's/@@@NAME@@@/$(1)/' \
-e 's/@@@IMPORT_IMG_SFX@@@/$(_IMPORT_IMG_SFX)/' \
-e 's/@@@ARCH@@@/$(2)/' \
-e 's/@@@SNAPSHOT_ID@@@/$(3)/' \
import_images/register.json.in \
> $(4)
endef
$(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).register.json: $(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).snapshot_id import_images/register.json.in
$(call _register_sed,fedora-aws,x86_64,$(file <$<),$@)
$(_TEMPDIR)/fedora-aws-arm64-$(_IMPORT_IMG_SFX).register.json: $(_TEMPDIR)/fedora-aws-arm64-$(_IMPORT_IMG_SFX).snapshot_id import_images/register.json.in
$(call _register_sed,fedora-aws-arm64,arm64,$(file <$<),$@)
# Avoid multiple registrations for the same image
.PRECIOUS: $(_TEMPDIR)/%.ami.id
$(_TEMPDIR)/%.ami.id: $(_TEMPDIR)/%.register.json
$(AWS) ec2 register-image --cli-input-json "$$(<$<)" > $@.tmp.json
cat $@.tmp.json
jq -r -e .ImageId $@.tmp.json > $@.tmp
mv $@.tmp $@
$(_TEMPDIR)/%.ami.name: $(_TEMPDIR)/%.register.json
jq -r -e .Name $< > $@.tmp
mv $@.tmp $@
$(_TEMPDIR)/%.ami.json: $(_TEMPDIR)/%.ami.id $(_TEMPDIR)/%.ami.name
$(AWS) ec2 create-tags \
--resources "$$(<$(_TEMPDIR)/$*.ami.id)" \
--tags \
Key=Name,Value=$$(<$(_TEMPDIR)/$*.ami.name) \
Key=automation,Value=false
$(AWS) --output table ec2 describe-images --image-ids "$$(<$(_TEMPDIR)/$*.ami.id)" \
| tee $@
.PHONY: import_images
import_images: $(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).ami.json $(_TEMPDIR)/fedora-aws-arm64-$(_IMPORT_IMG_SFX).ami.json import_images/manifest.json.in ## Import generic Fedora cloud images into AWS EC2.
sed -r \
-e 's/@@@IMG_SFX@@@/$(_IMPORT_IMG_SFX)/' \
-e 's/@@@CIRRUS_TASK_ID@@@/$(CIRRUS_TASK_ID)/' \
import_images/manifest.json.in \
> import_images/manifest.json
@echo "Image import(s) successful."
@echo "############################################################"
@echo "Please update IMPORT_IMG_SFX file with value:"
@echo ""
@echo "$(_IMPORT_IMG_SFX)"
@echo ""
@echo "############################################################"
.PHONY: base_images
# This needs to run in a virt/nested-virt capable environment
base_images: base_images/manifest.json ## Create, prepare, and import base-level images into GCE.
@ -308,80 +379,77 @@ fedora_podman: ## Build Fedora podman development container
prior-fedora_podman: ## Build Prior-Fedora podman development container
$(call build_podman_container,$@,$(PRIOR_FEDORA_RELEASE))
$(_TEMPDIR)/%_podman.iid: podman/Containerfile podman/setup.sh $(wildcard base_images/*.sh) $(_TEMPDIR) $(wildcard cache_images/*.sh)
$(_TEMPDIR)/%_podman.tar: podman/Containerfile podman/setup.sh $(wildcard base_images/*.sh) $(_TEMPDIR) $(wildcard cache_images/*.sh)
podman build -t $*_podman:$(call err_if_empty,_IMG_SFX) \
--security-opt seccomp=unconfined \
--iidfile=$@ \
--build-arg=BASE_NAME=$(subst prior-,,$*) \
--build-arg=BASE_TAG=$(call err_if_empty,BASE_TAG) \
--build-arg=PACKER_BUILD_NAME=$(subst _podman,,$*) \
--build-arg=IMG_SFX=$(_IMG_SFX) \
--build-arg=CIRRUS_TASK_ID=$(CIRRUS_TASK_ID) \
--build-arg=GIT_HEAD=$(call err_if_empty,GIT_HEAD) \
-f podman/Containerfile .
rm -f $@
podman save --quiet -o $@ $*_podman:$(_IMG_SFX)
.PHONY: skopeo_cidev
skopeo_cidev: $(_TEMPDIR)/skopeo_cidev.iid ## Build Skopeo development and CI container
$(_TEMPDIR)/skopeo_cidev.iid: $(_TEMPDIR) $(wildcard skopeo_base/*)
skopeo_cidev: $(_TEMPDIR)/skopeo_cidev.tar ## Build Skopeo development and CI container
$(_TEMPDIR)/skopeo_cidev.tar: $(_TEMPDIR) $(wildcard skopeo_base/*)
podman build -t skopeo_cidev:$(call err_if_empty,_IMG_SFX) \
--iidfile=$@ \
--security-opt seccomp=unconfined \
--build-arg=BASE_TAG=$(FEDORA_RELEASE) \
skopeo_cidev
rm -f $@
podman save --quiet -o $@ skopeo_cidev:$(_IMG_SFX)
.PHONY: ccia
ccia: $(_TEMPDIR)/ccia.iid ## Build the Cirrus-CI Artifacts container image
$(_TEMPDIR)/ccia.iid: ccia/Containerfile $(_TEMPDIR)
ccia: $(_TEMPDIR)/ccia.tar ## Build the Cirrus-CI Artifacts container image
$(_TEMPDIR)/ccia.tar: ccia/Containerfile $(_TEMPDIR)
$(call podman_build,$@,ccia:$(call err_if_empty,_IMG_SFX),ccia)
# Note: This target only builds imgts:c$(_IMG_SFX) it does not push it to
# any container registry which may be required for targets which
# depend on it as a base-image. In CI, pushing is handled automatically
# by the 'ci/make_container_images.sh' script.
.PHONY: imgts
imgts: imgts/Containerfile imgts/entrypoint.sh imgts/google-cloud-sdk.repo imgts/lib_entrypoint.sh $(_TEMPDIR) ## Build the VM image time-stamping container image
$(call podman_build,/dev/null,imgts:$(call err_if_empty,_IMG_SFX),imgts)
-rm $(_TEMPDIR)/$@.iid
.PHONY: bench_stuff
bench_stuff: $(_TEMPDIR)/bench_stuff.tar ## Build the Cirrus-CI Artifacts container image
$(_TEMPDIR)/bench_stuff.tar: bench_stuff/Containerfile $(_TEMPDIR)
$(call podman_build,$@,bench_stuff:$(call err_if_empty,_IMG_SFX),bench_stuff)
.PHONY: imgts
imgts: $(_TEMPDIR)/imgts.tar ## Build the VM image time-stamping container image
$(_TEMPDIR)/imgts.tar: imgts/Containerfile imgts/entrypoint.sh imgts/google-cloud-sdk.repo imgts/lib_entrypoint.sh $(_TEMPDIR)
$(call podman_build,$@,imgts:$(call err_if_empty,_IMG_SFX),imgts)
# Helper function to build images which depend on imgts:latest base image
# N/B: There is no make dependency resolution on imgts.iid on purpose,
# imgts:c$(_IMG_SFX) is assumed to have already been pushed to quay.
# See imgts target above.
define imgts_base_podman_build
podman image exists $(_IMGTS_FQIN) || podman pull $(_IMGTS_FQIN)
podman image exists imgts:latest || podman tag $(_IMGTS_FQIN) imgts:latest
podman load -i $(_TEMPDIR)/imgts.tar
podman tag imgts:$(call err_if_empty,_IMG_SFX) imgts:latest
$(call podman_build,$@,$(1):$(call err_if_empty,_IMG_SFX),$(1))
endef
.PHONY: imgobsolete
imgobsolete: $(_TEMPDIR)/imgobsolete.iid ## Build the VM Image obsoleting container image
$(_TEMPDIR)/imgobsolete.iid: imgts/lib_entrypoint.sh imgobsolete/Containerfile imgobsolete/entrypoint.sh $(_TEMPDIR)
imgobsolete: $(_TEMPDIR)/imgobsolete.tar ## Build the VM Image obsoleting container image
$(_TEMPDIR)/imgobsolete.tar: $(_TEMPDIR)/imgts.tar imgts/lib_entrypoint.sh imgobsolete/Containerfile imgobsolete/entrypoint.sh $(_TEMPDIR)
$(call imgts_base_podman_build,imgobsolete)
.PHONY: imgprune
imgprune: $(_TEMPDIR)/imgprune.iid ## Build the VM Image pruning container image
$(_TEMPDIR)/imgprune.iid: imgts/lib_entrypoint.sh imgprune/Containerfile imgprune/entrypoint.sh $(_TEMPDIR)
imgprune: $(_TEMPDIR)/imgprune.tar ## Build the VM Image pruning container image
$(_TEMPDIR)/imgprune.tar: $(_TEMPDIR)/imgts.tar imgts/lib_entrypoint.sh imgprune/Containerfile imgprune/entrypoint.sh $(_TEMPDIR)
$(call imgts_base_podman_build,imgprune)
.PHONY: gcsupld
gcsupld: $(_TEMPDIR)/gcsupld.iid ## Build the GCS Upload container image
$(_TEMPDIR)/gcsupld.iid: imgts/lib_entrypoint.sh gcsupld/Containerfile gcsupld/entrypoint.sh $(_TEMPDIR)
gcsupld: $(_TEMPDIR)/gcsupld.tar ## Build the GCS Upload container image
$(_TEMPDIR)/gcsupld.tar: $(_TEMPDIR)/imgts.tar imgts/lib_entrypoint.sh gcsupld/Containerfile gcsupld/entrypoint.sh $(_TEMPDIR)
$(call imgts_base_podman_build,gcsupld)
.PHONY: orphanvms
orphanvms: $(_TEMPDIR)/orphanvms.iid ## Build the Orphaned VM container image
$(_TEMPDIR)/orphanvms.iid: imgts/lib_entrypoint.sh orphanvms/Containerfile orphanvms/entrypoint.sh orphanvms/_gce orphanvms/_ec2 $(_TEMPDIR)
orphanvms: $(_TEMPDIR)/orphanvms.tar ## Build the Orphaned VM container image
$(_TEMPDIR)/orphanvms.tar: $(_TEMPDIR)/imgts.tar imgts/lib_entrypoint.sh orphanvms/Containerfile orphanvms/entrypoint.sh orphanvms/_gce orphanvms/_ec2 $(_TEMPDIR)
$(call imgts_base_podman_build,orphanvms)
.PHONY: .get_ci_vm
get_ci_vm: $(_TEMPDIR)/get_ci_vm.iid ## Build the get_ci_vm container image
$(_TEMPDIR)/get_ci_vm.iid: lib.sh get_ci_vm/Containerfile get_ci_vm/entrypoint.sh get_ci_vm/setup.sh $(_TEMPDIR)
podman build --iidfile=$@ -t get_ci_vm:$(call err_if_empty,_IMG_SFX) -f get_ci_vm/Containerfile ./
get_ci_vm: $(_TEMPDIR)/get_ci_vm.tar ## Build the get_ci_vm container image
$(_TEMPDIR)/get_ci_vm.tar: lib.sh get_ci_vm/Containerfile get_ci_vm/entrypoint.sh get_ci_vm/setup.sh $(_TEMPDIR)
podman build -t get_ci_vm:$(call err_if_empty,_IMG_SFX) -f get_ci_vm/Containerfile .
rm -f $@
podman save --quiet -o $@ get_ci_vm:$(_IMG_SFX)
.PHONY: clean
clean: ## Remove all generated files referenced in this Makefile
-rm -rf $(_TEMPDIR)
-rm -f image_builder/*.json
-rm -f *_images/{*.json,cidata*,*-data}
-podman rmi imgts:latest
-podman rmi $(_IMGTS_FQIN)
-rm -f ci_debug.tar

View File

@ -1,108 +0,0 @@
The README here is waaaaaay too complicated for Ed. So here is a
simplified version of the typical things you need to do.
Super Duper Simplest Case
=========================
This is by far the most common case, and the simplest to understand.
You do this when you want to build VMs with newer package versions than
whatever VMs are currently set up in CI. You really need to
understand this before you get into anything more complicated.
```
$ git checkout -b lets-see-what-happens
$ make IMG_SFX
$ git commit -asm"Let's just see what happens"
```
...and push that as a PR.
If you're lucky, in about an hour you will get an email from `github-actions[bot]`
with a nice table of base and cache images, with links. I strongly encourage you
to try to get Ed's
[cirrus-vm-get-versions](https://github.com/edsantiago/containertools/tree/main/cirrus-vm-get-versions)
script working, because this will give you a very quick easy reliable
list of what packages have changed. You don't need this, but life will be painful
for you without it.
(If you're not lucky, the build will break. There are infinite ways for
this to happen, so you're on your own here. Ask for help! This is a great
team, and one or more people may quickly realize the problem.)
Once you have new VMs built, **test in an actual project**! Usually podman
and buildah, but you may want the varks too:
```
$ cd ~/src/github/containers/podman   # or wherever
$ git checkout -b test-new-vms
$ vim .cirrus.yml
[ search for "c202", and replace with your new IMG_SFX.]
[ Don't forget the leading "c"! ]
$ git commit -as
[ Please include a link to the automation_images PR! ]
```
Push this PR and see what happens. If you're very lucky, it will
pass on this and other repos. Get your podman/buildah/vark PRs
reviewed and merged, and then review-merge the automation_images one.
Pushing (har har) Your Luck
---------------------------
Feel lucky? Tag this VM build, so `dependabot` will create PRs
on all the myriad container repos:
```
$ git tag $(<IMG_SFX)
$ git push --no-verify upstream $(<IMG_SFX)
```
Within a few hours you'll see a ton of PRs. It is very likely that
something will go wrong in one or two, and if so, it's impossible to
cover all possibilities. As above, ask for help.
More Complicated Cases
======================
These are the next two most common.
Bumping One Package
-------------------
Quite often we need an emergency bump of only one package that
is not yet stable. Here are examples of the two most typical
cases,
[crun](https://github.com/containers/automation_images/pull/386/files) and
[pasta](https://github.com/containers/automation_images/pull/383/files).
Note the `timebomb` directives. Please use these: the time you save
may be your own, one future day. And please use 2-6 week times.
A timebomb that expires in a year is going to be hard to understand
when it goes off.
Bumping Distros
---------------
Like Fedora 40 to 41. Edit `Makefile`. Change `FEDORA`, `PRIOR_FEDORA`,
and `RAWHIDE`, then proceed with Simple Case.
There is almost zero chance that this will work on the first try.
Sorry, that's just the way it is. See the
[F40 to F41 PR](https://github.com/containers/automation_images/pull/392/files)
for a not-atypical example.
STRONG RECOMMENDATION
=====================
Read [check-imgsfx.sh](check-imgsfx.sh) and follow its instructions. Ed
likes to copy that to `.git/hooks/pre-push`, Chris likes using some
external tool that Ed doesn't trust. Use your judgment.
The reason for this is that you are going to forget to `make IMG_SFX`
one day, and then you're going to `git push --force` an update and walk
away, and come back to a failed run because `IMG_SFX` must always
always always be brand new.
Weak Recommendation
-------------------
Ed likes to fiddle with `IMG_SFX`, zeroing out to the nearest
quarter hour. Absolutely unnecessary, but easier on the eyes
when trying to see which VMs are in use or when comparing
diffs.

View File

@ -52,7 +52,7 @@ However, all steps are listed below for completeness.
For more information on the overall process of importing custom GCE VM
Images, please [refer to the documentation](https://cloud.google.com/compute/docs/import/import-existing-image). For references to the latest pre-build AWS
EC2 Fedora AMI's see [the
upstream cloud page](https://fedoraproject.org/cloud/download).
upstream cloud page](https://alt.fedoraproject.org/cloud/).
For more information on the primary tool (*packer*) used for this process,
please [see its documentation page](https://www.packer.io/docs).
@ -264,11 +264,13 @@ then automatically pushed to:
* https://quay.io/repository/libpod/fedora_podman
* https://quay.io/repository/libpod/prior-fedora_podman
* https://quay.io/repository/libpod/debian_podman
The meaning of *prior* and *current*, is defined by the contents of
the `*_RELEASE` values in the `Makefile`. The images will be tagged
with the value within the `IMG_SFX` file. Additionally, the most
recently merged PR on this repo will tag its images `latest`.
the `*_release` files within the `podman` subdirectory. This is
necessary to support the Makefile target being used manually
(e.g. debugging). These files must be updated manually when introducing
a new VM image version.
### Tooling
@ -290,7 +292,8 @@ the following are built:
In all cases, when automation runs on a branch (i.e. after a PR is merged)
the actual image tagged `latest` will be pushed. When running in a PR,
only validation and test images are produced.
only validation and test images are produced. This behavior is controlled
by a combination of the `$PUSH_LATEST` and `$CIRRUS_PR` variables.
## The Base Images (overview step 3)
@ -374,11 +377,10 @@ infinite-growth of the VM image count.
# Debugging / Locally driving VM Image production
Much of the CI and image-build process is containerized, so it may be debugged
locally on your laptop/workstation. However, this process will
Because the entire automated build process is containerized, it may easily be
performed locally on your laptop/workstation. However, this process will
still involve interfacing with GCE and AWS. Therefore, you must be in possession
of a *Google Application Credentials* (GAC) JSON and
[AWS credentials INI file](https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html#file-format-creds).
of a *Google Application Credentials* (GAC) JSON and AWS credentials INI file.
The GAC JSON file should represent a service account (contrasted to a user account,
which always uses OAuth2). The name of the service account doesn't matter,
@ -399,52 +401,44 @@ one the following (custom) IAM policies enabled:
Somebody familiar with Google and AWS IAM will need to provide you with the
credential files and ensure correct account configuration. Having these files
stored *in your home directory* on your laptop/workstation, the process of
building and entering the debug containers is as follows:
producing images proceeds as follows:
1. Ensure you have podman installed, and lots of available network and CPU
resources (i.e. turn off YouTube, shut down background VMs and other hungry
tasks).
2. Build and enter either the `ci_debug` or the `image_builder_debug` container
image, by executing:
tasks). Build the image-builder container image, by executing
```
make <ci_debug|image_builder_debug> \
GAC_FILEPATH=</home/path/to/gac.json> \
AWS_SHARED_CREDENTIALS_FILE=</path/to/credentials>
make image_builder_debug GAC_FILEPATH=</home/path/to/gac.json> \
AWS_SHARED_CREDENTIALS_FILE=</path/to/credentials>
```
* The `ci_debug` image is significantly smaller, and only intended for rudimentary
cases, for example running the scripts under the `ci` subdirectory.
* The `image_builder_debug` image is larger, and has KVM virtualization enabled.
It's needed for more extensive debugging of the packer-based image builds.
2. You will be dropped into a debugging container, inside a volume-mount of
the repository root. This container is practically identical to the VM
produced and used in *overview step 1*. If changes are made, the container
image should be re-built to reflect them.
3. Both containers will place you in the default shell, inside a volume-mount of
the repository root. This environment is practically identical to what is
used in Cirrus-CI.
3. If you wish to build only a subset of available images, list the names
you want as comma-separated values of the `PACKER_BUILDS` variable. Be
sure you *export* this variable so that `make` has access to it. For
example, `export PACKER_BUILDS=debian,prior-fedora`.
4. For the `image_builder_debug` container, If you wish to build only a subset
of available images, list the names you want as comma-separated values of the
`PACKER_BUILDS` variable. Be sure you *export* this variable so that `make`
has access to it. For example, `export PACKER_BUILDS=debian,prior-fedora`.
5. Still within the container, again ensure you have plenty of network and CPU
4. Still within the container, again ensure you have plenty of network and CPU
resources available. Build the VM Base images by executing the command
``make base_images``. This is the equivalent operation as documented by
*overview step 2*. ***N/B*** The GCS -> GCE image conversion can take
some time, be patient. Packer may not produce any output for several minutes
while the conversion is happening.
6. When successful, the names of the produced images will all be referenced
5. When successful, the names of the produced images will all be referenced
in the `base_images/manifest.json` file. If there are problems, fix them
and remove the `manifest.json` file. Then re-run the same *make* command
as before, packer will force-overwrite any broken/partially created
images automatically.
7. Produce the VM Cache Images, equivalent to the operations outlined
6. Produce the VM Cache Images, equivalent to the operations outlined
in *overview step 3*. Execute the following command (still within the
debug image-builder container): ``make cache_images``.
8. Again when successful, you will find the image names are written into
7. Again when successful, you will find the image names are written into
the `cache_images/manifest.json` file. If there is a problem, remove
this file, fix the problem, and re-run the `make` command. No cleanup
is necessary, leftover/disused images will be automatically cleaned up

View File

@ -26,6 +26,8 @@ variables: # Empty value means it must be passed in on command-line
PRIOR_FEDORA_IMAGE_URL: "{{env `PRIOR_FEDORA_IMAGE_URL`}}"
PRIOR_FEDORA_CSUM_URL: "{{env `PRIOR_FEDORA_CSUM_URL`}}"
FEDORA_IMPORT_IMG_SFX: "{{env `FEDORA_IMPORT_IMG_SFX`}}"
DEBIAN_RELEASE: "{{env `DEBIAN_RELEASE`}}"
DEBIAN_BASE_FAMILY: "{{env `DEBIAN_BASE_FAMILY`}}"
@ -61,7 +63,6 @@ builders:
type: 'qemu'
accelerator: "kvm"
qemu_binary: '/usr/libexec/qemu-kvm' # Unique to CentOS, not fedora :(
memory: 12288
iso_url: '{{user `FEDORA_IMAGE_URL`}}'
disk_image: true
format: "raw"
@ -74,12 +75,12 @@ builders:
headless: true
# qemu_binary: "/usr/libexec/qemu-kvm"
qemuargs: # List-of-list format required to override packer-generated args
- - "-display"
- "none"
- - "-m"
- "1024"
- - "-device"
- "virtio-rng-pci"
- - "-chardev"
- "file,id=pts,path={{user `TTYDEV`}}"
- "tty,id=pts,path={{user `TTYDEV`}}"
- - "-device"
- "isa-serial,chardev=pts"
- - "-netdev"
@ -107,18 +108,20 @@ builders:
- &fedora-aws
name: 'fedora-aws'
type: 'amazon-ebs'
source_ami_filter:
# Many of these search filter values (like account ID and name) aren't publicized
# anywhere. They were found by examining AWS EC2 AMIs published/referenced from
# the AWS sections on https://fedoraproject.org/cloud/download
source_ami_filter: # Will fail if >1 or no AMI found
owners:
- &fedora_accountid 125523088429
most_recent: true # Required b/c >1 search result likely to be returned
# Docs are wrong: specifying the Account ID is required to make AMIs private.
# The Account ID is hard-coded here out of expediency, since passing in
# more packer args from the command-line (in Makefile) is non-trivial.
- &accountid '449134212816'
# It's necessary to 'search' for the base-image by these criteria. If
# more than one image is found, Packer will fail the build (and display
# the conflicting AMI IDs).
filters: &ami_filters
architecture: 'x86_64'
image-type: 'machine'
is-public: 'true'
name: 'Fedora-Cloud-Base*-{{user `FEDORA_RELEASE`}}-*'
is-public: 'false'
name: '{{build_name}}-i{{user `FEDORA_IMPORT_IMG_SFX`}}'
root-device-type: 'ebs'
state: 'available'
virtualization-type: 'hvm'
@ -142,6 +145,7 @@ builders:
volume_type: 'gp2'
delete_on_termination: true
# These are critical and used by security-policy to enforce instance launch limits.
tags: &awstags
<<: *imgcpylabels
# EC2 expects "Name" to be capitalized
@ -155,7 +159,7 @@ builders:
# This is necessary for security - The CI service accounts are not permitted
# to use AMI's from any other account, including public ones.
ami_users:
- &accountid '449134212816'
- *accountid
ssh_username: 'fedora'
ssh_clear_authorized_keys: true
# N/B: Required Packer >= 1.8.0
@ -166,8 +170,7 @@ builders:
name: 'fedora-aws-arm64'
source_ami_filter:
owners:
- *fedora_accountid
most_recent: true # Required b/c >1 search result likely to be returned
- *accountid
filters:
<<: *ami_filters
architecture: 'arm64'
@ -184,23 +187,23 @@ provisioners: # Debian images come bundled with GCE integrations provisioned
- type: 'shell'
inline:
- 'set -e'
- 'mkdir -p /var/tmp/automation_images'
- 'mkdir -p /tmp/automation_images'
- type: 'file'
source: '{{ pwd }}/'
destination: '/var/tmp/automation_images/'
destination: '/tmp/automation_images/'
- except: ['debian']
type: 'shell'
inline:
- 'set -e'
- '/bin/bash /var/tmp/automation_images/base_images/fedora_base-setup.sh'
- '/bin/bash /tmp/automation_images/base_images/fedora_base-setup.sh'
- only: ['debian']
type: 'shell'
inline:
- 'set -e'
- 'env DEBIAN_FRONTEND=noninteractive DEBIAN_RELEASE={{user `DEBIAN_RELEASE`}} /bin/bash /var/tmp/automation_images/base_images/debian_base-setup.sh'
- '/bin/bash /tmp/automation_images/base_images/debian_base-setup.sh'
post-processors:
# Must be double-nested to guarantee execution order

View File

@ -16,17 +16,8 @@ REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
# shellcheck source=./lib.sh
source "$REPO_DIRPATH/lib.sh"
# Cloud-networking in general can sometimes be flaky.
# Increase Apt's tolerance levels.
cat << EOF | $SUDO tee -a /etc/apt/apt.conf.d/99timeouts
// Added during CI VM image build
Acquire::Retries "3";
Acquire::http::timeout "300";
Acquire::https::timeout "300";
EOF
echo "Switch sources to Debian Unstable (SID)"
cat << EOF | $SUDO tee /etc/apt/sources.list
# Switch to Debian Unstable (SID)
cat << EOF | sudo tee /etc/apt/sources.list
deb http://deb.debian.org/debian/ unstable main
deb-src http://deb.debian.org/debian/ unstable main
EOF
@ -37,6 +28,7 @@ PKGS=( \
curl
cloud-init
gawk
git
openssh-client
openssh-server
rng-tools5
@ -44,46 +36,40 @@ PKGS=( \
)
echo "Updating package source lists"
( set -x; $SUDO apt-get -q -y update; )
# Only deps for automation tooling
( set -x; $SUDO apt-get -q -y install git )
install_automation_tooling
# Ensure automation library is loaded
source "$REPO_DIRPATH/lib.sh"
# Workaround 12->13 forward-incompatible change in grub scripts.
# Without this, updating to the SID kernel may fail.
echo "Upgrading grub-common"
( set -x; $SUDO apt-get -q -y upgrade grub-common; )
$SUDO apt-get -qq -y update
echo "Upgrading to SID"
( set -x; $SUDO apt-get -q -y full-upgrade; )
$SUDO apt-get -qq -y full-upgrade
echo "Installing basic, necessary packages."
( set -x; $SUDO apt-get -q -y install "${PKGS[@]}"; )
$SUDO apt-get -qq -y install "${PKGS[@]}"
# compatibility / usefulness of all automated scripting (which is bash-centric)
( set -x; $SUDO DEBCONF_DB_OVERRIDE='File{'$SCRIPT_DIRPATH/no_dash.dat'}' \
dpkg-reconfigure dash; )
$SUDO DEBCONF_DB_OVERRIDE='File{'$SCRIPT_DIRPATH/no_dash.dat'}' \
dpkg-reconfigure dash
# Ref: https://wiki.debian.org/DebianReleases
# CI automation needs an OS version/release number for a variety of uses.
# However, After switching to Unstable/SID, the value from the usual source
# is not available. Simply use the value passed through packer by the Makefile.
req_env_vars DEBIAN_RELEASE
# shellcheck disable=SC2154
warn "Setting '$DEBIAN_RELEASE' as the release number for CI-automation purposes."
( set -x; echo "VERSION_ID=\"$DEBIAN_RELEASE\"" | \
$SUDO tee -a /etc/os-release; )
# CI automation needs a *sortable* OS version/release number to select/perform/apply
# runtime configuration and workarounds. Since switching to Unstable/SID, a
# numeric release version is not available. While an imperfect solution,
# base an artificial version off the 'base-files' package version, zero-padding
# the minor component to ensure sortability (i.e. "12.02" < "12.13").
base_files_version=$(dpkg -s base-files | awk '/Version:/{print $2}')
base_major=$(cut -d. -f 1 <<<"$base_files_version")
base_minor=$(cut -d. -f 2 <<<"$base_files_version")
sortable_version=$(printf "%02d.%02d" $base_major $base_minor)
echo "WARN: This is NOT an official version number. It's for CI-automation purposes only."
echo "VERSION_ID=\"$sortable_version\"" | \
$SUDO tee -a /etc/os-release
install_automation_tooling
if ! ((CONTAINER)); then
custom_cloud_init
( set -x; $SUDO systemctl enable rngd; )
$SUDO systemctl enable rngd
# Cloud-config fails to enable this for some reason or another
( set -x; $SUDO sed -i -r \
$SUDO sed -i -r \
-e 's/^PermitRootLogin no/PermitRootLogin prohibit-password/' \
/etc/ssh/sshd_config; )
/etc/ssh/sshd_config
fi
finalize

View File

@ -18,6 +18,7 @@ source "$REPO_DIRPATH/lib.sh"
declare -a PKGS
PKGS=(rng-tools git coreutils cloud-init)
XARGS=--disablerepo=updates
if ! ((CONTAINER)); then
# Packer defines this automatically for us
# shellcheck disable=SC2154
@ -29,28 +30,20 @@ if ! ((CONTAINER)); then
if ((OS_RELEASE_VER<35)); then
PKGS+=(google-compute-engine-tools)
else
PKGS+=(google-compute-engine-guest-configs google-guest-agent)
PKGS+=(google-compute-engine-guest-configs)
fi
fi
fi
# The Fedora CI VM base images are built using nested-virt with
# limited resources available. Further, cloud-networking in
# general can sometimes be flaky. Increase DNF's tolerance
# levels.
cat << EOF | $SUDO tee -a /etc/dnf/dnf.conf
# Added during CI VM image build
minrate=100
timeout=60
EOF
$SUDO dnf makecache
$SUDO dnf -y update
$SUDO dnf -y install "${PKGS[@]}"
# Occasionally following an install, there are more updates available.
# This may be due to activation of suggested/recommended dependency resolution.
$SUDO dnf -y update
# Due to https://bugzilla.redhat.com/show_bug.cgi?id=1907030
# updates cannot be installed or even looked at during this stage.
# Pawn the problem off to the cache-image stage where more memory
# is available and debugging is also easier. Try to save some more
# memory by pre-populating repo metadata prior to any transactions.
$SUDO dnf makecache $XARGS
# Updates disable, see comment above
# $SUDO dnf -y update $XARGS
$SUDO dnf -y install $XARGS "${PKGS[@]}"
if ! ((CONTAINER)); then
$SUDO systemctl enable rngd
@ -90,9 +83,7 @@ if ! ((CONTAINER)); then
# This is necessary to prevent permission-denied errors on service-start
# and also on the off-chance the package gets updated and context reset.
$SUDO semanage fcontext --add --type bin_t /usr/bin/cloud-init
# This used restorecon before so we don't have to specify the file_contexts.local
# manually, however with f42 that stopped working: https://bugzilla.redhat.com/show_bug.cgi?id=2360183
$SUDO setfiles -v /etc/selinux/targeted/contexts/files/file_contexts.local /usr/bin/cloud-init
$SUDO restorecon -v /usr/bin/cloud-init
else # GCP Image
echo "Setting GCP startup service (for Cirrus-CI agent) SELinux unconfined"
# ref: https://cloud.google.com/compute/docs/startupscript
@ -104,4 +95,10 @@ if ! ((CONTAINER)); then
/lib/$METADATA_SERVICE_PATH | $SUDO tee -a /etc/$METADATA_SERVICE_PATH
fi
if [[ "$OS_RELEASE_ID" == "fedora" ]] && ((OS_RELEASE_VER>=33)); then
# Ref: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=783509
echo "Disabling automatic /tmp (tmpfs) mount"
$SUDO systemctl mask tmp.mount
fi
finalize

32
bench_stuff/Containerfile Normal file
View File

@ -0,0 +1,32 @@
# Multi-stage build for the bench_stuff container image.
# Base image name/tag may be overridden at build time (e.g. for testing).
ARG BASE_NAME=registry.fedoraproject.org/fedora-minimal
ARG BASE_TAG=latest
FROM ${BASE_NAME}:${BASE_TAG} as updated_base
RUN microdnf update -y && \
microdnf clean all
# Package lists shared by the builder and final stages (ENV is inherited
# by every stage that uses FROM updated_base).
ENV _RUNTIME_DEPS="bash python3"
# NOTE(review): Fedora's C++ compiler package is conventionally named
# 'gcc-c++'; confirm 'g++' actually resolves under microdnf here.
ENV _BUILD_DEPS="coreutils curl git python3 python3-pip python3-virtualenv python3-devel gcc g++"
FROM updated_base as builder
# Install build deps, then install the 'bench_stuff' component of the
# containers/automation tooling under /usr/share.
RUN microdnf install -y ${_RUNTIME_DEPS} ${_BUILD_DEPS} && \
export INSTALL_PREFIX=/usr/share && \
curl -sL \
https://raw.githubusercontent.com/containers/automation/main/bin/install_automation.sh | \
bash -s latest bench_stuff
FROM updated_base as final
ADD bench_stuff/entrypoint.sh /usr/local/bin/
RUN microdnf install -y ${_RUNTIME_DEPS} && \
microdnf clean all && \
chmod +x /usr/local/bin/entrypoint.sh
# Carry over only the installed tooling + its environment file; build deps
# stay behind in the discarded builder stage.
COPY --from=builder /usr/share/automation /usr/share/automation
COPY --from=builder /etc/automation_environment /etc/automation_environment
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]

31
bench_stuff/README.md Normal file
View File

@ -0,0 +1,31 @@
# Bench Stuff
This container facilitates stuffing podman CI benchmarks into GCE firebase.
For more details, [see the automation
documentation](https://github.com/containers/automation/blob/main/bench_stuff/README.md).
## Build
This is a multi-stage build with some parallelism possible if stages were
previously cached. For example:
`podman build -t bench_stuff --jobs=4 .`
## Usage
1. Utilize [the ccia container image](https://quay.io/repository/libpod/ccia) to retrieve
benchmark data from a podman Cirrus-CI build into a temporary directory.
```
$ mkdir /tmp/b
$ podman run -it --rm \
-v /tmp/b:/b:Z -w /b \
quay.io/libpod/ccia 1234567890 'benchmark/data'
```
1. Run the `bench_stuff` container, providing it with proper credentials, and a reference
to the temporary directory (containing the benchmark data).
```
$ podman secret create GAC $GOOGLE_APPLICATION_CREDENTIALS
   $ podman run -it --rm -v /tmp/b:/b:Z,ro \
       --secret GAC -e GOOGLE_APPLICATION_CREDENTIALS=/run/secrets/GAC \
       quay.io/libpod/bench_stuff /b
```

23
bench_stuff/entrypoint.sh Normal file
View File

@ -0,0 +1,23 @@
#!/bin/bash

# This script is intended to be the entrypoint for the bench_stuff container image.
# Any other use is unlikely to function as intended.
#
# Usage: entrypoint.sh <search_root> [additional bench_stuff args...]
# Finds every 'benchmarks.env' file under <search_root> and runs
# `bench_stuff` against each containing directory.

# pipefail added so a failure of `find` itself is not silently ignored
set -eo pipefail

source /etc/automation_environment
# shellcheck disable=SC1090,SC2154
source "$AUTOMATION_LIB_PATH/common_lib.sh"

if [[ $# -lt 1 ]] || [[ ! -d "$1" ]]; then
    die "Must be called with the path to an existing search directory; Got '$1'."
fi
search_root="$1"
# Any additional arguments will be passed into the bench_stuff calls
shift

# IFS= + -r: preserve whitespace and literal backslashes in found paths
find "$search_root" -name benchmarks.env | \
    while IFS= read -r line; do
        data_dir=$(dirname "$line")
        bench_stuff "$@" "$data_dir"
    done

35
bench_stuff/test.sh Executable file
View File

@ -0,0 +1,35 @@
#!/bin/bash

# This script is intended to be executed in the bench_stuff container by
# Cirrus-CI.  Any other usage or environment could lead to negative
# outcomes.

set -eo pipefail

SCRIPT_DIRPATH=$(dirname $(realpath "${BASH_SOURCE[0]}"))
source "$SCRIPT_DIRPATH/../lib.sh"

req_env_vars CIRRUS_CI

# No credentials required for dry-run mode (-d), but existing-file check needs
# to be bypassed.
export GOOGLE_APPLICATION_CREDENTIALS=/proc/cpuinfo

echo "Confirming error when no arguments given"
output=$(entrypoint.sh 2>&1 || true)
grep -q 'Must be called with the path to an existing search directory' <<<"$output"

echo "Confirming dry-run execution against dummy-data"
output=$(entrypoint.sh "$SCRIPT_DIRPATH/test_data" -v -d 2>&1)
# Fix: the array actually populated and iterated below is 'expected_lines';
# the original declared 'expected' (the scalar loop variable) as the array.
declare -a expected_lines
expected_lines=(\
"Verbose-mode enabled"
"Dry-run"
"Loading environment"
"Processing Basis"
"Did NOT insert"
)
for expected in "${expected_lines[@]}"; do
    grep -q "$expected" <<<"$output" || \
        die "Did not find '$expected' in output: $output"
done

View File

@ -0,0 +1,13 @@
"Test Name", "CPU Fastest Time", "CPU Slowest Time", "CPU Average Time", "CPU StdDev", "CPU StdDev (Percent)", "MEM Smallest", "MEM Largest", "MEM Average", "MEM StdDev", "MEM StdDev (Percent)"
"podman create", "0.154s", "0.175s", "0.170s", "0.008s", "4.7%", "43376.0KB", "44360.0KB", "43869.3KB", "341.4KB", "0.8%"
"podman image inspect", "0.113s", "0.125s", "0.117s", "0.005s", "4.3%", "40652.0KB", "41876.0KB", "41311.3KB", "374.0KB", "0.9%"
"podman images", "0.123s", "0.236s", "0.151s", "0.039s", "25.8%", "42368.0KB", "43400.0KB", "42828.7KB", "420.3KB", "1.0%"
"podman load [docker]", "0.300s", "0.350s", "0.321s", "0.016s", "5.0%", "47960.0KB", "50028.0KB", "48994.7KB", "714.5KB", "1.5%"
"podman load [oci]", "0.247s", "0.299s", "0.275s", "0.017s", "6.2%", "49460.0KB", "54360.0KB", "50828.0KB", "1611.5KB", "3.2%"
"podman login + logout", "0.258s", "0.288s", "0.273s", "0.010s", "3.7%", "82164.0KB", "84776.0KB", "83004.0KB", "857.8KB", "1.0%"
"podman pull", "0.197s", "0.228s", "0.215s", "0.011s", "5.1%", "43344.0KB", "44612.0KB", "44116.0KB", "421.4KB", "1.0%"
"podman push", "1.711s", "1.852s", "1.789s", "0.056s", "3.1%", "83948.0KB", "88956.0KB", "86284.0KB", "1631.1KB", "1.9%"
"podman run", "0.359s", "0.390s", "0.376s", "0.011s", "2.9%", "46764.0KB", "47580.0KB", "47114.7KB", "301.8KB", "0.6%"
"podman run --detach", "0.257s", "0.290s", "0.273s", "0.010s", "3.7%", "45888.0KB", "47352.0KB", "46490.7KB", "526.9KB", "1.1%"
"podman save", "0.197s", "0.227s", "0.207s", "0.011s", "5.3%", "43156.0KB", "43996.0KB", "43574.7KB", "258.4KB", "0.6%"
"podman start", "0.195s", "0.227s", "0.202s", "0.012s", "5.9%", "44672.0KB", "45812.0KB", "45349.3KB", "423.9KB", "0.9%"
1 Test Name CPU Fastest Time CPU Slowest Time CPU Average Time CPU StdDev CPU StdDev (Percent) MEM Smallest MEM Largest MEM Average MEM StdDev MEM StdDev (Percent)
2 podman create 0.154s 0.175s 0.170s 0.008s 4.7% 43376.0KB 44360.0KB 43869.3KB 341.4KB 0.8%
3 podman image inspect 0.113s 0.125s 0.117s 0.005s 4.3% 40652.0KB 41876.0KB 41311.3KB 374.0KB 0.9%
4 podman images 0.123s 0.236s 0.151s 0.039s 25.8% 42368.0KB 43400.0KB 42828.7KB 420.3KB 1.0%
5 podman load [docker] 0.300s 0.350s 0.321s 0.016s 5.0% 47960.0KB 50028.0KB 48994.7KB 714.5KB 1.5%
6 podman load [oci] 0.247s 0.299s 0.275s 0.017s 6.2% 49460.0KB 54360.0KB 50828.0KB 1611.5KB 3.2%
7 podman login + logout 0.258s 0.288s 0.273s 0.010s 3.7% 82164.0KB 84776.0KB 83004.0KB 857.8KB 1.0%
8 podman pull 0.197s 0.228s 0.215s 0.011s 5.1% 43344.0KB 44612.0KB 44116.0KB 421.4KB 1.0%
9 podman push 1.711s 1.852s 1.789s 0.056s 3.1% 83948.0KB 88956.0KB 86284.0KB 1631.1KB 1.9%
10 podman run 0.359s 0.390s 0.376s 0.011s 2.9% 46764.0KB 47580.0KB 47114.7KB 301.8KB 0.6%
11 podman run --detach 0.257s 0.290s 0.273s 0.010s 3.7% 45888.0KB 47352.0KB 46490.7KB 526.9KB 1.1%
12 podman save 0.197s 0.227s 0.207s 0.011s 5.3% 43156.0KB 43996.0KB 43574.7KB 258.4KB 0.6%
13 podman start 0.195s 0.227s 0.202s 0.012s 5.9% 44672.0KB 45812.0KB 45349.3KB 423.9KB 0.9%

View File

@ -0,0 +1,66 @@
# Env. var basis for benchmarks.
CGROUP_MANAGER=systemd
CI=true
CI_DESIRED_NETWORK=netavark
CI_NODE_INDEX=13
CI_NODE_TOTAL=32
CIRRUS_ARCH=amd64
CIRRUS_BASE_SHA=2a6a80ef74e5fa6f5b26236fbcf9b7a6aa1955df
CIRRUS_BRANCH=main
CIRRUS_BUILD_ID=6146490737885184
CIRRUS_BUILD_SOURCE=github
CIRRUS_CHANGE_IN_REPO=2a6a80ef74e5fa6f5b26236fbcf9b7a6aa1955df
CIRRUS_CHANGE_MESSAGE=$'Merge pull request #17714 from containers/dependabot/go_modules/test/tools/golang.org/x/tools-0.7.0\n\nbuild(deps): bump golang.org/x/tools from 0.6.0 to 0.7.0 in /test/tools'
CIRRUS_CHANGE_TIMESTAMP=1678288306000
CIRRUS_CHANGE_TITLE=Merge\ pull\ request\ #17714\ from\ containers/dependabot/go_modules/test/tools/golang.org/x/tools-0.7.0
CIRRUS_CI=true
CIRRUS_COMMIT_MESSAGE=$'Merge pull request #17714 from containers/dependabot/go_modules/test/tools/golang.org/x/tools-0.7.0\n\nbuild(deps): bump golang.org/x/tools from 0.6.0 to 0.7.0 in /test/tools'
CIRRUS_DEFAULT_BRANCH=main
CIRRUS_ENV=/tmp/cirrus-env-task-5187827730743296-6000fcf8-525a-43e4-9d9c-e40ece98edb6
CIRRUS_HTTP_CACHE_HOST=127.0.0.1:12321
CIRRUS_LAST_GREEN_BUILD_ID=5021346745286656
CIRRUS_LAST_GREEN_CHANGE=f7ac9fd5d6b7eada42cc3bb9ec33f9ea07df3958
CIRRUS_OS=linux
CIRRUS_REPO_CLONE_HOST=github.com
CIRRUS_REPO_CLONE_URL=https://github.com/containers/podman.git
CIRRUS_REPO_FULL_NAME=containers/podman
CIRRUS_REPO_ID=6707778565701632
CIRRUS_REPO_NAME=podman
CIRRUS_REPO_OWNER=containers
CIRRUS_SHELL=/bin/bash
CIRRUS_TASK_ID=5187827730743296
CIRRUS_TASK_NAME=machine\ podman\ fedora-37\ rootless\ host
CIRRUS_USER_COLLABORATOR=true
CIRRUS_USER_PERMISSION=write
CIRRUS_WORKING_DIR=/var/tmp/go/src/github.com/containers/podman
CTR_FQIN=''
DEBIAN_CACHE_IMAGE_NAME=debian-c20230223t153813z-f37f36d12
DEBIAN_NAME=debian-12
DEST_BRANCH=main
DISTRO_NV=fedora-37
EC2_INST_TYPE=m5zn.metal
FEDORA_AARCH64_NAME=fedora-37-aarch64
FEDORA_CACHE_IMAGE_NAME=fedora-c20230223t153813z-f37f36d12
FEDORA_CONTAINER_FQIN=quay.io/libpod/fedora_podman:c20230223t153813z-f37f36d12
FEDORA_NAME=fedora-37
GOCACHE=/var/tmp/go/cache
GOPATH=/var/tmp/go
GOSRC=/var/tmp/go/src/github.com/containers/podman
NETWORK_BACKEND=netavark
PODBIN_NAME=podman
PRIOR_FEDORA_CACHE_IMAGE_NAME=prior-fedora-c20230223t153813z-f37f36d12
PRIOR_FEDORA_CONTAINER_FQIN=quay.io/libpod/prior-fedora_podman:c20230223t153813z-f37f36d12
PRIOR_FEDORA_NAME=fedora-36
PRIV_NAME=rootless
ROOTLESS_USER=some17019dude
SCRIPT_BASE=./contrib/cirrus
TEST_ENVIRON=host
TEST_FLAVOR=machine
VM_IMAGE_NAME=fedora-aws-c20230223t153813z-f37f36d12
# Machine details for data-comparison sake, not actual env. vars.
BENCH_ENV_VER=1
CPUTOTAL=48
INST_TYPE=m5zn.metal
MEMTOTALKB=197684988
UNAME_R=6.1.13-200.fc37.x86_64
UNAME_M=x86_64

View File

@ -0,0 +1,13 @@
"Test Name", "CPU Fastest Time", "CPU Slowest Time", "CPU Average Time", "CPU StdDev", "CPU StdDev (Percent)", "MEM Smallest", "MEM Largest", "MEM Average", "MEM StdDev", "MEM StdDev (Percent)"
"podman create", "0.122s", "0.203s", "0.136s", "0.030s", "22.1%", "42776.0KB", "43640.0KB", "43097.3KB", "273.6KB", "0.6%"
"podman image inspect", "0.061s", "0.073s", "0.070s", "0.004s", "5.7%", "40464.0KB", "42240.0KB", "41228.0KB", "636.7KB", "1.5%"
"podman images", "0.072s", "0.224s", "0.101s", "0.055s", "54.5%", "41648.0KB", "42504.0KB", "42151.3KB", "299.8KB", "0.7%"
"podman load [docker]", "0.206s", "0.316s", "0.238s", "0.036s", "15.1%", "47240.0KB", "50064.0KB", "48968.7KB", "890.4KB", "1.8%"
"podman load [oci]", "0.174s", "0.356s", "0.218s", "0.063s", "28.9%", "48828.0KB", "52924.0KB", "50080.0KB", "1448.9KB", "2.9%"
"podman login + logout", "0.184s", "0.215s", "0.191s", "0.011s", "5.8%", "80800.0KB", "83248.0KB", "82005.3KB", "746.6KB", "0.9%"
"podman pull", "0.196s", "0.265s", "0.245s", "0.024s", "9.8%", "43100.0KB", "44012.0KB", "43606.0KB", "332.3KB", "0.8%"
"podman push", "0.923s", "0.995s", "0.947s", "0.027s", "2.9%", "133552.0KB", "140548.0KB", "137268.0KB", "2210.6KB", "1.6%"
"podman run", "0.507s", "0.568s", "0.542s", "0.023s", "4.2%", "47236.0KB", "48684.0KB", "47697.3KB", "516.1KB", "1.1%"
"podman run --detach", "0.163s", "0.193s", "0.182s", "0.009s", "4.9%", "46492.0KB", "47476.0KB", "46935.3KB", "325.6KB", "0.7%"
"podman save", "0.114s", "0.134s", "0.123s", "0.006s", "4.9%", "43112.0KB", "43812.0KB", "43542.7KB", "253.4KB", "0.6%"
"podman start", "0.092s", "0.112s", "0.102s", "0.006s", "5.9%", "44472.0KB", "46600.0KB", "45229.3KB", "664.3KB", "1.5%"
1 Test Name CPU Fastest Time CPU Slowest Time CPU Average Time CPU StdDev CPU StdDev (Percent) MEM Smallest MEM Largest MEM Average MEM StdDev MEM StdDev (Percent)
2 podman create 0.122s 0.203s 0.136s 0.030s 22.1% 42776.0KB 43640.0KB 43097.3KB 273.6KB 0.6%
3 podman image inspect 0.061s 0.073s 0.070s 0.004s 5.7% 40464.0KB 42240.0KB 41228.0KB 636.7KB 1.5%
4 podman images 0.072s 0.224s 0.101s 0.055s 54.5% 41648.0KB 42504.0KB 42151.3KB 299.8KB 0.7%
5 podman load [docker] 0.206s 0.316s 0.238s 0.036s 15.1% 47240.0KB 50064.0KB 48968.7KB 890.4KB 1.8%
6 podman load [oci] 0.174s 0.356s 0.218s 0.063s 28.9% 48828.0KB 52924.0KB 50080.0KB 1448.9KB 2.9%
7 podman login + logout 0.184s 0.215s 0.191s 0.011s 5.8% 80800.0KB 83248.0KB 82005.3KB 746.6KB 0.9%
8 podman pull 0.196s 0.265s 0.245s 0.024s 9.8% 43100.0KB 44012.0KB 43606.0KB 332.3KB 0.8%
9 podman push 0.923s 0.995s 0.947s 0.027s 2.9% 133552.0KB 140548.0KB 137268.0KB 2210.6KB 1.6%
10 podman run 0.507s 0.568s 0.542s 0.023s 4.2% 47236.0KB 48684.0KB 47697.3KB 516.1KB 1.1%
11 podman run --detach 0.163s 0.193s 0.182s 0.009s 4.9% 46492.0KB 47476.0KB 46935.3KB 325.6KB 0.7%
12 podman save 0.114s 0.134s 0.123s 0.006s 4.9% 43112.0KB 43812.0KB 43542.7KB 253.4KB 0.6%
13 podman start 0.092s 0.112s 0.102s 0.006s 5.9% 44472.0KB 46600.0KB 45229.3KB 664.3KB 1.5%

View File

@ -0,0 +1,66 @@
# Env. var basis for benchmarks.
CGROUP_MANAGER=systemd
CI=true
CI_DESIRED_NETWORK=netavark
CI_NODE_INDEX=14
CI_NODE_TOTAL=32
CIRRUS_ARCH=arm64
CIRRUS_BASE_SHA=2a6a80ef74e5fa6f5b26236fbcf9b7a6aa1955df
CIRRUS_BRANCH=main
CIRRUS_BUILD_ID=6146490737885184
CIRRUS_BUILD_SOURCE=github
CIRRUS_CHANGE_IN_REPO=2a6a80ef74e5fa6f5b26236fbcf9b7a6aa1955df
CIRRUS_CHANGE_MESSAGE=$'Merge pull request #17714 from containers/dependabot/go_modules/test/tools/golang.org/x/tools-0.7.0\n\nbuild(deps): bump golang.org/x/tools from 0.6.0 to 0.7.0 in /test/tools'
CIRRUS_CHANGE_TIMESTAMP=1678288306000
CIRRUS_CHANGE_TITLE=Merge\ pull\ request\ #17714\ from\ containers/dependabot/go_modules/test/tools/golang.org/x/tools-0.7.0
CIRRUS_CI=true
CIRRUS_COMMIT_MESSAGE=$'Merge pull request #17714 from containers/dependabot/go_modules/test/tools/golang.org/x/tools-0.7.0\n\nbuild(deps): bump golang.org/x/tools from 0.6.0 to 0.7.0 in /test/tools'
CIRRUS_DEFAULT_BRANCH=main
CIRRUS_ENV=/tmp/cirrus-env-task-6313727637585920-5ebc78f9-6b84-44cd-89cc-6dab4cb76add
CIRRUS_HTTP_CACHE_HOST=127.0.0.1:12321
CIRRUS_LAST_GREEN_BUILD_ID=5021346745286656
CIRRUS_LAST_GREEN_CHANGE=f7ac9fd5d6b7eada42cc3bb9ec33f9ea07df3958
CIRRUS_OS=linux
CIRRUS_REPO_CLONE_HOST=github.com
CIRRUS_REPO_CLONE_URL=https://github.com/containers/podman.git
CIRRUS_REPO_FULL_NAME=containers/podman
CIRRUS_REPO_ID=6707778565701632
CIRRUS_REPO_NAME=podman
CIRRUS_REPO_OWNER=containers
CIRRUS_SHELL=/bin/bash
CIRRUS_TASK_ID=6313727637585920
CIRRUS_TASK_NAME=machine\ podman\ fedora-37-aarch64\ rootless\ host
CIRRUS_USER_COLLABORATOR=true
CIRRUS_USER_PERMISSION=write
CIRRUS_WORKING_DIR=/var/tmp/go/src/github.com/containers/podman
CTR_FQIN=''
DEBIAN_CACHE_IMAGE_NAME=debian-c20230223t153813z-f37f36d12
DEBIAN_NAME=debian-12
DEST_BRANCH=main
DISTRO_NV=fedora-37-aarch64
EC2_INST_TYPE=c6g.metal
FEDORA_AARCH64_NAME=fedora-37-aarch64
FEDORA_CACHE_IMAGE_NAME=fedora-c20230223t153813z-f37f36d12
FEDORA_CONTAINER_FQIN=quay.io/libpod/fedora_podman:c20230223t153813z-f37f36d12
FEDORA_NAME=fedora-37
GOCACHE=/var/tmp/go/cache
GOPATH=/var/tmp/go
GOSRC=/var/tmp/go/src/github.com/containers/podman
NETWORK_BACKEND=netavark
PODBIN_NAME=podman
PRIOR_FEDORA_CACHE_IMAGE_NAME=prior-fedora-c20230223t153813z-f37f36d12
PRIOR_FEDORA_CONTAINER_FQIN=quay.io/libpod/prior-fedora_podman:c20230223t153813z-f37f36d12
PRIOR_FEDORA_NAME=fedora-36
PRIV_NAME=rootless
ROOTLESS_USER=some3982dude
SCRIPT_BASE=./contrib/cirrus
TEST_ENVIRON=host
TEST_FLAVOR=machine
VM_IMAGE_NAME=fedora-podman-aws-arm64-c20230223t153813z-f37f36d12
# Machine details for data-comparison sake, not actual env. vars.
BENCH_ENV_VER=1
CPUTOTAL=64
INST_TYPE=c6g.metal
MEMTOTALKB=131625268
UNAME_R=6.1.13-200.fc37.aarch64
UNAME_M=aarch64

26
build-push/.install.sh Normal file
View File

@ -0,0 +1,26 @@
#!/bin/bash

# This script is intended to be used from two places only:
# 1) When building the build-push VM image, to install the scripts as-is
#    in a PR in order for CI testing to operate on them.
# 2) From the autoupdate.sh script, when $BUILDPUSHAUTOUPDATED is unset
#    or '0'.  This clones the latest repository to install (possibly)
#    updated scripts.
#
# WARNING: Use under any other circumstances will probably screw things up.

if [[ -z "$BUILDPUSHAUTOUPDATED" ]];
then
    echo "This script must only be run under Packer or autoupdate.sh"
    exit 1
fi

source /etc/automation_environment
source "$AUTOMATION_LIB_PATH/common_lib.sh"

# Quote expansions so paths containing whitespace can't word-split.
#shellcheck disable=SC2154
cd "$(dirname "$SCRIPT_FILEPATH")" || exit 1

# Must be installed into $AUTOMATION_LIB_PATH/../bin which is on $PATH
cp ./bin/* "$AUTOMATION_LIB_PATH/../bin/"
cp ./lib/* "$AUTOMATION_LIB_PATH/"
chmod +x "$AUTOMATION_LIB_PATH/../bin/"*

5
build-push/README.md Normal file
View File

@ -0,0 +1,5 @@
# DO NOT USE
This directory contains scripts/data used by the Cirrus-CI
`test_build-push` task. It is not intended to be used otherwise
and may cause harm.

175
build-push/bin/main.sh Normal file
View File

@ -0,0 +1,175 @@
#!/bin/bash

# This script is not intended for humans.  It should be run by automation
# at the branch-level in automation for the skopeo, buildah, and podman
# repositories.  Its purpose is to produce a multi-arch container image
# based on the contents of context subdirectory.  At runtime, $PWD is assumed
# to be the root of the cloned git repository.
#
# The first argument to the script, should be the URL of the git repository
# in question.  Though at this time, this is only used for labeling the
# resulting image.
#
# The second argument to this script is the relative path to the build context
# subdirectory.  The basename of this subdirectory may indicate the
# image flavor (i.e. `upstream`, `testing`, or `stable`).  Depending
# on this value, the image may be pushed to multiple container registries
# under slightly different rules (see the next option).
#
# If the basename of the context directory (second argument) does NOT reflect
# the image flavor, this name may be passed in as a third argument.  Handling
# of this argument may be repository-specific, so check the actual code below
# to understand its behavior.

set -eo pipefail

if [[ -r "/etc/automation_environment" ]]; then
    source /etc/automation_environment  # defines AUTOMATION_LIB_PATH
    #shellcheck disable=SC1090,SC2154
    source "$AUTOMATION_LIB_PATH/common_lib.sh"
    #shellcheck source=../lib/autoupdate.sh
    source "$AUTOMATION_LIB_PATH/autoupdate.sh"
else
    echo "Expecting to find automation common library installed."
    exit 1
fi

# Careful: Changing the error message below could break auto-update test.
if [[ "$#" -lt 2 ]]; then
    #shellcheck disable=SC2145
    die "Must be called with at least two arguments, got '$@'"
fi

if [[ -z $(type -P build-push.sh) ]]; then
    die "It does not appear that build-push.sh is installed properly"
fi

if ! [[ -d "$PWD/.git" ]]; then
    die "The current directory ($PWD) does not appear to be the root of a git repo."
fi

# Assume transitive debugging state for build-push.sh if set
if [[ "$(automation_version | cut -d '.' -f 1)" -ge 4 ]]; then
    # Valid for version 4.0.0 and above only
    export A_DEBUG
else
    export DEBUG
fi

# Arches to build by default - may be overridden for testing
ARCHES="${ARCHES:-amd64,ppc64le,s390x,arm64}"

# First arg (REPO_URL) is the clone URL for repository for informational purposes
REPO_URL="$1"
REPO_NAME=$(basename "${REPO_URL%.git}")

# Second arg (CTX_SUB) is the context subdirectory relative to the clone path
CTX_SUB="$2"

# Historically, the basename of second arg set the image flavor (i.e. `upstream`,
# `testing`, or `stable`).  For cases where this convention doesn't fit,
# it's possible to pass the flavor-name as the third argument.  Both methods
# will populate a "FLAVOR" build-arg value.
# Fix: the original if/elif/else here had an unreachable `else die` branch
# ($# is always either <3 or >=3); restructured as a simple if/else.
if [[ "$#" -ge 3 ]]; then
    FLAVOR_NAME="$3"  # An empty-value is valid
else
    FLAVOR_NAME=$(basename "$CTX_SUB")
fi

# Test repositories are never pushed to the real registry.
_REG="quay.io"
if [[ "$REPO_NAME" =~ testing ]]; then
    _REG="example.com"
fi
REPO_FQIN="$_REG/$REPO_NAME/$FLAVOR_NAME"
req_env_vars REPO_URL REPO_NAME CTX_SUB FLAVOR_NAME

# Common library defines SCRIPT_FILENAME
# shellcheck disable=SC2154
dbg "$SCRIPT_FILENAME operating constants:
REPO_URL=$REPO_URL
REPO_NAME=$REPO_NAME
CTX_SUB=$CTX_SUB
FLAVOR_NAME=$FLAVOR_NAME
REPO_FQIN=$REPO_FQIN
"

# Set non-zero to avoid actually executing build-push, simply print
# the command-line that would have been executed
DRYRUN=${DRYRUN:-0}
_DRNOPUSH=""
if ((DRYRUN)); then
    _DRNOPUSH="--nopush"
    warn "Operating in dry-run mode with $_DRNOPUSH"
fi

### MAIN

declare -a build_args
if [[ -n "$FLAVOR_NAME" ]]; then
    build_args=(--build-arg "FLAVOR=$FLAVOR_NAME")
fi

head_sha=$(git rev-parse HEAD)
dbg "HEAD is $head_sha"
# Labels to add to all images
# N/B: These won't show up in the manifest-list itself, only its constituents.
lblargs="\
--label=org.opencontainers.image.source=$REPO_URL \
--label=org.opencontainers.image.revision=$head_sha \
--label=org.opencontainers.image.created=$(date -u --iso-8601=seconds)"
dbg "lblargs=$lblargs"

modcmdarg="tag_version.sh $FLAVOR_NAME"

# For stable images, the version number of the command is needed for tagging.
if [[ "$FLAVOR_NAME" == "stable" ]]; then
    # only native arch is needed to extract the version
    dbg "Building local-arch image to extract stable version number"
    podman build -t $REPO_FQIN "${build_args[@]}" ./$CTX_SUB

    case "$REPO_NAME" in
        skopeo) version_cmd="--version" ;;
        buildah) version_cmd="buildah --version" ;;
        podman) version_cmd="podman --version" ;;
        testing) version_cmd="cat FAKE_VERSION" ;;
        *) die "Unknown/unsupported repo '$REPO_NAME'" ;;
    esac

    pvcmd="podman run -i --rm $REPO_FQIN $version_cmd"
    dbg "Extracting version with command: $pvcmd"
    version_output=$($pvcmd)
    dbg "version output:
$version_output
"
    img_cmd_version=$(awk -r -e '/^.+ version /{print $3}' <<<"$version_output")
    dbg "parsed version: $img_cmd_version"
    test -n "$img_cmd_version"
    lblargs="$lblargs --label=org.opencontainers.image.version=$img_cmd_version"

    # Prevent temporary build colliding with multi-arch manifest list (built next)
    # but preserve image (by ID) for use as cache.
    dbg "Un-tagging $REPO_FQIN"
    podman untag $REPO_FQIN

    # tag-version.sh expects this arg. when FLAVOR_NAME=stable
    modcmdarg+=" $img_cmd_version"

    # Stable images get pushed to 'containers' namespace as latest & version-tagged
    build-push.sh \
        $_DRNOPUSH \
        --arches=$ARCHES \
        --modcmd="$modcmdarg" \
        $_REG/containers/$REPO_NAME \
        ./$CTX_SUB \
        $lblargs \
        "${build_args[@]}"
fi

# All images are pushed to quay.io/<reponame>, both
# latest and version-tagged (if available).
build-push.sh \
    $_DRNOPUSH \
    --arches=$ARCHES \
    --modcmd="$modcmdarg" \
    $REPO_FQIN \
    ./$CTX_SUB \
    $lblargs \
    "${build_args[@]}"

View File

@ -0,0 +1,69 @@
#!/bin/bash

# This script is not intended for humans.  It should only be referenced
# as an argument to the build-push.sh `--modcmd` option.  Its purpose
# is to ensure stable images are re-tagged with a version-number
# corresponding to the included tool's version.

set -eo pipefail

if [[ -r "/etc/automation_environment" ]]; then
    source /etc/automation_environment  # defines AUTOMATION_LIB_PATH
    #shellcheck disable=SC1090,SC2154
    source "$AUTOMATION_LIB_PATH/common_lib.sh"
else
    echo "Unexpected operating environment"
    exit 1
fi

# Vars defined by build-push.sh spec. for mod scripts
req_env_vars SCRIPT_FILENAME SCRIPT_FILEPATH RUNTIME PLATFORMOS FQIN CONTEXT \
             PUSH ARCHES REGSERVER NAMESPACE IMGNAME MODCMD

if [[ "$#" -ge 1 ]]; then
    FLAVOR_NAME="$1"  # upstream, testing, or stable
fi

if [[ "$#" -ge 2 ]]; then
    # Enforce all version-tags start with a 'v'
    VERSION="v${2#v}"  # output of $version_cmd
fi

if [[ -z "$FLAVOR_NAME" ]]; then
    # Defined by common_lib.sh
    # shellcheck disable=SC2154
    warn "$SCRIPT_FILENAME passed empty flavor-name argument (optional)."
elif [[ -z "$VERSION" ]]; then
    warn "$SCRIPT_FILENAME received empty version argument (req. for FLAVOR_NAME=stable)."
fi

# shellcheck disable=SC2154
dbg "Mod-command operating on $FQIN in '$FLAVOR_NAME' flavor"

if [[ "$FLAVOR_NAME" == "stable" ]]; then
    # Stable images must all be tagged with a version number.
    # Confirm this value is passed in by caller.
    req_env_vars VERSION
    VERSION=v${VERSION#v}  # idempotent; tolerates values with/without leading 'v'
    # `egrep` is deprecated; `grep -E` has identical ERE semantics.
    if grep -E -q '^v[0-9]+\.[0-9]+\.[0-9]+' <<<"$VERSION"; then
        msg "Found image command version '$VERSION'"
    else
        die "Encountered unexpected/non-conforming version '$VERSION'"
    fi

    # shellcheck disable=SC2154
    $RUNTIME tag $FQIN:latest $FQIN:$VERSION
    msg "Successfully tagged $FQIN:$VERSION"

    # Tag as x.y to provide a consistent tag even for a future z+1
    xy_ver=$(awk -F '.' '{print $1"."$2}'<<<"$VERSION")
    $RUNTIME tag $FQIN:latest $FQIN:$xy_ver
    msg "Successfully tagged $FQIN:$xy_ver"

    # Tag as x to provide consistent tag even for a future y+1
    x_ver=$(awk -F '.' '{print $1}'<<<"$xy_ver")
    $RUNTIME tag $FQIN:latest $FQIN:$x_ver
    msg "Successfully tagged $FQIN:$x_ver"
else
    warn "$SCRIPT_FILENAME not version-tagging for '$FLAVOR_NAME' stage of '$FQIN'"
fi

View File

@ -0,0 +1,36 @@
# This script is not intended for humans.  It should only be sourced by
# main.sh.  If BUILDPUSHAUTOUPDATED!=0 then it will be a no-op.  Otherwise,
# it will download the latest version of the build-push scripts and re-exec
# main.sh.  This allows the scripts to be updated without requiring new VM
# images to be composed and deployed.
#
# WARNING: Changes to this script _do_ require new VM images as auto-updating
# the auto-update script would be complex and hard to test.
# Must be exported - .install.sh checks this is set.
export BUILDPUSHAUTOUPDATED="${BUILDPUSHAUTOUPDATED:-0}"
if ! ((BUILDPUSHAUTOUPDATED)); then
# msg/MKTEMP_FORMAT come from common_lib.sh, already sourced by main.sh
msg "Auto-updating build-push operational scripts..."
#shellcheck disable=SC2154
GITTMP=$(mktemp -p '' -d "$MKTEMP_FORMAT")
# Ensure the clone is removed even if an intermediate step exits early
trap "rm -rf $GITTMP" EXIT
msg "Obtaining latest version..."
git clone --quiet --depth=1 \
https://github.com/containers/automation_images.git \
"$GITTMP"
msg "Installing..."
cd $GITTMP/build-push || exit 1
# .install.sh requires $BUILDPUSHAUTOUPDATED to be set (exported above)
bash ./.install.sh
# Important: Return to directory main.sh was started from
cd - || exit 1
rm -rf "$GITTMP"
#shellcheck disable=SC2145
msg "Re-executing main.sh $@..."
# Prevent the re-exec'd main.sh from looping back into this branch
export BUILDPUSHAUTOUPDATED=1
exec main.sh "$@"  # guaranteed on $PATH
fi

200
build-push/test.sh Normal file
View File

@ -0,0 +1,200 @@
# DO NOT USE - This script is intended to be called by the Cirrus-CI
# `test_build-push` task.  It is not intended to be used otherwise
# and may cause harm.  Its purpose is to confirm the 'main.sh' script
# behaves in an expected way, given a local test repository as input.
set -eo pipefail
SCRIPT_DIRPATH=$(dirname $(realpath "${BASH_SOURCE[0]}"))
source $SCRIPT_DIRPATH/../lib.sh
req_env_vars CIRRUS_CI
# No need to test if image wasn't built
if TARGET_NAME=build-push skip_on_pr_label; then exit 0; fi
# Architectures to test with (golang standard names)
TESTARCHES="amd64 arm64"
# main.sh is sensitive to this value
ARCHES=$(tr " " ","<<<"$TESTARCHES")
export ARCHES
# Contrived "version" for testing purposes
FAKE_VER_X=$RANDOM
FAKE_VER_Y=$RANDOM
FAKE_VER_Z=$RANDOM
FAKE_VERSION="$FAKE_VER_X.$FAKE_VER_Y.$FAKE_VER_Z"
# Contrived source repository for testing
SRC_TMP=$(mktemp -p '' -d tmp-build-push-test-XXXX)
# Do not change, main.sh is sensitive to the 'testing' name
TEST_FQIN=example.com/testing/stable
# Stable build should result in manifest list tagged this
TEST_FQIN2=example.com/containers/testing
# Don't allow main.sh or tag_version.sh to auto-update at runtime
export BUILDPUSHAUTOUPDATED=1
# Remove the contrived repo on any exit
trap "rm -rf $SRC_TMP" EXIT
# main.sh expects $PWD to be a git repository.
msg "
##### Constructing local test repository #####"
cd $SRC_TMP
showrun git init -b main testing
cd testing
git config --local user.name "Testy McTestface"
git config --local user.email "test@example.com"
git config --local advice.detachedHead "false"
git config --local commit.gpgsign "false"
# The following paths match the style of sub-dir in the actual
# skopeo/buildah/podman repositories.  Only the 'stable' flavor
# is tested here, since it involves the most complex workflow.
mkdir -vp "contrib/testimage/stable"
cd "contrib/testimage/stable"
echo "build-push-test version v$FAKE_VERSION" | tee "FAKE_VERSION"
cat <<EOF | tee "Containerfile"
FROM registry.fedoraproject.org/fedora:latest
ARG FLAVOR
ADD /FAKE_VERSION /
RUN echo "FLAVOUR=\$FLAVOR" > /FLAVOUR
EOF
# As an additional test, build and check images when passing
# the 'stable' flavor name as a command-line arg instead
# of using the subdirectory dirname (old method).
cd $SRC_TMP/testing/contrib/testimage
cp stable/* ./
cd $SRC_TMP/testing
# The images will have the repo & commit ID set as labels
git add --all
git commit -m 'test repo initial commit'
TEST_REVISION=$(git rev-parse HEAD)
# Given the flavor-name as the first argument, verify built image
# expectations.  For 'stable' image, verify that main.sh will properly
# version-tagged both FQINs.  For other flavors, verify expected labels
# on the `latest` tagged FQINs.
verify_built_images() {
    local _fqin _arch xy_ver x_ver img_ver img_src img_rev _fltr
    # Fix: the variable assigned/used below is 'test_tag'; the original
    # declared '_test_tag', silently leaking 'test_tag' into global scope.
    local test_tag expected_flavor _test_fqins
    expected_flavor="$1"

    msg "
##### Testing execution of '$expected_flavor' images for arches $TESTARCHES #####"
    podman --version

    req_env_vars TESTARCHES FAKE_VERSION TEST_FQIN TEST_FQIN2
    declare -a _test_fqins
    _test_fqins=("${TEST_FQIN%stable}$expected_flavor")
    if [[ "$expected_flavor" == "stable" ]]; then
        _test_fqins+=("$TEST_FQIN2")
        test_tag="v$FAKE_VERSION"
        xy_ver="v$FAKE_VER_X.$FAKE_VER_Y"
        x_ver="v$FAKE_VER_X"
    else
        test_tag="latest"
        xy_ver="latest"
        x_ver="latest"
    fi

    for _fqin in "${_test_fqins[@]}"; do
        for _arch in $TESTARCHES; do
            msg "Testing container can execute '/bin/true'"
            showrun podman run -i --arch=$_arch --rm "$_fqin:$test_tag" /bin/true

            msg "Testing container FLAVOR build-arg passed correctly"
            # `fgrep` is deprecated; `grep -F` is the same fixed-string match
            showrun podman run -i --arch=$_arch --rm "$_fqin:$test_tag" \
                cat /FLAVOUR | tee /dev/stderr | grep -F -x -q "FLAVOUR=$expected_flavor"

            if [[ "$expected_flavor" == "stable" ]]; then
                msg "Testing tag '$xy_ver'"
                if ! showrun podman manifest exists $_fqin:$xy_ver; then
                    die "Failed to find manifest-list tagged '$xy_ver'"
                fi

                msg "Testing tag '$x_ver'"
                if ! showrun podman manifest exists $_fqin:$x_ver; then
                    die "Failed to find manifest-list tagged '$x_ver'"
                fi
            fi
        done

        if [[ "$expected_flavor" == "stable" ]]; then
            msg "Testing image $_fqin:$test_tag version label"
            _fltr='.[].Config.Labels."org.opencontainers.image.version"'
            img_ver=$(podman inspect $_fqin:$test_tag | jq -r -e "$_fltr")
            showrun test "$img_ver" == "v$FAKE_VERSION"
        fi

        msg "Testing image $_fqin:$test_tag source label"
        _fltr='.[].Config.Labels."org.opencontainers.image.source"'
        img_src=$(podman inspect $_fqin:$test_tag | jq -r -e "$_fltr")
        showrun test "$img_src" == "git://testing"

        msg "Testing image $_fqin:$test_tag source revision"
        _fltr='.[].Config.Labels."org.opencontainers.image.revision"'
        img_rev=$(podman inspect $_fqin:$test_tag | jq -r -e "$_fltr")
        showrun test "$img_rev" == "$TEST_REVISION"
    done
}
# Best-effort removal of every manifest-list/tag the tests may have created.
remove_built_images() {
    # Prints version for log context
    buildah --version
    # Fix: declare loop variables local so they don't leak to the caller
    local _fqin tag
    for _fqin in $TEST_FQIN $TEST_FQIN2; do
        for tag in latest v$FAKE_VERSION v$FAKE_VER_X.$FAKE_VER_Y v$FAKE_VER_X; do
            # Don't care if this fails
            podman manifest rm $_fqin:$tag || true
        done
    done
}
msg "
##### Testing build-push subdir-flavor run of '$TEST_FQIN' & '$TEST_FQIN2' #####"
cd $SRC_TMP/testing
export DRYRUN=1  # Force main.sh not to push anything
req_env_vars ARCHES DRYRUN
# main.sh is sensitive to 'testing' value.
# Also confirms main.sh is on $PATH
env A_DEBUG=1 main.sh git://testing contrib/testimage/stable
verify_built_images stable
msg "
##### Testing build-push flavour-arg run for '$TEST_FQIN' & '$TEST_FQIN2' #####"
remove_built_images
env A_DEBUG=1 main.sh git://testing contrib/testimage foobarbaz
verify_built_images foobarbaz
# This script verifies it's only/ever running inside CI.  Use a fake
# main.sh to verify it auto-updates itself w/o actually performing
# a build.  N/B: This test must be run last, in a throw-away environment,
# it _WILL_ modify on-disk contents!
msg "
##### Testing auto-update capability #####"
cd $SRC_TMP
# N/B: unquoted EOF is deliberate - $AUTOMATION_LIB_PATH expands NOW, so the
# fake main.sh hard-codes the current library path.
#shellcheck disable=SC2154
cat >main.sh<< EOF
#!/bin/bash
source /etc/automation_environment # defines AUTOMATION_LIB_PATH
source "$AUTOMATION_LIB_PATH/common_lib.sh"
source "$AUTOMATION_LIB_PATH/autoupdate.sh"
EOF
chmod +x main.sh
# Back to where we were
cd -
# Expect the real main.sh to bark one of two error messages
# and exit non-zero.
EXP_RX1="Must.be.called.with.at.least.two.arguments"
EXP_RX2="does.not.appear.to.be.the.root.of.a.git.repo"
if output=$(env --ignore-environment \
BUILDPUSHAUTOUPDATED=0 \
AUTOMATION_LIB_PATH=$AUTOMATION_LIB_PATH \
$SRC_TMP/main.sh 2>&1); then
die "Fail.  Expected main.sh to exit non-zero"
else
if [[ "$output" =~ $EXP_RX1 ]] || [[ "$output" =~ $EXP_RX2 ]]; then
echo "PASS"
else
die "Fail.  Expecting match to '$EXP_RX1' or '$EXP_RX2', got:
$output"
fi
fi

View File

@ -27,10 +27,8 @@ INSTALL_PACKAGES=(\
git
jq
podman
python3-pip
qemu-user-static
skopeo
unzip
)
echo "Installing general build/test dependencies"
@ -39,7 +37,11 @@ bigto $SUDO dnf install -y "${INSTALL_PACKAGES[@]}"
# It was observed in F33, dnf install doesn't always get you the latest/greatest
lilto $SUDO dnf update -y
# Re-install would append to this, making a mess.
$SUDO rm -f /etc/automation_environment
# Re-install the latest version with the 'build-push' component
install_automation_tooling latest build-push
# Re-install with the 'build-push' component
install_automation_tooling build-push
# Install main scripts into directory on $PATH
cd $REPO_DIRPATH/build-push
set -x
# Do not auto-update to allow testing inside a PR
$SUDO env BUILDPUSHAUTOUPDATED=1 bash ./.install.sh

View File

@ -19,7 +19,6 @@ variables: # Empty value means it must be passed in on command-line
# See Makefile for definitions
FEDORA_RELEASE: "{{env `FEDORA_RELEASE`}}"
PRIOR_FEDORA_RELEASE: "{{env `PRIOR_FEDORA_RELEASE`}}"
RAWHIDE_RELEASE: "{{env `RAWHIDE_RELEASE`}}"
DEBIAN_RELEASE: "{{env `DEBIAN_RELEASE`}}"
builders:
@ -49,15 +48,6 @@ builders:
# Permit running nested VM's to support specialized testing
image_licenses: ["projects/vm-options/global/licenses/enable-vmx"]
- <<: *gce_hosted_image
name: 'rawhide'
# The latest fedora base image will be "upgraded" to rawhide
source_image: 'fedora-b{{user `IMG_SFX`}}'
labels:
<<: *gce_labels
src: 'fedora-b{{user `IMG_SFX` }}'
release: 'rawhide-{{user `RAWHIDE_RELEASE`}}'
- <<: *gce_hosted_image
name: 'fedora'
labels: &fedora_gce_labels
@ -75,6 +65,9 @@ builders:
source_image_family: 'fedora-base'
labels: *fedora_gce_labels
- <<: *aux_fed_img
name: 'fedora-podman-py'
- <<: *aux_fed_img
name: 'fedora-netavark'
@ -180,30 +173,23 @@ provisioners:
- type: 'shell'
inline:
- 'set -e'
- 'mkdir -p /var/tmp/automation_images'
- 'mkdir -p /tmp/automation_images'
- type: 'file'
source: '{{ pwd }}/'
destination: "/var/tmp/automation_images"
- only: ['rawhide']
type: 'shell'
expect_disconnect: true # VM will be rebooted at end of script
inline:
- 'set -e'
- '/bin/bash /var/tmp/automation_images/cache_images/rawhide_setup.sh'
destination: "/tmp/automation_images"
- except: ['debian']
type: 'shell'
inline:
- 'set -e'
- '/bin/bash /var/tmp/automation_images/cache_images/fedora_setup.sh'
- '/bin/bash /tmp/automation_images/cache_images/fedora_setup.sh'
- only: ['debian']
type: 'shell'
inline:
- 'set -e'
- 'env DEBIAN_FRONTEND=noninteractive /bin/bash /var/tmp/automation_images/cache_images/debian_setup.sh'
- '/bin/bash /tmp/automation_images/cache_images/debian_setup.sh'
post-processors:
# This is critical for human-interaction. Contents will be used

View File

@ -14,9 +14,12 @@ REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
# shellcheck source=./lib.sh
source "$REPO_DIRPATH/lib.sh"
msg "Updating/Installing repos and packages for $OS_REL_VER"
lilto ooe.sh $SUDO apt-get -q -y update
bigto ooe.sh $SUDO apt-get -q -y upgrade
echo "Updating/Installing repos and packages for $OS_REL_VER"
lilto ooe.sh $SUDO apt-get -qq -y update
bigto ooe.sh $SUDO apt-get -qq -y upgrade
echo "Configuring additional package repositories"
INSTALL_PACKAGES=(\
apache2-utils
@ -39,12 +42,13 @@ INSTALL_PACKAGES=(\
crun
dnsmasq
e2fslibs-dev
emacs-nox
file
fuse3
fuse-overlayfs
gcc
gettext
git
git-daemon-run
gnupg2
go-md2man
golang
@ -59,6 +63,7 @@ INSTALL_PACKAGES=(\
libdevmapper-dev
libdevmapper1.02.1
libfuse-dev
libfuse2
libfuse3-dev
libglib2.0-dev
libgpgme11-dev
@ -103,8 +108,6 @@ INSTALL_PACKAGES=(\
skopeo
slirp4netns
socat
libsqlite3-0
libsqlite3-dev
systemd-container
sudo
time
@ -118,16 +121,10 @@ INSTALL_PACKAGES=(\
zstd
)
# bpftrace is only needed on the host as containers cannot run ebpf
# programs anyway and it is very big so we should not bloat the container
# images unnecessarily.
if ! ((CONTAINER)); then
INSTALL_PACKAGES+=( \
bpftrace
)
fi
# Necessary to update cache of newly added repos
lilto $SUDO apt-get -q -y update
msg "Installing general build/testing dependencies"
echo "Installing general build/testing dependencies"
bigto $SUDO apt-get -q -y install "${INSTALL_PACKAGES[@]}"
# The nc installed by default is missing many required options
@ -152,9 +149,7 @@ curl --fail --silent --location \
$SUDO tee /etc/apt/trusted.gpg.d/docker_com.gpg &> /dev/null
# Buildah CI does conformance testing vs the most recent Docker version.
# FIXME: As of 7-2023, there is no 'trixie' dist for docker. Fix the next lines once that changes.
#docker_debian_release=$(source /etc/os-release; echo "$VERSION_CODENAME")
docker_debian_release="bookworm"
docker_debian_release=$(source /etc/os-release; echo "$VERSION_CODENAME")
echo "deb https://download.docker.com/linux/debian $docker_debian_release stable" | \
ooe.sh $SUDO tee /etc/apt/sources.list.d/docker.list &> /dev/null

View File

@ -17,44 +17,14 @@ fi
# shellcheck source=./lib.sh
source "$REPO_DIRPATH/lib.sh"
# Generate en_US.UTF-8 locale as this is required for a podman test (https://github.com/containers/podman/pull/19635).
$SUDO sed -i '/en_US.UTF-8/s/^#//g' /etc/locale.gen
$SUDO locale-gen
# Debian doesn't mount tmpfs on /tmp as default but we want this to speed tests up so
# they don't have to write to persistent disk.
# https://github.com/containers/podman/pull/22533
$SUDO mkdir -p /etc/systemd/system/local-fs.target.wants/
cat <<EOF | $SUDO tee /etc/systemd/system/tmp.mount
[Unit]
Description=Temporary Directory /tmp
ConditionPathIsSymbolicLink=!/tmp
DefaultDependencies=no
Conflicts=umount.target
Before=local-fs.target umount.target
After=swap.target
[Mount]
What=tmpfs
Where=/tmp
Type=tmpfs
Options=size=75%%,mode=1777
EOF
# enable the unit by default
$SUDO ln -s ../tmp.mount /etc/systemd/system/local-fs.target.wants/tmp.mount
req_env_vars PACKER_BUILD_NAME
bash $SCRIPT_DIRPATH/debian_packaging.sh
# dnsmasq is set to bind 0.0.0.0:53, that will conflict with our dns tests.
# We don't need a local resolver.
$SUDO systemctl disable dnsmasq.service
$SUDO systemctl mask dnsmasq.service
if ! ((CONTAINER)); then
warn "Making Debian kernel enable cgroup swap accounting"
SEDCMD='s/^GRUB_CMDLINE_LINUX="(.*)"/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1"/'
warn "Forcing CgroupsV1"
SEDCMD='s/^GRUB_CMDLINE_LINUX="(.*)"/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1 systemd.unified_cgroup_hierarchy=0"/'
ooe.sh $SUDO sed -re "$SEDCMD" -i /etc/default/grub.d/*
ooe.sh $SUDO sed -re "$SEDCMD" -i /etc/default/grub
ooe.sh $SUDO update-grub
@ -62,10 +32,6 @@ fi
nm_ignore_cni
if ! ((CONTAINER)); then
initialize_local_cache_registry
fi
finalize
echo "SUCCESS!"

View File

@ -88,9 +88,8 @@ if [[ $(uname -m) == "aarch64" ]]; then
$SUDO env PATH=$PATH CARGO_HOME=$CARGO_HOME rustup target add aarch64-unknown-linux-gnu
fi
msg "Install tool to generate man pages"
$SUDO go install github.com/cpuguy83/go-md2man/v2@latest
$SUDO install /root/go/bin/go-md2man /usr/local/bin/
msg "Install mandown to generate man pages"
$SUDO env PATH=$PATH CARGO_HOME=$CARGO_HOME cargo install mandown
# Downstream users of this image are specifically testing netavark & aardvark-dns
# code changes. We want to start with using the RPMs because they deal with any

View File

@ -0,0 +1,98 @@
#!/bin/bash

# This script is called from fedora_setup.sh and various Dockerfiles.
# It's not intended to be used outside of those contexts. It assumes the lib.sh
# library has already been sourced, and that all "ground-up" package-related activity
# needs to be done, including repository setup and initial update.

set -e

# Resolve this script's location so lib.sh can be found relative to it.
SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")

# shellcheck source=./lib.sh
source "$REPO_DIRPATH/lib.sh"

# NOTE(review): warn/msg/lilto/bigto/ooe.sh and $SUDO, $OS_REL_VER,
# $PACKER_BUILD_NAME are presumably provided by lib.sh / the build
# environment — confirm against lib.sh.
# shellcheck disable=SC2154
warn "Enabling updates-testing repository for $PACKER_BUILD_NAME"
lilto ooe.sh $SUDO dnf install -y 'dnf-command(config-manager)'
lilto ooe.sh $SUDO dnf config-manager --set-enabled updates-testing

msg "Updating/Installing repos and packages for $OS_REL_VER"
bigto ooe.sh $SUDO dnf update -y

# Build/test dependencies for the build-push tooling images.
INSTALL_PACKAGES=(\
    bash-completion
    bridge-utils
    buildah
    bzip2
    curl
    findutils
    fuse3
    gcc
    git
    git-daemon
    glib2-devel
    glibc-devel
    hostname
    httpd-tools
    iproute
    iptables
    jq
    libtool
    lsof
    make
    nmap-ncat
    openssl
    openssl-devel
    pkgconfig
    podman
    policycoreutils
    protobuf
    protobuf-devel
    python-pip-wheel
    python-setuptools-wheel
    python-toml
    python-wheel-wheel
    python3-PyYAML
    python3-coverage
    python3-dateutil
    python3-docker
    python3-fixtures
    python3-libselinux
    python3-libsemanage
    python3-libvirt
    python3-pip
    python3-psutil
    python3-pylint
    python3-pytest
    python3-pyxdg
    python3-requests
    python3-requests-mock
    python3-virtualenv
    python3.6
    python3.8
    python3.9
    redhat-rpm-config
    rsync
    sed
    skopeo
    socat
    tar
    time
    tox
    unzip
    vim
    wget
    xz
    zip
    zstd
)

echo "Installing general build/test dependencies"
bigto $SUDO dnf install -y "${INSTALL_PACKAGES[@]}"

# It was observed in F33, dnf install doesn't always get you the latest/greatest
lilto $SUDO dnf update -y

View File

@ -18,17 +18,21 @@ source "$REPO_DIRPATH/lib.sh"
# for both VM and container image build workflows.
req_env_vars PACKER_BUILD_NAME
# Only enable updates-testing on all 'latest' Fedora images (except rawhide)
# Do not enable updates-testing on the 'prior' Fedora release images
# as a matter of general policy. Historically there have been many
# problems with non-uniform behavior when both supported Fedora releases
# receive container-related dependency updates at the same time. Since
# the 'prior' release has the shortest support lifetime, keep its behavior
# stable by only using released updates.
# shellcheck disable=SC2154
if [[ "$PACKER_BUILD_NAME" == "fedora" ]] && [[ ! "$PACKER_BUILD_NAME" =~ "prior" ]]; then
if [[ ! "$PACKER_BUILD_NAME" =~ prior ]]; then
warn "Enabling updates-testing repository for $PACKER_BUILD_NAME"
lilto ooe.sh $SUDO dnf install -y 'dnf-command(config-manager)'
lilto ooe.sh $SUDO dnf config-manager setopt updates-testing.enabled=1
lilto ooe.sh $SUDO dnf config-manager --set-enabled updates-testing
# Could be on prior-fedora also, but copr isn't installed by default
warn "Enabling sbrivio/passt repo. for passt packages"
$SUDO dnf copr enable -y sbrivio/passt
else
warn "NOT enabling updates-testing repository for $PACKER_BUILD_NAME"
fi
@ -56,7 +60,7 @@ INSTALL_PACKAGES=(\
curl
device-mapper-devel
dnsmasq
docker-distribution
docker-compose
e2fsprogs-devel
emacs-nox
fakeroot
@ -65,12 +69,10 @@ INSTALL_PACKAGES=(\
fuse3
fuse3-devel
gcc
gh
git
git-daemon
glib2-devel
glibc-devel
glibc-langpack-en
glibc-static
gnupg
go-md2man
@ -83,7 +85,6 @@ INSTALL_PACKAGES=(\
iproute
iptables
jq
koji
krb5-workstation
libassuan
libassuan-devel
@ -103,7 +104,7 @@ INSTALL_PACKAGES=(\
libxslt-devel
lsof
make
man-db
mlocate
msitools
nfs-utils
nmap-ncat
@ -113,31 +114,42 @@ INSTALL_PACKAGES=(\
pandoc
parallel
passt
perl-Clone
perl-FindBin
pigz
pkgconfig
podman
podman-remote
pre-commit
procps-ng
protobuf
protobuf-c
protobuf-c-devel
protobuf-devel
python3-fedora-distro-aliases
python3-koji-cli-plugins
python-pip-wheel
python-setuptools-wheel
python-toml
python-wheel-wheel
python2
python3-PyYAML
python3-coverage
python3-dateutil
python3-devel
python3-docker
python3-fixtures
python3-libselinux
python3-libsemanage
python3-libvirt
python3-pip
python3-psutil
python3-pylint
python3-pyxdg
python3-requests
python3-requests-mock
redhat-rpm-config
rpcbind
rsync
runc
sed
ShellCheck
skopeo
slirp4netns
socat
sqlite-libs
sqlite-devel
squashfs-tools
tar
time
@ -151,77 +163,44 @@ INSTALL_PACKAGES=(\
zstd
)
# Rawhide images don't need these packages
if [[ "$PACKER_BUILD_NAME" =~ fedora ]]; then
INSTALL_PACKAGES+=( \
python-pip-wheel
python-setuptools-wheel
python-toml
python-wheel-wheel
python3-PyYAML
python3-coverage
python3-dateutil
python3-devel
python3-docker
python3-fixtures
python3-libselinux
python3-libsemanage
python3-libvirt
python3-pip
python3-psutil
python3-pylint
python3-pyxdg
python3-requests
python3-requests-mock
)
else # podman-sequoia is only available in Rawhide
timebomb 20251101 "Also install the package in future Fedora releases, and enable Sequoia support in users of the images."
INSTALL_PACKAGES+=( \
podman-sequoia
)
# Test with CNI in Fedora N-1
EXARG=""
if [[ "$PACKER_BUILD_NAME" =~ prior ]]; then
EXARG="--exclude=netavark --exclude=aardvark-dns"
fi
# Workaround: Around the time of this commit, the `criu` package
# was found to be missing a recommends-dependency on criu-libs.
# Until a fixed rpm lands in the Fedora repositories, manually
# include it here. This workaround should be removed once the
# package is corrected (likely > 3.17.1-3).
INSTALL_PACKAGES+=(criu-libs)
# When installing during a container-build, having this present
# will seriously screw up future dnf operations in very non-obvious ways.
# bpftrace is only needed on the host as containers cannot run ebpf
# programs anyway and it is very big so we should not bloat the container
# images unnecessarily.
if ! ((CONTAINER)); then
INSTALL_PACKAGES+=( \
bpftrace
composefs
container-selinux
fuse-overlayfs
libguestfs-tools
selinux-policy-devel
policycoreutils
)
# Extra packages needed by podman-machine-os
INSTALL_PACKAGES+=( \
podman-machine
osbuild
osbuild-tools
osbuild-ostree
xfsprogs
e2fsprogs
)
fi
# Download these package files, but don't install them; Any tests
# wishing to, may install them using their native tools at runtime.
DOWNLOAD_PACKAGES=(\
oci-umount
parallel
podman-docker
python3-devel
python3-pip
podman-plugins
python3-pytest
python3-virtualenv
)
msg "Installing general build/test dependencies"
bigto $SUDO dnf install -y "${INSTALL_PACKAGES[@]}"
bigto $SUDO dnf install -y $EXARG "${INSTALL_PACKAGES[@]}"
msg "Downloading packages for optional installation at runtime, as needed."
$SUDO mkdir -p "$PACKAGE_DOWNLOAD_DIR"
@ -235,6 +214,6 @@ $SUDO curl --fail --silent --location -O \
https://storage.googleapis.com/minikube/releases/latest/minikube-latest.x86_64.rpm
cd -
# Occasionally following an install, there are more updates available.
# This may be due to activation of suggested/recommended dependency resolution.
# It was observed in F33, dnf install doesn't always get you the latest/greatest
lilto $SUDO dnf update -y

View File

@ -17,12 +17,6 @@ fi
# shellcheck source=./lib.sh
source "$REPO_DIRPATH/lib.sh"
# Make /tmp tmpfs bigger, by default we only get 50%. Bump it to 75% so the tests have more storage.
# Do not use 100% so we do not run out of memory for the process itself if tests start leaking big
# files on /tmp.
$SUDO mkdir -p /etc/systemd/system/tmp.mount.d
echo -e "[Mount]\nOptions=size=75%%,mode=1777\n" | $SUDO tee /etc/systemd/system/tmp.mount.d/override.conf
# packer and/or a --build-arg define this envar value uniformly
# for both VM and container image build workflows.
req_env_vars PACKER_BUILD_NAME
@ -30,10 +24,17 @@ req_env_vars PACKER_BUILD_NAME
# shellcheck disable=SC2154
if [[ "$PACKER_BUILD_NAME" =~ "netavark" ]]; then
bash $SCRIPT_DIRPATH/fedora-netavark_packaging.sh
elif [[ "$PACKER_BUILD_NAME" =~ "podman-py" ]]; then
bash $SCRIPT_DIRPATH/fedora-podman-py_packaging.sh
elif [[ "$PACKER_BUILD_NAME" =~ "build-push" ]]; then
bash $SCRIPT_DIRPATH/build-push_packaging.sh
# Registers qemu emulation for non-native execution
$SUDO systemctl enable systemd-binfmt
for arch in amd64 s390x ppc64le arm64; do
msg "Caching latest $arch fedora image..."
$SUDO podman pull --quiet --arch=$arch \
registry.fedoraproject.org/fedora:$OS_RELEASE_VER
done
else
bash $SCRIPT_DIRPATH/fedora_packaging.sh
fi
@ -47,8 +48,6 @@ if ! ((CONTAINER)); then
else
msg "Enabling cgroup management from containers"
ooe.sh $SUDO setsebool -P container_manage_cgroup true
initialize_local_cache_registry
fi
fi

View File

@ -1,345 +0,0 @@
#! /bin/bash
#
# local-cache-registry - set up and manage a local registry with cached images
#
# Used in containers CI, to reduce exposure to registry flakes.
#
# We start with the docker registry image. Pull it, extract the registry
# binary and config, tweak the config, and create a systemd unit file that
# will start the registry at boot.
#
# We also populate that registry with a (hardcoded) list of container
# images used in CI tests. That way a CI VM comes up already ready,
# and CI tests do not need to do remote pulls. The image list is
# hardcoded right here in this script file, in the automation_images
# repo. See below for reasons.
#
ME=$(basename $0)
###############################################################################
# BEGIN defaults
# FQIN of registry image. From this image, we extract the registry to run.
PODMAN_REGISTRY_IMAGE=quay.io/libpod/registry:2.8.2
# Fixed path to registry setup. This is the directory used by the registry.
PODMAN_REGISTRY_WORKDIR=/var/cache/local-registry
# Fixed port on which registry listens. This is hardcoded and must be
# shared knowledge among all CI repos that use this registry.
REGISTRY_PORT=60333
# Podman binary to run
PODMAN=${PODMAN:-/usr/bin/podman}
# Temporary directories for podman, so we don't clobber any system files.
# Wipe them upon script exit.
PODMAN_TMPROOT=$(mktemp -d --tmpdir $ME.XXXXXXX)
trap 'status=$?; rm -rf $PODMAN_TMPROOT && exit $status' 0
# Images to cache. Default prefix is "quay.io/libpod/"
#
# It seems evil to hardcode this list as part of the script itself
# instead of a separate file or resource but there's a good reason:
# keeping code and data together in one place makes it possible for
# a podman (and some day other repo?) developer to run a single
# command, contrib/cirrus/get-local-registry-script, which will
# fetch this script and allow the dev to run it to start a local
# registry on their system.
#
# As of 2024-07-02 this list includes podman and buildah images
#
# FIXME: periodically run this to look for no-longer-needed images:
#
# for i in $(sed -ne '/IMAGELIST=/,/^[^ ]/p' <cache_images/local-cache-registry | sed -ne 's/^ *//p');do grep -q -R $i ../podman/test ../buildah/tests || echo "unused $i";done
#
declare -a IMAGELIST=(
alpine:3.10.2
alpine:latest
alpine_healthcheck:latest
alpine_nginx:latest
alpine@sha256:634a8f35b5f16dcf4aaa0822adc0b1964bb786fca12f6831de8ddc45e5986a00
alpine@sha256:f270dcd11e64b85919c3bab66886e59d677cf657528ac0e4805d3c71e458e525
alpine@sha256:fa93b01658e3a5a1686dc3ae55f170d8de487006fb53a28efcd12ab0710a2e5f
autoupdatebroken:latest
badhealthcheck:latest
busybox:1.30.1
busybox:glibc
busybox:latest
busybox:musl
cirros:latest
fedora/python-311:latest
healthcheck:config-only
k8s-pause:3.5
podman_python:latest
redis:alpine
registry:2.8.2
registry:volume_omitted
systemd-image:20240124
testartifact:20250206-single
testartifact:20250206-multi
testartifact:20250206-multi-no-title
testartifact:20250206-evil
testdigest_v2s2
testdigest_v2s2:20200210
testimage:00000000
testimage:00000004
testimage:20221018
testimage:20241011
testimage:multiimage
testimage@sha256:1385ce282f3a959d0d6baf45636efe686c1e14c3e7240eb31907436f7bc531fa
testdigest_v2s2:20200210
testdigest_v2s2@sha256:755f4d90b3716e2bf57060d249e2cd61c9ac089b1233465c5c2cb2d7ee550fdb
volume-plugin-test-img:20220623
podman/stable:v4.3.1
podman/stable:v4.8.0
skopeo/stable:latest
ubuntu:latest
)
# END defaults
###############################################################################
# BEGIN help messages
missing=" argument is missing; see $ME -h for details"
usage="Usage: $ME [options] [initialize | cache IMAGE...]
$ME manages a local instance of a container registry.
When called to initialize a registry, $ME will pull
this image into a local temporary directory:
$PODMAN_REGISTRY_IMAGE
...then extract the registry binary and config, tweak the config,
start the registry, and populate it with a list of images needed by tests:
\$ $ME initialize
To fetch individual images into the cache:
\$ $ME cache libpod/testimage:21120101
Override the default image and/or port with:
-i IMAGE registry image to pull (default: $PODMAN_REGISTRY_IMAGE)
-P PORT port to bind to (on 127.0.0.1) (default: $REGISTRY_PORT)
Other options:
-h display usage message
"
# Print an error message to stderr, prefixed with the script name ($ME),
# then abort the script with exit status 1.
die () {
    printf '%s: %s\n' "$ME" "$*" >&2
    exit 1
}
# END help messages
###############################################################################
# BEGIN option processing
# Parse command-line options (documented in $usage above):
#   -i IMAGE  override the registry image to pull
#   -P PORT   override the localhost port the registry binds to
#   -h        print usage and exit successfully
#   -v        set $verbose=1 (NOTE(review): no consumer of $verbose is
#             visible in this script — confirm it is actually used)
while getopts "i:P:hv" opt; do
    case "$opt" in
        i) PODMAN_REGISTRY_IMAGE=$OPTARG ;;
        P) REGISTRY_PORT=$OPTARG ;;
        h) echo "$usage"; exit 0;;
        v) verbose=1 ;;
        \?) echo "Run '$ME -h' for help" >&2; exit 1;;
    esac
done
# Discard the parsed options so $1 becomes the action argument below.
shift $((OPTIND-1))
# END option processing
###############################################################################
# BEGIN helper functions
# Wrapper around the real podman binary ($PODMAN) that redirects all state
# (storage root, runroot, tmpdir) into the throwaway $PODMAN_TMPROOT, so
# this script never clobbers the system's container storage.
# Because the function is named `podman`, every later bare `podman`
# invocation in this script resolves to this wrapper.
function podman() {
    ${PODMAN} --root ${PODMAN_TMPROOT}/root \
        --runroot ${PODMAN_TMPROOT}/runroot \
        --tmpdir ${PODMAN_TMPROOT}/tmp \
        "$@"
}
###############
#  must_pass  #  Run a command quietly; abort with error on failure
###############
#
# Usage: must_pass COMMAND [ARGS...]
#
# Runs COMMAND with all output captured into a log file under
# $PODMAN_TMPROOT; on failure, dumps that log to stderr and exits
# the whole script with status 1.
function must_pass() {
    local log=${PODMAN_TMPROOT}/log

    # Test the command directly in the `if` condition. The original
    # pattern (run, then check $? on the next line) silently stops
    # working when errexit is active — `set -e` is enabled inside
    # do_initialize() — because the shell would exit on the failing
    # command before the $? check, skipping these diagnostics.
    if ! "$@" &> "$log"; then
        echo "$ME: Command failed: $*" >&2
        cat "$log" >&2
        # If we ever get here, it's a given that the registry is not running.
        exit 1
    fi
}
###################
#  wait_for_port  #  Returns once port is available on localhost
###################
#
# Polls 127.0.0.1:$1 once per second, up to five attempts, using bash's
# /dev/tcp virtual paths. Dies (aborting the script) if the port never opens.
function wait_for_port() {
    local port=$1    # Numeric port
    local host=127.0.0.1
    local attempt

    for attempt in 1 2 3 4 5; do
        # Opening a read/write fd succeeds only if something is listening.
        { exec {unused_fd}<> /dev/tcp/$host/$port; } &>/dev/null && return
        sleep 1
    done

    die "Timed out waiting for port $port"
}
#################
#  cache_image  #  (singular) fetch one remote image
#################
#
# Copy one image (all architectures) from its upstream registry into the
# local cache registry via skopeo, retrying with growing delays on failure.
# NOTE(review): $registry is assigned (and $img rewritten) without `local`,
# so both leak into the caller's scope.
function cache_image() {
    local img=$1

    # Almost all our images are under libpod; no need to repeat that part
    # (expr exits nonzero when the name contains no "/" at all)
    if ! expr "$img" : "^\(.*\)/" >/dev/null; then
        img="libpod/$img"
    fi

    # Almost all our images are from quay.io, but "domain.tld" prefix overrides
    # (a first path component containing a dot is treated as a registry host)
    registry=$(expr "$img" : "^\([^/.]\+\.[^/]\+\)/" || true)
    if [[ -n "$registry" ]]; then
        # Strip the registry-host prefix from the image path
        img=$(expr "$img" : "[^/]\+/\(.*\)")
    else
        registry=quay.io
    fi

    echo
    echo "...caching: $registry / $img"

    # FIXME: inspect, and only pull if missing?
    # Four attempts total, sleeping 30/60/90s between them; the final "0"
    # entry sleeps 0s and falls through to die below.
    for retry in 1 2 3 0;do
        skopeo --registries-conf /dev/null \
            copy --all --dest-tls-verify=false \
            docker://$registry/$img \
            docker://127.0.0.1:${REGISTRY_PORT}/$img \
            && return

        sleep $((retry * 30))
    done

    die "Too many retries; unable to cache $registry/$img"
}
##################
#  cache_images  #  (plural) fetch all remote images
##################
#
# Walk the hardcoded IMAGELIST and cache each entry into the local registry.
function cache_images() {
    local idx
    for ((idx = 0; idx < ${#IMAGELIST[@]}; idx++)); do
        cache_image "${IMAGELIST[idx]}"
    done
}
# END helper functions
###############################################################################
# BEGIN action processing
###################
# do_initialize # Start, then cache images
###################
#
# Intended to be run only from automation_images repo, or by developer
# on local workstation. This should never be run from podman/buildah/etc
# because it defeats the entire purpose of the cache -- a dead registry
# will cause this to fail.
#
function do_initialize() {
    # This action can only be run as root
    if [[ "$(id -u)" != "0" ]]; then
        die "this script must be run as root"
    fi

    # For the next few commands, die on any error
    set -e

    mkdir -p ${PODMAN_REGISTRY_WORKDIR}

    # Copy of this script
    # (skipped when already executing from inside the workdir)
    if ! [[ $0 =~ ${PODMAN_REGISTRY_WORKDIR} ]]; then
        rm -f ${PODMAN_REGISTRY_WORKDIR}/$ME
        cp $0 ${PODMAN_REGISTRY_WORKDIR}/$ME
    fi

    # Give it three tries, to compensate for flakes
    # (`podman` here is the wrapper function using the temp root above)
    podman pull ${PODMAN_REGISTRY_IMAGE} &>/dev/null ||
        podman pull ${PODMAN_REGISTRY_IMAGE} &>/dev/null ||
            must_pass podman pull ${PODMAN_REGISTRY_IMAGE}

    # Mount the registry image...
    registry_root=$(podman image mount ${PODMAN_REGISTRY_IMAGE})
    # ...copy the registry binary into our own bin...
    cp ${registry_root}/bin/registry /usr/bin/docker-registry
    # ...and copy the config, making a few adjustments to it.
    # (retarget storage to the workdir; rebind :5000 -> 127.0.0.1:$REGISTRY_PORT)
    sed -e "s;/var/lib/registry;${PODMAN_REGISTRY_WORKDIR};" \
        -e "s;:5000;127.0.0.1:${REGISTRY_PORT};" \
        < ${registry_root}/etc/docker/registry/config.yml \
        > /etc/local-registry.yml
    podman image umount -a

    # Create a systemd unit file. Enable it (so it starts at boot)
    # and also start it --now.
    cat > /etc/systemd/system/$ME.service <<EOF
[Unit]
Description=Local Cache Registry for CI tests
[Service]
ExecStart=/usr/bin/docker-registry serve /etc/local-registry.yml
Type=exec
[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload
    systemctl enable --now $ME.service

    # Block until the registry answers on its port, then populate it.
    wait_for_port ${REGISTRY_PORT}

    cache_images
}
##############
#  do_cache  #  Cache one or more images
##############
#
# Validate that at least one image argument was given, then cache each.
function do_cache() {
    [[ -n "$*" ]] || die "missing args to 'cache'"

    local image
    for image in "$@"; do
        cache_image "$image"
    done
}
# END action processing
###############################################################################
# BEGIN command-line processing
# First command-line arg must be an action
# (${1?...} aborts with the "ACTION argument is missing..." message when absent)
action=${1?ACTION$missing}
shift

# Dispatch to the requested action; any remaining args are passed through.
case "$action" in
    init|initialize) do_initialize ;;
    cache) do_cache "$@" ;;
    *) die "Unknown action '$action'; must be init | cache IMAGE" ;;
esac
# END command-line processing
###############################################################################
exit 0

View File

@ -1,38 +0,0 @@
#!/bin/bash

# This script is called by packer on the rawhide VM, to update and reboot using
# the rawhide kernel. It's not intended to be used outside of this context.

set -e

# Resolve this script's location so lib.sh can be sourced relative to it.
SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")

# shellcheck source=./lib.sh
source "$REPO_DIRPATH/lib.sh"

# packer and/or a --build-arg define this envar value uniformly
# for both VM and container image build workflows.
req_env_vars PACKER_BUILD_NAME

warn "Upgrading Fedora '$OS_RELEASE_VER' to rawhide, this might break."
# NOTE(review): $SCRIPT_FILENAME is never set in this script (only
# SCRIPT_FILEPATH/SCRIPT_DIRPATH are) — presumably defined by lib.sh;
# verify, otherwise this warning expands to the bare directory path.
# shellcheck disable=SC2154
warn "If so, this script may be found in the repo. as '$SCRIPT_DIRPATH/$SCRIPT_FILENAME'."

# Show what's happening
set -x

# Rawhide often has GPG issues, don't bother checking
$SUDO sed -i -r -e 's/^gpgcheck=.+/gpgcheck=False/' /etc/dnf/dnf.conf
$SUDO sed -i -r -e 's/^gpgcheck=.+/gpgcheck=0/' /etc/yum.repos.d/*.repo

# Called as `dnf5` here to confirm "old" dnf has been replaced.
$SUDO dnf5 -y distro-sync --releasever=rawhide --allowerasing
$SUDO dnf5 upgrade -y

# A shared fedora_packaging.sh script is called next that doesn't always support dnf5
$SUDO ln -s $(type -P dnf5) /usr/local/bin/dnf

# Packer will try to run 'cache_images/fedora_setup.sh' next, make sure the system
# is actually running rawhide (and verify it boots).
$SUDO reboot

View File

@ -1,13 +1,8 @@
ARG BASE_NAME=registry.fedoraproject.org/fedora-minimal
# FIXME FIXME FIXME! 2023-11-16: revert "38" to "latest"
# ...38 is because as of this moment, latest is 39, which
# has python-3.12, which causes something to barf:
# aiohttp/_websocket.c:3744:45: error: PyLongObject {aka struct _longobject} has no member named ob_digit
# Possible cause: https://github.com/cython/cython/issues/5238
ARG BASE_TAG=38
ARG BASE_TAG=latest
FROM ${BASE_NAME}:${BASE_TAG} as updated_base
RUN microdnf upgrade -y && \
RUN microdnf update -y && \
microdnf clean all
ENV _RUNTIME_DEPS="bash python3"

View File

@ -0,0 +1,17 @@
{
"builds": [
{
"name": "fedora-podman-py",
"builder_type": "googlecompute",
"build_time": 1658176090,
"files": null,
"artifact_id": "fedora-podman-py-c5419329914142720",
"packer_run_uuid": "e5b1e6ab-37a5-a695-624d-47bf0060b272",
"custom_data": {
"IMG_SFX": "5419329914142720",
"STAGE": "cache"
}
}
],
"last_run_uuid": "e5b1e6ab-37a5-a695-624d-47bf0060b272"
}

View File

@ -1,36 +0,0 @@
#!/bin/bash
#
# 2024-01-25 esm
# 2024-06-28 cevich
#
# This script is intended to be used by the `pre-commit` utility, or it may
# be manually copied (or symlinked) as local `.git/hooks/pre-push` file.
# Its purpose is to keep track of image-suffix values which have already
# been pushed, to avoid them being immediately rejected by CI validation.
# To use it with the `pre-commit` utility, simply add something like this
# to your `.pre-commit-config.yaml`:
#
# ---
# repos:
#     - repo: https://github.com/containers/automation_images.git
#       rev: <tag or commit sha>
#       hooks:
#           - id: check-imgsfx

set -eo pipefail

# Ensure CWD is the repo root (this script lives there); quoted in case
# the checkout path contains spaces.
cd "$(dirname "${BASH_SOURCE[0]}")"

imgsfx=$(<IMG_SFX)
imgsfx_history=".git/hooks/imgsfx.history"

if [[ -e $imgsfx_history ]]; then
    # Match whole lines as fixed strings: a plain `grep -q "$imgsfx"`
    # treats the value as a regex and matches substrings, so a new suffix
    # that happens to be contained in (or regex-match) an older history
    # entry would be falsely rejected.
    if grep -qxF "$imgsfx" "$imgsfx_history"; then
        echo "FATAL: $imgsfx has already been used" >&2
        echo "Please rerun 'make IMG_SFX'" >&2
        exit 1
    fi
fi

# Record the suffix so any future attempt to reuse it is caught above.
echo "$imgsfx" >> "$imgsfx_history"

View File

@ -1,4 +1,4 @@
# This Containerfile defines the environment for Cirrus-CI when
# This dockerfile defines the environment for Cirrus-CI when
# running automated checks and tests. It may also be used
# for development/debugging or manually building most
# Makefile targets.
@ -8,17 +8,17 @@ FROM registry.fedoraproject.org/fedora:${FEDORA_RELEASE}
ARG PACKER_VERSION
MAINTAINER https://github.com/containers/automation_images/ci
ENV CIRRUS_WORKING_DIR=/var/tmp/automation_images \
ENV CIRRUS_WORKING_DIR=/tmp/automation_images \
PACKER_INSTALL_DIR=/usr/local/bin \
PACKER_VERSION=$PACKER_VERSION \
CONTAINER=1
# When using the containerfile-as-ci feature of Cirrus-CI, it's unsafe
# When using the dockerfile-as-ci feature of Cirrus-CI, it's unsafe
# to rely on COPY or ADD instructions. See documentation for warning.
RUN test -n "$PACKER_VERSION"
RUN dnf update -y && \
dnf -y mark dependency $(rpm -qa | grep -Ev '(gpg-pubkey)|(dnf)|(sudo)') && \
dnf install -y \
dnf mark remove $(rpm -qa | grep -Ev '(gpg-pubkey)|(dnf)|(sudo)') && \
dnf install -y --exclude selinux-policy-targeted \
ShellCheck \
bash-completion \
coreutils \
@ -38,7 +38,7 @@ RUN dnf update -y && \
util-linux \
unzip \
&& \
dnf -y mark user dnf sudo $_ && \
dnf mark install dnf sudo $_ && \
dnf autoremove -y && \
dnf clean all

View File

@ -35,14 +35,6 @@ if [[ -n "$AWS_INI" ]]; then
set_aws_filepath
fi
id
# FIXME: ssh-keygen seems to fail to create keys with Permission denied
# in the base_images make target, I have no idea why but all CI jobs are
# broken because of this. Let's try without selinux.
if [[ "$(getenforce)" == "Enforcing" ]]; then
setenforce 0
fi
set -x
cd "$REPO_DIRPATH"
export IMG_SFX=$IMG_SFX

View File

@ -44,6 +44,13 @@ SRC_FQIN="$TARGET_NAME:$IMG_SFX"
make "$TARGET_NAME" IMG_SFX=$IMG_SFX
# Prevent pushing 'latest' images from PRs, only branches and tags
# shellcheck disable=SC2154
if [[ $PUSH_LATEST -eq 1 ]] && [[ -n "$CIRRUS_PR" ]]; then
echo -e "\nWarning: Refusing to push 'latest' images when testing from a PR.\n"
PUSH_LATEST=0
fi
# Don't leave credential file sticking around anywhere
trap "podman logout --all" EXIT INT CONT
set +x # protect username/password values
@ -57,3 +64,9 @@ set -x # Easier than echo'ing out status for everything
# shellcheck disable=SC2154
podman tag "$SRC_FQIN" "$DEST_FQIN"
podman push "$DEST_FQIN"
if ((PUSH_LATEST)); then
LATEST_FQIN="${DEST_FQIN%:*}:latest"
podman tag "$SRC_FQIN" "$LATEST_FQIN"
podman push "$LATEST_FQIN"
fi

View File

@ -1,36 +0,0 @@
#!/bin/bash
set -eo pipefail
if [[ -z "$CI" ]] || [[ "$CI" != "true" ]] || [[ -z "$IMG_SFX" ]]; then
echo "This script is intended to be run by CI and nowhere else."
exit 1
fi
# This envar is set by the CI system
# shellcheck disable=SC2154
if [[ "$CIRRUS_CHANGE_MESSAGE" =~ .*CI:DOCS.* ]]; then
echo "This script must never tag anything after a [CI:DOCS] PR merge"
exit 0
fi
# Ensure no secrets leak via debugging var expansion
set +x
# This secret envar is set by the CI system
# shellcheck disable=SC2154
echo "$REG_PASSWORD" | \
skopeo login --password-stdin --username "$REG_USERNAME" "$REGPFX"
declare -a imgnames
imgnames=( imgts imgobsolete imgprune gcsupld get_ci_vm orphanvms ccia )
# A [CI:TOOLING] build doesn't produce CI VM images
if [[ ! "$CIRRUS_CHANGE_MESSAGE" =~ .*CI:TOOLING.* ]]; then
imgnames+=( skopeo_cidev fedora_podman prior-fedora_podman )
fi
for imgname in "${imgnames[@]}"; do
echo "##### Tagging $imgname -> latest"
# IMG_SFX is defined by CI system
# shellcheck disable=SC2154
skopeo copy "docker://$REGPFX/$imgname:c${IMG_SFX}" "docker://$REGPFX/${imgname}:latest"
done

View File

@ -13,24 +13,12 @@ REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
# shellcheck source=./lib.sh
source "$REPO_DIRPATH/lib.sh"
req_env_vars CIRRUS_PR CIRRUS_PR_TITLE CIRRUS_USER_PERMISSION CIRRUS_BASE_BRANCH
show_env_vars
req_env_vars CIRRUS_PR CIRRUS_BASE_SHA CIRRUS_CHANGE_TITLE
# die() will add a reference to this file and line number.
[[ "$CIRRUS_CI" == "true" ]] || \
die "This script is only/ever intended to be run by Cirrus-CI."
# This is imperfect security-wise, but attempt to catch an accidental
# change in Cirrus-CI Repository settings. Namely the hard-to-read
# "slider" that enables non-contributors to run jobs. We don't want
# that on this repo, ever, because there are sensitive secrets in use.
# This variable is set by CI and validated non-empty above
# shellcheck disable=SC2154
if [[ "$CIRRUS_USER_PERMISSION" != "write" ]] && [[ "$CIRRUS_USER_PERMISSION" != "admin" ]]; then
die "CI Execution not supported with permission level '$CIRRUS_USER_PERMISSION'"
fi
for target in image_builder/gce.json base_images/cloud.json \
cache_images/cloud.json win_images/win-server-wsl.json; do
if ! make $target; then
@ -44,28 +32,18 @@ if [[ -z "$CIRRUS_PR" ]]; then
exit 0
fi
# For Docs-only PRs, no further checks are needed
# Variable is defined by Cirrus-CI at runtime
# shellcheck disable=SC2154
if [[ "$CIRRUS_PR_TITLE" =~ CI:DOCS ]]; then
msg "This looks like a docs-only PR, skipping further validation checks."
exit 0
fi
if [[ ! "$CIRRUS_CHANGE_TITLE" =~ CI:DOCS ]] && \
! git diff --name-only ${CIRRUS_BASE_SHA}..HEAD | grep -q IMG_SFX; then
# Fix "Not a valid object name main" error from Cirrus's
# incomplete checkout.
git remote update origin
# Determine where PR branched off of $CIRRUS_BASE_BRANCH
# shellcheck disable=SC2154
base_sha=$(git merge-base origin/${CIRRUS_BASE_BRANCH:-main} HEAD)
if ! git diff --name-only ${base_sha}..HEAD | grep -q IMG_SFX; then
die "Every PR that builds images must include an updated IMG_SFX file.
Simply run 'make IMG_SFX', commit the result, and re-push."
else
IMG_SFX="$(<./IMG_SFX)"
# IMG_SFX was modified vs PR's base-branch, confirm version moved forward
v_prev=$(git show ${base_sha}:IMG_SFX 2>&1 || true)
# shellcheck disable=SC2154
v_prev=$(git show ${CIRRUS_BASE_SHA}:IMG_SFX 2>&1 || true)
# Verify new IMG_SFX value always version-sorts later than previous value.
# This prevents screwups due to local timezone, bad, or unset clocks, etc.
new_img_ver=$(awk -F 't' '{print $1"."$2}'<<<"$IMG_SFX" | cut -dz -f1)

View File

@ -1,43 +0,0 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
# Generic file-hygiene checks: whitespace, EOF newlines, YAML syntax,
# accidental large/binary additions, symlinks, and line endings.
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-added-large-files
- id: check-symlinks
- id: mixed-line-ending
# Prevent accidental commits directly onto the 'main' branch.
- id: no-commit-to-branch
args: [--branch, main]
# Spell-checking for source and docs; config lives in .codespellrc.
- repo: https://github.com/codespell-project/codespell
rev: v2.3.0
hooks:
- id: codespell
args: [--config, .codespellrc]
- repo: https://github.com/jumanjihouse/pre-commit-hooks
rev: 3.0.0
hooks:
# Forbid committing binaries, except the listed test fixture tarball.
- id: forbid-binary
exclude: >
(?x)^(
get_ci_vm/good_repo_test/dot_git.tar.gz
)$
- id: script-must-have-extension
- id: shellcheck
# These come from ci/shellcheck.sh
args:
- --color=always
- --format=tty
- --shell=bash
- --external-sources
- --enable=add-default-case,avoid-nullary-conditions,check-unassigned-uppercase
- --exclude=SC2046,SC2034,SC2090,SC2064
- --wiki-link-count=0
- --severity=warning
# Hook hosted by this repo itself (pinned by commit SHA, not a tag).
- repo: https://github.com/containers/automation_images.git
rev: 2e5a2acfe21cc4b13511b453733b8875e592ad9c
hooks:
- id: check-imgsfx

View File

@ -1,13 +1,14 @@
# This is a listing of Google Cloud Platform Project IDs for
# orphan VM monitoring and possibly other automation tasks.
# Note: CI VM images produced by this repo are all stored within
# the libpod-218412 project (in addition to some AWS EC2)
# This is a listing of GCP Project IDs which use images produced by
# this repo. It's used by the "Orphan VMs" github action to monitor
# for any leftover/lost VMs.
buildah
conmon-222014
containers-build-source-image
dnsname-8675309
libpod-218412
netavark-2021
oci-seccomp-bpf-hook
podman-py
skopeo
storage-240716
udica-247612

View File

@ -19,7 +19,8 @@ RUN bash ./get_ci_vm/setup.sh
# conflicts.
ADD /get_ci_vm/entrypoint.sh ./get_ci_vm/
ENTRYPOINT ["/usr/bin/ssh-agent", "/bin/bash", "/usr/src/automation_images/get_ci_vm/entrypoint.sh"]
# Add this late to optimize cache efficacy for development workflows
ENTRYPOINT ["/bin/bash", "/usr/src/automation_images/get_ci_vm/entrypoint.sh"]
WORKDIR "/root"
ENV HOME="/root" \
SRCDIR="" \

View File

@ -5,36 +5,6 @@ This directory contains the source for building [the
This image is used by many containers-org repos' `hack/get_ci_vm.sh` script.
It is not intended to be called via any other mechanism.
In general/high-level terms, the architecture and operation is:
1. [containers/automation hosts cirrus-ci_env](https://github.com/containers/automation/tree/main/cirrus-ci_env),
a python mini-implementation of a `.cirrus.yml` parser. Its only job is to extract all required envars,
given a task name (including from a matrix element). It's highly dependent on
[certain YAML formatting requirements](README.md#downstream-repository-cirrusyml-requirements). If the target
repo. doesn't follow those standards, nasty/ugly python errors will vomit forth. Mainly this has to do with
Cirrus-CI's use of a non-standard YAML parser, allowing things like certain duplicate dictionary keys.
1. [containers/automation_images hosts get_ci_vm](https://github.com/containers/automation_images/tree/main/get_ci_vm),
a bundling of the `cirrus-ci_env` python script with an `entrypoint.sh` script inside a container image.
1. When a user runs `hack/get_ci_vm.sh` inside a target repo, the container image is entered, and `.cirrus.yml`
is parsed based on the CLI task-name. A VM is then provisioned based on specific envars (see the "Env. Vars."
entries in the sections for [APIv1](README.md#env-vars) and [APIv2](README.md#env-vars-1) sections below).
This is the most complex part of the process.
1. The remote system will not have **any** of the otherwise automatic Cirrus-CI operations performed (like "clone")
nor any magic CI variables defined. Having a VM ready, the container entrypoint script transfers a copy of
the local repo (including any uncommitted changes).
1. The container entrypoint script then performs **_remote_** execution of the `hack/get_ci_vm.sh` script
including the magic `--setup` parameter. Though it varies by repo, typically this will establish everything
necessary to simulate a CI environment, via a call to the repo's own `setup.sh` or equivalent. Typically
The repo's setup scripts will persist any required envars into a `/etc/ci_environment` or similar. Though
this isn't universal.
1. Lastly, the user is dropped into a shell on the VM, inside the repo copy, with all envars defined and
ready to start running tests.
_Note_: If there are any envars found to be missing, they must be defined by updating either the repo normal CI
setup scripts (preferred), or in the `hack/get_ci_vm.sh` `--setup` section.
# Building
Example build (from repository root):
```bash

View File

@ -66,9 +66,9 @@ delvm() {
}
image_hints() {
_BIS=$(grep -E -m 1 '_BUILT_IMAGE_SUFFIX:[[:space:]+"[[:print:]]+"' \
_BIS=$(egrep -m 1 '_BUILT_IMAGE_SUFFIX:[[:space:]+"[[:print:]]+"' \
"$SECCOMPHOOKROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]')
grep -E '[[:space:]]+[[:alnum:]].+_CACHE_IMAGE_NAME:[[:space:]+"[[:print:]]+"' \
egrep '[[:space:]]+[[:alnum:]].+_CACHE_IMAGE_NAME:[[:space:]+"[[:print:]]+"' \
"$SECCOMPHOOKROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]' | \
sed -r -e "s/\\\$[{]_BUILT_IMAGE_SUFFIX[}]/$_BIS/" | sort -u
}
@ -141,7 +141,7 @@ cd $SECCOMPHOOKROOT
# Attempt to determine if named 'oci-seccomp-bpf-hook' gcloud configuration exists
showrun $PGCLOUD info > $TMPDIR/gcloud-info
if grep -E -q "Account:.*None" $TMPDIR/gcloud-info
if egrep -q "Account:.*None" $TMPDIR/gcloud-info
then
echo -e "\n${YEL}WARNING: Can't find gcloud configuration for 'oci-seccomp-bpf-hook', running init.${NOR}"
echo -e " ${RED}Please choose '#1: Re-initialize' and 'login' if asked.${NOR}"
@ -151,7 +151,7 @@ then
# Verify it worked (account name == someone@example.com)
$PGCLOUD info > $TMPDIR/gcloud-info-after-init
if grep -E -q "Account:.*None" $TMPDIR/gcloud-info-after-init
if egrep -q "Account:.*None" $TMPDIR/gcloud-info-after-init
then
echo -e "${RED}ERROR: Could not initialize 'oci-seccomp-bpf-hook' configuration in gcloud.${NOR}"
exit 5

View File

@ -235,7 +235,7 @@ has_valid_aws_credentials() {
_awsoutput=$($AWSCLI configure list 2>&1 || true)
dbg "$AWSCLI configure list"
dbg "$_awsoutput"
if grep -E -qx 'The config profile.+could not be found'<<<"$_awsoutput"; then
if egrep -qx 'The config profile.+could not be found'<<<"$_awsoutput"; then
dbg "AWS config/credentials are missing"
return 1
elif [[ ! -r "$EC2_SSH_KEY" ]] || [[ ! -r "${EC2_SSH_KEY}.pub" ]]; then
@ -413,9 +413,6 @@ make_setup_tarball() {
status "Preparing setup tarball for instance."
req_env_vars DESTDIR _TMPDIR SRCDIR UPSTREAM_REPO
mkdir -p "${_TMPDIR}$DESTDIR"
# Mark the volume-mounted source repo as safe system-wide (w/in the container)
git config --global --add safe.directory "$SRCDIR"
git config --global --add safe.directory "$SRCDIR/.git"
# We have no way of knowing what state or configuration the user's
# local repository is in. Work from a local clone, so we can
# specify our own setup and prevent unexpected script breakage.
@ -518,8 +515,8 @@ init_gcevm() {
DNS_NAME=$INST_NAME # gcloud compute ssh wrapper will resolve this
GCLOUD="${GCLOUD:-gcloud} --configuration=$GCLOUD_CFG --project=$GCLOUD_PROJECT"
_args="--force-key-file-overwrite --strict-host-key-checking=no --zone=$GCLOUD_ZONE"
SSH_CMD="$GCLOUD compute ssh --ssh-flag=-o=AddKeysToAgent=yes $_args root@$DNS_NAME --"
SCP_CMD="$GCLOUD compute scp --scp-flag=-o=AddKeysToAgent=yes $_args"
SSH_CMD="$GCLOUD compute ssh $_args root@$DNS_NAME --"
SCP_CMD="$GCLOUD compute scp $_args"
CREATE_CMD="$GCLOUD compute instances create \
--zone=$GCLOUD_ZONE --image-project=$GCLOUD_IMGPROJECT \
--image=$INST_IMAGE --custom-cpu=$GCLOUD_CPUS \
@ -536,6 +533,9 @@ init_gcevm() {
Can't find valid GCP credentials, attempting to (re)initialize.
If asked, please choose '#1: Re-initialize', 'login', and a nearby
GCLOUD_ZONE, otherwise simply follow the prompts.
Note: If asked to set a SSH-key passphrase, DO NOT SET ONE, it
will make your life miserable! Set an empty password for the key.
"
$GCLOUD init --project=$GCLOUD_PROJECT --console-only --skip-diagnostics
if ! has_valid_gcp_credentials; then

View File

@ -2,9 +2,9 @@
# This script is intended to be executed as part of the container
# image build process. Using it under any other context is virtually
# guaranteed to cause you much pain and suffering.
# guaranteed to cause you much pain and suffering.
set -xeo pipefail
set -eo pipefail
SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
@ -14,7 +14,6 @@ source "$REPO_DIRPATH/lib.sh"
declare -a PKGS
PKGS=( \
aws-cli
coreutils
curl
gawk
@ -31,7 +30,9 @@ apk upgrade
apk add --no-cache "${PKGS[@]}"
rm -rf /var/cache/apk/*
aws --version # Confirm that aws actually runs
pip3 install --upgrade pip
pip3 install --no-cache-dir awscli
aws --version # Confirm it actually runs
install_automation_tooling cirrus-ci_env

View File

@ -78,7 +78,7 @@ testf() {
echo "# $@" > /dev/stderr
fi
# Using grep -E vs file safer than shell builtin test
# Using egrep vs file safer than shell builtin test
local a_out_f
local a_exit=0
a_out_f=$(mktemp -p '' "tmp_${FUNCNAME[0]}_XXXXXXXX")
@ -109,7 +109,7 @@ testf() {
if ((TEST_DEBUG)); then
echo "Received $(wc -l $a_out_f | awk '{print $1}') output lines of $(wc -c $a_out_f | awk '{print $1}') bytes total"
fi
if grep -E -q "$e_out_re" "${a_out_f}.oneline"; then
if egrep -q "$e_out_re" "${a_out_f}.oneline"; then
_test_report "Command $1 exited as expected with expected output" "0" "$a_out_f"
else
_test_report "Expecting regex '$e_out_re' match to (whitespace-squashed) output" "1" "$a_out_f"

View File

@ -67,7 +67,7 @@ else
fi
# Support both '.CHECKSUM' and '-CHECKSUM' at the end
filename=$(grep -E -i -m 1 -- "$extension$" <<<"$by_arch" || true)
filename=$(egrep -i -m 1 -- "$extension$" <<<"$by_arch" || true)
[[ -n "$filename" ]] || \
die "No '$extension' targets among $by_arch"

View File

@ -4,7 +4,7 @@
# at the root of this repository. It should be built with
# the repository root as the context directory.
ARG CENTOS_STREAM_RELEASE=9
ARG CENTOS_STREAM_RELEASE=8
FROM quay.io/centos/centos:stream${CENTOS_STREAM_RELEASE}
ARG PACKER_VERSION
MAINTAINER https://github.com/containers/automation_images/image_builder

View File

@ -45,16 +45,16 @@ provisioners:
- type: 'shell'
inline:
- 'set -e'
- 'mkdir -p /var/tmp/automation_images'
- 'mkdir -p /tmp/automation_images'
- type: 'file'
source: '{{ pwd }}/'
destination: '/var/tmp/automation_images/'
destination: '/tmp/automation_images/'
- type: 'shell'
inline:
- 'set -e'
- '/bin/bash /var/tmp/automation_images/image_builder/setup.sh'
- '/bin/bash /tmp/automation_images/image_builder/setup.sh'
post-processors:
# Must be double-nested to guarantee execution order

View File

@ -1,9 +1,16 @@
# Copy-pasted from https://cloud.google.com/sdk/docs/install#red-hatfedoracentos
[google-cloud-cli]
name=Google Cloud CLI
baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el9-x86_64
[google-compute-engine]
name=Google Compute Engine
baseurl=https://packages.cloud.google.com/yum/repos/google-compute-engine-el8-x86_64-stable
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
[google-cloud-sdk]
name=Google Cloud SDK
baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el8-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg

View File

@ -23,19 +23,6 @@ source "$REPO_DIRPATH/lib.sh"
dnf update -y
dnf -y install epel-release
# Allow erasing pre-installed curl-minimal package
dnf install -y --allowerasing $(<"$INST_PKGS_FP")
# As of 2024-04-24 installing the EPEL `awscli` package results in error:
# nothing provides python3.9dist(docutils) >= 0.10
# Grab the binary directly from amazon instead
# https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
AWSURL="https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip"
cd /tmp
curl --fail --location -O "${AWSURL}"
# There's little reason to see every single file extracted
unzip -q awscli*.zip
./aws/install -i /usr/local/share/aws-cli -b /usr/local/bin
rm -rf awscli*.zip ./aws
dnf install -y $(<"$INST_PKGS_FP")
install_automation_tooling

View File

@ -1,3 +1,4 @@
awscli
buildah
bash-completion
curl
@ -5,13 +6,12 @@ findutils
gawk
genisoimage
git
google-cloud-cli
google-cloud-sdk
jq
libvirt
libvirt-admin
libvirt-client
libvirt-daemon
libxcrypt-compat
make
openssh
openssl
@ -24,7 +24,6 @@ rng-tools
rootfiles
rsync
sed
skopeo
tar
unzip
util-linux

View File

@ -11,13 +11,13 @@ set -eo pipefail
# shellcheck source=imgts/lib_entrypoint.sh
source /usr/local/bin/lib_entrypoint.sh
req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI IMG_SFX
req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI
gcloud_init
# Set this to 1 for testing
DRY_RUN="${DRY_RUN:-0}"
OBSOLETE_LIMIT=50
OBSOLETE_LIMIT=10
THEFUTURE=$(date --date='+1 hour' +%s)
TOO_OLD_DAYS='30'
TOO_OLD_DESC="$TOO_OLD_DAYS days ago"
@ -40,8 +40,8 @@ $GCLOUD compute images list --format="$FORMAT" --filter="$FILTER" | \
count_image
reason=""
created_ymd=$(date --date=$creationTimestamp --iso-8601=date)
permanent=$(grep -E --only-matching --max-count=1 --ignore-case 'permanent=true' <<< $labels || true)
last_used=$(grep -E --only-matching --max-count=1 'last-used=[[:digit:]]+' <<< $labels || true)
permanent=$(egrep --only-matching --max-count=1 --ignore-case 'permanent=true' <<< $labels || true)
last_used=$(egrep --only-matching --max-count=1 'last-used=[[:digit:]]+' <<< $labels || true)
LABELSFX="labels: '$labels'"
@ -54,14 +54,6 @@ $GCLOUD compute images list --format="$FORMAT" --filter="$FILTER" | \
continue
fi
# Any image matching the currently in-use IMG_SFX must always be preserved
# Value is defined in cirrus.yml
# shellcheck disable=SC2154
if [[ "$name" =~ $IMG_SFX ]]; then
msg "Retaining current (latest) image $name | $labels"
continue
fi
# No label was set
if [[ -z "$last_used" ]]
then # image lacks any tracking labels
@ -126,7 +118,7 @@ for (( i=nr_amis ; i ; i-- )); do
dep=$(jq -r -e ".DEP"<<<"$ami")
unset tags
# The name-tag is easier on human eyes if one is set.
# The name-tag is easier on human eyes if one is set.
name="$ami_id"
if name_tag=$(get_tag_value "Name" "$ami"); then
name="$name_tag"
@ -147,23 +139,13 @@ for (( i=nr_amis ; i ; i-- )); do
done
unset automation permanent reason
automation=$(grep -E --only-matching --max-count=1 \
automation=$(egrep --only-matching --max-count=1 \
--ignore-case 'automation=true' <<< $tags || true)
permanent=$(grep -E --only-matching --max-count=1 \
permanent=$(egrep --only-matching --max-count=1 \
--ignore-case 'permanent=true' <<< $tags || true)
if [[ -n "$permanent" ]]; then
msg "Retaining forever $name | $tags"
# Permanent AMIs should never ever have a deprecation date set
$AWS ec2 disable-image-deprecation --image-id "$ami_id" > /dev/null
continue
fi
# Any image matching the currently in-use IMG_SFX
# must always be preserved. Values are defined in cirrus.yml
# shellcheck disable=SC2154
if [[ "$name" =~ $IMG_SFX ]]; then
msg "Retaining current (latest) image $name | $tags"
continue
fi
@ -175,6 +157,12 @@ for (( i=nr_amis ; i ; i-- )); do
continue
fi
# Avoid perpetually updating the depreciation date on already deprecated AMIs
if [[ $dep != null ]]; then
echo "Skipping '$ami_id' already deprecated on '$dep'"
continue
fi
unset lltvalue last_used_timestamp last_used_ymd
if lltvalue=$("${lltcmd[@]}" $ami_id | jq -r -e ".Value") && [[ -n "$lltvalue" ]]; then
last_used_timestamp=$(date --date="$lltvalue" +%s)
@ -192,24 +180,18 @@ for (( i=nr_amis ; i ; i-- )); do
continue
else
msg "Retaining $ami_id | $created_ymd | $state | $tags"
if [[ "$dep" != "null" ]]; then
msg " Removing previously set AMI deprecation timestamp: $dep"
# Ignore confirmation output.
$AWS ec2 disable-image-deprecation --image-id "$ami_id" > /dev/null
fi
fi
done
COUNT=$(<"$IMGCOUNT")
CANDIDATES=$(wc -l <$TOOBSOLETE)
msg "########################################################################"
msg "Obsoleting $OBSOLETE_LIMIT random image candidates ($CANDIDATES/$COUNT total):"
msg "Obsoleting $OBSOLETE_LIMIT random images of $COUNT examined:"
# Require a minimum number of images to exist. Also if there is some
# horrible scripting accident, this limits the blast-radius.
if [[ "$CANDIDATES" -lt $OBSOLETE_LIMIT ]]
if [[ "$COUNT" -lt $OBSOLETE_LIMIT ]]
then
die 0 "Safety-net Insufficient images ($CANDIDATES) to process ($OBSOLETE_LIMIT required)"
die 0 "Safety-net Insufficient images ($COUNT) to process ($OBSOLETE_LIMIT required)"
fi
# Don't let one bad apple ruin the whole bunch

View File

@ -11,14 +11,14 @@ set -e
# shellcheck source=imgts/lib_entrypoint.sh
source /usr/local/bin/lib_entrypoint.sh
req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI IMG_SFX
req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI
gcloud_init
# Set this to 1 for testing
DRY_RUN="${DRY_RUN:-0}"
# For safety's sake limit nr deletions
DELETE_LIMIT=50
DELETE_LIMIT=10
ABOUTNOW=$(date --iso-8601=date) # precision is not needed for this use
# Format Ref: https://cloud.google.com/sdk/gcloud/reference/topic/formats
# Field list from `gcloud compute images list --limit=1 --format=text`
@ -39,20 +39,11 @@ $GCLOUD compute images list --show-deprecated \
do
count_image
reason=""
permanent=$(grep -E --only-matching --max-count=1 --ignore-case 'permanent=true' <<< $labels || true)
permanent=$(egrep --only-matching --max-count=1 --ignore-case 'permanent=true' <<< $labels || true)
[[ -z "$permanent" ]] || \
die 1 "Refusing to delete a deprecated image labeled permanent=true. Please use gcloud utility to set image active, then research the cause of deprecation."
[[ "$dep_state" == "OBSOLETE" ]] || \
die 1 "Unexpected depreciation-state encountered for $name: $dep_state; labels: $labels"
# Any image matching the currently in-use IMG_SFX must always be preserved.
# Values are defined in cirrus.yml
# shellcheck disable=SC2154
if [[ "$name" =~ $IMG_SFX ]]; then
msg " Skipping current (latest) image $name"
continue
fi
reason="Obsolete as of $del_date; labels: $labels"
echo "GCP $name $reason" >> $TODELETE
done
@ -81,22 +72,7 @@ for (( i=nr_amis ; i ; i-- )); do
if permanent=$(get_tag_value "permanent" "$ami") && \
[[ "$permanent" == "true" ]]
then
warn 0 "Found permanent image '$ami_id' with deprecation '$dep_ymd'. Clearing deprecation date."
$AWS ec2 disable-image-deprecation --image-id "$ami_id" > /dev/null
continue
fi
unset name
if ! name=$(get_tag_value "Name" "$ami"); then
warn 0 " EC2 AMI ID '$ami_id' is missing a 'Name' tag"
fi
# Any image matching the currently in-use IMG_SFX
# must always be preserved.
if [[ "$name" =~ $IMG_SFX ]]; then
warn 0 " Retaining current (latest) image $name id $ami_id"
$AWS ec2 disable-image-deprecation --image-id "$ami_id" > /dev/null
continue
die 1 "Found image '$ami_id' labeled permanent=true with deprecation set for '$dep_ymd'. This should never happen, manual intervention required."
fi
if [[ $(echo -e "$ABOUTNOW\n$dep_ymd" | sort | tail -1) == "$ABOUTNOW" ]]; then
@ -106,14 +82,13 @@ for (( i=nr_amis ; i ; i-- )); do
done
COUNT=$(<"$IMGCOUNT")
CANDIDATES=$(wc -l <$TODELETE)
msg "########################################################################"
msg "Deleting up to $DELETE_LIMIT random image candidates ($CANDIDATES/$COUNT total)::"
msg "Deleting up to $DELETE_LIMIT random images of $COUNT examined:"
# Require a minimum number of images to exist
if [[ "$CANDIDATES" -lt $DELETE_LIMIT ]]
if [[ "$COUNT" -lt $DELETE_LIMIT ]]
then
die 0 "Safety-net Insufficient images ($CANDIDATES) to process deletions ($DELETE_LIMIT required)"
die 0 "Safety-net Insufficient images ($COUNT) to process deletions ($DELETE_LIMIT required)"
fi
sort --random-sort $TODELETE | tail -$DELETE_LIMIT | \

View File

@ -1,11 +1,11 @@
ARG CENTOS_STREAM_RELEASE=9
ARG CENTOS_STREAM_RELEASE=8
FROM quay.io/centos/centos:stream${CENTOS_STREAM_RELEASE}
# Only needed for installing build-time dependencies
COPY /imgts/google-cloud-sdk.repo /etc/yum.repos.d/google-cloud-sdk.repo
RUN dnf -y update && \
dnf -y install epel-release && \
dnf -y install python3 jq libxcrypt-compat && \
dnf -y install python3 jq && \
dnf -y install google-cloud-sdk && \
dnf clean all

View File

@ -181,10 +181,6 @@ if [[ -n "$EC2IMGNAMES" ]]; then
else
msg "${DRPREFIX}Updated image $image ($amiid) metadata."
fi
# Ensure image wasn't previously marked as deprecated. Ignore
# confirmation output.
$AWS ec2 disable-image-deprecation --image-id "$amiid" > /dev/null
done
fi

View File

@ -1,9 +1,19 @@
# Copy-pasted from https://cloud.google.com/sdk/docs/install#red-hatfedoracentos
# From https://github.com/GoogleCloudPlatform/compute-image-packages
[google-compute-engine]
name=Google Compute Engine
baseurl=https://packages.cloud.google.com/yum/repos/google-compute-engine-el8-x86_64-stable
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
[google-cloud-cli]
name=Google Cloud CLI
baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el9-x86_64
# From https://cloud.google.com/sdk/docs/install#rpm
[google-cloud-sdk]
name=Google Cloud SDK
baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el8-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg

View File

@ -5,7 +5,7 @@ set -e
RED="\e[1;31m"
YEL="\e[1;33m"
NOR="\e[0m"
SENTINEL="__unknown__" # default set in Containerfile
SENTINEL="__unknown__" # default set in dockerfile
# Disable all input prompts
# https://cloud.google.com/sdk/docs/scripting-gcloud
GCLOUD="gcloud --quiet"
@ -55,7 +55,7 @@ gcloud_init() {
then
TMPF="$1"
else
TMPF=$(mktemp -p '' .XXXXXXXX)
TMPF=$(mktemp -p '' .$(uuidgen)_XXXX.json)
trap "rm -f $TMPF &> /dev/null" EXIT
# Required variable must be set by caller
# shellcheck disable=SC2154
@ -77,7 +77,7 @@ aws_init() {
then
TMPF="$1"
else
TMPF=$(mktemp -p '' .XXXXXXXX)
TMPF=$(mktemp -p '' .$(uuidgen)_XXXX.ini)
fi
# shellcheck disable=SC2154
echo "$AWSINI" > $TMPF

94
import_images/README.md Normal file
View File

@ -0,0 +1,94 @@
# Semi-manual image imports
## Overview
[Due to a bug in
packer](https://github.com/hashicorp/packer-plugin-amazon/issues/264) and
the sheer complexity of EC2 image imports, this process is impractical for
full automation. It tends toward nearly always requiring supervision of a
human:
* There are multiple failure-points, some are not well reported to
the user by tools here or by AWS itself.
* The upload of the image to s3 can be unreliable, silently corrupting image
data.
* The import-process is managed by a hosted AWS service which can be slow
and is occasionally unreliable.
* Failure often results in one or more leftover/incomplete resources
(s3 objects, EC2 snapshots, and AMIs)
## Requirements
* You're generally familiar with the (manual)
[EC2 snapshot import process](https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-import-snapshot.html).
* You are in possession of an AWS EC2 account, with the [IAM policy
`vmimport`](https://docs.aws.amazon.com/vm-import/latest/userguide/required-permissions.html#vmimport-role) attached.
* Both "Access Key" and "Secret Access Key" values set in [a credentials
file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html).
* Podman is installed and functional
* At least 10gig free space under `/tmp`, more if there are failures / multiple runs.
* *Network bandwidth sufficient for downloading and uploading many GBs of
data, potentially multiple times.*
## Process
Unless there is a problem with the current contents or age of the
imported images, this process does not need to be followed. The
normal PR-based build workflow can simply be followed as usual.
This process is only needed to bring newly updated Fedora images into
AWS to build CI images from. For example, due to a new Beta or GA release.
***Note:*** Most of the steps below will happen within a container environment.
Any exceptions are noted in the individual steps below with *[HOST]*
1. *[HOST]* Edit the `Makefile`, update the Fedora release numbers
under the section
`##### Important image release and source details #####`
1. *[HOST]* Run `make IMPORT_IMG_SFX`
1. *[HOST]* Run
```bash
$ make image_builder_debug \
GAC_FILEPATH=/dev/null \
AWS_SHARED_CREDENTIALS_FILE=/path/to/.aws/credentials
```
1. Run `make import_images` (or `make --jobs=4 import_images` if you're brave).
1. The following steps should all occur successfully for each imported image.
1. Image is downloaded.
1. Image checksum is downloaded.
1. Image is verified against the checksum.
1. Image is converted to `VHDX` format.
1. The `VHDX` image is uploaded to the `packer-image-import` S3 bucket.
1. AWS `import-snapshot` process is started (uses AWS vmimport service)
1. Progress of snapshot import is monitored until completion or failure.
1. The imported snapshot is converted into an AMI
1. Essential tags are added to the AMI
1. Details ascii-table about the new AMI is printed on success.
1. Assuming all image imports were successful, a final success message will be
printed by `make` with instructions for updating the `IMPORT_IMG_SFX` file.
1. *[HOST]* Update the `Makefile` as instructed, commit the
changes and push to a PR. The automated image building process
takes over and runs as usual.
## Failure responses
This list is not exhaustive, and only represents common/likely failures.
Normally there is no need to exit the build container.
* If image download fails, double-check any error output, run `make clean`
and retry.
* If checksum validation fails,
run `make clean`.
Retry `make import_images`.
* If s3 upload fails,
Confirm service availability,
retry `make import_images`.
* If snapshot import fails with a `Disk validation failed` error,
Retry `make import_images`.
* If snapshot import fails with non-validation error,
find snapshot in EC2 and delete it manually.
Retry `make import_images`.
* If AMI registration fails, remove any conflicting AMIs *and* snapshots.
Retry `make import_images`.
* If import was successful but AMI tagging failed, manually add
the required tags to AMI: `automation=false` and `Name=<name>-i${IMG_SFX}`.
Where `<name>` is `fedora-aws` or `fedora-aws-arm64`.

View File

@ -0,0 +1,45 @@
#!/bin/bash
# This script is intended to be run by packer, usage under any other
# environment may behave badly. Its purpose is to download a VM
# image and a checksum file. Verify the image's checksum matches.
# If it does, convert the downloaded image into the format indicated
# by the first argument's `.extension`.
#
# The first argument is the file path and name for the output image,
# the second argument is the image download URL (ending in a filename).
# The third argument is the download URL for a checksum file containing
# details necessary to verify vs filename included in image download URL.
set -eo pipefail
SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
# shellcheck source=./lib.sh
source "$REPO_DIRPATH/lib.sh"
[[ "$#" -eq 3 ]] || \
die "Expected to be called with three arguments, not: $#"
# Packer needs to provide the desired filename as it's unable to parse
# a filename out of the URL or interpret output from this script.
dest_dirpath=$(dirname "$1")
dest_filename=$(basename "$1")
# Output format is the second '.'-separated field of the requested
# filename, e.g. "disk.vhdx" -> "vhdx" (consumed by qemu-img -O below).
dest_format=$(cut -d. -f2<<<"$dest_filename")
src_url="$2"
src_filename=$(basename "$src_url")
cs_url="$3"
req_env_vars dest_dirpath dest_filename dest_format src_url src_filename cs_url
mkdir -p "$dest_dirpath"
cd "$dest_dirpath"
# Re-use an already-downloaded image if present (e.g. from a prior run);
# the checksum verification below still guards against a stale/corrupt file.
[[ -r "$src_filename" ]] || \
curl --fail --location -O "$src_url"
echo "Downloading & verifying checksums in $cs_url"
# --ignore-missing: the checksum file may list many artifacts; only
# verify the one(s) actually present in the current directory.
curl --fail --location "$cs_url" -o - | \
sha256sum --ignore-missing --check -
echo "Converting '$src_filename' to ($dest_format format) '$dest_filename'"
qemu-img convert "$src_filename" -O "$dest_format" "${dest_filename}"

View File

@ -0,0 +1,31 @@
{
"builds": [
{
"name": "fedora-aws",
"builder_type": "hamsterwheel",
"build_time": 0,
"files": null,
"artifact_id": "",
"packer_run_uuid": null,
"custom_data": {
"IMG_SFX": "fedora-aws-i@@@IMPORT_IMG_SFX@@@",
"STAGE": "import",
"TASK": "@@@CIRRUS_TASK_ID@@@"
}
},
{
"name": "fedora-aws-arm64",
"builder_type": "hamsterwheel",
"build_time": 0,
"files": null,
"artifact_id": "",
"packer_run_uuid": null,
"custom_data": {
"IMG_SFX": "fedora-aws-arm64-i@@@IMPORT_IMG_SFX@@@",
"STAGE": "import",
"TASK": "@@@CIRRUS_TASK_ID@@@"
}
}
],
"last_run_uuid": "00000000-0000-0000-0000-000000000000"
}

View File

@ -0,0 +1,18 @@
{
"Name": "@@@NAME@@@-i@@@IMPORT_IMG_SFX@@@",
"VirtualizationType": "hvm",
"Architecture": "@@@ARCH@@@",
"EnaSupport": true,
"RootDeviceName": "/dev/sda1",
"BlockDeviceMappings": [
{
"DeviceName": "/dev/sda1",
"Ebs": {
"DeleteOnTermination": true,
"SnapshotId": "@@@SNAPSHOT_ID@@@",
"VolumeSize": 10,
"VolumeType": "gp2"
}
}
]
}

View File

@ -0,0 +1,84 @@
#!/bin/bash

# This script is intended to be called by the main Makefile
# to wait for and confirm successful import and conversion
# of an uploaded image object from S3 into EC2.  It expects
# the path to a file containing the import task ID as the
# first argument.
#
# If the import is successful, the snapshot ID is written
# to stdout.  Otherwise, all output goes to stderr, and
# the script exits non-zero on failure or timeout.  On
# failure, the file containing the import task ID will
# be removed.

set -eo pipefail

AWS="${AWS:-aws --output json --region us-east-1}"

# The import/conversion process can take a LONG time, have observed
# > 10 minutes on occasion.  Normally, takes 2-5 minutes.
SLEEP_SECONDS=10
TIMEOUT_SECONDS=720

TASK_ID_FILE="$1"
tmpfile=$(mktemp -p '' "tmp.$(basename "${BASH_SOURCE[0]}").XXXX")

# All diagnostics go to stderr so that stdout carries only the snapshot ID.
die() { echo "ERROR: ${1:-No error message provided}" >&2; exit 1; }
msg() { echo "${1:-No message provided}" >&2; }

unset snapshot_id
# EXIT-trap handler: on success, print the snapshot ID to stdout;
# otherwise invalidate the task-id file (so a re-run starts a fresh
# import) and exit non-zero.
handle_exit() {
    set +e
    rm -f "$tmpfile" &> /dev/null
    if [[ -n "$snapshot_id" ]]; then
        msg "Success ($task_id): $snapshot_id"
        echo -n "$snapshot_id"
        return 0
    fi
    rm -f "$TASK_ID_FILE"
    die "Timeout or other error reported while waiting for snapshot import"
}
trap handle_exit EXIT

[[ -n "$AWS_SHARED_CREDENTIALS_FILE" ]] || \
    die "\$AWS_SHARED_CREDENTIALS_FILE must not be unset/empty."

[[ -r "$TASK_ID_FILE" ]] || \
    die "Can't read task id from file '$TASK_ID_FILE'"

task_id=$(<"$TASK_ID_FILE")

msg "Waiting up to $TIMEOUT_SECONDS seconds for '$task_id' import. Checking progress every $SLEEP_SECONDS seconds."
for (( i=TIMEOUT_SECONDS ; i ; i=i-SLEEP_SECONDS )); do \
    # Sleep first, to give AWS time to start meaningful work.
    sleep "${SLEEP_SECONDS}s"

    $AWS ec2 describe-import-snapshot-tasks \
        --import-task-ids "$task_id" > "$tmpfile"

    # jq -e exits non-zero when the path yields false/null.  A failed
    # lookup combined with a non-empty, non-"null" message means AWS
    # returned something unexpected.
    if ! st_msg=$(jq -r -e '.ImportSnapshotTasks[0].SnapshotTaskDetail.StatusMessage?' "$tmpfile") && \
       [[ -n $st_msg ]] && \
       [[ ! "$st_msg" =~ null ]]
    then
        die "Unexpected result: $st_msg"
    # N/B: 'egrep' is deprecated, 'grep -E' is the supported spelling.
    elif grep -E -iq '(error)|(fail)' <<<"$st_msg"; then
        die "$task_id: $st_msg"
    fi

    msg "$task_id: $st_msg (${i}s remaining)"

    # AWS reports both a StatusMessage and a Status; only the latter
    # reliably signals completion.
    if status=$(jq -r -e '.ImportSnapshotTasks[0].SnapshotTaskDetail.Status?' "$tmpfile") && \
       [[ "$status" == "completed" ]] && \
       snapshot_id=$(jq -r -e '.ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId?' "$tmpfile")
    then
        msg "Import complete to: $snapshot_id"
        break
    else
        unset snapshot_id
    fi
done

69
lib.sh
View File

@ -19,11 +19,26 @@ OS_REL_VER="$OS_RELEASE_ID-$OS_RELEASE_VER"
# This location is checked by automation in other repos, please do not change.
PACKAGE_DOWNLOAD_DIR=/var/cache/download
# N/B: This is managed by renovate
INSTALL_AUTOMATION_VERSION="5.0.1"
INSTALL_AUTOMATION_VERSION="4.2.1"
PUSH_LATEST="${PUSH_LATEST:-0}"
# Mask secrets in show_env_vars() from automation library
SECRET_ENV_RE='(^PATH$)|(^BASH_FUNC)|(^_.*)|(.*PASSWORD.*)|(.*TOKEN.*)|(.*SECRET.*)|(.*ACCOUNT.*)|(.+_JSON)|(AWS.+)|(.*SSH.*)|(.*GCP.*)'
SECRET_ENV_RE='(ACCOUNT)|(.+_JSON)|(AWS.+)|(SSH)|(PASSWORD)|(TOKEN)'
# Some platforms set and make this read-only
[[ -n "$UID" ]] || \
UID=$(getent passwd $USER | cut -d : -f 3)
SUDO=""
if [[ -n "$UID" ]] && [[ "$UID" -ne 0 ]]; then
SUDO="sudo"
fi
if [[ "$OS_RELEASE_ID" == "debian" ]]; then
export DEBIAN_FRONTEND=noninteractive
SUDO="$SUDO env DEBIAN_FRONTEND=$DEBIAN_FRONTEND"
fi
if [[ -r "/etc/automation_environment" ]]; then
source /etc/automation_environment
@ -40,28 +55,13 @@ else # Automation common library not installed yet
bigto() { die "Automation library not installed; Required for bigto()"; }
fi
# Setting noninteractive is critical, apt-get can hang w/o it.
# N/B: Must be done _after_ potential loading of automation libraries
export SUDO="env DEBIAN_FRONTEND=noninteractive"
if [[ "$UID" -ne 0 ]]; then
export SUDO="sudo env DEBIAN_FRONTEND=noninteractive"
fi
install_automation_tooling() {
local version_arg
version_arg="$INSTALL_AUTOMATION_VERSION"
if [[ "$1" == "latest" ]]; then
version_arg="latest"
shift
fi
# This script supports installing all current and previous versions
local installer_url="https://raw.githubusercontent.com/containers/automation/master/bin/install_automation.sh"
curl --silent --show-error --location \
--url "$installer_url" | \
$SUDO env INSTALL_PREFIX=/usr/share /bin/bash -s - \
"$version_arg" "$@"
"$INSTALL_AUTOMATION_VERSION" "$@"
# This defines AUTOMATION_LIB_PATH
source /usr/share/automation/environment
#shellcheck disable=SC1090
@ -168,13 +168,9 @@ skip_on_pr_label() {
# print a space-separated list of labels when run under Cirrus-CI for a PR
get_pr_labels() {
req_env_vars CIRRUS_CI CIRRUS_REPO_CLONE_TOKEN
req_env_vars CIRRUS_CI CIRRUS_PR CIRRUS_REPO_CLONE_TOKEN
req_env_vars CIRRUS_REPO_OWNER CIRRUS_REPO_NAME
# Empty for non-PRs
# shellcheck disable=SC2154
[[ -n "$CIRRUS_PR" ]] || return 0
local query h_accept h_content api result fltrpfx
local filter labels h_auth h_accept h_content
@ -238,7 +234,7 @@ remove_netavark_aardvark_files() {
do
# Sub-directories may contain unrelated/valuable stuff
if [[ -d "$fullpath" ]]; then continue; fi
$SUDO rm -vf "$fullpath"
sudo rm -vf "$fullpath"
done
}
@ -286,16 +282,6 @@ unmanaged-devices=interface-name:*podman*;interface-name:veth*
EOF
}
# Create a local registry, seed it with remote images
initialize_local_cache_registry() {
msg "Initializing local cache registry"
#shellcheck disable=SC2154
$SUDO ${SCRIPT_DIRPATH}/local-cache-registry initialize
msg "du -sh /var/cache/local-registry"
du -sh /var/cache/local-registry
}
common_finalize() {
set -x # extra detail is no-longer necessary
cd /
@ -308,7 +294,7 @@ common_finalize() {
$SUDO rm -rf /var/lib/cloud/instanc*
$SUDO rm -rf /root/.ssh/*
$SUDO rm -rf /etc/ssh/*key*
$SUDO rm -rf /tmp/* /var/tmp/automation_images
$SUDO rm -rf /tmp/*
$SUDO rm -rf /tmp/.??*
echo -n "" | $SUDO tee /etc/machine-id
$SUDO sync
@ -330,10 +316,7 @@ rh_finalize() {
# Packaging cache is preserved across builds of container images
$SUDO rm -f /etc/udev/rules.d/*-persistent-*.rules
$SUDO touch /.unconfigured # force firstboot to run
echo
echo "# PACKAGE LIST"
rpm -qa | sort
common_finalize
}
# Called during VM Image setup, not intended for general use.
@ -349,9 +332,7 @@ debian_finalize() {
fi
set -x
# Packaging cache is preserved across builds of container images
# pipe-cat is not a NOP! It prevents using $PAGER and then hanging
echo "# PACKAGE LIST"
dpkg -l | cat
common_finalize
}
finalize() {
@ -364,6 +345,4 @@ finalize() {
else
die "Unknown/Unsupported Distro '$OS_RELEASE_ID'"
fi
common_finalize
}

View File

@ -40,10 +40,8 @@ fi
# I don't expect there will ever be more than maybe 0-20 instances at any time.
for instance_index in $(seq 1 $(jq -e 'length'<<<"$simple_inst_list")); do
instance=$(jq -e ".[$instance_index - 1]"<<<"$simple_inst_list")
# aws commands require an instance ID
instid=$(jq -r ".ID"<<<"$instance")
# A Name-tag isn't guaranteed, default to stupid, unreadable, generated ID
name=$instid
name=$(jq -r ".ID"<<<"$instance")
if name_tag=$(get_tag_value "Name" "$instance"); then
# This is MUCH more human-friendly and easier to find in the WebUI.
# If it was an instance leaked by Cirrus-CI, it may even include the
@ -71,7 +69,6 @@ for instance_index in $(seq 1 $(jq -e 'length'<<<"$simple_inst_list")); do
continue
fi
# First part of the status line item to append in the e-mail
line="* VM $name running $age_days days"
# It would be nice to list all the tags like we do for GCE VMs,
@ -79,39 +76,7 @@ for instance_index in $(seq 1 $(jq -e 'length'<<<"$simple_inst_list")); do
# Only print this handy-one (set by get_ci_vm) if it's there.
if inuseby_tag=$(get_tag_value "in-use-by" "$instance"); then
dbg "Found instance '$name' tagged in-use-by=$inuseby_tag."
line+="; likely get_ci_vm, in-use-by=$inuseby_tag"
elif ((DRY_RUN==0)); then # NOT a persistent or a get_ci_vm instance
# Around Jun/Jul '23 an annoyingly steady stream of EC2 orphans were
# reported to Cirrus-support. They've taken actions to resolve,
# but the failure-modes are many and complex. Since most of the EC2
# instances are rather expensive to keep needlessly running, and manual
# cleanup is annoying, try to terminate them automatically.
dbg "Attempting to terminate instance '$name'"
# Operation runs asynchronously, no error reported for already terminated instance.
# Any stdout/stderr here would make the eventual e-mail unreadable.
if ! termout=$(aws ec2 terminate-instances --no-paginate --output json --instance-ids "$instid" 2>&1)
then
echo "::error::Auto-term. of '$instid' failed, 'aws' output: $termout" > /dev/stderr
# Catch rare TOCTOU race, instance was running, terminated, and pruned while looping.
# (terminated instances stick around for a while until purged automatically)
if [[ "$termout" =~ InvalidInstanceID ]]; then
line+="; auto-term. failed, instance vanished"
else # Something else horrible broke, let the operators know.
line+="; auto-term. failed, see GHA workflow log"
fi
else
dbg "Successful term. command output: '$termout'"
# At this point, the script could sit around in a poll-loop, waiting to confirm
# the `$termout` JSON contains `CurrentState: { Code: 48, Name: terminated }`.
# However this could take _minutes_, and there may be a LOT of instances left
# to process. Do the next best thing: Hope the termination eventually works,
# but also let the operator know an attempt was made.
line+="; probably successful auto-termination"
fi
else # no in-use-by tag, DRY_RUN==1
dbg "DRY_RUN: Would normally have tried to terminate instance '$name' (ID $instid)"
line+=" tagged in-use-by=$inuseby_tag"
fi
echo "$line" >> "$OUTPUT"

View File

@ -18,9 +18,7 @@ req_env_vars GCPJSON GCPNAME GCPPROJECT GCPPROJECTS AWSINI
NOW=$(date +%s)
TOO_OLD='3 days ago' # Detect Friday Orphans on Monday
EVERYTHING=${EVERYTHING:-0} # set to '1' for testing
DRY_RUN=${DRY_RUN:-0}
if ((EVERYTHING)); then
DRY_RUN=1
TOO_OLD="3 seconds ago"
fi
# Anything older than this is "too old"

View File

@ -15,16 +15,6 @@ ARG PACKER_BUILD_NAME=
ENV AI_PATH=/usr/src/automation_images \
CONTAINER=1
ARG IMG_SFX=
ARG CIRRUS_TASK_ID=
ARG GIT_HEAD=
# Ref: https://github.com/opencontainers/image-spec/blob/main/annotations.md
LABEL org.opencontainers.image.url="https://cirrus-ci.com/task/${CIRRUS_TASK_ID}"
LABEL org.opencontainers.image.documentation="https://github.com/containers/automation_images/blob/${GIT_HEAD}/README.md#container-images-overview-step-2"
LABEL org.opencontainers.image.source="https://github.com/containers/automation_images/blob/${GIT_HEAD}/podman/Containerfile"
LABEL org.opencontainers.image.version="${IMG_SFX}"
LABEL org.opencontainers.image.revision="${GIT_HEAD}"
# Only add needed files to avoid invalidating build cache
ADD /lib.sh "$AI_PATH/"
ADD /podman/* "$AI_PATH/podman/"

View File

@ -12,6 +12,7 @@ RUN dnf -y update && \
dnf clean all
ENV REG_REPO="https://github.com/docker/distribution.git" \
REG_COMMIT="b5ca020cfbe998e5af3457fda087444cf5116496" \
REG_COMMIT_SCHEMA1="ec87e9b6971d831f0eff752ddb54fb64693e51cd" \
OSO_REPO="https://github.com/openshift/origin.git" \
OSO_TAG="v1.5.0-alpha.3"

View File

@ -9,6 +9,7 @@ set -e
declare -a req_vars
req_vars=(\
REG_REPO
REG_COMMIT
REG_COMMIT_SCHEMA1
OSO_REPO
OSO_TAG
@ -42,6 +43,12 @@ cd "$REG_GOSRC"
(
# This is required to be set like this by the build system
export GOPATH="$PWD/Godeps/_workspace:$GOPATH"
# This comes in from the Containerfile
# shellcheck disable=SC2154
git checkout -q "$REG_COMMIT"
go build -o /usr/local/bin/registry-v2 \
github.com/docker/distribution/cmd/registry
# This comes in from the Containerfile
# shellcheck disable=SC2154
git checkout -q "$REG_COMMIT_SCHEMA1"
@ -61,10 +68,6 @@ sed -i -e 's/\[\[ "\${go_version\[2]}" < "go1.5" ]]/false/' ./hack/common.sh
# 8 characters long. This can happen if/when systemd-resolved adds 'trust-ad'.
sed -i '/== "attempts:"/s/ 8 / 9 /' vendor/github.com/miekg/dns/clientconfig.go
# Backport https://github.com/ugorji/go/commit/8286c2dc986535d23e3fad8d3e816b9dd1e5aea6
# Go ≥ 1.22 panics with a base64 encoding using duplicated characters.
sed -i -e 's,"encoding/base64","encoding/base32", ; s,base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__"),base32.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef"),' vendor/github.com/ugorji/go/codec/gen.go
make build
make all WHAT=cmd/dockerregistry
cp -a ./_output/local/bin/linux/*/* /usr/local/bin/

View File

@ -7,12 +7,11 @@
set +e # Not all of these exist on every platform
# Setting noninteractive is critical, apt-get can hang w/o it.
if [[ "$UID" -ne 0 ]]; then
export SUDO="sudo env DEBIAN_FRONTEND=noninteractive"
fi
SUDO=""
[[ "$UID" -eq 0 ]] || \
SUDO="sudo"
EVIL_UNITS="cron crond atd apt-daily-upgrade apt-daily fstrim motd-news systemd-tmpfiles-clean update-notifier-download mlocate-updatedb plocate-updatedb"
EVIL_UNITS="cron crond atd apt-daily-upgrade apt-daily fstrim motd-news systemd-tmpfiles-clean update-notifier-download mlocate-updatedb"
if [[ "$1" == "--list" ]]
then
@ -42,44 +41,3 @@ if [[ -d "$EAAD" ]]; then
echo "Checking/Patching $filename"
$SUDO sed -i -r -e "s/$PERIODIC_APT_RE/"'\10"\;/' "$EAAD/$filename"; done
fi
# Early 2023: https://github.com/containers/podman/issues/16973
#
# We see countless instances of "lookup cdn03.quay.io" flakes.
# Disabling the systemd resolver (Podman #17505) seems to have almost
# eliminated those -- the exceptions are early-on steps that run
# before that happens.
#
# Opinions differ on the merits of systemd-resolve, but the fact is
# it breaks our CI testing. Here we disable it for all VMs.
# shellcheck disable=SC2154
if ! ((CONTAINER)); then
nsswitch=/etc/authselect/nsswitch.conf
if [[ -e $nsswitch ]]; then
if grep -q -E 'hosts:.*resolve' $nsswitch; then
echo "Disabling systemd-resolved"
$SUDO sed -i -e 's/^\(hosts: *\).*/\1files dns myhostname/' $nsswitch
$SUDO systemctl disable --now systemd-resolved
$SUDO rm -f /etc/resolv.conf
# NetworkManager may already be running, or it may not....
$SUDO systemctl start NetworkManager
sleep 1
$SUDO systemctl restart NetworkManager
# ...and it may create resolv.conf upon start/restart, or it
# may not. Keep restarting until it does. (Yes, I realize
# this is cargocult thinking. Don't care. Not worth the effort
# to diagnose and solve properly.)
retries=10
while ! test -e /etc/resolv.conf;do
retries=$((retries - 1))
if [[ $retries -eq 0 ]]; then
die "Timed out waiting for resolv.conf"
fi
$SUDO systemctl restart NetworkManager
sleep 5
done
fi
fi
fi

View File

@ -1,13 +1,6 @@
. $PSScriptRoot\win-lib.ps1
# Disable WinRM as a security precaution (cirrus launches an agent from user-data, so we don't need it)
Set-Service winrm -StartupType Disabled
# Also disable RDP (can be enabled via user-data manually)
Set-ItemProperty -Path "HKLM:\System\CurrentControlSet\Control\Terminal Server" -Name "fDenyTSConnections" -Value 1
Disable-NetFirewallRule -DisplayGroup "Remote Desktop"
$ErrorActionPreference = "stop"
$username = "Administrator"
# Temporary random password to allow autologon that will be replaced
# before the instance is put into service.
$syms = [char[]]([char]'a'..[char]'z' `
@ -22,8 +15,8 @@ $encPass = ConvertTo-SecureString $password -AsPlainText -Force
Set-LocalUser -Name $username -Password $encPass
$winLogon= "HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon"
Set-ItemProperty $winLogon "AutoAdminLogon" -Value "1" -type String
Set-ItemProperty $winLogon "DefaultUsername" -Value $username -type String
Set-ItemProperty $winLogon "AutoAdminLogon" -Value "1" -type String
Set-ItemProperty $winLogon "DefaultUsername" -Value $username -type String
Set-ItemProperty $winLogon "DefaultPassword" -Value $password -type String
# Lock the screen immediately, even though it's unattended, just in case
@ -35,6 +28,6 @@ Set-ItemProperty `
# NOTE: For now, we do not run sysprep, since initialization with reboots
# are exceptionally slow on metal nodes, which these target to run. This
# will lead to a duplicate machine id, which is not ideal, but allows
# instances to start quickly. So, instead of sysprep, trigger a reset so
# that the admin password reset, and activation rerun on boot.
# instances to start instantly. So, instead of sysprep, trigger a reset so
# that the admin password reset, and activation rerun on boot
& 'C:\Program Files\Amazon\EC2Launch\ec2launch' reset --block

View File

@ -1,4 +0,0 @@
<powershell>
Set-ItemProperty -Path "HKLM:\System\CurrentControlSet\Control\Terminal Server" -Name "fDenyTSConnections" -Value 0
Enable-NetFirewallRule -DisplayGroup "Remote Desktop"
</powershell>

View File

@ -1,50 +0,0 @@
$ErrorActionPreference = "stop"
Set-ExecutionPolicy Bypass -Scope Process -Force
# Exit the script with $LASTEXITCODE unless it is one of the permitted
# exit codes (default: 0), or no native command has run yet.
function Check-Exit {
    param(
        [parameter(ValueFromRemainingArguments = $true)]
        [string[]] $codes = @(0)
    )
    # $LASTEXITCODE is $null until some native command has executed.
    if ($LASTEXITCODE -eq $null) {
        return
    }
    # Any of the listed codes counts as success.
    if ($codes -contains $LASTEXITCODE) {
        return
    }
    Exit $LASTEXITCODE
}
# Install each named chocolatey package, retrying on failure.
# Packages may be given as plain names or "name@version".
# Throws after 6 failed attempts for any single package.
function retryInstall {
    param([Parameter(ValueFromRemainingArguments)] [string[]] $pkgs)

    foreach ($pkg in $pkgs) {
        for ($retries = 0; ; $retries++) {
            if ($retries -gt 5) {
                throw "Could not install package $pkg"
            }
            # Translate "name@version" into choco arguments.  Use a
            # separate variable so $pkg stays intact: the original code
            # overwrote $pkg, which corrupted the throw message above
            # and re-matched a mutated value on each retry.
            $instArgs = $pkg
            if ($pkg -match '(.[^\@]+)@(.+)') {
                $instArgs = @("--version", $Matches.2, $Matches.1)
            }
            # Chocolatey best practices as of 2024-04:
            # https://docs.chocolatey.org/en-us/choco/commands/#scripting-integration-best-practices-style-guide
            # Some of those are suboptimal, e.g., using "upgrade" to mean "install",
            # hardcoding a specific API URL. We choose to reject those.
            choco install $instArgs -y --allow-downgrade --execution-timeout=300
            if ($LASTEXITCODE -eq 0) {
                break
            }
            Write-Host "Error installing, waiting before retry..."
            Start-Sleep -Seconds 6
        }
    }
}

View File

@ -17,29 +17,24 @@ builders:
most_recent: true
owners:
- amazon
# While this image should run on metal, we can build it on smaller/cheaper systems
# While this image should run on metal, we can build it on smaller/cheaper systems
instance_type: t3.large
force_deregister: true # Remove AMI with same name if exists
force_delete_snapshot: true # Also remove snapshots of force-removed AMI
# Note that we do not set shutdown_behavior to terminate, as a clean shutdown is required
# for windows provisioning to complete successfully.
communicator: winrm
winrm_username: Administrator # AWS provisions Administrator, unlike GCE
winrm_username: Administrator # AWS provisions Administrator, unlike GCE
winrm_insecure: true
winrm_use_ssl: true
winrm_timeout: 25m
# Script that runs on server start, needed to prep and enable winrm
user_data_file: '{{template_dir}}/bootstrap.ps1'
user_data_file: '{{template_dir}}/bootstrap.ps1'
# Required for network access, must be the 'default' group used by Cirrus-CI
security_group_id: "sg-042c75677872ef81c"
ami_name: &ami_name '{{build_name}}-c{{user `IMG_SFX`}}'
ami_description: 'Built in https://cirrus-ci.com/task/{{user `CIRRUS_TASK_ID`}}'
launch_block_device_mappings:
- device_name: '/dev/sda1'
volume_size: 200
volume_type: 'gp3'
iops: 6000
delete_on_termination: true
# These are critical and used by security-polciy to enforce instance launch limits.
tags: &awstags
# EC2 expects "Name" to be capitalized
@ -58,22 +53,18 @@ builders:
provisioners:
- type: powershell
inline:
- '$ErrorActionPreference = "stop"'
- 'New-Item -Path "c:\" -Name "temp" -ItemType "directory" -Force'
- 'New-Item -Path "c:\temp" -Name "automation_images" -ItemType "directory" -Force'
- type: 'file'
source: '{{ pwd }}/'
destination: "c:\\temp\\automation_images\\"
- type: powershell
inline:
- 'c:\temp\automation_images\win_images\win_packaging.ps1'
# Several installed items require a reboot, do that now in case it would
# cause a problem with final image preparations.
script: '{{template_dir}}/win_packaging.ps1'
- type: windows-restart
- type: powershell
inline:
- 'c:\temp\automation_images\win_images\win_finalization.ps1'
# Disable WinRM as a security precaution (cirrus launches an agent from user-data, so we don't need it)
- Set-Service winrm -StartupType Disabled
# Also disable RDP (can be enabled via user-data manually)
- Set-ItemProperty -Path "HKLM:\System\CurrentControlSet\Control\Terminal Server" -Name "fDenyTSConnections" -Value 1
- Disable-NetFirewallRule -DisplayGroup "Remote Desktop"
# Setup Autologon and reset, must be last, due to pw change
- type: powershell
script: '{{template_dir}}/auto_logon.ps1'
post-processors:
@ -84,3 +75,4 @@ post-processors:
IMG_SFX: '{{ user `IMG_SFX` }}'
STAGE: cache
TASK: '{{user `CIRRUS_TASK_ID`}}'

View File

@ -1,36 +1,36 @@
# Exit the script with $LASTEXITCODE unless it is one of the permitted
# exit codes (default: 0), or no native command has run yet.
function CheckExit {
param(
[parameter(ValueFromRemainingArguments = $true)]
[string[]] $codes = @(0)
)
# $LASTEXITCODE is $null until some native command has executed.
if ($LASTEXITCODE -eq $null) {
return
}
# Any of the listed codes counts as success.
foreach ($code in $codes) {
if ($LASTEXITCODE -eq $code) {
return
}
}
Exit $LASTEXITCODE
}
. $PSScriptRoot\win-lib.ps1
# Disables runtime process virus scanning, which is not necessary
Set-MpPreference -DisableRealtimeMonitoring 1
$ErrorActionPreference = "stop"
Set-ExecutionPolicy Bypass -Scope Process -Force
[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072
iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
# Install basic required tooling.
# psexec needed to workaround session 0 WSL bug
retryInstall 7zip git archiver psexec golang mingw StrawberryPerl zstandard; Check-Exit
# Update service is required for dotnet
Set-Service -Name wuauserv -StartupType "Manual"; Check-Exit
# Install dotnet as that's the best way to install WiX 4+
# Choco does not support installing anything over WiX 3.14
Invoke-WebRequest -Uri https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.ps1 -OutFile dotnet-install.ps1
.\dotnet-install.ps1 -InstallDir 'C:\Program Files\dotnet'
# Configure NuGet sources for dotnet to fetch wix (and other packages) from
& 'C:\Program Files\dotnet\dotnet.exe' nuget add source https://api.nuget.org/v3/index.json -n nuget.org
# Install wix
& 'C:\Program Files\dotnet\dotnet.exe' tool install --global wix
# Install Hyper-V
Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V -All -NoRestart
Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V-Management-PowerShell -All -NoRestart
Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V-Management-Clients -All -NoRestart
# Install Git, BZ2 archive support, Go, and the MingW (GCC for Win) compiler for CGO support
# Add pstools to workaround sess 0 WSL bug
choco install -y git mingw archiver psexec; CheckExit
choco install golang --version 1.19.2 -y; CheckExit
# Install WSL, and capture text output which is not normally visible
$x = wsl --install; Check-Exit 0 1 # wsl returns 1 on reboot required
Write-Host $x
$x = wsl --install; CheckExit 0 1 # wsl returns 1 on reboot required
Write-Output $x
Exit 0