Compare commits
No commits in common. "main" and "20230726t191046z-f38f37d13" have entirely different histories.
.cirrus.star

@@ -6,6 +6,7 @@ load("cirrus", "fs")
 def main():
     return {
         "env": {
-            "IMG_SFX": fs.read("IMG_SFX").strip()
+            "IMG_SFX": fs.read("IMG_SFX").strip(),
+            "IMPORT_IMG_SFX": fs.read("IMPORT_IMG_SFX").strip()
         },
     }
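Context for the hunk above: Cirrus-CI evaluates `.cirrus.star` (Starlark) at config-load time and merges the returned map into `.cirrus.yml`, which is how the `IMG_SFX` and `IMPORT_IMG_SFX` file contents become environment variables for every task. A minimal plain-Python rendering of the same logic, for readers unfamiliar with Starlark (`fs.read` is the Cirrus-provided file helper; `pathlib` stands in for it here):

    # Plain-Python sketch of what .cirrus.star returns to Cirrus-CI.
    from pathlib import Path

    def main() -> dict:
        return {
            "env": {
                # .strip() drops the trailing newline, exactly like
                # fs.read("IMG_SFX").strip() in the Starlark original.
                "IMG_SFX": Path("IMG_SFX").read_text().strip(),
                "IMPORT_IMG_SFX": Path("IMPORT_IMG_SFX").read_text().strip(),
            }
        }

    if __name__ == "__main__":
        print(main())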
.cirrus.yml (164 changes)

@@ -10,8 +10,6 @@ env:
     CIRRUS_CLONE_DEPTH: 50
     # Version of packer to use when building images
     PACKER_VERSION: &PACKER_VERSION "1.8.3"
-    # Registry/namespace prefix where container images live
-    REGPFX: "quay.io/libpod"
     #IMG_SFX = <See IMG_SFX file and .cirrus.star script>
     #IMPORT_IMG_SFX = <See IMPORT_IMG_SFX file and .cirrus.star script>
 
@@ -47,7 +45,7 @@ image_builder_task:
     # Packer needs time to clean up partially created VM images
     auto_cancellation: $CI != "true"
     stateful: true
-    timeout_in: 50m
+    timeout_in: 40m
     container:
         dockerfile: "image_builder/Containerfile"
         cpu: 2
@@ -71,7 +69,7 @@ container_images_task: &container_images
     skip: *ci_docs_tooling
     depends_on:
         - image_builder
-    timeout_in: &cntr_timeout 40m
+    timeout_in: 30m
     gce_instance: &ibi_vm
         image_project: "libpod-218412"
         # Trust whatever was built most recently is functional
@@ -83,7 +81,7 @@ container_images_task: &container_images
           env:
              TARGET_NAME: 'fedora_podman'
              # Add a 'c' to the tag for consistency with VM Image names
-             DEST_FQIN: &fqin '${REGPFX}/${TARGET_NAME}:c$IMG_SFX'
+             DEST_FQIN: &fqin 'quay.io/libpod/${TARGET_NAME}:c$IMG_SFX'
         - name: *name
           env:
              TARGET_NAME: 'prior-fedora_podman'
@@ -99,12 +97,12 @@ container_images_task: &container_images
         #      TARGET_NAME: 'debian'
         #      DEST_FQIN: *fqin
     env: &image_env
-        # For $REGPFX namespace, select FQINs only.
-        REG_USERNAME: ENCRYPTED[df4efe530b9a6a731cfea19233e395a5206d24dfac25e84329de035393d191e94ead8c39b373a0391fa025cab15470f8]
-        REG_PASSWORD: ENCRYPTED[255ec05057707c20237a6c7d15b213422779c534f74fe019b8ca565f635dba0e11035a034e533a6f39e146e7435d87b5]
+        # For quay.io/libpod namespace
+        REG_USERNAME: ENCRYPTED[de755aef351c501ee480231c24eae25b15e2b2a2b7c629f477c1d427fc5269e360bb358a53bd8914605bae588e99b52a]
+        REG_PASSWORD: ENCRYPTED[52268944bb0d6642c33efb1c5d7fb82d0c40f9e6988448de35827f9be2cc547c1383db13e8b21516dbd7a0a69a7ae536]
     script: ci/make_container_images.sh;
     package_cache: &package_cache
-        folder: "/var/tmp/automation_images_tmp/.cache/**"
+        folder: "/tmp/automation_images_tmp/.cache/**"
         fingerprint_key: "${TARGET_NAME}-cache-version-1"
 
 
@@ -113,27 +111,31 @@ container_images_task: &container_images
 imgts_build_task:
     alias: imgts_build
     name: 'Build IMGTS image'
-    only_if: *is_pr
+    only_if: $CIRRUS_CRON == ''
     skip: &ci_docs $CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*'
     depends_on:
         - image_builder
-    timeout_in: *cntr_timeout
+    timeout_in: 20m
     gce_instance: *ibi_vm
-    env: *image_env
+    env:
+        <<: *image_env
+        PUSH_LATEST: 1  # scripts force to 0 if $CIRRUS_PR
     script: |
         export TARGET_NAME=imgts
-        export DEST_FQIN="${REGPFX}/${TARGET_NAME}:c${IMG_SFX}";
+        export DEST_FQIN="quay.io/libpod/${TARGET_NAME}:c${IMG_SFX}";
+        export PUSH_LATEST=0
+        [[ -n "$CIRRUS_PR" ]] || export PUSH_LATEST=1
         ci/make_container_images.sh;
 
 
 tooling_images_task:
     alias: tooling_images
     name: 'Build Tooling image ${TARGET_NAME}'
-    only_if: *is_pr
-    skip: *ci_docs
+    only_if: $CIRRUS_CRON == ''
+    skip: &ci_docs $CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*'
     depends_on:
         - imgts_build
-    timeout_in: *cntr_timeout
+    timeout_in: 30m
     gce_instance: *ibi_vm
     env: *image_env
     matrix:
@@ -150,7 +152,9 @@ tooling_images_task:
         - env:
              TARGET_NAME: ccia
     script: |
-        export DEST_FQIN="${REGPFX}/${TARGET_NAME}:c${IMG_SFX}";
+        export DEST_FQIN="quay.io/libpod/${TARGET_NAME}:c${IMG_SFX}";
+        export PUSH_LATEST=0
+        [[ -n "$CIRRUS_PR" ]] || export PUSH_LATEST=1
         ci/make_container_images.sh;
 
 base_images_task:
@@ -164,21 +168,20 @@ base_images_task:
     # Packer needs time to clean up partially created VM images
     auto_cancellation: $CI != "true"
     stateful: true
-    timeout_in: 70m
-    gce_instance: *ibi_vm
+    timeout_in: 45m
+    # Cannot use a container for this task, virt required for fedora image conversion
+    gce_instance:
+        <<: *ibi_vm
+        # Nested-virt is required, need Intel Haswell or better CPU
+        enable_nested_virtualization: true
+        type: "n2-standard-2"
+        scopes: ["cloud-platform"]
     matrix:
         - &base_image
           name: "${PACKER_BUILDS} Base Image"
-          gce_instance: &nested_virt_vm
-              <<: *ibi_vm
-              # Nested-virt is required, need Intel Haswell or better CPU
-              enable_nested_virtualization: true
-              type: "n2-standard-16"
-              scopes: ["cloud-platform"]
          env:
              PACKER_BUILDS: "fedora"
         - <<: *base_image
-          gce_instance: *nested_virt_vm
          env:
              PACKER_BUILDS: "prior-fedora"
         - <<: *base_image
@@ -193,8 +196,6 @@ base_images_task:
     env:
         GAC_JSON: &gac_json ENCRYPTED[7fba7fb26ab568ae39f799ab58a476123206576b0135b3d1019117c6d682391370c801e149f29324ff4b50133012aed9]
         AWS_INI: &aws_ini ENCRYPTED[4cd69097cd29a9899e51acf3bbacceeb83cb5c907d272ca1e2a8ccd515b03f2368a0680870c0d120fc32bc578bb0a930]
-        AWS_MAX_ATTEMPTS: 300
-        AWS_TIMEOUT_SECONDS: 3000
     script: "ci/make.sh base_images"
     manifest_artifacts:
         path: base_images/manifest.json
@@ -212,7 +213,7 @@ cache_images_task:
     # Packer needs time to clean up partially created VM images
     auto_cancellation: $CI != "true"
     stateful: true
-    timeout_in: 90m
+    timeout_in: 45m
     container:
         dockerfile: "image_builder/Containerfile"
         cpu: 2
@@ -233,6 +234,9 @@ cache_images_task:
         - <<: *cache_image
          env:
              PACKER_BUILDS: "fedora-netavark"
+        - <<: *cache_image
+         env:
+             PACKER_BUILDS: "fedora-podman-py"
         - <<: *cache_image
          env:
              PACKER_BUILDS: "fedora-aws"
@@ -251,8 +255,6 @@ cache_images_task:
     env:
         GAC_JSON: *gac_json
         AWS_INI: *aws_ini
-        AWS_MAX_ATTEMPTS: 300
-        AWS_TIMEOUT_SECONDS: 3000
     script: "ci/make.sh cache_images"
     manifest_artifacts:
         path: cache_images/manifest.json
@@ -271,6 +273,7 @@ win_images_task:
     # Packer needs time to clean up partially created VM images
     auto_cancellation: $CI != "true"
     stateful: true
+    timeout_in: 45m
     # Packer WinRM communicator is not reliable on container tasks
     gce_instance:
         <<: *ibi_vm
@@ -283,39 +286,19 @@ win_images_task:
         path: win_images/manifest.json
         type: application/json
 
-# These targets are intended for humans, make sure they builds and function on a basic level
-test_debug_task:
-    name: "Test ${TARGET} make target"
-    alias: test_debug
-    only_if: *is_pr
-    skip: *ci_docs
-    depends_on:
-        - validate
-    gce_instance: *nested_virt_vm
-    matrix:
-        - env:
-             TARGET: ci_debug
-        - env:
-             TARGET: image_builder_debug
-    env:
-        HOME: "/root"
-        GAC_FILEPATH: "/dev/null"
-        AWS_SHARED_CREDENTIALS_FILE: "/dev/null"
-        DBG_TEST_CMD: "true"
-    script: make ${TARGET}
-
 # Test metadata addition to images (built or not) to ensure container functions
 test_imgts_task: &imgts
     name: "Test image timestamp/metadata updates"
     alias: test_imgts
-    only_if: *is_pr
+    only_if: $CIRRUS_CRON == ''
     skip: *ci_docs
     depends_on: &imgts_deps
        - base_images
        - cache_images
        - imgts_build
     container:
-        image: '${REGPFX}/imgts:c$IMG_SFX'
+        image: 'quay.io/libpod/imgts:c$IMG_SFX'
        cpu: 2
        memory: '2G'
     env: &imgts_env
@@ -337,6 +320,7 @@ test_imgts_task: &imgts
            fedora-c${IMG_SFX}
            prior-fedora-c${IMG_SFX}
            fedora-netavark-c${IMG_SFX}
+            fedora-podman-py-c${IMG_SFX}
            rawhide-c${IMG_SFX}
            debian-c${IMG_SFX}
            build-push-c${IMG_SFX}
@@ -344,7 +328,7 @@ test_imgts_task: &imgts
            fedora-aws-i${IMPORT_IMG_SFX}
            fedora-aws-b${IMG_SFX}
            fedora-aws-c${IMG_SFX}
-            fedora-aws-arm64-i${IMPORT_IMG_SFX}
+            fedora-aws-arm64-i${IMG_SFX}
            fedora-aws-arm64-b${IMG_SFX}
            fedora-podman-aws-arm64-c${IMG_SFX}
            fedora-netavark-aws-arm64-c${IMG_SFX}
@@ -376,13 +360,13 @@ imgts_task:
 test_imgobsolete_task: &lifecycle_test
     name: "Test obsolete image detection"
     alias: test_imgobsolete
-    only_if: *is_pr
+    only_if: &only_prs $CIRRUS_PR != ''
     skip: *ci_docs
     depends_on:
        - tooling_images
        - imgts
     container:
-        image: '${REGPFX}/imgobsolete:c$IMG_SFX'
+        image: 'quay.io/libpod/imgobsolete:c$IMG_SFX'
        cpu: 2
        memory: '2G'
     env: &lifecycle_env
@@ -401,8 +385,9 @@ test_orphanvms_task:
     <<: *lifecycle_test
     name: "Test orphan VMs detection"
     alias: test_orphanvms
+    skip: *ci_docs
     container:
-        image: '$REGPFX/orphanvms:c$IMG_SFX'
+        image: 'quay.io/libpod/orphanvms:c$IMG_SFX'
        cpu: 2
        memory: '2G'
     env:
@@ -411,7 +396,6 @@ test_orphanvms_task:
        GCPPROJECT: 'libpod-218412'
        GCPPROJECTS: 'libpod-218412'  # value for testing, otherwise see gcpprojects.txt
        AWSINI: ENCRYPTED[1ab89ff7bc1515dc964efe7ef6e094e01164ba8dd2e11c9a01259c6af3b3968ab841dbe473fe4ab5b573f2f5fa3653e8]
-        DRY_RUN: 1
        EVERYTHING: 1  # Alter age-limit from 3-days -> 3 seconds for a test-run.
     script: /usr/local/bin/entrypoint.sh
 
@@ -420,23 +404,24 @@ test_imgprune_task:
     <<: *lifecycle_test
     name: "Test obsolete image removal"
     alias: test_imgprune
-    depends_on:
-        - tooling_images
-        - imgts
-    container:
-        image: '$REGPFX/imgprune:c$IMG_SFX'
-
-
-test_gcsupld_task:
-    name: "Test uploading to GCS"
-    alias: test_gcsupld
-    only_if: *is_pr
     skip: *ci_docs
     depends_on:
        - tooling_images
        - imgts
     container:
-        image: '$REGPFX/gcsupld:c$IMG_SFX'
+        image: 'quay.io/libpod/imgprune:c$IMG_SFX'
+
+
+test_gcsupld_task:
+    name: "Test uploading to GCS"
+    alias: test_gcsupld
+    only_if: *only_prs
+    skip: *ci_docs
+    depends_on:
+        - tooling_images
+        - imgts
+    container:
+        image: 'quay.io/libpod/gcsupld:c$IMG_SFX'
        cpu: 2
        memory: '2G'
     env:
@@ -449,13 +434,13 @@ test_gcsupld_task:
 test_get_ci_vm_task:
     name: "Test get_ci_vm entrypoint"
     alias: test_get_ci_vm
-    only_if: *is_pr
+    only_if: *only_prs
     skip: *ci_docs
     depends_on:
        - tooling_images
        - imgts
     container:
-        image: '$REGPFX/get_ci_vm:c$IMG_SFX'
+        image: 'quay.io/libpod/get_ci_vm:c$IMG_SFX'
        cpu: 2
        memory: '2G'
     env:
@@ -466,12 +451,12 @@ test_get_ci_vm_task:
 test_ccia_task:
     name: "Test ccia entrypoint"
     alias: test_ccia
-    only_if: *is_pr
+    only_if: *only_prs
     skip: *ci_docs
     depends_on:
        - tooling_images
     container:
-        image: '$REGPFX/ccia:c$IMG_SFX'
+        image: 'quay.io/libpod/ccia:c$IMG_SFX'
        cpu: 2
        memory: '2G'
     test_script: ./ccia/test.sh
@@ -480,45 +465,27 @@ test_ccia_task:
 test_build-push_task:
     name: "Test build-push VM functions"
     alias: test_build-push
-    only_if: |
-        $CIRRUS_PR != '' &&
-        $CIRRUS_PR_LABELS !=~ ".*no_build-push.*"
+    only_if: *only_prs
     skip: *ci_docs_tooling
     depends_on:
        - cache_images
     gce_instance:
        image_project: "libpod-218412"
-        image_name: build-push-c${IMG_SFX}
+        image_family: 'build-push-cache'
        zone: "us-central1-a"
        disk: 200
        # More muscle to emulate multi-arch
        type: "n2-standard-4"
-    script: |
-        mkdir /tmp/context
-        echo -e "FROM scratch\nENV foo=bar\n" > /tmp/context/Containerfile
-        source /etc/automation_environment
-        A_DEBUG=1 build-push.sh --nopush --arches=amd64,arm64,s390x,ppc64le example.com/foo/bar /tmp/context
-
-
-tag_latest_images_task:
-    alias: tag_latest_images
-    name: "Tag latest built container images."
-    only_if: |
-        $CIRRUS_CRON == '' &&
-        $CIRRUS_BRANCH == $CIRRUS_DEFAULT_BRANCH
-    skip: *ci_docs
-    gce_instance: *ibi_vm
-    env: *image_env
-    script: ci/tag_latest.sh
+    script: bash ./build-push/test.sh
 
 
 # N/B: "latest" image produced after PR-merge (branch-push)
 cron_imgobsolete_task: &lifecycle_cron
     name: "Periodicly mark old images obsolete"
     alias: cron_imgobsolete
-    only_if: $CIRRUS_CRON == 'lifecycle'
+    only_if: $CIRRUS_PR == '' && $CIRRUS_CRON != ''
     container:
-        image: '$REGPFX/imgobsolete:latest'
+        image: 'quay.io/libpod/imgobsolete:latest'
        cpu: 2
        memory: '2G'
     env:
@@ -534,7 +501,7 @@ cron_imgprune_task:
     depends_on:
        - cron_imgobsolete
     container:
-        image: '$REGPFX/imgprune:latest'
+        image: 'quay.io/libpod/imgprune:latest'
 
 
 success_task:
@@ -548,7 +515,6 @@ success_task:
        - base_images
        - cache_images
        - win_images
-        - test_debug
        - test_imgts
        - imgts
        - test_imgobsolete
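A recurring pattern in the `.cirrus.yml` hunks above is the switch between a plain alias (`gce_instance: *ibi_vm`) and a merge key (`<<: *ibi_vm`), which inherits the anchored mapping while allowing extra keys such as `enable_nested_virtualization`. A small PyYAML demonstration of the difference (PyYAML is an assumed dependency; the keys mirror the config above):

    import yaml  # pip install pyyaml

    doc = """
    gce_instance: &ibi_vm
        image_project: "libpod-218412"
    task_a:
        gce_instance: *ibi_vm          # alias: reuse the anchored map verbatim
    task_b:
        gce_instance:
            <<: *ibi_vm                # merge key: inherit, then extend
            enable_nested_virtualization: true
    """
    data = yaml.safe_load(doc)
    assert data["task_a"]["gce_instance"] == {"image_project": "libpod-218412"}
    assert data["task_b"]["gce_instance"]["image_project"] == "libpod-218412"
    assert data["task_b"]["gce_instance"]["enable_nested_virtualization"] is True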
(deleted file)

@@ -1,2 +0,0 @@
-IMGSFX,IMG-SFX->IMG_SFX
-Dockerfile->Containerfile

(deleted file)

@@ -1,4 +0,0 @@
-[codespell]
-ignore-words = .codespellignore
-dictionary = .codespelldict
-quiet-level = 3
(GitHub Actions helper script, Python)

@@ -13,9 +13,9 @@ import sys
 
 def msg(msg, newline=True):
     """Print msg to stderr with optional newline."""
-    nl = ""
+    nl = ''
     if newline:
-        nl = "\n"
+        nl = '\n'
     sys.stderr.write(f"{msg}{nl}")
     sys.stderr.flush()
 
@@ -23,13 +23,13 @@ def msg(msg, newline=True):
 def stage_sort(item):
     """Return sorting-key for build-image-json item."""
     if item["stage"] == "import":
-        return str("0010" + item["name"])
+        return str("0010"+item["name"])
     elif item["stage"] == "base":
-        return str("0020" + item["name"])
+        return str("0020"+item["name"])
     elif item["stage"] == "cache":
-        return str("0030" + item["name"])
+        return str("0030"+item["name"])
     else:
-        return str("0100" + item["name"])
+        return str("0100"+item["name"])
 
 
 if "GITHUB_ENV" not in os.environ:
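`stage_sort` encodes the intended table order (import, then base, then cache, then everything else) by prefixing a zero-padded rank onto each name, so a single lexicographic sort does the work. The same idea condensed into a dictionary lookup, with a tiny demonstration:

    # Condensed equivalent of stage_sort, plus a usage example.
    def stage_sort(item):
        rank = {"import": "0010", "base": "0020", "cache": "0030"}
        return rank.get(item["stage"], "0100") + item["name"]

    builds = [
        {"stage": "cache", "name": "fedora"},
        {"stage": "other", "name": "aaa"},
        {"stage": "import", "name": "fedora-aws"},
    ]
    print([b["stage"] for b in sorted(builds, key=stage_sort)])
    # -> ['import', 'cache', 'other']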
@@ -40,58 +40,46 @@ github_workspace = os.environ.get("GITHUB_WORKSPACE", ".")
 
 # File written by a previous workflow step
 with open(f"{github_workspace}/built_images.json") as bij:
     msg(f"Reading image build data from {bij.name}:")
     data = []
     for build in json.load(bij):  # list of build data maps
         stage = build.get("stage", False)
         name = build.get("name", False)
         sfx = build.get("sfx", False)
         task = build.get("task", False)
         if bool(stage) and bool(name) and bool(sfx) and bool(task):
-            image_suffix = f"{stage[0]}{sfx}"
-            data.append(
-                dict(stage=stage, name=name, image_suffix=image_suffix, task=task)
-            )
+            image_suffix = f'{stage[0]}{sfx}'
+            data.append(dict(stage=stage, name=name,
+                             image_suffix=image_suffix, task=task))
             if cirrus_ci_build_id is None:
                 cirrus_ci_build_id = sfx
             msg(f"Including '{stage}' stage build '{name}' for task '{task}'.")
         else:
             msg(f"Skipping '{stage}' stage build '{name}' for task '{task}'.")
 
-url = "https://cirrus-ci.com/task"
+url = 'https://cirrus-ci.com/task'
 lines = []
 data.sort(key=stage_sort)
 for item in data:
-    image_suffix = item["image_suffix"]
-    # Base-images should never actually be used, but it may be helpful
-    # to have them in the list in case some debugging is needed.
-    if item["stage"] != "cache":
-        image_suffix = "do-not-use"
-    lines.append(
-        "|*{0}*|[{1}]({2})|`{3}`|\n".format(
-            item["stage"],
-            item["name"],
-            "{0}/{1}".format(url, item["task"]),
-            image_suffix,
-        )
-    )
+    lines.append('|*{0}*|[{1}]({2})|`{3}`|\n'.format(item['stage'],
+                 item['name'], '{0}/{1}'.format(url, item['task']),
+                 item['image_suffix']))
 
 # This is the mechanism required to set an multi-line env. var.
 # value to be consumed by future workflow steps.
-with open(os.environ["GITHUB_ENV"], "a") as ghenv, open(
-    f"{github_workspace}/images.md", "w"
-) as mdfile, open(f"{github_workspace}/images.json", "w") as images_json:
-
-    env_header = "IMAGE_TABLE<<EOF\n"
-    header = (
-        f"[Cirrus CI build](https://cirrus-ci.com/build/{cirrus_ci_build_id})"
-        " successful. [Found built image names and"
-        f' IDs](https://github.com/{os.environ["GITHUB_REPOSITORY"]}'
-        f'/actions/runs/{os.environ["GITHUB_RUN_ID"]}):\n'
-        "\n"
-    )
-    c_head = "|*Stage*|**Image Name**|`IMAGE_SUFFIX`|\n" "|---|---|---|\n"
+with open(os.environ["GITHUB_ENV"], "a") as ghenv, \
+     open(f'{github_workspace}/images.md', "w") as mdfile, \
+     open(f'{github_workspace}/images.json', "w") as images_json:
+
+    env_header = ("IMAGE_TABLE<<EOF\n")
+    header = (f"[Cirrus CI build](https://cirrus-ci.com/build/{cirrus_ci_build_id})"
+              " successful. [Found built image names and"
+              f' IDs](https://github.com/{os.environ["GITHUB_REPOSITORY"]}'
+              f'/actions/runs/{os.environ["GITHUB_RUN_ID"]}):\n'
+              "\n")
+    c_head = ("|*Stage*|**Image Name**|`IMAGE_SUFFIX`|\n"
+              "|---|---|---|\n")
     # Different output destinations get slightly different content
     for dst in [ghenv, mdfile, sys.stderr]:
         if dst == ghenv:
@@ -104,7 +92,5 @@ with open(os.environ["GITHUB_ENV"], "a") as ghenv, open(
         dst.write("EOF\n\n")
 
     json.dump(data, images_json, indent=4, sort_keys=True)
-    msg(
-        f"Wrote github env file '{ghenv.name}', md-file '{mdfile.name}',"
-        f" and json-file '{images_json.name}'"
-    )
+    msg(f"Wrote github env file '{ghenv.name}', md-file '{mdfile.name}',"
+        f" and json-file '{images_json.name}'")
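The `IMAGE_TABLE<<EOF ... EOF` framing both versions write is GitHub Actions' heredoc syntax for storing a multi-line value in `$GITHUB_ENV`, the file the runner re-reads between steps. Isolated into a helper (the variable name and sample value here are illustrative):

    import os

    def set_multiline_env(name: str, value: str, delimiter: str = "EOF") -> None:
        """Append a name<<delimiter ... delimiter block to $GITHUB_ENV."""
        with open(os.environ["GITHUB_ENV"], "a") as ghenv:
            ghenv.write(f"{name}<<{delimiter}\n{value}\n{delimiter}\n")

    # Later workflow steps can then reference ${{ env.IMAGE_TABLE }}.
    set_multiline_env("IMAGE_TABLE",
                      "|*Stage*|**Image Name**|`IMAGE_SUFFIX`|\n|---|---|---|")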
.github/renovate.json5

@@ -1,12 +1,20 @@
 /*
-    Renovate is a service similar to GitHub Dependabot.
+    Renovate is a service similar to GitHub Dependabot, but with
+    (fantastically) more configuration options.  So many options
+    in fact, if you're new I recommend glossing over this cheat-sheet
+    prior to the official documentation:
+
+    https://www.augmentedmind.de/2021/07/25/renovate-bot-cheat-sheet
 
-    Please Manually validate any changes to this file with:
+    Configuration Update/Change Procedure:
+        1. Make changes
+        2. Manually validate changes (from repo-root):
 
         podman run -it \
             -v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \
-            ghcr.io/renovatebot/renovate:latest \
+            docker.io/renovate/renovate:latest \
             renovate-config-validator
+        3. Commit.
 
     Configuration Reference:
     https://docs.renovatebot.com/configuration-options/
@@ -14,9 +22,11 @@
     Monitoring Dashboard:
     https://app.renovatebot.com/dashboard#github/containers
 
-    Note: The Renovate bot will create/manage its business on
-    branches named 'renovate/*'.  The only copy of this
-    file that matters is the one on the `main` branch.
+    Note: The Renovate bot will create/manage it's business on
+    branches named 'renovate/*'.  Otherwise, and by
+    default, the only the copy of this file that matters
+    is the one on the `main` branch.  No other branches
+    will be monitored or touched in any way.
 */
 
 {
@@ -34,45 +44,55 @@
         // This repo builds images, don't try to manage them.
         "docker:disable"
     ],
 
+    /*************************************************
+     *** Repository-specific configuration options ***
+     *************************************************/
+
+    // Don't leave dep. update. PRs "hanging", assign them to people.
+    "assignees": ["cevich"],
+
     // Don't build CI VM images for dep. update PRs (by default)
-    "commitMessagePrefix": "[CI:DOCS]",
+    commitMessagePrefix: "[CI:DOCS]",
 
-    "customManagers": [
-        // Manage updates to the common automation library version
+    "regexManagers": [
         {
-            "customType": "regex",
             "fileMatch": "^lib.sh$",
-            "matchStrings": ["INSTALL_AUTOMATION_VERSION=\"(?<currentValue>.+)\""],
+            "matchStrings": ["^INSTALL_AUTOMATION_VERSION=\"(?<currentValue>.+)\""],
             "depNameTemplate": "containers/automation",
             "datasourceTemplate": "github-tags",
             "versioningTemplate": "semver-coerced",
             // "v" included in tag, but should not be used in lib.sh
-            "extractVersionTemplate": "^v(?<version>.+)$"
-        }
+            "extractVersionTemplate": "v(?<version>.+)",
+        },
     ],
 
     // N/B: LAST MATCHING RULE WINS, match statems are ANDed together.
+    // https://docs.renovatebot.com/configuration-options/#packagerules
     "packageRules": [
-        // When automation library version updated, full CI VM image build
-        // is needed, along with some other overrides not required in
-        // (for example) github-action updates.
         {
-            "matchManagers": ["custom.regex"],
-            "matchFileNames": ["lib.sh"],
+            "matchManagers": ["regex"],
+            "matchFiles": ["lib.sh"],  // full-path exact-match
+            // Don't wait, roll out CI VM Updates immediately
             "schedule": ["at any time"],
-            "commitMessagePrefix": null,
+            // Override default `[CI:DOCS]`, DO build new CI VM images.
+            commitMessagePrefix: null,
+            // Frequently, library updates require adjustments to build-scripts
             "draftPR": true,
+            "reviewers": ["cevich"],
             "prBodyNotes": [
-                "\
+                // handlebar conditionals don't have logical operators, and renovate
+                // does not provide an 'isMinor' template field
+                "\
                 {{#if isMajor}}\
-                :warning: Changes are **likely** required for build-scripts and/or downstream CI VM \
-                image users. Please check very carefully. :warning:\
+                :warning: Changes are **likely** required for build-scripts \
+                and/or downstream CI VM image users. Please check very carefully. :warning:\
+                {{/if}}\
+                {{#if isPatch}}\
                 {{else}}\
-                :warning: Changes may be required for build-scripts and/or downstream CI VM \
-                image users. Please double-check. :warning:\
+                :warning: Changes *might be* required for build-scripts \
+                and/or downstream CI VM image users. Please double-check. :warning:\
-                {{/if}}"
-            ]
+                {{/if}}\
+                "
+            ],
         }
     ]
 }
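The custom manager above works by running `matchStrings` against `lib.sh` and treating the named capture group `currentValue` as the pinned `containers/automation` version, while `extractVersionTemplate` strips the `v` prefix from upstream tags. A quick Python check of both regexes (Python spells named groups `(?P<name>...)` where Renovate's engine uses `(?<name>...)`; the version numbers are made up):

    import re

    # The branch-side matchStrings pattern, anchored at line start.
    m = re.match(r'^INSTALL_AUTOMATION_VERSION="(?P<currentValue>.+)"',
                 'INSTALL_AUTOMATION_VERSION="1.2.3"')  # illustrative lib.sh line
    assert m and m.group("currentValue") == "1.2.3"

    # extractVersionTemplate: upstream tag "v1.2.4" -> "1.2.4",
    # because lib.sh stores the version without the "v" prefix.
    print(re.match(r"^v(?P<version>.+)$", "v1.2.4").group("version"))  # 1.2.4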
(GitHub Actions workflow)

@@ -14,9 +14,4 @@ jobs:
     # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
     call_cron_failures:
         uses: containers/podman/.github/workflows/check_cirrus_cron.yml@main
-        secrets:
-            SECRET_CIRRUS_API_KEY: ${{secrets.SECRET_CIRRUS_API_KEY}}
-            ACTION_MAIL_SERVER: ${{secrets.ACTION_MAIL_SERVER}}
-            ACTION_MAIL_USERNAME: ${{secrets.ACTION_MAIL_USERNAME}}
-            ACTION_MAIL_PASSWORD: ${{secrets.ACTION_MAIL_PASSWORD}}
-            ACTION_MAIL_SENDER: ${{secrets.ACTION_MAIL_SENDER}}
+        secrets: inherit
@@ -25,12 +25,12 @@ jobs:
     orphan_vms:
         runs-on: ubuntu-latest
         steps:
-            - uses: actions/checkout@v4
+            - uses: actions/checkout@v3
               with:
                   persist-credentials: false
 
            # Avoid duplicating cron-fail_addrs.csv
-            - uses: actions/checkout@v4
+            - uses: actions/checkout@v3
               with:
                   repository: containers/podman
                   path: '_podman'
@@ -44,14 +44,14 @@ jobs:
                  GCPPROJECT: 'libpod-218412'
              run: |
                  export GCPNAME GCPJSON AWSINI GCPPROJECT
-                  export GCPPROJECTS=$(grep -E -vx '^#+.*$' $GITHUB_WORKSPACE/gcpprojects.txt | tr -s '[:space:]' ' ')
+                  export GCPPROJECTS=$(egrep -vx '^#+.*$' $GITHUB_WORKSPACE/gcpprojects.txt | tr -s '[:space:]' ' ')
                  podman run --rm \
                      -e GCPNAME -e GCPJSON -e AWSINI -e GCPPROJECT -e GCPPROJECTS \
                      quay.io/libpod/orphanvms:latest \
                      > /tmp/orphanvms_output.txt
 
            - if: always()
-              uses: actions/upload-artifact@v4
+              uses: actions/upload-artifact@v3
              with:
                  name: orphanvms_output
                  path: /tmp/orphanvms_output.txt
@@ -59,7 +59,7 @@ jobs:
            - name: Count number of orphaned VMs
              id: orphans
              run: |
-                  count=$(grep -E -x '\* VM .+' /tmp/orphanvms_output.txt | wc -l)
+                  count=$(egrep -x '\* VM .+' /tmp/orphanvms_output.txt | wc -l)
                  # Assist with debugging job (step-outputs are otherwise hidden)
                  printf "Orphan VMs count:%d\n" $count
                  if [[ "$count" =~ ^[0-9]+$ ]]; then
@@ -86,20 +86,20 @@ jobs:
            - if: steps.orphans.outputs.count > 0
              name: Send orphan notification e-mail
              # Ref: https://github.com/dawidd6/action-send-mail
-              uses: dawidd6/action-send-mail@v3.12.0
+              uses: dawidd6/action-send-mail@v3.7.2
              with:
                  server_address: ${{ secrets.ACTION_MAIL_SERVER }}
                  server_port: 465
                  username: ${{ secrets.ACTION_MAIL_USERNAME }}
                  password: ${{ secrets.ACTION_MAIL_PASSWORD }}
-                  subject: Orphaned CI VMs detected
+                  subject: Orphaned GCP VMs
                  to: ${{env.RCPTCSV}}
                  from: ${{ secrets.ACTION_MAIL_SENDER }}
                  body: file:///tmp/email_body.txt
 
            - if: failure()
              name: Send error notification e-mail
-              uses: dawidd6/action-send-mail@v3.12.0
+              uses: dawidd6/action-send-mail@v3.7.2
              with:
                  server_address: ${{secrets.ACTION_MAIL_SERVER}}
                  server_port: 465
@@ -108,4 +108,4 @@ jobs:
                  subject: Github workflow error on ${{github.repository}}
                  to: ${{env.RCPTCSV}}
                  from: ${{secrets.ACTION_MAIL_SENDER}}
-                  body: "Job failed: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}"
+                  body: "Job failed: https://github.com/${{github.repository}}/runs/${{github.job}}?check_suite_focus=true"

(GitHub Actions workflow)

@@ -58,7 +58,7 @@ jobs:
                  fi
 
            - if: steps.retro.outputs.is_pr == 'true'
-              uses: actions/checkout@v4
+              uses: actions/checkout@v3
              with:
                  persist-credentials: false
 
@@ -132,10 +132,12 @@ jobs:
 
            - if: steps.manifests.outputs.count > 0
              name: Post PR comment with image name/id table
-              uses: thollander/actions-comment-pull-request@v3
+              uses: jungwinter/comment@v1.1.0
              with:
-                  pr-number: '${{ steps.retro.outputs.prn }}'
-                  message: |
+                  issue_number: '${{ steps.retro.outputs.prn }}'
+                  type: 'create'
+                  token: '${{ secrets.GITHUB_TOKEN }}'
+                  body: |
                      ${{ env.IMAGE_TABLE }}
 
            # Ref: https://github.com/marketplace/actions/deploy-to-gist
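For reference, the counting step works because the orphanvms container emits one `* VM ...` bullet per finding, and `grep -E -x` (or the older `egrep -x`) counts only lines matching the pattern in full. The same check in Python, on invented sample output:

    import re

    output_lines = [
        "Found the following:",
        "* VM gce-instance-123 in libpod-218412",
        "* VM ec2-i-0abc in us-east-1",
    ]
    # Full-line match, equivalent to: grep -E -x '\* VM .+' | wc -l
    count = sum(bool(re.fullmatch(r"\* VM .+", line)) for line in output_lines)
    print(count)  # -> 2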
@@ -1,3 +1,2 @@
 */*.json
 /.cache
-.pre-commit-config.yaml

(deleted file)

@@ -1,20 +0,0 @@
----
-
-# Ref: https://pre-commit.com/#creating-new-hooks
-- id: check-imgsfx
-  name: Check IMG_SFX for accidental reuse.
-  description: |
-    Every PR intended to produce CI VM or container images must update
-    the `IMG_SFX` file via `make IMG_SFX`.  The exact value will be
-    validated against global suffix usage (encoded as tags on the
-    `imgts` container image).  This pre-commit hook verifies on every
-    push, the IMG_SFX file's value has not been pushed previously.
-    It's intended as a simple/imperfect way to save developers time
-    by avoiding force-pushes that will most certainly fail validation.
-  entry: ./check-imgsfx.sh
-  language: system
-  exclude: '.*'  # Not examining any specific file/dir/link
-  always_run: true  # ignore no matching files
-  fail_fast: true
-  pass_filenames: false
-  stages: ["pre-push"]

(new file: IMPORT_IMG_SFX)

@@ -0,0 +1 @@
+20230613t155237z-f38f37d13

Makefile (262 changes)

@@ -1,7 +1,4 @@
-
-# Default is sh, which has scripting limitations
-SHELL := $(shell command -v bash;)
-
 ##### Functions #####
 
 # Evaluates to $(1) if $(1) non-empty, otherwise evaluates to $(2)
@@ -18,15 +15,16 @@ if_ci_else = $(if $(findstring true,$(CI)),$(1),$(2))
 
 ##### Important image release and source details #####
 
-export CENTOS_STREAM_RELEASE = 9
+export CENTOS_STREAM_RELEASE = 8
 
-# Warning: Beta Fedora releases are not supported.  Verifiy EC2 AMI availability
-# here: https://fedoraproject.org/cloud/download
-export FEDORA_RELEASE = 42
-export PRIOR_FEDORA_RELEASE = 41
+export FEDORA_RELEASE = 38
+export PRIOR_FEDORA_RELEASE = 37
 
 # This should always be one-greater than $FEDORA_RELEASE (assuming it's actually the latest)
-export RAWHIDE_RELEASE = 43
+export RAWHIDE_RELEASE = 39
 
+# See import_images/README.md
+export FEDORA_IMPORT_IMG_SFX = $(_IMPORT_IMG_SFX)
+
 # Automation assumes the actual release number (after SID upgrade)
 # is always one-greater than the latest DEBIAN_BASE_FAMILY (GCE image).
@@ -103,6 +101,7 @@ override _HLPFMT = "%-20s %s\n"
 
 # Suffix value for any images built from this make execution
 _IMG_SFX ?= $(file <IMG_SFX)
+_IMPORT_IMG_SFX ?= $(file <IMPORT_IMG_SFX)
 
 # Env. vars needed by packer
 export CHECKPOINT_DISABLE = 1  # Disable hashicorp phone-home
@@ -111,12 +110,6 @@ export PACKER_CACHE_DIR = $(call err_if_empty,_TEMPDIR)
 # AWS CLI default, in case caller needs to override
 export AWS := aws --output json --region us-east-1
 
-# Needed for container-image builds
-GIT_HEAD = $(shell git rev-parse HEAD)
-
-# Save some typing
-_IMGTS_FQIN := quay.io/libpod/imgts:c$(_IMG_SFX)
-
 ##### Targets #####
 
 # N/B: The double-# after targets is gawk'd out as the target description
@@ -131,39 +124,17 @@ help: ## Default target, parses special in-line comments as documentation.
 # There are length/character limitations (a-z, 0-9, -) in GCE for image
 # names and a max-length of 63.
 .PHONY: IMG_SFX
-IMG_SFX: timebomb-check  ## Generate a new date-based image suffix, store in the file IMG_SFX
-	@echo "$$(date -u +%Y%m%dt%H%M%Sz)-f$(FEDORA_RELEASE)f$(PRIOR_FEDORA_RELEASE)d$(subst .,,$(DEBIAN_RELEASE))" > "$@"
-	@cat IMG_SFX
-
-# Prevent us from wasting CI time when we have expired timebombs
-.PHONY: timebomb-check
-timebomb-check:
-	@now=$$(date -u +%Y%m%d); \
-	found=; \
-	while read -r bomb; do \
-	    when=$$(echo "$$bomb" | sed -E -e 's/^.*timebomb ([0-9]+).*/\1/'); \
-	    if [ "$$when" -le "$$now" ]; then \
-	        echo "$$bomb"; \
-	        found=found; \
-	    fi; \
-	done < <(git grep --line-number '^[ ]*timebomb '); \
-	if [[ -n "$$found" ]]; then \
-	    echo ""; \
-	    echo "****** FATAL: Please check/fix expired timebomb(s) ^^^^^^"; \
-	    false; \
-	fi
-
-# Given the path to a file containing 'sha256:<image id>' return <image id>
-# or throw error if empty.
-define imageid
-$(if $(file < $(1)),$(subst sha256:,,$(file < $(1))),$(error Container IID file $(1) doesn't exist or is empty))
-endef
-
-# This is intended for use by humans, to debug the image_builder_task in .cirrus.yml
-# as well as the scripts under the ci subdirectory.  See the 'image_builder_debug`
-# target if debugging of the packer builds is necessary.
+IMG_SFX:  ## Generate a new date-based image suffix, store in the file IMG_SFX
+	$(file >$@,$(shell date --utc +%Y%m%dt%H%M%Sz)-f$(FEDORA_RELEASE)f$(PRIOR_FEDORA_RELEASE)d$(subst .,,$(DEBIAN_RELEASE)))
+	@echo "$(file <IMG_SFX)"
+
+.PHONY: IMPORT_IMG_SFX
+IMPORT_IMG_SFX:  ## Generate a new date-based import-image suffix, store in the file IMPORT_IMG_SFX
+	$(file >$@,$(shell date --utc +%Y%m%dt%H%M%Sz)-f$(FEDORA_RELEASE)f$(PRIOR_FEDORA_RELEASE)d$(subst .,,$(DEBIAN_RELEASE)))
+	@echo "$(file <IMPORT_IMG_SFX)"
+
 .PHONY: ci_debug
-ci_debug: $(_TEMPDIR)/ci_debug.iid  ## Build and enter container for local development/debugging of container-based Cirrus-CI tasks
+ci_debug: $(_TEMPDIR)/ci_debug.tar  ## Build and enter container for local development/debugging of container-based Cirrus-CI tasks
 	/usr/bin/podman run -it --rm \
 		--security-opt label=disable \
 		-v $(_MKFILE_DIR):$(_MKFILE_DIR) -w $(_MKFILE_DIR) \
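Both versions of the `IMG_SFX` recipe generate the same suffix shape, a UTC timestamp plus the exported release numbers, e.g. `20230613t155237z-f38f37d13`; the main branch additionally gates it on `timebomb-check`, which greps the tree for expired `timebomb YYYYMMDD` markers before allowing a new suffix. The suffix format in Python (release values follow the Makefile exports above):

    from datetime import datetime, timezone

    def img_sfx(fedora: int, prior_fedora: int, debian: str) -> str:
        """Mirror of: date -u +%Y%m%dt%H%M%Sz plus release markers."""
        stamp = datetime.now(timezone.utc).strftime("%Y%m%dt%H%M%Sz")
        # $(subst .,,$(DEBIAN_RELEASE)) strips any dot, e.g. "12.1" -> "121"
        return f"{stamp}-f{fedora}f{prior_fedora}d{debian.replace('.', '')}"

    print(img_sfx(38, 37, "13"))  # e.g. 20230613t155237z-f38f37d13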
@@ -175,18 +146,19 @@ ci_debug: ...
 		-e GAC_FILEPATH=$(GAC_FILEPATH) \
 		-e AWS_SHARED_CREDENTIALS_FILE=$(AWS_SHARED_CREDENTIALS_FILE) \
 		-e TEMPDIR=$(_TEMPDIR) \
-		$(call imageid,$<) $(if $(DBG_TEST_CMD),$(DBG_TEST_CMD),)
+		docker-archive:$<
 
-# Takes 3 arguments: IID filepath, FQIN, context dir
+# Takes 3 arguments: export filepath, FQIN, context dir
 define podman_build
 	podman build -t $(2) \
-		--iidfile=$(1) \
 		--build-arg CENTOS_STREAM_RELEASE=$(CENTOS_STREAM_RELEASE) \
 		--build-arg PACKER_VERSION=$(call err_if_empty,PACKER_VERSION) \
 		-f $(3)/Containerfile .
+	rm -f $(1)
+	podman save --quiet -o $(1) $(2)
 endef
 
-$(_TEMPDIR)/ci_debug.iid: $(_TEMPDIR) $(wildcard ci/*)
+$(_TEMPDIR)/ci_debug.tar: $(_TEMPDIR) $(wildcard ci/*)
 	$(call podman_build,$@,ci_debug,ci)
 
 $(_TEMPDIR):
@@ -229,7 +201,7 @@ $(_TEMPDIR)/user-data: $(_TEMPDIR) $(_TEMPDIR)/cidata.ssh.pub $(_TEMPDIR)/cidata
 cidata: $(_TEMPDIR)/user-data $(_TEMPDIR)/meta-data
 
 define build_podman_container
-	$(MAKE) $(_TEMPDIR)/$(1).iid BASE_TAG=$(2)
+	$(MAKE) $(_TEMPDIR)/$(1).tar BASE_TAG=$(2)
 endef
 
 # First argument is the path to the template JSON
@@ -257,17 +229,14 @@ image_builder: image_builder/manifest.json ## Create image-building image and im
 image_builder/manifest.json: image_builder/gce.json image_builder/setup.sh lib.sh systemd_banish.sh $(PACKER_INSTALL_DIR)/packer
 	$(call packer_build,image_builder/gce.json)
 
-# Note: It's assumed there are important files in the callers $HOME
-# needed for debugging (.gitconfig, .ssh keys, etc.).  It's unsafe
-# to assume $(_MKFILE_DIR) is also under $HOME.  Both are mounted
-# for good measure.
+# Note: We assume this repo is checked out somewhere under the caller's
+# home-dir for bind-mounting purposes.  Otherwise possibly necessary
+# files/directories like $HOME/.gitconfig or $HOME/.ssh/ won't be available
+# from inside the debugging container.
 .PHONY: image_builder_debug
-image_builder_debug: $(_TEMPDIR)/image_builder_debug.iid  ## Build and enter container for local development/debugging of targets requiring packer + virtualization
+image_builder_debug: $(_TEMPDIR)/image_builder_debug.tar  ## Build and enter container for local development/debugging of targets requiring packer + virtualization
 	/usr/bin/podman run -it --rm \
-		--security-opt label=disable \
-		-v $$HOME:$$HOME \
-		-v $(_MKFILE_DIR):$(_MKFILE_DIR) \
-		-w $(_MKFILE_DIR) \
+		--security-opt label=disable -v $$HOME:$$HOME -w $(_MKFILE_DIR) \
 		-v $(_TEMPDIR):$(_TEMPDIR) \
 		-v $(call err_if_empty,GAC_FILEPATH):$(GAC_FILEPATH) \
 		-v $(call err_if_empty,AWS_SHARED_CREDENTIALS_FILE):$(AWS_SHARED_CREDENTIALS_FILE) \
|
||||||
-e PACKER_INSTALL_DIR=/usr/local/bin \
|
-e PACKER_INSTALL_DIR=/usr/local/bin \
|
||||||
-e PACKER_VERSION=$(call err_if_empty,PACKER_VERSION) \
|
-e PACKER_VERSION=$(call err_if_empty,PACKER_VERSION) \
|
||||||
-e IMG_SFX=$(call err_if_empty,_IMG_SFX) \
|
-e IMG_SFX=$(call err_if_empty,_IMG_SFX) \
|
||||||
|
-e IMPORT_IMG_SFX=$(call err_if_empty,_IMPORT_IMG_SFX) \
|
||||||
-e GAC_FILEPATH=$(GAC_FILEPATH) \
|
-e GAC_FILEPATH=$(GAC_FILEPATH) \
|
||||||
-e AWS_SHARED_CREDENTIALS_FILE=$(AWS_SHARED_CREDENTIALS_FILE) \
|
-e AWS_SHARED_CREDENTIALS_FILE=$(AWS_SHARED_CREDENTIALS_FILE) \
|
||||||
$(call imageid,$<) $(if $(DBG_TEST_CMD),$(DBG_TEST_CMD))
|
docker-archive:$<
|
||||||
|
|
||||||
$(_TEMPDIR)/image_builder_debug.iid: $(_TEMPDIR) $(wildcard image_builder/*)
|
$(_TEMPDIR)/image_builder_debug.tar: $(_TEMPDIR) $(wildcard image_builder/*)
|
||||||
$(call podman_build,$@,image_builder_debug,image_builder)
|
$(call podman_build,$@,image_builder_debug,image_builder)
|
||||||
|
|
||||||
|
# Avoid re-downloading unnecessarily
|
||||||
|
# Ref: https://www.gnu.org/software/make/manual/html_node/Special-Targets.html#Special-Targets
|
||||||
|
.PRECIOUS: $(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).$(IMPORT_FORMAT)
|
||||||
|
$(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).$(IMPORT_FORMAT): $(_TEMPDIR)
|
||||||
|
bash import_images/handle_image.sh \
|
||||||
|
$@ \
|
||||||
|
$(call err_if_empty,FEDORA_IMAGE_URL) \
|
||||||
|
$(call err_if_empty,FEDORA_CSUM_URL)
|
||||||
|
|
||||||
|
$(_TEMPDIR)/fedora-aws-arm64-$(_IMPORT_IMG_SFX).$(IMPORT_FORMAT): $(_TEMPDIR)
|
||||||
|
bash import_images/handle_image.sh \
|
||||||
|
$@ \
|
||||||
|
$(call err_if_empty,FEDORA_ARM64_IMAGE_URL) \
|
||||||
|
$(call err_if_empty,FEDORA_ARM64_CSUM_URL)
|
||||||
|
|
||||||
|
$(_TEMPDIR)/%.md5: $(_TEMPDIR)/%.$(IMPORT_FORMAT)
|
||||||
|
openssl md5 -binary $< | base64 > $@.tmp
|
||||||
|
mv $@.tmp $@
|
||||||
|
|
||||||
|
# MD5 metadata value checked by AWS after upload + 5 retries.
|
||||||
|
# Cache disabled to avoid sync. issues w/ vmimport service if
|
||||||
|
# image re-uploaded.
|
||||||
|
# TODO: Use sha256 from ..._CSUM_URL file instead of recalculating
|
||||||
|
# https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
|
||||||
|
# Avoid re-uploading unnecessarily
|
||||||
|
.SECONDARY: $(_TEMPDIR)/%.uploaded
|
||||||
|
$(_TEMPDIR)/%.uploaded: $(_TEMPDIR)/%.$(IMPORT_FORMAT) $(_TEMPDIR)/%.md5
|
||||||
|
-$(AWS) s3 rm --quiet s3://packer-image-import/%.$(IMPORT_FORMAT)
|
||||||
|
$(AWS) s3api put-object \
|
||||||
|
--content-md5 "$(file < $(_TEMPDIR)/$*.md5)" \
|
||||||
|
--content-encoding binary/octet-stream \
|
||||||
|
--cache-control no-cache \
|
||||||
|
--bucket packer-image-import \
|
||||||
|
--key $*.$(IMPORT_FORMAT) \
|
||||||
|
--body $(_TEMPDIR)/$*.$(IMPORT_FORMAT) > $@.tmp
|
||||||
|
mv $@.tmp $@
|
||||||
|
|
||||||
|
# For whatever reason, the 'Format' value must be all upper-case.
|
||||||
|
# Avoid creating unnecessary/duplicate import tasks
|
||||||
|
.SECONDARY: $(_TEMPDIR)/%.import_task_id
|
||||||
|
$(_TEMPDIR)/%.import_task_id: $(_TEMPDIR)/%.uploaded
|
||||||
|
$(AWS) ec2 import-snapshot \
|
||||||
|
--disk-container Format=$(shell tr '[:lower:]' '[:upper:]'<<<"$(IMPORT_FORMAT)"),UserBucket="{S3Bucket=packer-image-import,S3Key=$*.$(IMPORT_FORMAT)}" > $@.tmp.json
|
||||||
|
@cat $@.tmp.json
|
||||||
|
jq -r -e .ImportTaskId $@.tmp.json > $@.tmp
|
||||||
|
mv $@.tmp $@
|
||||||
|
|
||||||
|
# Avoid importing multiple snapshots for the same image
|
||||||
|
.PRECIOUS: $(_TEMPDIR)/%.snapshot_id
|
||||||
|
$(_TEMPDIR)/%.snapshot_id: $(_TEMPDIR)/%.import_task_id
|
||||||
|
bash import_images/wait_import_task.sh "$<" > $@.tmp
|
||||||
|
mv $@.tmp $@
|
||||||
|
|
||||||
|
define _register_sed
|
||||||
|
sed -r \
|
||||||
|
-e 's/@@@NAME@@@/$(1)/' \
|
||||||
|
-e 's/@@@IMPORT_IMG_SFX@@@/$(_IMPORT_IMG_SFX)/' \
|
||||||
|
-e 's/@@@ARCH@@@/$(2)/' \
|
||||||
|
-e 's/@@@SNAPSHOT_ID@@@/$(3)/' \
|
||||||
|
import_images/register.json.in \
|
||||||
|
> $(4)
|
||||||
|
endef
|
||||||
|
|
||||||
|
$(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).register.json: $(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).snapshot_id import_images/register.json.in
|
||||||
|
$(call _register_sed,fedora-aws,x86_64,$(file <$<),$@)
|
||||||
|
|
||||||
|
$(_TEMPDIR)/fedora-aws-arm64-$(_IMPORT_IMG_SFX).register.json: $(_TEMPDIR)/fedora-aws-arm64-$(_IMPORT_IMG_SFX).snapshot_id import_images/register.json.in
|
||||||
|
$(call _register_sed,fedora-aws-arm64,arm64,$(file <$<),$@)
|
||||||
|
|
||||||
|
# Avoid multiple registrations for the same image
|
||||||
|
.PRECIOUS: $(_TEMPDIR)/%.ami.id
|
||||||
|
$(_TEMPDIR)/%.ami.id: $(_TEMPDIR)/%.register.json
|
||||||
|
$(AWS) ec2 register-image --cli-input-json "$$(<$<)" > $@.tmp.json
|
||||||
|
cat $@.tmp.json
|
||||||
|
jq -r -e .ImageId $@.tmp.json > $@.tmp
|
||||||
|
mv $@.tmp $@
|
||||||
|
|
||||||
|
$(_TEMPDIR)/%.ami.name: $(_TEMPDIR)/%.register.json
|
||||||
|
jq -r -e .Name $< > $@.tmp
|
||||||
|
mv $@.tmp $@
|
||||||
|
|
||||||
|
$(_TEMPDIR)/%.ami.json: $(_TEMPDIR)/%.ami.id $(_TEMPDIR)/%.ami.name
|
||||||
|
$(AWS) ec2 create-tags \
|
||||||
|
--resources "$$(<$(_TEMPDIR)/$*.ami.id)" \
|
||||||
|
--tags \
|
||||||
|
Key=Name,Value=$$(<$(_TEMPDIR)/$*.ami.name) \
|
||||||
|
Key=automation,Value=false
|
||||||
|
$(AWS) --output table ec2 describe-images --image-ids "$$(<$(_TEMPDIR)/$*.ami.id)" \
|
||||||
|
| tee $@
|
||||||
|
|
||||||
|
.PHONY: import_images
|
||||||
|
import_images: $(_TEMPDIR)/fedora-aws-$(_IMPORT_IMG_SFX).ami.json $(_TEMPDIR)/fedora-aws-arm64-$(_IMPORT_IMG_SFX).ami.json import_images/manifest.json.in ## Import generic Fedora cloud images into AWS EC2.
|
||||||
|
sed -r \
|
||||||
|
-e 's/@@@IMG_SFX@@@/$(_IMPORT_IMG_SFX)/' \
|
||||||
|
-e 's/@@@CIRRUS_TASK_ID@@@/$(CIRRUS_TASK_ID)/' \
|
||||||
|
import_images/manifest.json.in \
|
||||||
|
> import_images/manifest.json
|
||||||
|
@echo "Image import(s) successful!"
|
||||||
|
|
||||||
.PHONY: base_images
|
.PHONY: base_images
|
||||||
# This needs to run in a virt/nested-virt capable environment
|
# This needs to run in a virt/nested-virt capable environment
|
||||||
base_images: base_images/manifest.json ## Create, prepare, and import base-level images into GCE.
|
base_images: base_images/manifest.json ## Create, prepare, and import base-level images into GCE.
|
||||||
|
@@ -308,80 +377,77 @@ fedora_podman: ## Build Fedora podman development container
 prior-fedora_podman: ## Build Prior-Fedora podman development container
 	$(call build_podman_container,$@,$(PRIOR_FEDORA_RELEASE))
 
-$(_TEMPDIR)/%_podman.iid: podman/Containerfile podman/setup.sh $(wildcard base_images/*.sh) $(_TEMPDIR) $(wildcard cache_images/*.sh)
+$(_TEMPDIR)/%_podman.tar: podman/Containerfile podman/setup.sh $(wildcard base_images/*.sh) $(_TEMPDIR) $(wildcard cache_images/*.sh)
 	podman build -t $*_podman:$(call err_if_empty,_IMG_SFX) \
 		--security-opt seccomp=unconfined \
-		--iidfile=$@ \
 		--build-arg=BASE_NAME=$(subst prior-,,$*) \
 		--build-arg=BASE_TAG=$(call err_if_empty,BASE_TAG) \
 		--build-arg=PACKER_BUILD_NAME=$(subst _podman,,$*) \
-		--build-arg=IMG_SFX=$(_IMG_SFX) \
-		--build-arg=CIRRUS_TASK_ID=$(CIRRUS_TASK_ID) \
-		--build-arg=GIT_HEAD=$(call err_if_empty,GIT_HEAD) \
 		-f podman/Containerfile .
+	rm -f $@
+	podman save --quiet -o $@ $*_podman:$(_IMG_SFX)
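The recurring `.iid` to `.tar` switch in this hunk replaces podman's image-ID file with an archive written by `podman save`, which later targets reload via `podman load`. A minimal sketch of that round trip, with placeholder names:

```
# Build, archive, then reload an image by tag (names are placeholders):
podman build -t example_podman:mytag -f podman/Containerfile .
podman save --quiet -o /tmp/example_podman.tar example_podman:mytag
podman load -i /tmp/example_podman.tar   # restores example_podman:mytag
```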
 .PHONY: skopeo_cidev
-skopeo_cidev: $(_TEMPDIR)/skopeo_cidev.iid ## Build Skopeo development and CI container
+skopeo_cidev: $(_TEMPDIR)/skopeo_cidev.tar ## Build Skopeo development and CI container
-$(_TEMPDIR)/skopeo_cidev.iid: $(_TEMPDIR) $(wildcard skopeo_base/*)
+$(_TEMPDIR)/skopeo_cidev.tar: $(_TEMPDIR) $(wildcard skopeo_base/*)
 	podman build -t skopeo_cidev:$(call err_if_empty,_IMG_SFX) \
-		--iidfile=$@ \
 		--security-opt seccomp=unconfined \
 		--build-arg=BASE_TAG=$(FEDORA_RELEASE) \
 		skopeo_cidev
+	rm -f $@
+	podman save --quiet -o $@ skopeo_cidev:$(_IMG_SFX)
 
 .PHONY: ccia
-ccia: $(_TEMPDIR)/ccia.iid ## Build the Cirrus-CI Artifacts container image
+ccia: $(_TEMPDIR)/ccia.tar ## Build the Cirrus-CI Artifacts container image
-$(_TEMPDIR)/ccia.iid: ccia/Containerfile $(_TEMPDIR)
+$(_TEMPDIR)/ccia.tar: ccia/Containerfile $(_TEMPDIR)
 	$(call podman_build,$@,ccia:$(call err_if_empty,_IMG_SFX),ccia)
 
-# Note: This target only builds imgts:c$(_IMG_SFX), it does not push it to
-# any container registry, which may be required for targets which
-# depend on it as a base-image.  In CI, pushing is handled automatically
-# by the 'ci/make_container_images.sh' script.
-.PHONY: imgts
-imgts: imgts/Containerfile imgts/entrypoint.sh imgts/google-cloud-sdk.repo imgts/lib_entrypoint.sh $(_TEMPDIR) ## Build the VM image time-stamping container image
-	$(call podman_build,/dev/null,imgts:$(call err_if_empty,_IMG_SFX),imgts)
-	-rm $(_TEMPDIR)/$@.iid
+.PHONY: bench_stuff
+bench_stuff: $(_TEMPDIR)/bench_stuff.tar ## Build the bench_stuff container image
+$(_TEMPDIR)/bench_stuff.tar: bench_stuff/Containerfile $(_TEMPDIR)
+	$(call podman_build,$@,bench_stuff:$(call err_if_empty,_IMG_SFX),bench_stuff)
+
+.PHONY: imgts
+imgts: $(_TEMPDIR)/imgts.tar ## Build the VM image time-stamping container image
+$(_TEMPDIR)/imgts.tar: imgts/Containerfile imgts/entrypoint.sh imgts/google-cloud-sdk.repo imgts/lib_entrypoint.sh $(_TEMPDIR)
+	$(call podman_build,$@,imgts:$(call err_if_empty,_IMG_SFX),imgts)
 
-# Helper function to build images which depend on the imgts:latest base image
-# N/B: There is no make dependency resolution on imgts.iid on purpose,
-# imgts:c$(_IMG_SFX) is assumed to have already been pushed to quay.
-# See imgts target above.
+# Helper function to build images which depend on the imgts:latest base image
 define imgts_base_podman_build
-	podman image exists $(_IMGTS_FQIN) || podman pull $(_IMGTS_FQIN)
-	podman image exists imgts:latest || podman tag $(_IMGTS_FQIN) imgts:latest
+	podman load -i $(_TEMPDIR)/imgts.tar
+	podman tag imgts:$(call err_if_empty,_IMG_SFX) imgts:latest
 	$(call podman_build,$@,$(1):$(call err_if_empty,_IMG_SFX),$(1))
 endef
 
 .PHONY: imgobsolete
-imgobsolete: $(_TEMPDIR)/imgobsolete.iid ## Build the VM Image obsoleting container image
+imgobsolete: $(_TEMPDIR)/imgobsolete.tar ## Build the VM Image obsoleting container image
-$(_TEMPDIR)/imgobsolete.iid: imgts/lib_entrypoint.sh imgobsolete/Containerfile imgobsolete/entrypoint.sh $(_TEMPDIR)
+$(_TEMPDIR)/imgobsolete.tar: $(_TEMPDIR)/imgts.tar imgts/lib_entrypoint.sh imgobsolete/Containerfile imgobsolete/entrypoint.sh $(_TEMPDIR)
 	$(call imgts_base_podman_build,imgobsolete)
 
 .PHONY: imgprune
-imgprune: $(_TEMPDIR)/imgprune.iid ## Build the VM Image pruning container image
+imgprune: $(_TEMPDIR)/imgprune.tar ## Build the VM Image pruning container image
-$(_TEMPDIR)/imgprune.iid: imgts/lib_entrypoint.sh imgprune/Containerfile imgprune/entrypoint.sh $(_TEMPDIR)
+$(_TEMPDIR)/imgprune.tar: $(_TEMPDIR)/imgts.tar imgts/lib_entrypoint.sh imgprune/Containerfile imgprune/entrypoint.sh $(_TEMPDIR)
 	$(call imgts_base_podman_build,imgprune)
 
 .PHONY: gcsupld
-gcsupld: $(_TEMPDIR)/gcsupld.iid ## Build the GCS Upload container image
+gcsupld: $(_TEMPDIR)/gcsupld.tar ## Build the GCS Upload container image
-$(_TEMPDIR)/gcsupld.iid: imgts/lib_entrypoint.sh gcsupld/Containerfile gcsupld/entrypoint.sh $(_TEMPDIR)
+$(_TEMPDIR)/gcsupld.tar: $(_TEMPDIR)/imgts.tar imgts/lib_entrypoint.sh gcsupld/Containerfile gcsupld/entrypoint.sh $(_TEMPDIR)
 	$(call imgts_base_podman_build,gcsupld)
 
 .PHONY: orphanvms
-orphanvms: $(_TEMPDIR)/orphanvms.iid ## Build the Orphaned VM container image
+orphanvms: $(_TEMPDIR)/orphanvms.tar ## Build the Orphaned VM container image
-$(_TEMPDIR)/orphanvms.iid: imgts/lib_entrypoint.sh orphanvms/Containerfile orphanvms/entrypoint.sh orphanvms/_gce orphanvms/_ec2 $(_TEMPDIR)
+$(_TEMPDIR)/orphanvms.tar: $(_TEMPDIR)/imgts.tar imgts/lib_entrypoint.sh orphanvms/Containerfile orphanvms/entrypoint.sh orphanvms/_gce orphanvms/_ec2 $(_TEMPDIR)
 	$(call imgts_base_podman_build,orphanvms)
 
 .PHONY: .get_ci_vm
-get_ci_vm: $(_TEMPDIR)/get_ci_vm.iid ## Build the get_ci_vm container image
+get_ci_vm: $(_TEMPDIR)/get_ci_vm.tar ## Build the get_ci_vm container image
-$(_TEMPDIR)/get_ci_vm.iid: lib.sh get_ci_vm/Containerfile get_ci_vm/entrypoint.sh get_ci_vm/setup.sh $(_TEMPDIR)
+$(_TEMPDIR)/get_ci_vm.tar: lib.sh get_ci_vm/Containerfile get_ci_vm/entrypoint.sh get_ci_vm/setup.sh $(_TEMPDIR)
-	podman build --iidfile=$@ -t get_ci_vm:$(call err_if_empty,_IMG_SFX) -f get_ci_vm/Containerfile ./
+	podman build -t get_ci_vm:$(call err_if_empty,_IMG_SFX) -f get_ci_vm/Containerfile .
+	rm -f $@
+	podman save --quiet -o $@ get_ci_vm:$(_IMG_SFX)
 
 .PHONY: clean
 clean: ## Remove all generated files referenced in this Makefile
 	-rm -rf $(_TEMPDIR)
 	-rm -f image_builder/*.json
 	-rm -f *_images/{*.json,cidata*,*-data}
-	-podman rmi imgts:latest
-	-podman rmi $(_IMGTS_FQIN)
+	-rm -f ci_debug.tar
@@ -1,108 +0,0 @@
-The README here is waaaaaay too complicated for Ed. So here is a
-simplified version of the typical things you need to do.
-
-Super Duper Simplest Case
-=========================
-
-This is by far the most common case, and the simplest to understand.
-You do this when you want to build VMs with newer package versions than
-whatever VMs are currently set up in CI. You really need to
-understand this before you get into anything more complicated.
-```
-$ git checkout -b lets-see-what-happens
-$ make IMG_SFX
-$ git commit -asm"Let's just see what happens"
-```
-...and push that as a PR.
-
-If you're lucky, in about an hour you will get an email from `github-actions[bot]`
-with a nice table of base and cache images, with links. I strongly encourage you
-to try to get Ed's
-[cirrus-vm-get-versions](https://github.com/edsantiago/containertools/tree/main/cirrus-vm-get-versions)
-script working, because this will give you a very quick, easy, reliable
-list of what packages have changed. You don't need this, but life will be painful
-for you without it.
-
-(If you're not lucky, the build will break. There are infinite ways for
-this to happen, so you're on your own here. Ask for help! This is a great
-team, and one or more people may quickly realize the problem.)
-
-Once you have new VMs built, **test in an actual project**! Usually podman
-and buildah, but you may want the varks too:
-```
-$ cd ~/src/github/containers/podman   # or wherever
-$ git checkout -b test-new-vms
-$ vim .cirrus.yml
-  [ search for "c202", and replace with your new IMG_SFX. ]
-  [ Don't forget the leading "c"! ]
-$ git commit -as
-  [ Please include a link to the automation_images PR! ]
-```
-Push this PR and see what happens. If you're very lucky, it will
-pass on this and other repos. Get your podman/buildah/vark PRs
-reviewed and merged, and then review-merge the automation_images one.
-
-Pushing (har har) Your Luck
----------------------------
-
-Feel lucky? Tag this VM build, so `dependabot` will create PRs
-on all the myriad container repos:
-```
-$ git tag $(<IMG_SFX)
-$ git push --no-verify upstream $(<IMG_SFX)
-```
-
-Within a few hours you'll see a ton of PRs. It is very likely that
-something will go wrong in one or two, and if so, it's impossible to
-cover all possibilities. As above, ask for help.
-
-More Complicated Cases
-======================
-
-These are the next two most common.
-
-Bumping One Package
--------------------
-
-Quite often we need an emergency bump of only one package that
-is not yet stable. Here are examples of the two most typical
-cases,
-[crun](https://github.com/containers/automation_images/pull/386/files) and
-[pasta](https://github.com/containers/automation_images/pull/383/files).
-Note the `timebomb` directives. Please use these: the time you save
-may be your own, one future day. And please use 2-6 week times.
-A timebomb that expires in a year is going to be hard to understand
-when it goes off.
-
-Bumping Distros
----------------
-
-Like Fedora 40 to 41. Edit `Makefile`. Change `FEDORA`, `PRIOR_FEDORA`,
-and `RAWHIDE`, then proceed with the Simple Case.
-
-There is almost zero chance that this will work on the first try.
-Sorry, that's just the way it is. See the
-[F40 to F41 PR](https://github.com/containers/automation_images/pull/392/files)
-for a not-atypical example.
-
-STRONG RECOMMENDATION
-=====================
-
-Read [check-imgsfx.sh](check-imgsfx.sh) and follow its instructions. Ed
-likes to copy that to `.git/hooks/pre-push`; Chris likes using some
-external tool that Ed doesn't trust. Use your judgment.
-
-The reason for this is that you are going to forget to `make IMG_SFX`
-one day, and then you're going to `git push --force` an update and walk
-away, and come back to a failed run, because `IMG_SFX` must always,
-always, always be brand new.
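One minimal way to wire up the hook recommended above, assuming the current directory is the repository root:

```
# Install check-imgsfx.sh as the local pre-push hook:
cp check-imgsfx.sh .git/hooks/pre-push
chmod +x .git/hooks/pre-push
```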
-
-Weak Recommendation
--------------------
-
-Ed likes to fiddle with `IMG_SFX`, zeroing out to the nearest
-quarter hour. Absolutely unnecessary, but easier on the eyes
-when trying to see which VMs are in use or when comparing
-diffs.

README.md
@@ -52,7 +52,7 @@ However, all steps are listed below for completeness.
 For more information on the overall process of importing custom GCE VM
 Images, please [refer to the documentation](https://cloud.google.com/compute/docs/import/import-existing-image). For references to the latest pre-built AWS
 EC2 Fedora AMIs see [the
-upstream cloud page](https://fedoraproject.org/cloud/download).
+upstream cloud page](https://alt.fedoraproject.org/cloud/).
 For more information on the primary tool (*packer*) used for this process,
 please [see its documentation page](https://www.packer.io/docs).
@@ -264,11 +264,13 @@ then automatically pushed to:
 
 * https://quay.io/repository/libpod/fedora_podman
 * https://quay.io/repository/libpod/prior-fedora_podman
+* https://quay.io/repository/libpod/debian_podman
 
 The meaning of *prior* and *current* is defined by the contents of
-the `*_RELEASE` values in the `Makefile`.  The images will be tagged
-with the value within the `IMG_SFX` file.  Additionally, the most
-recently merged PR on this repo will tag its images `latest`.
+the `*_release` files within the `podman` subdirectory.  This is
+necessary to support the Makefile target being used manually
+(e.g. debugging).  These files must be updated manually when introducing
+a new VM image version.
 
 ### Tooling
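To make the tagging scheme concrete, here is a hypothetical set of references for one image, borrowing this compare's tag name as the `IMG_SFX` value (the leading `c` marks container-image tags, as noted elsewhere in this repo):

```
# Illustrative references only; the suffix value is borrowed from this tag:
#   quay.io/libpod/fedora_podman:c20230726t191046z-f38f37d13
#   quay.io/libpod/fedora_podman:latest   # re-tagged after the PR merges
```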
@@ -290,7 +292,8 @@ the following are built:
 
 In all cases, when automation runs on a branch (i.e. after a PR is merged)
 the actual image tagged `latest` will be pushed.  When running in a PR,
-only validation and test images are produced.
+only validation and test images are produced.  This behavior is controlled
+by a combination of the `$PUSH_LATEST` and `$CIRRUS_PR` variables.
 
 ## The Base Images (overview step 3)
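A minimal sketch of the gating just described; this is an illustration, not the repository's actual script, and the variable semantics (`$CIRRUS_PR` set only on PR runs, `$PUSH_LATEST` opting in to the push) are assumptions based on the text:

```
# Sketch: push ':latest' only on branch (non-PR) runs that request it.
if [[ -z "$CIRRUS_PR" ]] && [[ "$PUSH_LATEST" == "1" ]]; then
    podman push "$FQIN" "docker://${FQIN%:*}:latest"
fi
```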
@@ -374,11 +377,10 @@ infinite-growth of the VM image count.
 
 # Debugging / Locally driving VM Image production
 
-Much of the CI and image-build process is containerized, so it may be debugged
-locally on your laptop/workstation.  However, this process will
+Because the entire automated build process is containerized, it may easily be
+performed locally on your laptop/workstation.  However, this process will
 still involve interfacing with GCE and AWS.  Therefore, you must be in possession
-of a *Google Application Credentials* (GAC) JSON and
-[AWS credentials INI file](https://docs.aws.amazon.com/sdkref/latest/guide/file-format.html#file-format-creds).
+of a *Google Application Credentials* (GAC) JSON and AWS credentials INI file.
 
 The GAC JSON file should represent a service account (contrasted to a user account,
 which always uses OAuth2).  The name of the service account doesn't matter,
@@ -399,52 +401,44 @@ one of the following (custom) IAM policies enabled:
 Somebody familiar with Google and AWS IAM will need to provide you with the
 credential files and ensure correct account configuration.  Having these files
 stored *in your home directory* on your laptop/workstation, the process of
-building and entering the debug containers is as follows:
+producing images proceeds as follows:
 
 1. Ensure you have podman installed, and lots of available network and CPU
    resources (i.e. turn off YouTube, shut down background VMs and other hungry
-   tasks).
-
-2. Build and enter either the `ci_debug` or the `image_builder_debug` container
-   image, by executing:
+   tasks).  Build the image-builder container image, by executing
    ```
-   make <ci_debug|image_builder_debug> \
-        GAC_FILEPATH=</home/path/to/gac.json> \
+   make image_builder_debug GAC_FILEPATH=</home/path/to/gac.json> \
        AWS_SHARED_CREDENTIALS_FILE=</path/to/credentials>
   ```
 
-   * The `ci_debug` image is significantly smaller, and only intended for rudimentary
-     cases, for example running the scripts under the `ci` subdirectory.
-   * The `image_builder_debug` image is larger, and has KVM virtualization enabled.
-     It's needed for more extensive debugging of the packer-based image builds.
+2. You will be dropped into a debugging container, inside a volume-mount of
+   the repository root.  This container is practically identical to the VM
+   produced and used in *overview step 1*.  If changes are made, the container
+   image should be re-built to reflect them.
 
-3. Both containers will place you in the default shell, inside a volume-mount of
-   the repository root.  This environment is practically identical to what is
-   used in Cirrus-CI.
+3. If you wish to build only a subset of available images, list the names
+   you want as comma-separated values of the `PACKER_BUILDS` variable.  Be
+   sure you *export* this variable so that `make` has access to it.  For
+   example, `export PACKER_BUILDS=debian,prior-fedora`.
 
-4. For the `image_builder_debug` container, if you wish to build only a subset
-   of available images, list the names you want as comma-separated values of the
-   `PACKER_BUILDS` variable.  Be sure you *export* this variable so that `make`
-   has access to it.  For example, `export PACKER_BUILDS=debian,prior-fedora`.
-
-5. Still within the container, again ensure you have plenty of network and CPU
+4. Still within the container, again ensure you have plenty of network and CPU
   resources available.  Build the VM Base images by executing the command
   ``make base_images``.  This is the equivalent operation as documented by
   *overview step 2*.  ***N/B*** The GCS -> GCE image conversion can take
   some time, be patient.  Packer may not produce any output for several minutes
   while the conversion is happening.
 
-6. When successful, the names of the produced images will all be referenced
+5. When successful, the names of the produced images will all be referenced
   in the `base_images/manifest.json` file.  If there are problems, fix them
   and remove the `manifest.json` file.  Then re-run the same *make* command
   as before, packer will force-overwrite any broken/partially created
   images automatically.
 
-7. Produce the VM Cache Images, equivalent to the operations outlined
+6. Produce the VM Cache Images, equivalent to the operations outlined
   in *overview step 3*.  Execute the following command (still within the
   debug image-builder container): ``make cache_images``.
 
-8. Again when successful, you will find the image names are written into
+7. Again when successful, you will find the image names are written into
   the `cache_images/manifest.json` file.  If there is a problem, remove
   this file, fix the problem, and re-run the `make` command.  No cleanup
   is necessary, leftover/disused images will be automatically cleaned up
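Condensing the numbered steps above into one sequence (paths are placeholders; the subset value is the example from step 3):

```
# On the host:
make image_builder_debug GAC_FILEPATH=/home/me/gac.json \
     AWS_SHARED_CREDENTIALS_FILE=/home/me/.aws/credentials
# Inside the debug container:
export PACKER_BUILDS=debian,prior-fedora   # optional: build only a subset
make base_images    # image names land in base_images/manifest.json
make cache_images   # image names land in cache_images/manifest.json
```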
@@ -26,6 +26,8 @@ variables:  # Empty value means it must be passed in on command-line
     PRIOR_FEDORA_IMAGE_URL: "{{env `PRIOR_FEDORA_IMAGE_URL`}}"
     PRIOR_FEDORA_CSUM_URL: "{{env `PRIOR_FEDORA_CSUM_URL`}}"
 
+    FEDORA_IMPORT_IMG_SFX: "{{env `FEDORA_IMPORT_IMG_SFX`}}"
+
     DEBIAN_RELEASE: "{{env `DEBIAN_RELEASE`}}"
     DEBIAN_BASE_FAMILY: "{{env `DEBIAN_BASE_FAMILY`}}"
 
@@ -61,7 +63,6 @@ builders:
     type: 'qemu'
     accelerator: "kvm"
     qemu_binary: '/usr/libexec/qemu-kvm'  # Unique to CentOS, not fedora :(
-    memory: 12288
     iso_url: '{{user `FEDORA_IMAGE_URL`}}'
     disk_image: true
     format: "raw"
@@ -74,12 +75,12 @@ builders:
     headless: true
     # qemu_binary: "/usr/libexec/qemu-kvm"
     qemuargs:  # List-of-list format required to override packer-generated args
-      - - "-display"
-        - "none"
+      - - "-m"
+        - "1024"
       - - "-device"
         - "virtio-rng-pci"
       - - "-chardev"
-        - "file,id=pts,path={{user `TTYDEV`}}"
+        - "tty,id=pts,path={{user `TTYDEV`}}"
       - - "-device"
         - "isa-serial,chardev=pts"
       - - "-netdev"
@@ -107,18 +108,20 @@ builders:
   - &fedora-aws
     name: 'fedora-aws'
     type: 'amazon-ebs'
-    source_ami_filter:
-      # Many of these search filter values (like account ID and name) aren't publicized
-      # anywhere.  They were found by examining AWS EC2 AMIs published/referenced from
-      # the AWS sections on https://fedoraproject.org/cloud/download
+    source_ami_filter:  # Will fail if >1 or no AMI found
       owners:
-        - &fedora_accountid 125523088429
-      most_recent: true  # Required b/c >1 search result likely to be returned
+        # Docs are wrong, specifying the Account ID is required to make AMIs private.
+        # The Account ID is hard-coded here out of expediency, since passing in
+        # more packer args from the command-line (in Makefile) is non-trivial.
+        - &accountid '449134212816'
+      # It's necessary to 'search' for the base-image by these criteria.  If
+      # more than one image is found, Packer will fail the build (and display
+      # the conflicting AMI IDs).
      filters: &ami_filters
        architecture: 'x86_64'
        image-type: 'machine'
-        is-public: 'true'
+        is-public: 'false'
-        name: 'Fedora-Cloud-Base*-{{user `FEDORA_RELEASE`}}-*'
+        name: '{{build_name}}-i{{user `FEDORA_IMPORT_IMG_SFX`}}'
        root-device-type: 'ebs'
        state: 'available'
        virtualization-type: 'hvm'
@@ -142,6 +145,7 @@ builders:
       volume_type: 'gp2'
       delete_on_termination: true
+      # These are critical and used by security-policy to enforce instance launch limits.
 
     tags: &awstags
       <<: *imgcpylabels
       # EC2 expects "Name" to be capitalized
@@ -155,7 +159,7 @@ builders:
     # This is necessary for security - The CI service accounts are not permitted
     # to use AMIs from any other account, including public ones.
     ami_users:
-      - &accountid '449134212816'
+      - *accountid
     ssh_username: 'fedora'
     ssh_clear_authorized_keys: true
     # N/B: Required Packer >= 1.8.0
@@ -166,8 +170,7 @@ builders:
     name: 'fedora-aws-arm64'
     source_ami_filter:
       owners:
-        - *fedora_accountid
+        - *accountid
-      most_recent: true  # Required b/c >1 search result likely to be returned
      filters:
        <<: *ami_filters
        architecture: 'arm64'
@@ -184,23 +187,23 @@ provisioners:  # Debian images come bundled with GCE integrations provisioned
   - type: 'shell'
     inline:
       - 'set -e'
-      - 'mkdir -p /var/tmp/automation_images'
+      - 'mkdir -p /tmp/automation_images'
 
   - type: 'file'
     source: '{{ pwd }}/'
-    destination: '/var/tmp/automation_images/'
+    destination: '/tmp/automation_images/'
 
   - except: ['debian']
     type: 'shell'
     inline:
       - 'set -e'
-      - '/bin/bash /var/tmp/automation_images/base_images/fedora_base-setup.sh'
+      - '/bin/bash /tmp/automation_images/base_images/fedora_base-setup.sh'
 
   - only: ['debian']
     type: 'shell'
     inline:
       - 'set -e'
-      - 'env DEBIAN_FRONTEND=noninteractive DEBIAN_RELEASE={{user `DEBIAN_RELEASE`}} /bin/bash /var/tmp/automation_images/base_images/debian_base-setup.sh'
+      - 'env DEBIAN_FRONTEND=noninteractive /bin/bash /tmp/automation_images/base_images/debian_base-setup.sh'
 
 post-processors:
   # Must be double-nested to guarantee execution order
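To sanity-check what the revised `source_ami_filter` would match, a query along these lines could be run first; the AWS CLI invocation is a suggestion, not part of this repo, and the name pattern is illustrative:

```
# Preview candidate AMIs for the filter criteria (values illustrative):
aws ec2 describe-images --owners 449134212816 \
    --filters 'Name=name,Values=fedora-aws-i*' \
              'Name=architecture,Values=x86_64' \
              'Name=is-public,Values=false' \
    --query 'Images[].[ImageId,Name]' --output table
```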
@@ -16,15 +16,6 @@ REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
 # shellcheck source=./lib.sh
 source "$REPO_DIRPATH/lib.sh"
 
-# Cloud-networking in general can sometimes be flaky.
-# Increase Apt's tolerance levels.
-cat << EOF | $SUDO tee -a /etc/apt/apt.conf.d/99timeouts
-// Added during CI VM image build
-Acquire::Retries "3";
-Acquire::http::timeout "300";
-Acquire::https::timeout "300";
-EOF
-
 echo "Switch sources to Debian Unstable (SID)"
 cat << EOF | $SUDO tee /etc/apt/sources.list
 deb http://deb.debian.org/debian/ unstable main
@@ -37,6 +28,7 @@ PKGS=( \
     curl
     cloud-init
     gawk
+    git
     openssh-client
     openssh-server
     rng-tools5
@@ -44,21 +36,9 @@ PKGS=( \
 )
 
 echo "Updating package source lists"
-( set -x; $SUDO apt-get -q -y update; )
+( set -x; $SUDO apt-get -qq -y update; )
-
-# Only deps for automation tooling
-( set -x; $SUDO apt-get -q -y install git )
-install_automation_tooling
-# Ensure automation library is loaded
-source "$REPO_DIRPATH/lib.sh"
-
-# Workaround 12->13 forward-incompatible change in grub scripts.
-# Without this, updating to the SID kernel may fail.
-echo "Upgrading grub-common"
-( set -x; $SUDO apt-get -q -y upgrade grub-common; )
-
 echo "Upgrading to SID"
-( set -x; $SUDO apt-get -q -y full-upgrade; )
+( set -x; $SUDO apt-get -qq -y full-upgrade; )
 echo "Installing basic, necessary packages."
 ( set -x; $SUDO apt-get -q -y install "${PKGS[@]}"; )
@@ -67,15 +47,21 @@ echo "Installing basic, necessary packages."
     dpkg-reconfigure dash; )
 
 # Ref: https://wiki.debian.org/DebianReleases
-# CI automation needs an OS version/release number for a variety of uses.
-# However, after switching to Unstable/SID, the value from the usual source
-# is not available.  Simply use the value passed through packer by the Makefile.
-req_env_vars DEBIAN_RELEASE
-# shellcheck disable=SC2154
-warn "Setting '$DEBIAN_RELEASE' as the release number for CI-automation purposes."
-( set -x; echo "VERSION_ID=\"$DEBIAN_RELEASE\"" | \
+# CI automation needs a *sortable* OS version/release number to select/perform/apply
+# runtime configuration and workarounds.  Since switching to Unstable/SID, a
+# numeric release version is not available.  While an imperfect solution,
+# base an artificial version off the 'base-files' package version, zero-padded
+# to ensure sortability (i.e. "12.02" < "12.13").
+base_files_version=$(dpkg -s base-files | awk '/Version:/{print $2}')
+base_major=$(cut -d. -f 1 <<<"$base_files_version")
+base_minor=$(cut -d. -f 2 <<<"$base_files_version")
+sortable_version=$(printf "%02d.%02d" $base_major $base_minor)
+echo "WARN: This is NOT an official version number. It's for CI-automation purposes only."
+( set -x; echo "VERSION_ID=\"$sortable_version\"" | \
     $SUDO tee -a /etc/os-release; )
 
+install_automation_tooling
+
 if ! ((CONTAINER)); then
     custom_cloud_init
     ( set -x; $SUDO systemctl enable rngd; )
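A quick worked example of the version derivation above, using a hypothetical `base-files` version:

```
# Given a hypothetical base-files version of "12.4":
printf "%02d.%02d\n" 12 4    # -> 12.04, which string-sorts before 12.13
```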
@@ -18,6 +18,7 @@ source "$REPO_DIRPATH/lib.sh"
 
 declare -a PKGS
 PKGS=(rng-tools git coreutils cloud-init)
+XARGS=--disablerepo=updates
 if ! ((CONTAINER)); then
     # Packer defines this automatically for us
     # shellcheck disable=SC2154
@@ -29,28 +30,20 @@ if ! ((CONTAINER)); then
         if ((OS_RELEASE_VER<35)); then
             PKGS+=(google-compute-engine-tools)
         else
-            PKGS+=(google-compute-engine-guest-configs google-guest-agent)
+            PKGS+=(google-compute-engine-guest-configs)
         fi
     fi
 fi
 
-# The Fedora CI VM base images are built using nested-virt with
-# limited resources available.  Further, cloud-networking in
-# general can sometimes be flaky.  Increase DNF's tolerance
-# levels.
-cat << EOF | $SUDO tee -a /etc/dnf/dnf.conf
-# Added during CI VM image build
-minrate=100
-timeout=60
-EOF
-
-$SUDO dnf makecache
-$SUDO dnf -y update
-$SUDO dnf -y install "${PKGS[@]}"
-# Occasionally following an install, there are more updates available.
-# This may be due to activation of suggested/recommended dependency resolution.
-$SUDO dnf -y update
+# Due to https://bugzilla.redhat.com/show_bug.cgi?id=1907030
+# updates cannot be installed or even looked at during this stage.
+# Pawn the problem off to the cache-image stage where more memory
+# is available and debugging is also easier.  Try to save some more
+# memory by pre-populating repo metadata prior to any transactions.
+$SUDO dnf makecache $XARGS
+# Updates disabled, see comment above
+# $SUDO dnf -y update $XARGS
+$SUDO dnf -y install $XARGS "${PKGS[@]}"
 
 if ! ((CONTAINER)); then
     $SUDO systemctl enable rngd
@@ -90,9 +83,7 @@ if ! ((CONTAINER)); then
     # This is necessary to prevent permission-denied errors on service-start
     # and also on the off-chance the package gets updated and context reset.
     $SUDO semanage fcontext --add --type bin_t /usr/bin/cloud-init
-    # This used restorecon before so we don't have to specify the file_contexts.local
-    # manually, however with f42 that stopped working: https://bugzilla.redhat.com/show_bug.cgi?id=2360183
-    $SUDO setfiles -v /etc/selinux/targeted/contexts/files/file_contexts.local /usr/bin/cloud-init
+    $SUDO restorecon -v /usr/bin/cloud-init
 else  # GCP Image
     echo "Setting GCP startup service (for Cirrus-CI agent) SELinux unconfined"
     # ref: https://cloud.google.com/compute/docs/startupscript
@@ -104,4 +95,10 @@ if ! ((CONTAINER)); then
         /lib/$METADATA_SERVICE_PATH | $SUDO tee -a /etc/$METADATA_SERVICE_PATH
 fi
 
+if [[ "$OS_RELEASE_ID" == "fedora" ]] && ((OS_RELEASE_VER>=33)); then
+    # Ref: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=783509
+    echo "Disabling automatic /tmp (tmpfs) mount"
+    $SUDO systemctl mask tmp.mount
+fi
+
 finalize
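The staged approach above can also be reproduced by hand when debugging; a sketch assuming the same `$XARGS` value and package list:

```
# Hand-run equivalent of the staged install (updates repo deliberately skipped):
XARGS=--disablerepo=updates
sudo dnf makecache $XARGS
sudo dnf -y install $XARGS rng-tools git coreutils cloud-init
```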
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# This script is intended to be used from two places only:
+# 1) When building the build-push VM image, to install the scripts as-is
+#    in a PR in order for CI testing to operate on them.
+# 2) From the autoupdate.sh script, when $BUILDPUSHAUTOUPDATED is unset
+#    or '0'.  This clones the latest repository to install (possibly)
+#    updated scripts.
+#
+# WARNING: Use under any other circumstances will probably screw things up.
+
+if [[ -z "$BUILDPUSHAUTOUPDATED" ]];
+then
+    echo "This script must only be run under Packer or autoupdate.sh"
+    exit 1
+fi
+
+source /etc/automation_environment
+source "$AUTOMATION_LIB_PATH/common_lib.sh"
+
+#shellcheck disable=SC2154
+cd $(dirname "$SCRIPT_FILEPATH") || exit 1
+# Must be installed into $AUTOMATION_LIB_PATH/../bin which is on $PATH
+cp ./bin/* $AUTOMATION_LIB_PATH/../bin/
+cp ./lib/* $AUTOMATION_LIB_PATH/
+chmod +x $AUTOMATION_LIB_PATH/../bin/*
@@ -0,0 +1,5 @@
+# DO NOT USE
+
+This directory contains scripts/data used by the Cirrus-CI
+`test_build-push` task.  It is not intended to be used otherwise
+and may cause harm.
@@ -0,0 +1,172 @@
+#!/bin/bash
+
+# This script is not intended for humans.  It should be run by automation
+# at the branch-level in automation for the skopeo, buildah, and podman
+# repositories.  Its purpose is to produce a multi-arch container image
+# based on the contents of a context subdirectory.  At runtime, $PWD is assumed
+# to be the root of the cloned git repository.
+#
+# The first argument to the script should be the URL of the git repository
+# in question.  Though at this time, this is only used for labeling the
+# resulting image.
+#
+# The second argument to this script is the relative path to the build context
+# subdirectory.  The basename of this subdirectory may indicate the
+# image flavor (i.e. `upstream`, `testing`, or `stable`).  Depending
+# on this value, the image may be pushed to multiple container registries
+# under slightly different rules (see the next option).
+#
+# If the basename of the context directory (second argument) does NOT reflect
+# the image flavor, this name may be passed in as a third argument.  Handling
+# of this argument may be repository-specific, so check the actual code below
+# to understand its behavior.
+
+set -eo pipefail
+
+if [[ -r "/etc/automation_environment" ]]; then
+    source /etc/automation_environment  # defines AUTOMATION_LIB_PATH
+    #shellcheck disable=SC1090,SC2154
+    source "$AUTOMATION_LIB_PATH/common_lib.sh"
+    #shellcheck source=../lib/autoupdate.sh
+    source "$AUTOMATION_LIB_PATH/autoupdate.sh"
+else
+    echo "Expecting to find automation common library installed."
+    exit 1
+fi
+
+# Careful: Changing the error message below could break the auto-update test.
+if [[ "$#" -lt 2 ]]; then
+    #shellcheck disable=SC2145
+    die "Must be called with at least two arguments, got '$@'"
+fi
+
+if [[ -z $(type -P build-push.sh) ]]; then
+    die "It does not appear that build-push.sh is installed properly"
+fi
+
+if ! [[ -d "$PWD/.git" ]]; then
+    die "The current directory ($PWD) does not appear to be the root of a git repo."
+fi
+
+# Assume transitive debugging state for build-push.sh if set
+if [[ "$(automation_version | cut -d '.' -f 1)" -ge 4 ]]; then
+    # Valid for version 4.0.0 and above only
+    export A_DEBUG
+else
+    export DEBUG
+fi
+
+# Arches to build by default - may be overridden for testing
+ARCHES="${ARCHES:-amd64,ppc64le,s390x,arm64}"
+
+# First arg (REPO_URL) is the clone URL for repository for informational purposes
+REPO_URL="$1"
+REPO_NAME=$(basename "${REPO_URL%.git}")
+# Second arg (CTX_SUB) is the context subdirectory relative to the clone path
+CTX_SUB="$2"
+# Historically, the basename of the second arg set the image flavor (i.e. `upstream`,
+# `testing`, or `stable`).  For cases where this convention doesn't fit,
+# it's possible to pass the flavor-name as the third argument.  Both methods
+# will populate a "FLAVOR" build-arg value.
+if [[ "$#" -lt 3 ]]; then
+    FLAVOR_NAME=$(basename "$CTX_SUB")
+elif [[ "$#" -ge 3 ]]; then
+    FLAVOR_NAME="$3"  # An empty value is valid
+else
+    die "Expecting a non-empty third argument indicating the FLAVOR build-arg value."
+fi
+_REG="quay.io"
+if [[ "$REPO_NAME" =~ testing ]]; then
+    _REG="example.com"
+fi
+REPO_FQIN="$_REG/$REPO_NAME/$FLAVOR_NAME"
+req_env_vars REPO_URL REPO_NAME CTX_SUB FLAVOR_NAME
+
+# Common library defines SCRIPT_FILENAME
+# shellcheck disable=SC2154
+dbg "$SCRIPT_FILENAME operating constants:
+    REPO_URL=$REPO_URL
+    REPO_NAME=$REPO_NAME
+    CTX_SUB=$CTX_SUB
+    FLAVOR_NAME=$FLAVOR_NAME
+    REPO_FQIN=$REPO_FQIN
+"
+
+# Set non-zero to avoid actually executing build-push, simply print
+# the command-line that would have been executed
+DRYRUN=${DRYRUN:-0}
+_DRNOPUSH=""
+if ((DRYRUN)); then
+    _DRNOPUSH="--nopush"
+    warn "Operating in dry-run mode with $_DRNOPUSH"
+fi
+
+### MAIN
+
+declare -a build_args
+if [[ -n "$FLAVOR_NAME" ]]; then
+    build_args=(--build-arg "FLAVOR=$FLAVOR_NAME")
+fi
+
+# Labels to add to all images
+# N/B: These won't show up in the manifest-list itself, only its constituents.
+lblargs="\
+    --label=org.opencontainers.image.source=$REPO_URL \
+    --label=org.opencontainers.image.created=$(date -u --iso-8601=seconds)"
+dbg "lblargs=$lblargs"
+
+modcmdarg="tag_version.sh $FLAVOR_NAME"
+
+# For stable images, the version number of the command is needed for tagging.
+if [[ "$FLAVOR_NAME" == "stable" ]]; then
+    # only native arch is needed to extract the version
+    dbg "Building local-arch image to extract stable version number"
+    podman build -t $REPO_FQIN "${build_args[@]}" ./$CTX_SUB
+
+    case "$REPO_NAME" in
+        skopeo) version_cmd="--version" ;;
+        buildah) version_cmd="buildah --version" ;;
+        podman) version_cmd="podman --version" ;;
+        testing) version_cmd="cat FAKE_VERSION" ;;
+        *) die "Unknown/unsupported repo '$REPO_NAME'" ;;
+    esac
+
+    pvcmd="podman run -i --rm $REPO_FQIN $version_cmd"
+    dbg "Extracting version with command: $pvcmd"
+    version_output=$($pvcmd)
+    dbg "version output:
+    $version_output
+    "
+    img_cmd_version=$(awk -r -e '/^.+ version /{print $3}' <<<"$version_output")
+    dbg "parsed version: $img_cmd_version"
+    test -n "$img_cmd_version"
+    lblargs="$lblargs --label=org.opencontainers.image.version=$img_cmd_version"
+    # Prevent temporary build colliding with multi-arch manifest list (built next)
+    # but preserve image (by ID) for use as cache.
+    dbg "Un-tagging $REPO_FQIN"
+    podman untag $REPO_FQIN
+
+    # tag_version.sh expects this arg. when FLAVOR_NAME=stable
+    modcmdarg+=" $img_cmd_version"
+
+    # Stable images get pushed to the 'containers' namespace as latest & version-tagged
+    build-push.sh \
+        $_DRNOPUSH \
+        --arches=$ARCHES \
+        --modcmd="$modcmdarg" \
+        $_REG/containers/$REPO_NAME \
+        ./$CTX_SUB \
+        $lblargs \
+        "${build_args[@]}"
+fi
+
+# All images are pushed to quay.io/<reponame>, both
+# latest and version-tagged (if available).
+build-push.sh \
+    $_DRNOPUSH \
+    --arches=$ARCHES \
+    --modcmd="$modcmdarg" \
+    $REPO_FQIN \
+    ./$CTX_SUB \
+    $lblargs \
+    "${build_args[@]}"
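The calling conventions from the header comments are exercised by the test script later in this diff; the same two invocation styles are shown here for quick reference:

```
# Flavor taken from the context subdirectory's basename:
main.sh git://testing contrib/testimage/stable
# Flavor passed explicitly as the third argument:
main.sh git://testing contrib/testimage foobarbaz
```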
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+# This script is not intended for humans.  It should only be referenced
+# as an argument to the build-push.sh `--modcmd` option.  Its purpose
+# is to ensure stable images are re-tagged with a version-number
+# corresponding to the included tool's version.
+
+set -eo pipefail
+
+if [[ -r "/etc/automation_environment" ]]; then
+    source /etc/automation_environment  # defines AUTOMATION_LIB_PATH
+    #shellcheck disable=SC1090,SC2154
+    source "$AUTOMATION_LIB_PATH/common_lib.sh"
+else
+    echo "Unexpected operating environment"
+    exit 1
+fi
+
+# Vars defined by the build-push.sh spec. for mod scripts
+req_env_vars SCRIPT_FILENAME SCRIPT_FILEPATH RUNTIME PLATFORMOS FQIN CONTEXT \
+             PUSH ARCHES REGSERVER NAMESPACE IMGNAME MODCMD
+
+if [[ "$#" -ge 1 ]]; then
+    FLAVOR_NAME="$1"  # upstream, testing, or stable
+fi
+
+if [[ "$#" -ge 2 ]]; then
+    # Enforce all version-tags start with a 'v'
+    VERSION="v${2#v}"  # output of $version_cmd
+fi
+
+if [[ -z "$FLAVOR_NAME" ]]; then
+    # Defined by common_lib.sh
+    # shellcheck disable=SC2154
+    warn "$SCRIPT_FILENAME passed empty flavor-name argument (optional)."
+elif [[ -z "$VERSION" ]]; then
+    warn "$SCRIPT_FILENAME received empty version argument (req. for FLAVOR_NAME=stable)."
+fi
+
+# shellcheck disable=SC2154
+dbg "Mod-command operating on $FQIN in '$FLAVOR_NAME' flavor"
+
+if [[ "$FLAVOR_NAME" == "stable" ]]; then
+    # Stable images must all be tagged with a version number.
+    # Confirm this value is passed in by the caller.
+    req_env_vars VERSION
+    VERSION=v${VERSION#v}
+    if egrep -q '^v[0-9]+\.[0-9]+\.[0-9]+'<<<"$VERSION"; then
+        msg "Found image command version '$VERSION'"
+    else
+        die "Encountered unexpected/non-conforming version '$VERSION'"
+    fi
+
+    # shellcheck disable=SC2154
+    $RUNTIME tag $FQIN:latest $FQIN:$VERSION
+    msg "Successfully tagged $FQIN:$VERSION"
+
+    # Tag as x.y to provide a consistent tag even for a future z+1
+    xy_ver=$(awk -F '.' '{print $1"."$2}'<<<"$VERSION")
+    $RUNTIME tag $FQIN:latest $FQIN:$xy_ver
+    msg "Successfully tagged $FQIN:$xy_ver"
+
+    # Tag as x to provide a consistent tag even for a future y+1
+    x_ver=$(awk -F '.' '{print $1}'<<<"$xy_ver")
+    $RUNTIME tag $FQIN:latest $FQIN:$x_ver
+    msg "Successfully tagged $FQIN:$x_ver"
+else
+    warn "$SCRIPT_FILENAME not version-tagging for '$FLAVOR_NAME' stage of '$FQIN'"
+fi
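A worked example of the tag fan-out this script performs, for a hypothetical `VERSION=v1.2.3`:

```
# latest is re-tagged three ways: full, major.minor, and major-only:
awk -F '.' '{print $1"."$2}' <<<"v1.2.3"   # -> v1.2
awk -F '.' '{print $1}' <<<"v1.2"          # -> v1
# Result: $FQIN:v1.2.3, $FQIN:v1.2, and $FQIN:v1 all point at $FQIN:latest
```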
@@ -0,0 +1,36 @@
+
+# This script is not intended for humans.  It should only be sourced by
+# main.sh.  If BUILDPUSHAUTOUPDATED!=0 it will be a no-op.  Otherwise,
+# it will download the latest version of the build-push scripts and re-exec
+# main.sh.  This allows the scripts to be updated without requiring new VM
+# images to be composed and deployed.
+#
+# WARNING: Changes to this script _do_ require new VM images, as auto-updating
+# the auto-update script would be complex and hard to test.
+
+# Must be exported - .install.sh checks this is set.
+export BUILDPUSHAUTOUPDATED="${BUILDPUSHAUTOUPDATED:-0}"
+
+if ! ((BUILDPUSHAUTOUPDATED)); then
+    msg "Auto-updating build-push operational scripts..."
+    #shellcheck disable=SC2154
+    GITTMP=$(mktemp -p '' -d "$MKTEMP_FORMAT")
+    trap "rm -rf $GITTMP" EXIT
+
+    msg "Obtaining latest version..."
+    git clone --quiet --depth=1 \
+        https://github.com/containers/automation_images.git \
+        "$GITTMP"
+    msg "Installing..."
+    cd $GITTMP/build-push || exit 1
+    bash ./.install.sh
+    # Important: Return to the directory main.sh was started from
+    cd - || exit 1
+    rm -rf "$GITTMP"
+
+    #shellcheck disable=SC2145
+    msg "Re-executing main.sh $@..."
+    export BUILDPUSHAUTOUPDATED=1
+    exec main.sh "$@"  # guaranteed on $PATH
+fi
@ -0,0 +1,195 @@
|
||||||
|
|
||||||
|
|
||||||
|
# DO NOT USE - This script is intended to be called by the Cirrus-CI
|
||||||
|
# `test_build-push` task. It is not intended to be used otherwise
|
||||||
|
# and may cause harm. It's purpose is to confirm the 'main.sh' script
|
||||||
|
# behaves in an expected way, given a local test repository as input.
|
||||||
|
|
||||||
|
set -eo pipefail
|
||||||
|
|
||||||
|
SCRIPT_DIRPATH=$(dirname $(realpath "${BASH_SOURCE[0]}"))
|
||||||
|
source $SCRIPT_DIRPATH/../lib.sh
|
||||||
|
|
||||||
|
req_env_vars CIRRUS_CI
|
||||||
|
|
||||||
|
# No need to test if image wasn't built
|
||||||
|
if TARGET_NAME=build-push skip_on_pr_label; then exit 0; fi
|
||||||
|
|
||||||
|
# Architectures to test with (golang standard names)
|
||||||
|
TESTARCHES="amd64 arm64"
|
||||||
|
# main.sh is sensitive to this value
|
||||||
|
ARCHES=$(tr " " ","<<<"$TESTARCHES")
|
||||||
|
export ARCHES
|
||||||
|
# Contrived "version" for testing purposes
|
||||||
|
FAKE_VER_X=$RANDOM
|
||||||
|
FAKE_VER_Y=$RANDOM
|
||||||
|
FAKE_VER_Z=$RANDOM
|
||||||
|
FAKE_VERSION="$FAKE_VER_X.$FAKE_VER_Y.$FAKE_VER_Z"
|
||||||
|
# Contrived source repository for testing
|
||||||
|
SRC_TMP=$(mktemp -p '' -d tmp-build-push-test-XXXX)
|
||||||
|
# Do not change, main.sh is sensitive to the 'testing' name
|
||||||
|
TEST_FQIN=example.com/testing/stable
|
||||||
|
# Stable build should result in manifest list tagged this
|
||||||
|
TEST_FQIN2=example.com/containers/testing
|
||||||
|
# Don't allow main.sh or tag_version.sh to auto-update at runtime
|
||||||
|
export BUILDPUSHAUTOUPDATED=1
|
||||||
|
|
||||||
|
trap "rm -rf $SRC_TMP" EXIT
|
||||||
|
|
||||||
|
# main.sh expects $PWD to be a git repository.
|
||||||
|
msg "
|
||||||
|
##### Constructing local test repository #####"
|
||||||
|
cd $SRC_TMP
|
||||||
|
showrun git init -b main testing
|
||||||
|
cd testing
|
||||||
|
git config --local user.name "Testy McTestface"
|
||||||
|
git config --local user.email "test@example.com"
|
||||||
|
git config --local advice.detachedHead "false"
|
||||||
|
git config --local commit.gpgsign "false"
|
||||||
|
# The following paths match the style of sub-dir in the actual
|
||||||
|
# skopeo/buildah/podman repositories. Only the 'stable' flavor
# is tested here, since it involves the most complex workflow.
mkdir -vp "contrib/testimage/stable"
cd "contrib/testimage/stable"
echo "build-push-test version v$FAKE_VERSION" | tee "FAKE_VERSION"
cat <<EOF | tee "Containerfile"
FROM registry.fedoraproject.org/fedora:latest
ARG FLAVOR
ADD /FAKE_VERSION /
RUN echo "FLAVOUR=\$FLAVOR" > /FLAVOUR
EOF
# As an additional test, build and check images when passing
# the 'stable' flavor name as a command-line arg instead
# of using the subdirectory dirname (old method).
cd $SRC_TMP/testing/contrib/testimage
cp stable/* ./
cd $SRC_TMP/testing
# The images will have the repo & commit ID set as labels
git add --all
git commit -m 'test repo initial commit'
TEST_REVISION=$(git rev-parse HEAD)

# Given the flavor-name as the first argument, verify built image
# expectations. For the 'stable' image, verify that main.sh properly
# version-tagged both FQINs. For other flavors, verify expected labels
# on the `latest` tagged FQINs.
verify_built_images() {
    local _fqin _arch xy_ver x_ver img_ver img_src img_rev _fltr
    local _test_tag expected_flavor _test_fqins
    expected_flavor="$1"
    msg "
##### Testing execution of '$expected_flavor' images for arches $TESTARCHES #####"
    podman --version
    req_env_vars TESTARCHES FAKE_VERSION TEST_FQIN TEST_FQIN2

    declare -a _test_fqins
    _test_fqins=("${TEST_FQIN%stable}$expected_flavor")
    if [[ "$expected_flavor" == "stable" ]]; then
        _test_fqins+=("$TEST_FQIN2")
        test_tag="v$FAKE_VERSION"
        xy_ver="v$FAKE_VER_X.$FAKE_VER_Y"
        x_ver="v$FAKE_VER_X"
    else
        test_tag="latest"
        xy_ver="latest"
        x_ver="latest"
    fi

    for _fqin in "${_test_fqins[@]}"; do
        for _arch in $TESTARCHES; do
            msg "Testing container can execute '/bin/true'"
            showrun podman run -i --arch=$_arch --rm "$_fqin:$test_tag" /bin/true

            msg "Testing container FLAVOR build-arg passed correctly"
            showrun podman run -i --arch=$_arch --rm "$_fqin:$test_tag" \
                cat /FLAVOUR | tee /dev/stderr | fgrep -xq "FLAVOUR=$expected_flavor"

            if [[ "$expected_flavor" == "stable" ]]; then
                msg "Testing tag '$xy_ver'"
                if ! showrun podman manifest exists $_fqin:$xy_ver; then
                    die "Failed to find manifest-list tagged '$xy_ver'"
                fi

                msg "Testing tag '$x_ver'"
                if ! showrun podman manifest exists $_fqin:$x_ver; then
                    die "Failed to find manifest-list tagged '$x_ver'"
                fi
            fi
        done

        if [[ "$expected_flavor" == "stable" ]]; then
            msg "Testing image $_fqin:$test_tag version label"
            _fltr='.[].Config.Labels."org.opencontainers.image.version"'
            img_ver=$(podman inspect $_fqin:$test_tag | jq -r -e "$_fltr")
            showrun test "$img_ver" == "v$FAKE_VERSION"
        fi

        msg "Testing image $_fqin:$test_tag source label"
        _fltr='.[].Config.Labels."org.opencontainers.image.source"'
        img_src=$(podman inspect $_fqin:$test_tag | jq -r -e "$_fltr")
        showrun test "$img_src" == "git://testing"
    done
}

remove_built_images() {
    buildah --version
    for _fqin in $TEST_FQIN $TEST_FQIN2; do
        for tag in latest v$FAKE_VERSION v$FAKE_VER_X.$FAKE_VER_Y v$FAKE_VER_X; do
            # Don't care if this fails
            podman manifest rm $_fqin:$tag || true
        done
    done
}

msg "
##### Testing build-push subdir-flavor run of '$TEST_FQIN' & '$TEST_FQIN2' #####"
cd $SRC_TMP/testing
export DRYRUN=1  # Force main.sh not to push anything
req_env_vars ARCHES DRYRUN
# main.sh is sensitive to the 'testing' value.
# Also confirms main.sh is on $PATH
env A_DEBUG=1 main.sh git://testing contrib/testimage/stable
verify_built_images stable

msg "
##### Testing build-push flavour-arg run for '$TEST_FQIN' & '$TEST_FQIN2' #####"
remove_built_images
env A_DEBUG=1 main.sh git://testing contrib/testimage foobarbaz
verify_built_images foobarbaz

# This script verifies it's only/ever running inside CI. Use a fake
# main.sh to verify it auto-updates itself w/o actually performing
# a build. N/B: This test must be run last, in a throw-away environment,
# it _WILL_ modify on-disk contents!
msg "
##### Testing auto-update capability #####"
cd $SRC_TMP
#shellcheck disable=SC2154
cat >main.sh<< EOF
#!/bin/bash

source /etc/automation_environment  # defines AUTOMATION_LIB_PATH
source "$AUTOMATION_LIB_PATH/common_lib.sh"
source "$AUTOMATION_LIB_PATH/autoupdate.sh"
EOF
chmod +x main.sh
# Back to where we were
cd -

# Expect the real main.sh to bark one of two error messages
# and exit non-zero.
EXP_RX1="Must.be.called.with.at.least.two.arguments"
EXP_RX2="does.not.appear.to.be.the.root.of.a.git.repo"
if output=$(env --ignore-environment \
        BUILDPUSHAUTOUPDATED=0 \
        AUTOMATION_LIB_PATH=$AUTOMATION_LIB_PATH \
        $SRC_TMP/main.sh 2>&1); then
    die "Fail. Expected main.sh to exit non-zero"
else
    if [[ "$output" =~ $EXP_RX1 ]] || [[ "$output" =~ $EXP_RX2 ]]; then
        echo "PASS"
    else
        die "Fail. Expecting match to '$EXP_RX1' or '$EXP_RX2', got:
$output"
    fi
fi
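A minimal sketch of the `%` suffix-removal expansion used above to derive the per-flavor FQIN; the values here are illustrative, not taken from CI:

    # Strip the trailing 'stable' and append the flavor under test
    TEST_FQIN="quay.io/libpod/test_build-push:stable"
    expected_flavor="foobarbaz"
    echo "${TEST_FQIN%stable}$expected_flavor"
    # -> quay.io/libpod/test_build-push:foobarbaz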
@@ -27,10 +27,8 @@ INSTALL_PACKAGES=(\
     git
     jq
     podman
-    python3-pip
     qemu-user-static
     skopeo
-    unzip
 )

 echo "Installing general build/test dependencies"
@@ -39,7 +37,11 @@ bigto $SUDO dnf install -y "${INSTALL_PACKAGES[@]}"
 # It was observed in F33, dnf install doesn't always get you the latest/greatest
 lilto $SUDO dnf update -y

-# Re-install would append to this, making a mess.
-$SUDO rm -f /etc/automation_environment
-# Re-install the latest version with the 'build-push' component
-install_automation_tooling latest build-push
+# Re-install with the 'build-push' component
+install_automation_tooling build-push
+# Install main scripts into directory on $PATH
+cd $REPO_DIRPATH/build-push
+set -x
+# Do not auto-update to allow testing inside a PR
+$SUDO env BUILDPUSHAUTOUPDATED=1 bash ./.install.sh
@@ -75,6 +75,9 @@ builders:
     source_image_family: 'fedora-base'
     labels: *fedora_gce_labels

+  - <<: *aux_fed_img
+    name: 'fedora-podman-py'
+
   - <<: *aux_fed_img
     name: 'fedora-netavark'
@@ -180,30 +183,30 @@ provisioners:
 - type: 'shell'
   inline:
     - 'set -e'
-    - 'mkdir -p /var/tmp/automation_images'
+    - 'mkdir -p /tmp/automation_images'

 - type: 'file'
   source: '{{ pwd }}/'
-  destination: "/var/tmp/automation_images"
+  destination: "/tmp/automation_images"

 - only: ['rawhide']
   type: 'shell'
   expect_disconnect: true  # VM will be rebooted at end of script
   inline:
     - 'set -e'
-    - '/bin/bash /var/tmp/automation_images/cache_images/rawhide_setup.sh'
+    - '/bin/bash /tmp/automation_images/cache_images/rawhide_setup.sh'

 - except: ['debian']
   type: 'shell'
   inline:
     - 'set -e'
-    - '/bin/bash /var/tmp/automation_images/cache_images/fedora_setup.sh'
+    - '/bin/bash /tmp/automation_images/cache_images/fedora_setup.sh'

 - only: ['debian']
   type: 'shell'
   inline:
     - 'set -e'
-    - 'env DEBIAN_FRONTEND=noninteractive /bin/bash /var/tmp/automation_images/cache_images/debian_setup.sh'
+    - 'env DEBIAN_FRONTEND=noninteractive /bin/bash /tmp/automation_images/cache_images/debian_setup.sh'

 post-processors:
   # This is critical for human-interaction. Contents will be used
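The only change in these hunks is the staging directory. The distinction matters because /tmp is frequently a tmpfs that is cleared on reboot, while /var/tmp is disk-backed, and the rawhide provisioner above reboots the VM mid-build. A quick, illustrative way to check which is which on a given host:

    findmnt -no FSTYPE /tmp       # often 'tmpfs'
    findmnt -no FSTYPE /var/tmp   # typically the root filesystem type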
@@ -15,8 +15,8 @@ REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
 source "$REPO_DIRPATH/lib.sh"

 msg "Updating/Installing repos and packages for $OS_REL_VER"
-lilto ooe.sh $SUDO apt-get -q -y update
+lilto ooe.sh $SUDO apt-get -qq -y update
-bigto ooe.sh $SUDO apt-get -q -y upgrade
+bigto ooe.sh $SUDO apt-get -qq -y upgrade

 INSTALL_PACKAGES=(\
     apache2-utils
@@ -39,12 +39,13 @@ INSTALL_PACKAGES=(\
     crun
     dnsmasq
     e2fslibs-dev
+    emacs-nox
     file
     fuse3
     fuse-overlayfs
     gcc
     gettext
-    git
+    git-daemon-run
     gnupg2
     go-md2man
     golang
@@ -59,6 +60,7 @@ INSTALL_PACKAGES=(\
     libdevmapper-dev
     libdevmapper1.02.1
     libfuse-dev
+    libfuse2
     libfuse3-dev
     libglib2.0-dev
     libgpgme11-dev
@@ -103,8 +105,6 @@ INSTALL_PACKAGES=(\
     skopeo
     slirp4netns
     socat
-    libsqlite3-0
-    libsqlite3-dev
     systemd-container
     sudo
     time
@@ -118,18 +118,18 @@ INSTALL_PACKAGES=(\
     zstd
 )

-# bpftrace is only needed on the host as containers cannot run ebpf
-# programs anyway and it is very big so we should not bloat the container
-# images unnecessarily.
-if ! ((CONTAINER)); then
-    INSTALL_PACKAGES+=( \
-        bpftrace
-    )
-fi
-
 msg "Installing general build/testing dependencies"
 bigto $SUDO apt-get -q -y install "${INSTALL_PACKAGES[@]}"

+msg "Enabling contrib source & installing ZFS support (for containers/storage CI)"
+ZFS_PACKAGES=(\
+    linux-headers-cloud-amd64
+    zfsutils
+)
+$SUDO sed -i -r 's/^(deb.*)/\1 contrib/g' /etc/apt/sources.list
+lilto ooe.sh $SUDO apt-get -qq -y update
+bigto $SUDO apt-get -q -y install "${ZFS_PACKAGES[@]}"
+
 # The nc installed by default is missing many required options
 $SUDO update-alternatives --set nc /usr/bin/ncat
@@ -162,7 +162,7 @@ echo "deb https://download.docker.com/linux/debian $docker_debian_release stable
 if ((CONTAINER==0)) && [[ ${#DOWNLOAD_PACKAGES[@]} -gt 0 ]]; then
     $SUDO apt-get clean  # no reason to keep previous downloads around
     # Needed to install .deb files + resolve dependencies
-    lilto $SUDO apt-get -q -y update
+    lilto $SUDO apt-get -qq -y update
     echo "Downloading packages for optional installation at runtime."
     $SUDO ln -s /var/cache/apt/archives "$PACKAGE_DOWNLOAD_DIR"
     bigto $SUDO apt-get -q -y install --download-only "${DOWNLOAD_PACKAGES[@]}"
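The --download-only pass above pre-seeds the apt archive cache without installing anything, so tests can install from local disk later through the $PACKAGE_DOWNLOAD_DIR symlink. A sketch of the runtime side, with a hypothetical package filename (the actual DOWNLOAD_PACKAGES list is defined elsewhere in this script):

    # Install a pre-downloaded .deb without touching the network
    sudo dpkg -i "$PACKAGE_DOWNLOAD_DIR"/parallel_*.deb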
@@ -17,44 +17,14 @@ fi
 # shellcheck source=./lib.sh
 source "$REPO_DIRPATH/lib.sh"

-# Generate en_US.UTF-8 locale as this is required for a podman test (https://github.com/containers/podman/pull/19635).
-$SUDO sed -i '/en_US.UTF-8/s/^#//g' /etc/locale.gen
-$SUDO locale-gen
-
-# Debian doesn't mount tmpfs on /tmp as default but we want this to speed tests up so
-# they don't have to write to persistent disk.
-# https://github.com/containers/podman/pull/22533
-$SUDO mkdir -p /etc/systemd/system/local-fs.target.wants/
-cat <<EOF | $SUDO tee /etc/systemd/system/tmp.mount
-[Unit]
-Description=Temporary Directory /tmp
-ConditionPathIsSymbolicLink=!/tmp
-DefaultDependencies=no
-Conflicts=umount.target
-Before=local-fs.target umount.target
-After=swap.target
-
-[Mount]
-What=tmpfs
-Where=/tmp
-Type=tmpfs
-Options=size=75%%,mode=1777
-EOF
-# enable the unit by default
-$SUDO ln -s ../tmp.mount /etc/systemd/system/local-fs.target.wants/tmp.mount
-
 req_env_vars PACKER_BUILD_NAME

 bash $SCRIPT_DIRPATH/debian_packaging.sh

-# dnsmasq is set to bind 0.0.0.0:53, that will conflict with our dns tests.
-# We don't need a local resolver.
-$SUDO systemctl disable dnsmasq.service
-$SUDO systemctl mask dnsmasq.service
-
 if ! ((CONTAINER)); then
     warn "Making Debian kernel enable cgroup swap accounting"
-    SEDCMD='s/^GRUB_CMDLINE_LINUX="(.*)"/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1"/'
+    warn "Forcing CgroupsV1"
+    SEDCMD='s/^GRUB_CMDLINE_LINUX="(.*)"/GRUB_CMDLINE_LINUX="\1 cgroup_enable=memory swapaccount=1 systemd.unified_cgroup_hierarchy=0"/'
     ooe.sh $SUDO sed -re "$SEDCMD" -i /etc/default/grub.d/*
     ooe.sh $SUDO sed -re "$SEDCMD" -i /etc/default/grub
     ooe.sh $SUDO update-grub
@@ -62,10 +32,6 @@ fi

 nm_ignore_cni

-if ! ((CONTAINER)); then
-    initialize_local_cache_registry
-fi
-
 finalize

 echo "SUCCESS!"
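On hosts where the tmp.mount unit removed above is in effect (i.e. built from main), the change can be confirmed after a reboot; a sketch:

    systemctl status tmp.mount          # should be active (mounted)
    findmnt -no FSTYPE,OPTIONS /tmp     # tmpfs with size=75%,mode=1777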
@@ -88,9 +88,8 @@ if [[ $(uname -m) == "aarch64" ]]; then
     $SUDO env PATH=$PATH CARGO_HOME=$CARGO_HOME rustup target add aarch64-unknown-linux-gnu
 fi

-msg "Install tool to generate man pages"
-$SUDO go install github.com/cpuguy83/go-md2man/v2@latest
-$SUDO install /root/go/bin/go-md2man /usr/local/bin/
+msg "Install mandown to generate man pages"
+$SUDO env PATH=$PATH CARGO_HOME=$CARGO_HOME cargo install mandown

 # Downstream users of this image are specifically testing netavark & aardvark-dns
 # code changes. We want to start with using the RPMs because they deal with any
@@ -0,0 +1,98 @@
#!/bin/bash

# This script is called from fedora_setup.sh and various Dockerfiles.
# It's not intended to be used outside of those contexts. It assumes the lib.sh
# library has already been sourced, and that all "ground-up" package-related activity
# needs to be done, including repository setup and initial update.

set -e

SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")

# shellcheck source=./lib.sh
source "$REPO_DIRPATH/lib.sh"

# shellcheck disable=SC2154
warn "Enabling updates-testing repository for $PACKER_BUILD_NAME"
lilto ooe.sh $SUDO dnf install -y 'dnf-command(config-manager)'
lilto ooe.sh $SUDO dnf config-manager --set-enabled updates-testing

msg "Updating/Installing repos and packages for $OS_REL_VER"

bigto ooe.sh $SUDO dnf update -y

INSTALL_PACKAGES=(\
    bash-completion
    bridge-utils
    buildah
    bzip2
    curl
    findutils
    fuse3
    gcc
    git
    git-daemon
    glib2-devel
    glibc-devel
    hostname
    httpd-tools
    iproute
    iptables
    jq
    libtool
    lsof
    make
    nmap-ncat
    openssl
    openssl-devel
    pkgconfig
    podman
    policycoreutils
    protobuf
    protobuf-devel
    python-pip-wheel
    python-setuptools-wheel
    python-toml
    python-wheel-wheel
    python3-PyYAML
    python3-coverage
    python3-dateutil
    python3-docker
    python3-fixtures
    python3-libselinux
    python3-libsemanage
    python3-libvirt
    python3-pip
    python3-psutil
    python3-pylint
    python3-pytest
    python3-pyxdg
    python3-requests
    python3-requests-mock
    python3-virtualenv
    python3.6
    python3.8
    python3.9
    redhat-rpm-config
    rsync
    sed
    skopeo
    socat
    tar
    time
    tox
    unzip
    vim
    wget
    xz
    zip
    zstd
)

echo "Installing general build/test dependencies"
bigto $SUDO dnf install -y "${INSTALL_PACKAGES[@]}"

# It was observed in F33, dnf install doesn't always get you the latest/greatest
lilto $SUDO dnf update -y
@@ -28,7 +28,7 @@ req_env_vars PACKER_BUILD_NAME
 if [[ "$PACKER_BUILD_NAME" == "fedora" ]] && [[ ! "$PACKER_BUILD_NAME" =~ "prior" ]]; then
     warn "Enabling updates-testing repository for $PACKER_BUILD_NAME"
     lilto ooe.sh $SUDO dnf install -y 'dnf-command(config-manager)'
-    lilto ooe.sh $SUDO dnf config-manager setopt updates-testing.enabled=1
+    lilto ooe.sh $SUDO dnf config-manager --set-enabled updates-testing
 else
     warn "NOT enabling updates-testing repository for $PACKER_BUILD_NAME"
 fi
@@ -56,7 +56,6 @@ INSTALL_PACKAGES=(\
     curl
     device-mapper-devel
     dnsmasq
-    docker-distribution
     e2fsprogs-devel
     emacs-nox
     fakeroot
@@ -65,12 +64,10 @@ INSTALL_PACKAGES=(\
     fuse3
     fuse3-devel
     gcc
-    gh
     git
     git-daemon
     glib2-devel
     glibc-devel
-    glibc-langpack-en
     glibc-static
     gnupg
     go-md2man
@@ -83,7 +80,6 @@ INSTALL_PACKAGES=(\
     iproute
     iptables
     jq
-    koji
     krb5-workstation
     libassuan
     libassuan-devel
@@ -103,7 +99,7 @@ INSTALL_PACKAGES=(\
     libxslt-devel
     lsof
     make
-    man-db
+    mlocate
     msitools
     nfs-utils
     nmap-ncat
@@ -113,31 +109,22 @@ INSTALL_PACKAGES=(\
     pandoc
     parallel
     passt
-    perl-Clone
     perl-FindBin
-    pigz
     pkgconfig
     podman
-    podman-remote
-    pre-commit
     procps-ng
     protobuf
     protobuf-c
     protobuf-c-devel
    protobuf-devel
-    python3-fedora-distro-aliases
-    python3-koji-cli-plugins
     redhat-rpm-config
     rpcbind
     rsync
     runc
     sed
-    ShellCheck
     skopeo
     slirp4netns
     socat
-    sqlite-libs
-    sqlite-devel
     squashfs-tools
     tar
     time
@@ -151,13 +138,21 @@ INSTALL_PACKAGES=(\
     zstd
 )

-# Rawhide images don't need these packages
+# Test with CNI in Fedora N-1
+EXARG=""
+if [[ "$PACKER_BUILD_NAME" =~ prior ]]; then
+    EXARG="--exclude=netavark --exclude=aardvark-dns"
+fi
+
+# Rawhide images don't need these packages
 if [[ "$PACKER_BUILD_NAME" =~ fedora ]]; then
     INSTALL_PACKAGES+=( \
+        docker-compose
         python-pip-wheel
         python-setuptools-wheel
         python-toml
         python-wheel-wheel
+        python2
         python3-PyYAML
         python3-coverage
         python3-dateutil
@@ -174,38 +169,24 @@ if [[ "$PACKER_BUILD_NAME" =~ fedora ]]; then
         python3-requests
         python3-requests-mock
     )
-else # podman-sequoia is only available in Rawhide
-    timebomb 20251101 "Also install the package in future Fedora releases, and enable Sequoia support in users of the images."
-    INSTALL_PACKAGES+=( \
-        podman-sequoia
-    )
 fi

-# Workaround: Around the time of this commit, the `criu` package
-# was found to be missing a recommends-dependency on criu-libs.
-# Until a fixed rpm lands in the Fedora repositories, manually
-# include it here. This workaround should be removed once the
-# package is corrected (likely > 3.17.1-3).
-INSTALL_PACKAGES+=(criu-libs)
-
 # When installing during a container-build, having this present
 # will seriously screw up future dnf operations in very non-obvious ways.
-# bpftrace is only needed on the host as containers cannot run ebpf
-# programs anyway and it is very big so we should not bloat the container
-# images unnecessarily.
 if ! ((CONTAINER)); then
     INSTALL_PACKAGES+=( \
-        bpftrace
-        composefs
         container-selinux
-        fuse-overlayfs
         libguestfs-tools
         selinux-policy-devel
         policycoreutils
     )
-
-    # Extra packages needed by podman-machine-os
-    INSTALL_PACKAGES+=( \
-        podman-machine
-        osbuild
-        osbuild-tools
-        osbuild-ostree
-        xfsprogs
-        e2fsprogs
-    )
 fi

@@ -214,14 +195,13 @@ fi
 DOWNLOAD_PACKAGES=(\
     parallel
     podman-docker
-    python3-devel
-    python3-pip
+    podman-plugins
     python3-pytest
     python3-virtualenv
 )

 msg "Installing general build/test dependencies"
-bigto $SUDO dnf install -y "${INSTALL_PACKAGES[@]}"
+bigto $SUDO dnf install -y $EXARG "${INSTALL_PACKAGES[@]}"

 msg "Downloading packages for optional installation at runtime, as needed."
 $SUDO mkdir -p "$PACKAGE_DOWNLOAD_DIR"
@@ -235,6 +215,5 @@ $SUDO curl --fail --silent --location -O \
     https://storage.googleapis.com/minikube/releases/latest/minikube-latest.x86_64.rpm
 cd -

-# Occasionally following an install, there are more updates available.
-# This may be due to activation of suggested/recommended dependency resolution.
+# It was observed in F33, dnf install doesn't always get you the latest/greatest
 lilto $SUDO dnf update -y
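The $EXARG mechanism above relies on dnf's --exclude option to hold specific packages out of a transaction; conceptually, with illustrative package names:

    # Install podman but hold back the newer network stack RPMs
    sudo dnf install -y --exclude=netavark --exclude=aardvark-dns podman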
@@ -17,12 +17,6 @@ fi
 # shellcheck source=./lib.sh
 source "$REPO_DIRPATH/lib.sh"

-# Make /tmp tmpfs bigger, by default we only get 50%. Bump it to 75% so the tests have more storage.
-# Do not use 100% so we do not run out of memory for the process itself if tests start leaking big
-# files on /tmp.
-$SUDO mkdir -p /etc/systemd/system/tmp.mount.d
-echo -e "[Mount]\nOptions=size=75%%,mode=1777\n" | $SUDO tee /etc/systemd/system/tmp.mount.d/override.conf
-
 # packer and/or a --build-arg define this envar value uniformly
 # for both VM and container image build workflows.
 req_env_vars PACKER_BUILD_NAME
@@ -30,10 +24,17 @@ req_env_vars PACKER_BUILD_NAME
 # shellcheck disable=SC2154
 if [[ "$PACKER_BUILD_NAME" =~ "netavark" ]]; then
     bash $SCRIPT_DIRPATH/fedora-netavark_packaging.sh
+elif [[ "$PACKER_BUILD_NAME" =~ "podman-py" ]]; then
+    bash $SCRIPT_DIRPATH/fedora-podman-py_packaging.sh
 elif [[ "$PACKER_BUILD_NAME" =~ "build-push" ]]; then
     bash $SCRIPT_DIRPATH/build-push_packaging.sh
     # Registers qemu emulation for non-native execution
     $SUDO systemctl enable systemd-binfmt
+    for arch in amd64 s390x ppc64le arm64; do
+        msg "Caching latest $arch fedora image..."
+        $SUDO podman pull --quiet --arch=$arch \
+            registry.fedoraproject.org/fedora:$OS_RELEASE_VER
+    done
 else
     bash $SCRIPT_DIRPATH/fedora_packaging.sh
 fi
@@ -47,8 +48,6 @@ if ! ((CONTAINER)); then
 else
     msg "Enabling cgroup management from containers"
     ooe.sh $SUDO setsebool -P container_manage_cgroup true
-
-    initialize_local_cache_registry
 fi
 fi
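A quick way to confirm the binfmt registration and cross-arch image caching above took effect; the target architecture is illustrative:

    # Runs an s390x container on an x86_64 host via qemu-user-static
    sudo podman run --rm --arch=s390x \
        registry.fedoraproject.org/fedora:latest uname -m   # -> s390x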
@@ -1,345 +0,0 @@
#! /bin/bash
#
# local-cache-registry - set up and manage a local registry with cached images
#
# Used in containers CI, to reduce exposure to registry flakes.
#
# We start with the docker registry image. Pull it, extract the registry
# binary and config, tweak the config, and create a systemd unit file that
# will start the registry at boot.
#
# We also populate that registry with a (hardcoded) list of container
# images used in CI tests. That way a CI VM comes up already ready,
# and CI tests do not need to do remote pulls. The image list is
# hardcoded right here in this script file, in the automation_images
# repo. See below for reasons.
#
ME=$(basename $0)

###############################################################################
# BEGIN defaults

# FQIN of registry image. From this image, we extract the registry to run.
PODMAN_REGISTRY_IMAGE=quay.io/libpod/registry:2.8.2

# Fixed path to registry setup. This is the directory used by the registry.
PODMAN_REGISTRY_WORKDIR=/var/cache/local-registry

# Fixed port on which registry listens. This is hardcoded and must be
# shared knowledge among all CI repos that use this registry.
REGISTRY_PORT=60333

# Podman binary to run
PODMAN=${PODMAN:-/usr/bin/podman}

# Temporary directories for podman, so we don't clobber any system files.
# Wipe them upon script exit.
PODMAN_TMPROOT=$(mktemp -d --tmpdir $ME.XXXXXXX)
trap 'status=$?; rm -rf $PODMAN_TMPROOT && exit $status' 0

# Images to cache. Default prefix is "quay.io/libpod/"
#
# It seems evil to hardcode this list as part of the script itself
# instead of a separate file or resource but there's a good reason:
# keeping code and data together in one place makes it possible for
# a podman (and some day other repo?) developer to run a single
# command, contrib/cirrus/get-local-registry-script, which will
# fetch this script and allow the dev to run it to start a local
# registry on their system.
#
# As of 2024-07-02 this list includes podman and buildah images
#
# FIXME: periodically run this to look for no-longer-needed images:
#
#    for i in $(sed -ne '/IMAGELIST=/,/^[^ ]/p' <cache_images/local-cache-registry | sed -ne 's/^ *//p');do grep -q -R $i ../podman/test ../buildah/tests || echo "unused $i";done
#
declare -a IMAGELIST=(
    alpine:3.10.2
    alpine:latest
    alpine_healthcheck:latest
    alpine_nginx:latest
    alpine@sha256:634a8f35b5f16dcf4aaa0822adc0b1964bb786fca12f6831de8ddc45e5986a00
    alpine@sha256:f270dcd11e64b85919c3bab66886e59d677cf657528ac0e4805d3c71e458e525
    alpine@sha256:fa93b01658e3a5a1686dc3ae55f170d8de487006fb53a28efcd12ab0710a2e5f
    autoupdatebroken:latest
    badhealthcheck:latest
    busybox:1.30.1
    busybox:glibc
    busybox:latest
    busybox:musl
    cirros:latest
    fedora/python-311:latest
    healthcheck:config-only
    k8s-pause:3.5
    podman_python:latest
    redis:alpine
    registry:2.8.2
    registry:volume_omitted
    systemd-image:20240124
    testartifact:20250206-single
    testartifact:20250206-multi
    testartifact:20250206-multi-no-title
    testartifact:20250206-evil
    testdigest_v2s2
    testdigest_v2s2:20200210
    testimage:00000000
    testimage:00000004
    testimage:20221018
    testimage:20241011
    testimage:multiimage
    testimage@sha256:1385ce282f3a959d0d6baf45636efe686c1e14c3e7240eb31907436f7bc531fa
    testdigest_v2s2:20200210
    testdigest_v2s2@sha256:755f4d90b3716e2bf57060d249e2cd61c9ac089b1233465c5c2cb2d7ee550fdb
    volume-plugin-test-img:20220623
    podman/stable:v4.3.1
    podman/stable:v4.8.0
    skopeo/stable:latest
    ubuntu:latest
)

# END defaults
###############################################################################
# BEGIN help messages

missing=" argument is missing; see $ME -h for details"
usage="Usage: $ME [options] [initialize | cache IMAGE...]

$ME manages a local instance of a container registry.

When called to initialize a registry, $ME will pull
this image into a local temporary directory:

    $PODMAN_REGISTRY_IMAGE

...then extract the registry binary and config, tweak the config,
start the registry, and populate it with a list of images needed by tests:

    \$ $ME initialize

To fetch individual images into the cache:

    \$ $ME cache libpod/testimage:21120101

Override the default image and/or port with:

  -i IMAGE      registry image to pull (default: $PODMAN_REGISTRY_IMAGE)
  -P PORT       port to bind to (on 127.0.0.1) (default: $REGISTRY_PORT)

Other options:

  -h            display usage message
"

die () {
    echo "$ME: $*" >&2
    exit 1
}

# END help messages
###############################################################################
# BEGIN option processing

while getopts "i:P:hv" opt; do
    case "$opt" in
        i)    PODMAN_REGISTRY_IMAGE=$OPTARG ;;
        P)    REGISTRY_PORT=$OPTARG ;;
        h)    echo "$usage"; exit 0;;
        v)    verbose=1 ;;
        \?)   echo "Run '$ME -h' for help" >&2; exit 1;;
    esac
done
shift $((OPTIND-1))

# END option processing
###############################################################################
# BEGIN helper functions

function podman() {
    ${PODMAN} --root    ${PODMAN_TMPROOT}/root    \
              --runroot ${PODMAN_TMPROOT}/runroot \
              --tmpdir  ${PODMAN_TMPROOT}/tmp     \
              "$@"
}

###############
#  must_pass  #  Run a command quietly; abort with error on failure
###############
function must_pass() {
    local log=${PODMAN_TMPROOT}/log

    "$@" &> $log
    if [ $? -ne 0 ]; then
        echo "$ME: Command failed: $*" >&2
        cat $log >&2

        # If we ever get here, it's a given that the registry is not running.
        exit 1
    fi
}

###################
#  wait_for_port  #  Returns once port is available on localhost
###################
function wait_for_port() {
    local port=$1    # Numeric port

    local host=127.0.0.1
    local _timeout=5

    # Wait
    while [ $_timeout -gt 0 ]; do
        { exec {unused_fd}<> /dev/tcp/$host/$port; } &>/dev/null && return
        sleep 1
        _timeout=$(( $_timeout - 1 ))
    done

    die "Timed out waiting for port $port"
}

#################
#  cache_image  #  (singular) fetch one remote image
#################
function cache_image() {
    local img=$1

    # Almost all our images are under libpod; no need to repeat that part
    if ! expr "$img" : "^\(.*\)/" >/dev/null; then
        img="libpod/$img"
    fi

    # Almost all our images are from quay.io, but "domain.tld" prefix overrides
    registry=$(expr "$img" : "^\([^/.]\+\.[^/]\+\)/" || true)
    if [[ -n "$registry" ]]; then
        img=$(expr "$img" : "[^/]\+/\(.*\)")
    else
        registry=quay.io
    fi

    echo
    echo "...caching: $registry / $img"

    # FIXME: inspect, and only pull if missing?

    for retry in 1 2 3 0;do
        skopeo --registries-conf /dev/null \
               copy --all --dest-tls-verify=false \
               docker://$registry/$img \
               docker://127.0.0.1:${REGISTRY_PORT}/$img \
            && return

        sleep $((retry * 30))
    done

    die "Too many retries; unable to cache $registry/$img"
}

##################
#  cache_images  #  (plural) fetch all remote images
##################
function cache_images() {
    for img in "${IMAGELIST[@]}"; do
        cache_image "$img"
    done
}

# END helper functions
###############################################################################
# BEGIN action processing

###################
#  do_initialize  #  Start, then cache images
###################
#
# Intended to be run only from automation_images repo, or by developer
# on local workstation. This should never be run from podman/buildah/etc
# because it defeats the entire purpose of the cache -- a dead registry
# will cause this to fail.
#
function do_initialize() {
    # This action can only be run as root
    if [[ "$(id -u)" != "0" ]]; then
        die "this script must be run as root"
    fi

    # For the next few commands, die on any error
    set -e

    mkdir -p ${PODMAN_REGISTRY_WORKDIR}

    # Copy of this script
    if ! [[ $0 =~ ${PODMAN_REGISTRY_WORKDIR} ]]; then
        rm -f ${PODMAN_REGISTRY_WORKDIR}/$ME
        cp $0 ${PODMAN_REGISTRY_WORKDIR}/$ME
    fi

    # Give it three tries, to compensate for flakes
    podman pull ${PODMAN_REGISTRY_IMAGE} &>/dev/null ||
        podman pull ${PODMAN_REGISTRY_IMAGE} &>/dev/null ||
        must_pass podman pull ${PODMAN_REGISTRY_IMAGE}

    # Mount the registry image...
    registry_root=$(podman image mount ${PODMAN_REGISTRY_IMAGE})

    # ...copy the registry binary into our own bin...
    cp ${registry_root}/bin/registry /usr/bin/docker-registry

    # ...and copy the config, making a few adjustments to it.
    sed -e "s;/var/lib/registry;${PODMAN_REGISTRY_WORKDIR};" \
        -e "s;:5000;127.0.0.1:${REGISTRY_PORT};" \
        < ${registry_root}/etc/docker/registry/config.yml \
        > /etc/local-registry.yml
    podman image umount -a

    # Create a systemd unit file. Enable it (so it starts at boot)
    # and also start it --now.
    cat > /etc/systemd/system/$ME.service <<EOF
[Unit]
Description=Local Cache Registry for CI tests

[Service]
ExecStart=/usr/bin/docker-registry serve /etc/local-registry.yml
Type=exec

[Install]
WantedBy=multi-user.target
EOF

    systemctl daemon-reload
    systemctl enable --now $ME.service

    wait_for_port ${REGISTRY_PORT}

    cache_images
}

##############
#  do_cache  #  Cache one or more images
##############
function do_cache() {
    if [[ -z "$*" ]]; then
        die "missing args to 'cache'"
    fi

    for img in "$@"; do
        cache_image "$img"
    done
}

# END action processing
###############################################################################
# BEGIN command-line processing

# First command-line arg must be an action
action=${1?ACTION$missing}
shift

case "$action" in
    init|initialize)   do_initialize ;;
    cache)             do_cache "$@" ;;
    *)   die "Unknown action '$action'; must be init | cache IMAGE" ;;
esac

# END command-line processing
###############################################################################

exit 0
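For reference, the typical invocations of the helper removed above, per its own usage text (initialize must run as root):

    sudo ./local-cache-registry initialize
    sudo ./local-cache-registry cache libpod/testimage:21120101
    # Non-default port, if 60333 is already taken:
    sudo ./local-cache-registry -P 5001 initialize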
@@ -16,9 +16,18 @@ source "$REPO_DIRPATH/lib.sh"
 # for both VM and container image build workflows.
 req_env_vars PACKER_BUILD_NAME

-warn "Upgrading Fedora '$OS_RELEASE_VER' to rawhide, this might break."
-# shellcheck disable=SC2154
-warn "If so, this script may be found in the repo. as '$SCRIPT_DIRPATH/$SCRIPT_FILENAME'."
+# Going from F38 -> rawhide requires some special handling WRT DNF upgrade to DNF5
+if [[ "$OS_RELEASE_VER" -eq 38 ]]; then
+    warn "Upgrading dnf -> dnf5"
+    showrun $SUDO dnf update -y dnf
+    showrun $SUDO dnf install -y dnf5
+    # Even dnf5 refuses to remove the 'dnf' package.
+    showrun $SUDO rpm -e yum dnf
+else
+    warn "Upgrading Fedora '$OS_RELEASE_VER' to rawhide, this might break."
+    # shellcheck disable=SC2154
+    warn "If so, this script may be found in the repo. as '$SCRIPT_DIRPATH/$SCRIPT_FILENAME'."
+fi

 # Show what's happening
 set -x
@@ -30,9 +39,6 @@ $SUDO sed -i -r -e 's/^gpgcheck=.+/gpgcheck=0/' /etc/yum.repos.d/*.repo
 $SUDO dnf5 -y distro-sync --releasever=rawhide --allowerasing
 $SUDO dnf5 upgrade -y

-# A shared fedora_packaging.sh script is called next that doesn't always support dnf5
-$SUDO ln -s $(type -P dnf5) /usr/local/bin/dnf
-
 # Packer will try to run 'cache_images/fedora_setup.sh' next, make sure the system
 # is actually running rawhide (and verify it boots).
 $SUDO reboot
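The dnf -> dnf5 symlink shim above (present only on the main side) works purely through PATH precedence; a sketch of verifying it on a host where it is in place:

    sudo ln -s "$(type -P dnf5)" /usr/local/bin/dnf
    type -a dnf   # /usr/local/bin/dnf (the dnf5 shim) should be listed first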
@@ -1,10 +1,5 @@
 ARG BASE_NAME=registry.fedoraproject.org/fedora-minimal
-# FIXME FIXME FIXME! 2023-11-16: revert "38" to "latest"
-# ...38 is because as of this moment, latest is 39, which
-# has python-3.12, which causes something to barf:
-# aiohttp/_websocket.c:3744:45: error: ‘PyLongObject’ {aka ‘struct _longobject’} has no member named ‘ob_digit’
-# Possible cause: https://github.com/cython/cython/issues/5238
-ARG BASE_TAG=38
+ARG BASE_TAG=latest
 FROM ${BASE_NAME}:${BASE_TAG} as updated_base

 RUN microdnf upgrade -y && \
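On the main-branch side, the pin can be lifted at build time without editing the file, since BASE_TAG is an ordinary build argument; an illustrative invocation (the image tag is hypothetical):

    podman build --build-arg BASE_TAG=latest -t automation_image_test .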
@@ -0,0 +1,17 @@
{
  "builds": [
    {
      "name": "fedora-podman-py",
      "builder_type": "googlecompute",
      "build_time": 1658176090,
      "files": null,
      "artifact_id": "fedora-podman-py-c5419329914142720",
      "packer_run_uuid": "e5b1e6ab-37a5-a695-624d-47bf0060b272",
      "custom_data": {
        "IMG_SFX": "5419329914142720",
        "STAGE": "cache"
      }
    }
  ],
  "last_run_uuid": "e5b1e6ab-37a5-a695-624d-47bf0060b272"
}
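Packer manifests like the one added above are typically consumed with jq to recover build metadata; a sketch, with a hypothetical file path:

    jq -r '.builds[0].custom_data.IMG_SFX' test_cache-manifest.json
    # -> 5419329914142720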
@ -1,36 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# 2024-01-25 esm
|
|
||||||
# 2024-06-28 cevich
|
|
||||||
#
|
|
||||||
# This script is intended to be used by the `pre-commit` utility, or it may
|
|
||||||
# be manually copied (or symlinked) as local `.git/hooks/pre-push` file.
|
|
||||||
# It's purpose is to keep track of image-suffix values which have already
|
|
||||||
# been pushed, to avoid them being immediately rejected by CI validation.
|
|
||||||
# To use it with the `pre-commit` utility, simply add something like this
|
|
||||||
# to your `.pre-commit-config.yaml`:
|
|
||||||
#
|
|
||||||
# ---
|
|
||||||
# repos:
|
|
||||||
# - repo: https://github.com/containers/automation_images.git
|
|
||||||
# rev: <tag or commit sha>
|
|
||||||
# hooks:
|
|
||||||
# - id: check-imgsfx
|
|
||||||
|
|
||||||
set -eo pipefail
|
|
||||||
|
|
||||||
# Ensure CWD is the repo root
|
|
||||||
cd $(dirname "${BASH_SOURCE[0]}")
|
|
||||||
imgsfx=$(<IMG_SFX)
|
|
||||||
|
|
||||||
imgsfx_history=".git/hooks/imgsfx.history"
|
|
||||||
|
|
||||||
if [[ -e $imgsfx_history ]]; then
|
|
||||||
if grep -q "$imgsfx" $imgsfx_history; then
|
|
||||||
echo "FATAL: $imgsfx has already been used" >&2
|
|
||||||
echo "Please rerun 'make IMG_SFX'" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo $imgsfx >>$imgsfx_history
|
|
|
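The guard above boils down to a substring search of the history file; a manual equivalent for checking whether the current suffix was already pushed, using the same paths the hook uses:

    grep -q "$(<IMG_SFX)" .git/hooks/imgsfx.history \
        && echo "IMG_SFX already used; rerun 'make IMG_SFX'"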
@@ -1,4 +1,4 @@
-# This Containerfile defines the environment for Cirrus-CI when
+# This dockerfile defines the environment for Cirrus-CI when
 # running automated checks and tests. It may also be used
 # for development/debugging or manually building most
 # Makefile targets.
@@ -8,16 +8,16 @@ FROM registry.fedoraproject.org/fedora:${FEDORA_RELEASE}
 ARG PACKER_VERSION
 MAINTAINER https://github.com/containers/automation_images/ci

-ENV CIRRUS_WORKING_DIR=/var/tmp/automation_images \
+ENV CIRRUS_WORKING_DIR=/tmp/automation_images \
     PACKER_INSTALL_DIR=/usr/local/bin \
     PACKER_VERSION=$PACKER_VERSION \
     CONTAINER=1

-# When using the containerfile-as-ci feature of Cirrus-CI, it's unsafe
+# When using the dockerfile-as-ci feature of Cirrus-CI, it's unsafe
 # to rely on COPY or ADD instructions. See documentation for warning.
 RUN test -n "$PACKER_VERSION"
 RUN dnf update -y && \
-    dnf -y mark dependency $(rpm -qa | grep -Ev '(gpg-pubkey)|(dnf)|(sudo)') && \
+    dnf mark remove $(rpm -qa | grep -Ev '(gpg-pubkey)|(dnf)|(sudo)') && \
     dnf install -y \
         ShellCheck \
         bash-completion \
@@ -38,7 +38,7 @@ RUN dnf update -y && \
         util-linux \
         unzip \
     && \
-    dnf -y mark user dnf sudo $_ && \
+    dnf mark install dnf sudo $_ && \
     dnf autoremove -y && \
     dnf clean all
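The two spellings in the mark/autoremove dance above are the dnf5 vs dnf4 forms of the same idea: flip every package's install reason to "dependency", reinstate dnf and sudo as user-installed, then let autoremove sweep the rest. In isolation, with an illustrative package name:

    dnf mark remove pkgA          # dnf4: record pkgA as a dependency (autoremovable)
    dnf -y mark dependency pkgA   # dnf5 equivalent
    dnf -y mark user pkgA         # dnf5: pin pkgA as user-installed again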
@@ -35,14 +35,6 @@ if [[ -n "$AWS_INI" ]]; then
     set_aws_filepath
 fi

-id
-# FIXME: ssh-keygen seems to fail to create keys with Permission denied
-# in the base_images make target, I have no idea why but all CI jobs are
-# broken because of this. Let's try without selinux.
-if [[ "$(getenforce)" == "Enforcing" ]]; then
-    setenforce 0
-fi
-
 set -x
 cd "$REPO_DIRPATH"
 export IMG_SFX=$IMG_SFX
@@ -44,6 +44,13 @@ SRC_FQIN="$TARGET_NAME:$IMG_SFX"

 make "$TARGET_NAME" IMG_SFX=$IMG_SFX

+# Prevent pushing 'latest' images from PRs, only branches and tags
+# shellcheck disable=SC2154
+if [[ $PUSH_LATEST -eq 1 ]] && [[ -n "$CIRRUS_PR" ]]; then
+    echo -e "\nWarning: Refusing to push 'latest' images when testing from a PR.\n"
+    PUSH_LATEST=0
+fi
+
 # Don't leave credential file sticking around anywhere
 trap "podman logout --all" EXIT INT CONT
 set +x  # protect username/password values
@@ -57,3 +64,9 @@ set -x  # Easier than echo'ing out status for everything
 # shellcheck disable=SC2154
 podman tag "$SRC_FQIN" "$DEST_FQIN"
 podman push "$DEST_FQIN"
+
+if ((PUSH_LATEST)); then
+    LATEST_FQIN="${DEST_FQIN%:*}:latest"
+    podman tag "$SRC_FQIN" "$LATEST_FQIN"
+    podman push "$LATEST_FQIN"
+fi
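The LATEST_FQIN derivation above uses `%` expansion to drop everything after the final colon; a worked example with an illustrative FQIN:

    DEST_FQIN="quay.io/libpod/fedora_podman:c20230726t191046z"
    echo "${DEST_FQIN%:*}:latest"   # -> quay.io/libpod/fedora_podman:latest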
@ -1,36 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -eo pipefail
|
|
||||||
|
|
||||||
if [[ -z "$CI" ]] || [[ "$CI" != "true" ]] || [[ -z "$IMG_SFX" ]]; then
|
|
||||||
echo "This script is intended to be run by CI and nowhere else."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
# This envar is set by the CI system
|
|
||||||
# shellcheck disable=SC2154
|
|
||||||
if [[ "$CIRRUS_CHANGE_MESSAGE" =~ .*CI:DOCS.* ]]; then
|
|
||||||
echo "This script must never tag anything after a [CI:DOCS] PR merge"
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Ensure no secrets leak via debugging var expansion
|
|
||||||
set +x
|
|
||||||
# This secret envar is set by the CI system
|
|
||||||
# shellcheck disable=SC2154
|
|
||||||
echo "$REG_PASSWORD" | \
|
|
||||||
skopeo login --password-stdin --username "$REG_USERNAME" "$REGPFX"
|
|
||||||
|
|
||||||
declare -a imgnames
|
|
||||||
imgnames=( imgts imgobsolete imgprune gcsupld get_ci_vm orphanvms ccia )
|
|
||||||
# A [CI:TOOLING] build doesn't produce CI VM images
|
|
||||||
if [[ ! "$CIRRUS_CHANGE_MESSAGE" =~ .*CI:TOOLING.* ]]; then
|
|
||||||
imgnames+=( skopeo_cidev fedora_podman prior-fedora_podman )
|
|
||||||
fi
|
|
||||||
|
|
||||||
for imgname in "${imgnames[@]}"; do
|
|
||||||
echo "##### Tagging $imgname -> latest"
|
|
||||||
# IMG_SFX is defined by CI system
|
|
||||||
# shellcheck disable=SC2154
|
|
||||||
skopeo copy "docker://$REGPFX/$imgname:c${IMG_SFX}" "docker://$REGPFX/${imgname}:latest"
|
|
||||||
done
|
|
|
@@ -13,7 +13,7 @@ REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
 # shellcheck source=./lib.sh
 source "$REPO_DIRPATH/lib.sh"

-req_env_vars CIRRUS_PR CIRRUS_PR_TITLE CIRRUS_USER_PERMISSION CIRRUS_BASE_BRANCH
+req_env_vars CIRRUS_PR CIRRUS_BASE_SHA CIRRUS_PR_TITLE

 show_env_vars

@@ -21,16 +21,6 @@ show_env_vars
 [[ "$CIRRUS_CI" == "true" ]] || \
     die "This script is only/ever intended to be run by Cirrus-CI."

-# This is imperfect security-wise, but attempt to catch an accidental
-# change in Cirrus-CI Repository settings. Namely the hard-to-read
-# "slider" that enables non-contributors to run jobs. We don't want
-# that on this repo, ever, because there are sensitive secrets in use.
-# This variable is set by CI and validated non-empty above
-# shellcheck disable=SC2154
-if [[ "$CIRRUS_USER_PERMISSION" != "write" ]] && [[ "$CIRRUS_USER_PERMISSION" != "admin" ]]; then
-    die "CI Execution not supported with permission level '$CIRRUS_USER_PERMISSION'"
-fi
-
 for target in image_builder/gce.json base_images/cloud.json \
               cache_images/cloud.json win_images/win-server-wsl.json; do
     if ! make $target; then
@@ -52,20 +42,17 @@ if [[ "$CIRRUS_PR_TITLE" =~ CI:DOCS ]]; then
     exit 0
 fi

-# Fix "Not a valid object name main" error from Cirrus's
-# incomplete checkout.
-git remote update origin
-# Determine where PR branched off of $CIRRUS_BASE_BRANCH
-# shellcheck disable=SC2154
-base_sha=$(git merge-base origin/${CIRRUS_BASE_BRANCH:-main} HEAD)
-
-if ! git diff --name-only ${base_sha}..HEAD | grep -q IMG_SFX; then
+# Variable is defined by Cirrus-CI at runtime
+# shellcheck disable=SC2154
+if ! git diff --name-only ${CIRRUS_BASE_SHA}..HEAD | grep -q IMG_SFX; then
     die "Every PR that builds images must include an updated IMG_SFX file.
 Simply run 'make IMG_SFX', commit the result, and re-push."
 else
     IMG_SFX="$(<./IMG_SFX)"
     # IMG_SFX was modified vs PR's base-branch, confirm version moved forward
-    v_prev=$(git show ${base_sha}:IMG_SFX 2>&1 || true)
+    # shellcheck disable=SC2154
+    v_prev=$(git show ${CIRRUS_BASE_SHA}:IMG_SFX 2>&1 || true)
     # Verify new IMG_SFX value always version-sorts later than previous value.
     # This prevents screwups due to local timezone, bad, or unset clocks, etc.
     new_img_ver=$(awk -F 't' '{print $1"."$2}'<<<"$IMG_SFX" | cut -dz -f1)
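A worked example of the version derivation above, for a suffix of the form YYYYMMDDtHHMMSSz-tags (the value is illustrative):

    IMG_SFX="20230726t191046z-f38f37d13"
    awk -F 't' '{print $1"."$2}' <<<"$IMG_SFX" | cut -dz -f1
    # -> 20230726.191046  (date.time, suitable for version-sort comparison)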
@ -1,43 +0,0 @@
|
||||||
# See https://pre-commit.com for more information
|
|
||||||
# See https://pre-commit.com/hooks.html for more hooks
|
|
||||||
repos:
|
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
|
||||||
rev: v4.6.0
|
|
||||||
hooks:
|
|
||||||
- id: trailing-whitespace
|
|
||||||
- id: end-of-file-fixer
|
|
||||||
- id: check-yaml
|
|
||||||
- id: check-added-large-files
|
|
||||||
- id: check-symlinks
|
|
||||||
- id: mixed-line-ending
|
|
||||||
- id: no-commit-to-branch
|
|
||||||
args: [--branch, main]
|
|
||||||
- repo: https://github.com/codespell-project/codespell
|
|
||||||
rev: v2.3.0
|
|
||||||
hooks:
|
|
||||||
- id: codespell
|
|
||||||
args: [--config, .codespellrc]
|
|
||||||
- repo: https://github.com/jumanjihouse/pre-commit-hooks
|
|
||||||
rev: 3.0.0
|
|
||||||
hooks:
|
|
||||||
- id: forbid-binary
|
|
||||||
exclude: >
|
|
||||||
(?x)^(
|
|
||||||
get_ci_vm/good_repo_test/dot_git.tar.gz
|
|
||||||
)$
|
|
||||||
- id: script-must-have-extension
|
|
||||||
- id: shellcheck
|
|
||||||
# These come from ci/shellcheck.sh
|
|
||||||
args:
|
|
||||||
- --color=always
|
|
||||||
- --format=tty
|
|
||||||
- --shell=bash
|
|
||||||
- --external-sources
|
|
||||||
- --enable=add-default-case,avoid-nullary-conditions,check-unassigned-uppercase
|
|
||||||
- --exclude=SC2046,SC2034,SC2090,SC2064
|
|
||||||
- --wiki-link-count=0
|
|
||||||
- --severity=warning
|
|
||||||
- repo: https://github.com/containers/automation_images.git
|
|
||||||
rev: 2e5a2acfe21cc4b13511b453733b8875e592ad9c
|
|
||||||
hooks:
|
|
||||||
- id: check-imgsfx
|
|
|
@@ -1,13 +1,14 @@
-# This is a listing of Google Cloud Platform Project IDs for
-# orphan VM monitoring and possibly other automation tasks.
-# Note: CI VM images produced by this repo are all stored within
-# the libpod-218412 project (in addition to some AWS EC2)
+# This is a listing of GCP Project IDs which use images produced by
+# this repo. It's used by the "Orphan VMs" github action to monitor
+# for any leftover/lost VMs.
 buildah
 conmon-222014
 containers-build-source-image
+dnsname-8675309
 libpod-218412
 netavark-2021
 oci-seccomp-bpf-hook
+podman-py
 skopeo
 storage-240716
 udica-247612
@ -5,36 +5,6 @@ This directory contains the source for building [the
 This image is used by many containers-org repos' `hack/get_ci_vm.sh` script.
 It is not intended to be called via any other mechanism.
-
-In general/high-level terms, the architecture and operation is:
-
-1. [containers/automation hosts cirrus-ci_env](https://github.com/containers/automation/tree/main/cirrus-ci_env),
-   a python mini-implementation of a `.cirrus.yml` parser. Its only job is to extract all required envars,
-   given a task name (including from a matrix element). It's highly dependent on
-   [certain YAML formatting requirements](README.md#downstream-repository-cirrusyml-requirements). If the target
-   repo doesn't follow those standards, nasty/ugly python errors will vomit forth. Mainly this has to do with
-   Cirrus-CI's use of a non-standard YAML parser, allowing things like certain duplicate dictionary keys.
-1. [containers/automation_images hosts get_ci_vm](https://github.com/containers/automation_images/tree/main/get_ci_vm),
-   a bundling of the `cirrus-ci_env` python script with an `entrypoint.sh` script inside a container image.
-1. When a user runs `hack/get_ci_vm.sh` inside a target repo, the container image is entered, and `.cirrus.yml`
-   is parsed based on the CLI task-name. A VM is then provisioned based on specific envars (see the "Env. Vars."
-   entries in the [APIv1](README.md#env-vars) and [APIv2](README.md#env-vars-1) sections below).
-   This is the most complex part of the process.
-1. The remote system will not have **any** of the otherwise automatic Cirrus-CI operations performed (like "clone"),
-   nor any magic CI variables defined. Once the VM is ready, the container entrypoint script transfers a copy of
-   the local repo (including any uncommitted changes).
-1. The container entrypoint script then performs **_remote_** execution of the `hack/get_ci_vm.sh` script,
-   including the magic `--setup` parameter. Though it varies by repo, typically this will establish everything
-   necessary to simulate a CI environment, via a call to the repo's own `setup.sh` or equivalent. Typically
-   the repo's setup scripts will persist any required envars into `/etc/ci_environment` or similar, though
-   this isn't universal.
-1. Lastly, the user is dropped into a shell on the VM, inside the repo copy, with all envars defined and
-   ready to start running tests.
-
-_Note_: If any envars are found to be missing, they must be defined by updating either the repo's normal CI
-setup scripts (preferred), or the `hack/get_ci_vm.sh` `--setup` section.
-
-# Building
-
 Example build (from repository root):

 ```bash
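A hedged usage sketch of the workflow described above; the repository and task name here are hypothetical, since the real task name depends entirely on the downstream repo's `.cirrus.yml`:

```bash
# Hypothetical example: run from a downstream repository checkout.
cd ~/src/some-containers-repo
hack/get_ci_vm.sh "some cirrus task name"
```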
@ -66,9 +66,9 @@ delvm() {
 }

 image_hints() {
-    _BIS=$(grep -E -m 1 '_BUILT_IMAGE_SUFFIX:[[:space:]+"[[:print:]]+"' \
+    _BIS=$(egrep -m 1 '_BUILT_IMAGE_SUFFIX:[[:space:]+"[[:print:]]+"' \
           "$SECCOMPHOOKROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]')
-    grep -E '[[:space:]]+[[:alnum:]].+_CACHE_IMAGE_NAME:[[:space:]+"[[:print:]]+"' \
+    egrep '[[:space:]]+[[:alnum:]].+_CACHE_IMAGE_NAME:[[:space:]+"[[:print:]]+"' \
         "$SECCOMPHOOKROOT/.cirrus.yml" | cut -d: -f 2 | tr -d '"[:blank:]' | \
         sed -r -e "s/\\\$[{]_BUILT_IMAGE_SUFFIX[}]/$_BIS/" | sort -u
 }
@ -141,7 +141,7 @@ cd $SECCOMPHOOKROOT

 # Attempt to determine if named 'oci-seccomp-bpf-hook' gcloud configuration exists
 showrun $PGCLOUD info > $TMPDIR/gcloud-info
-if grep -E -q "Account:.*None" $TMPDIR/gcloud-info
+if egrep -q "Account:.*None" $TMPDIR/gcloud-info
 then
     echo -e "\n${YEL}WARNING: Can't find gcloud configuration for 'oci-seccomp-bpf-hook', running init.${NOR}"
     echo -e "    ${RED}Please choose '#1: Re-initialize' and 'login' if asked.${NOR}"
@ -151,7 +151,7 @@ then

 # Verify it worked (account name == someone@example.com)
 $PGCLOUD info > $TMPDIR/gcloud-info-after-init
-if grep -E -q "Account:.*None" $TMPDIR/gcloud-info-after-init
+if egrep -q "Account:.*None" $TMPDIR/gcloud-info-after-init
 then
     echo -e "${RED}ERROR: Could not initialize 'oci-seccomp-bpf-hook' configuration in gcloud.${NOR}"
     exit 5
@ -235,7 +235,7 @@ has_valid_aws_credentials() {
     _awsoutput=$($AWSCLI configure list 2>&1 || true)
     dbg "$AWSCLI configure list"
     dbg "$_awsoutput"
-    if grep -E -qx 'The config profile.+could not be found'<<<"$_awsoutput"; then
+    if egrep -qx 'The config profile.+could not be found'<<<"$_awsoutput"; then
         dbg "AWS config/credentials are missing"
         return 1
     elif [[ ! -r "$EC2_SSH_KEY" ]] || [[ ! -r "${EC2_SSH_KEY}.pub" ]]; then
@ -413,9 +413,6 @@ make_setup_tarball() {
     status "Preparing setup tarball for instance."
     req_env_vars DESTDIR _TMPDIR SRCDIR UPSTREAM_REPO
     mkdir -p "${_TMPDIR}$DESTDIR"
-    # Mark the volume-mounted source repo as safe system-wide (w/in the container)
-    git config --global --add safe.directory "$SRCDIR"
-    git config --global --add safe.directory "$SRCDIR/.git"
     # We have no way of knowing what state or configuration the user's
     # local repository is in. Work from a local clone, so we can
     # specify our own setup and prevent unexpected script breakage.
@ -2,9 +2,9 @@

 # This script is intended to be executed as part of the container
 # image build process. Using it under any other context is virtually
-# guaranteed to cause you much pain and suffering.
+# guarantied to cause you much pain and suffering.

-set -xeo pipefail
+set -eo pipefail

 SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
 SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
@ -14,7 +14,6 @@ source "$REPO_DIRPATH/lib.sh"

 declare -a PKGS
 PKGS=( \
-    aws-cli
     coreutils
     curl
     gawk
@ -31,7 +30,9 @@ apk upgrade
 apk add --no-cache "${PKGS[@]}"
 rm -rf /var/cache/apk/*

-aws --version  # Confirm that aws actually runs
+pip3 install --upgrade pip
+pip3 install --no-cache-dir awscli
+aws --version  # Confirm it actually runs

 install_automation_tooling cirrus-ci_env
@ -78,7 +78,7 @@ testf() {
         echo "# $@" > /dev/stderr
     fi

-    # Using grep -E vs file safer than shell builtin test
+    # Using egrep vs file safer than shell builtin test
     local a_out_f
     local a_exit=0
     a_out_f=$(mktemp -p '' "tmp_${FUNCNAME[0]}_XXXXXXXX")
@ -109,7 +109,7 @@ testf() {
     if ((TEST_DEBUG)); then
         echo "Received $(wc -l $a_out_f | awk '{print $1}') output lines of $(wc -c $a_out_f | awk '{print $1}') bytes total"
     fi
-    if grep -E -q "$e_out_re" "${a_out_f}.oneline"; then
+    if egrep -q "$e_out_re" "${a_out_f}.oneline"; then
         _test_report "Command $1 exited as expected with expected output" "0" "$a_out_f"
     else
         _test_report "Expecting regex '$e_out_re' match to (whitespace-squashed) output" "1" "$a_out_f"
@ -67,7 +67,7 @@ else
 fi

 # Support both '.CHECKSUM' and '-CHECKSUM' at the end
-filename=$(grep -E -i -m 1 -- "$extension$" <<<"$by_arch" || true)
+filename=$(egrep -i -m 1 -- "$extension$" <<<"$by_arch" || true)
 [[ -n "$filename" ]] || \
     die "No '$extension' targets among $by_arch"
@ -4,7 +4,7 @@
 # at the root of this repository. It should be built with
 # the repository root as the context directory.

-ARG CENTOS_STREAM_RELEASE=9
+ARG CENTOS_STREAM_RELEASE=8
 FROM quay.io/centos/centos:stream${CENTOS_STREAM_RELEASE}
 ARG PACKER_VERSION
 MAINTAINER https://github.com/containers/automation_images/image_builder
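As a sketch of how such a Containerfile might be exercised locally, assuming the repository root as the build context (the tag and build-arg override below are illustrative, not taken from this repo's CI):

```bash
# Sketch only: build from the repository root as the context directory,
# optionally overriding the stream release declared by the ARG above.
podman build -t image_builder \
    --build-arg CENTOS_STREAM_RELEASE=9 \
    -f image_builder/Containerfile .
```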
@ -45,16 +45,16 @@ provisioners:
  - type: 'shell'
    inline:
      - 'set -e'
-     - 'mkdir -p /var/tmp/automation_images'
+     - 'mkdir -p /tmp/automation_images'

  - type: 'file'
    source: '{{ pwd }}/'
-   destination: '/var/tmp/automation_images/'
+   destination: '/tmp/automation_images/'

  - type: 'shell'
    inline:
      - 'set -e'
-     - '/bin/bash /var/tmp/automation_images/image_builder/setup.sh'
+     - '/bin/bash /tmp/automation_images/image_builder/setup.sh'

 post-processors:
     # Must be double-nested to guarantee execution order
@ -1,9 +1,16 @@
-# Copy-pasted from https://cloud.google.com/sdk/docs/install#red-hatfedoracentos
-[google-cloud-cli]
-name=Google Cloud CLI
-baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el9-x86_64
+[google-compute-engine]
+name=Google Compute Engine
+baseurl=https://packages.cloud.google.com/yum/repos/google-compute-engine-el8-x86_64-stable
 enabled=1
 gpgcheck=1
-repo_gpgcheck=0
-gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+
+[google-cloud-sdk]
+name=Google Cloud SDK
+baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el8-x86_64
+enabled=1
+gpgcheck=1
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
@ -23,19 +23,6 @@ source "$REPO_DIRPATH/lib.sh"

 dnf update -y
 dnf -y install epel-release
-# Allow erasing pre-installed curl-minimal package
-dnf install -y --allowerasing $(<"$INST_PKGS_FP")
-
-# As of 2024-04-24 installing the EPEL `awscli` package results in error:
-#   nothing provides python3.9dist(docutils) >= 0.10
-# Grab the binary directly from amazon instead
-# https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html
-AWSURL="https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip"
-cd /tmp
-curl --fail --location -O "${AWSURL}"
-# There's little reason to see every single file extracted
-unzip -q awscli*.zip
-./aws/install -i /usr/local/share/aws-cli -b /usr/local/bin
-rm -rf awscli*.zip ./aws
+dnf install -y $(<"$INST_PKGS_FP")

 install_automation_tooling
@ -1,3 +1,4 @@
+awscli
 buildah
 bash-completion
 curl
@ -5,13 +6,12 @@ findutils
 gawk
 genisoimage
 git
-google-cloud-cli
+google-cloud-sdk
 jq
 libvirt
 libvirt-admin
 libvirt-client
 libvirt-daemon
-libxcrypt-compat
 make
 openssh
 openssl
@ -24,7 +24,6 @@ rng-tools
 rootfiles
 rsync
 sed
-skopeo
 tar
 unzip
 util-linux
@ -11,13 +11,13 @@ set -eo pipefail
 # shellcheck source=imgts/lib_entrypoint.sh
 source /usr/local/bin/lib_entrypoint.sh

-req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI IMG_SFX
+req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI

 gcloud_init

 # Set this to 1 for testing
 DRY_RUN="${DRY_RUN:-0}"
-OBSOLETE_LIMIT=50
+OBSOLETE_LIMIT=10
 THEFUTURE=$(date --date='+1 hour' +%s)
 TOO_OLD_DAYS='30'
 TOO_OLD_DESC="$TOO_OLD_DAYS days ago"
@ -40,8 +40,8 @@ $GCLOUD compute images list --format="$FORMAT" --filter="$FILTER" | \
     count_image
     reason=""
     created_ymd=$(date --date=$creationTimestamp --iso-8601=date)
-    permanent=$(grep -E --only-matching --max-count=1 --ignore-case 'permanent=true' <<< $labels || true)
-    last_used=$(grep -E --only-matching --max-count=1 'last-used=[[:digit:]]+' <<< $labels || true)
+    permanent=$(egrep --only-matching --max-count=1 --ignore-case 'permanent=true' <<< $labels || true)
+    last_used=$(egrep --only-matching --max-count=1 'last-used=[[:digit:]]+' <<< $labels || true)

     LABELSFX="labels: '$labels'"

@ -54,14 +54,6 @@ $GCLOUD compute images list --format="$FORMAT" --filter="$FILTER" | \
         continue
     fi

-    # Any image matching the currently in-use IMG_SFX must always be preserved
-    # Value is defined in cirrus.yml
-    # shellcheck disable=SC2154
-    if [[ "$name" =~ $IMG_SFX ]]; then
-        msg "Retaining current (latest) image $name | $labels"
-        continue
-    fi
-
     # No label was set
     if [[ -z "$last_used" ]]
     then  # image lacks any tracking labels
@ -147,9 +139,9 @@ for (( i=nr_amis ; i ; i-- )); do
 done

 unset automation permanent reason
-automation=$(grep -E --only-matching --max-count=1 \
+automation=$(egrep --only-matching --max-count=1 \
     --ignore-case 'automation=true' <<< $tags || true)
-permanent=$(grep -E --only-matching --max-count=1 \
+permanent=$(egrep --only-matching --max-count=1 \
     --ignore-case 'permanent=true' <<< $tags || true)

 if [[ -n "$permanent" ]]; then
@ -159,14 +151,6 @@ for (( i=nr_amis ; i ; i-- )); do
     continue
 fi

-# Any image matching the currently in-use IMG_SFX
-# must always be preserved. Values are defined in cirrus.yml
-# shellcheck disable=SC2154
-if [[ "$name" =~ $IMG_SFX ]]; then
-    msg "Retaining current (latest) image $name | $tags"
-    continue
-fi
-
 # For IAM (security) policy, an "automation" tag is always required
 if [[ -z "$automation" ]]
 then
@ -201,15 +185,14 @@ for (( i=nr_amis ; i ; i-- )); do
 done

 COUNT=$(<"$IMGCOUNT")
-CANDIDATES=$(wc -l <$TOOBSOLETE)
 msg "########################################################################"
-msg "Obsoleting $OBSOLETE_LIMIT random image candidates ($CANDIDATES/$COUNT total):"
+msg "Obsoleting $OBSOLETE_LIMIT random images of $COUNT examined:"

 # Require a minimum number of images to exist. Also if there is some
 # horrible scripting accident, this limits the blast-radius.
-if [[ "$CANDIDATES" -lt $OBSOLETE_LIMIT ]]
+if [[ "$COUNT" -lt $OBSOLETE_LIMIT ]]
 then
-    die 0 "Safety-net Insufficient images ($CANDIDATES) to process ($OBSOLETE_LIMIT required)"
+    die 0 "Safety-net Insufficient images ($COUNT) to process ($OBSOLETE_LIMIT required)"
 fi

 # Don't let one bad apple ruin the whole bunch
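For operators, a hedged sketch of how a GCE image could be labeled so the scan above always skips it; the label name comes from the grep in the script, while the image name is a placeholder:

```bash
# Sketch: the 'permanent=true' label is exactly what the script greps for.
gcloud compute images add-labels some-image-c20230101t120000z \
    --labels=permanent=true
```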
@ -11,14 +11,14 @@ set -e
 # shellcheck source=imgts/lib_entrypoint.sh
 source /usr/local/bin/lib_entrypoint.sh

-req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI IMG_SFX
+req_env_vars GCPJSON GCPNAME GCPPROJECT AWSINI

 gcloud_init

 # Set this to 1 for testing
 DRY_RUN="${DRY_RUN:-0}"
 # For safety's sake limit nr deletions
-DELETE_LIMIT=50
+DELETE_LIMIT=10
 ABOUTNOW=$(date --iso-8601=date)  # precision is not needed for this use
 # Format Ref: https://cloud.google.com/sdk/gcloud/reference/topic/formats
 # Field list from `gcloud compute images list --limit=1 --format=text`
@ -39,20 +39,11 @@ $GCLOUD compute images list --show-deprecated \
 do
     count_image
     reason=""
-    permanent=$(grep -E --only-matching --max-count=1 --ignore-case 'permanent=true' <<< $labels || true)
+    permanent=$(egrep --only-matching --max-count=1 --ignore-case 'permanent=true' <<< $labels || true)
     [[ -z "$permanent" ]] || \
         die 1 "Refusing to delete a deprecated image labeled permanent=true. Please use gcloud utility to set image active, then research the cause of deprecation."
     [[ "$dep_state" == "OBSOLETE" ]] || \
         die 1 "Unexpected deprecation-state encountered for $name: $dep_state; labels: $labels"

-    # Any image matching the currently in-use IMG_SFX must always be preserved.
-    # Values are defined in cirrus.yml
-    # shellcheck disable=SC2154
-    if [[ "$name" =~ $IMG_SFX ]]; then
-        msg "  Skipping current (latest) image $name"
-        continue
-    fi
-
     reason="Obsolete as of $del_date; labels: $labels"
     echo "GCP $name $reason" >> $TODELETE
 done
@ -86,19 +77,6 @@ for (( i=nr_amis ; i ; i-- )); do
         continue
     fi

-    unset name
-    if ! name=$(get_tag_value "Name" "$ami"); then
-        warn 0 "  EC2 AMI ID '$ami_id' is missing a 'Name' tag"
-    fi
-
-    # Any image matching the currently in-use IMG_SFX
-    # must always be preserved.
-    if [[ "$name" =~ $IMG_SFX ]]; then
-        warn 0 "  Retaining current (latest) image $name id $ami_id"
-        $AWS ec2 disable-image-deprecation --image-id "$ami_id" > /dev/null
-        continue
-    fi
-
     if [[ $(echo -e "$ABOUTNOW\n$dep_ymd" | sort | tail -1) == "$ABOUTNOW" ]]; then
         reason="Obsolete as of '$dep_ymd'; snap=$snap"
         echo "EC2 $ami_id $reason" >> $TODELETE
@ -106,14 +84,13 @@ for (( i=nr_amis ; i ; i-- )); do
 done

 COUNT=$(<"$IMGCOUNT")
-CANDIDATES=$(wc -l <$TODELETE)
 msg "########################################################################"
-msg "Deleting up to $DELETE_LIMIT random image candidates ($CANDIDATES/$COUNT total):"
+msg "Deleting up to $DELETE_LIMIT random images of $COUNT examined:"

 # Require a minimum number of images to exist
-if [[ "$CANDIDATES" -lt $DELETE_LIMIT ]]
+if [[ "$COUNT" -lt $DELETE_LIMIT ]]
 then
-    die 0 "Safety-net Insufficient images ($CANDIDATES) to process deletions ($DELETE_LIMIT required)"
+    die 0 "Safety-net Insufficient images ($COUNT) to process deletions ($DELETE_LIMIT required)"
 fi

 sort --random-sort $TODELETE | tail -$DELETE_LIMIT | \
@ -1,11 +1,11 @@
-ARG CENTOS_STREAM_RELEASE=9
+ARG CENTOS_STREAM_RELEASE=8
 FROM quay.io/centos/centos:stream${CENTOS_STREAM_RELEASE}

 # Only needed for installing build-time dependencies
 COPY /imgts/google-cloud-sdk.repo /etc/yum.repos.d/google-cloud-sdk.repo
 RUN dnf -y update && \
     dnf -y install epel-release && \
-    dnf -y install python3 jq libxcrypt-compat && \
+    dnf -y install python3 jq && \
     dnf -y install google-cloud-sdk && \
     dnf clean all
@ -1,9 +1,19 @@
-# Copy-pasted from https://cloud.google.com/sdk/docs/install#red-hatfedoracentos
+# From https://github.com/GoogleCloudPlatform/compute-image-packages
+[google-compute-engine]
+name=Google Compute Engine
+baseurl=https://packages.cloud.google.com/yum/repos/google-compute-engine-el8-x86_64-stable
+enabled=1
+gpgcheck=1
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg

-[google-cloud-cli]
-name=Google Cloud CLI
-baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el9-x86_64
+# From https://cloud.google.com/sdk/docs/install#rpm
+[google-cloud-sdk]
+name=Google Cloud SDK
+baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el8-x86_64
 enabled=1
 gpgcheck=1
 repo_gpgcheck=0
-gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
@ -5,7 +5,7 @@ set -e
 RED="\e[1;31m"
 YEL="\e[1;33m"
 NOR="\e[0m"
-SENTINEL="__unknown__"  # default set in Containerfile
+SENTINEL="__unknown__"  # default set in dockerfile
 # Disable all input prompts
 # https://cloud.google.com/sdk/docs/scripting-gcloud
 GCLOUD="gcloud --quiet"
@ -55,7 +55,7 @@ gcloud_init() {
     then
         TMPF="$1"
     else
-        TMPF=$(mktemp -p '' .XXXXXXXX)
+        TMPF=$(mktemp -p '' .$(uuidgen)_XXXX.json)
         trap "rm -f $TMPF &> /dev/null" EXIT
     # Required variable must be set by caller
     # shellcheck disable=SC2154
@ -77,7 +77,7 @@ aws_init() {
     then
         TMPF="$1"
     else
-        TMPF=$(mktemp -p '' .XXXXXXXX)
+        TMPF=$(mktemp -p '' .$(uuidgen)_XXXX.ini)
     fi
     # shellcheck disable=SC2154
     echo "$AWSINI" > $TMPF
@ -0,0 +1,91 @@
+# Semi-manual image imports
+
+## Overview
+
+[Due to a bug in
+packer](https://github.com/hashicorp/packer-plugin-amazon/issues/264) and
+the sheer complexity of EC2 image imports, this process is impractical for
+full automation. It tends toward nearly always requiring supervision by a
+human:
+
+* There are multiple failure-points, some of which are not well reported to
+  the user by the tools here or by AWS itself.
+* The upload of the image to s3 can be unreliable, silently corrupting image
+  data.
+* The import-process is managed by a hosted AWS service which can be slow
+  and is occasionally unreliable.
+* Failure often results in one or more leftover/incomplete resources
+  (s3 objects, EC2 snapshots, and AMIs)
+
+## Requirements
+
+* You're generally familiar with the (manual)
+  [EC2 snapshot import process](https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-import-snapshot.html).
+* You are in possession of an AWS EC2 account, with the [IAM policy
+  `vmimport`](https://docs.aws.amazon.com/vm-import/latest/userguide/required-permissions.html#vmimport-role) attached.
+* Both "Access Key" and "Secret Access Key" values set in [a credentials
+  file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html).
+* Podman is installed and functional.
+* At least 10gig free space under `/tmp`, more if there are failures / multiple runs.
+* *Network bandwidth sufficient for downloading and uploading many GBs of
+  data, potentially multiple times.*
+
+## Process
+
+Unless there is a problem with the current contents or age of the
+imported images, this process does not need to be followed. The
+normal PR-based build workflow can simply be followed as usual.
+This process is only needed to bring newly updated Fedora images into
+AWS to build CI images from, for example due to a new Beta or GA release.
+
+***Note:*** Most of the steps below will happen within a container environment.
+Any exceptions are noted in the individual steps with *[HOST]*.
+
+1. *[HOST]* Edit the `Makefile`, update the Fedora release numbers
+   under the section
+   `##### Important image release and source details #####`
+1. *[HOST]* Run `make IMPORT_IMG_SFX`
+1. *[HOST]* Run
+   ```bash
+   $ make image_builder_debug \
+       GAC_FILEPATH=/dev/null \
+       AWS_SHARED_CREDENTIALS_FILE=/path/to/.aws/credentials
+   ```
+1. Run `make import_images` (or `make --jobs=4 import_images` if you're brave).
+1. The following steps should all occur successfully for each imported image.
+   1. Image is downloaded.
+   1. Image checksum is downloaded.
+   1. Image is verified against the checksum.
+   1. Image is converted to `VHDX` format.
+   1. The `VHDX` image is uploaded to the `packer-image-import` S3 bucket.
+   1. AWS `import-snapshot` process is started (uses AWS vmimport service)
+   1. Progress of snapshot import is monitored until completion or failure.
+   1. The imported snapshot is converted into an AMI
+   1. Essential tags are added to the AMI
+   1. A details ascii-table about the new AMI is printed on success.
+1. Assuming all image imports were successful, a final success message will be
+   printed by `make`.
+
+## Failure responses
+
+This list is not exhaustive, and only represents common/likely failures.
+Normally there is no need to exit the build container.
+
+* If image download fails, double-check any error output, run `make clean`
+  and retry.
+* If checksum validation fails, run `make clean`.
+  Retry `make import_images`.
+* If s3 upload fails, confirm service availability,
+  then retry `make import_images`.
+* If snapshot import fails with a `Disk validation failed` error,
+  retry `make import_images`.
+* If snapshot import fails with a non-validation error,
+  find the snapshot in EC2 and delete it manually.
+  Retry `make import_images`.
+* If AMI registration fails, remove any conflicting AMIs *and* snapshots.
+  Retry `make import_images`.
+* If import was successful but AMI tagging failed, manually add
+  the required tags to the AMI: `automation=false` and `Name=<name>-i${IMG_SFX}`.
+  Where `<name>` is `fedora-aws` or `fedora-aws-arm64`.
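For that last failure response, a hedged sketch of the manual tagging with the AWS CLI; the AMI ID is a placeholder, and the region matches the one used elsewhere in these scripts:

```bash
# Sketch only: add the two tags named above to an imported AMI.
aws --region us-east-1 ec2 create-tags \
    --resources ami-0123456789abcdef0 \
    --tags Key=automation,Value=false "Key=Name,Value=fedora-aws-i${IMG_SFX}"
```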
@ -0,0 +1,45 @@
+#!/bin/bash
+
+# This script is intended to be run by packer, usage under any other
+# environment may behave badly. Its purpose is to download a VM
+# image and a checksum file, and verify the image's checksum matches.
+# If it does, convert the downloaded image into the format indicated
+# by the first argument's `.extension`.
+#
+# The first argument is the file path and name for the output image,
+# the second argument is the image download URL (ending in a filename).
+# The third argument is the download URL for a checksum file containing
+# details necessary to verify the filename included in the image download URL.
+
+set -eo pipefail
+
+SCRIPT_FILEPATH=$(realpath "${BASH_SOURCE[0]}")
+SCRIPT_DIRPATH=$(dirname "$SCRIPT_FILEPATH")
+REPO_DIRPATH=$(realpath "$SCRIPT_DIRPATH/../")
+
+# shellcheck source=./lib.sh
+source "$REPO_DIRPATH/lib.sh"
+
+[[ "$#" -eq 3 ]] || \
+    die "Expected to be called with three arguments, not: $#"
+
+# Packer needs to provide the desired filename as it's unable to parse
+# a filename out of the URL or interpret output from this script.
+dest_dirpath=$(dirname "$1")
+dest_filename=$(basename "$1")
+dest_format=$(cut -d. -f2<<<"$dest_filename")
+src_url="$2"
+src_filename=$(basename "$src_url")
+cs_url="$3"
+
+req_env_vars dest_dirpath dest_filename dest_format src_url src_filename cs_url
+
+mkdir -p "$dest_dirpath"
+cd "$dest_dirpath"
+[[ -r "$src_filename" ]] || \
+    curl --fail --location -O "$src_url"
+echo "Downloading & verifying checksums in $cs_url"
+curl --fail --location "$cs_url" -o - | \
+    sha256sum --ignore-missing --check -
+echo "Converting '$src_filename' to ($dest_format format) '$dest_filename'"
+qemu-img convert "$src_filename" -O "$dest_format" "${dest_filename}"
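A hedged sketch of a manual invocation of that script (normally packer calls it); the script path, image URLs, and filenames below are placeholders, not taken from this repo:

```bash
# Illustrative only: the output path's extension drives the target format.
bash ./import_images/handle_image.sh \
    /tmp/fedora-aws.vhdx \
    https://example.com/images/fedora.qcow2 \
    https://example.com/images/fedora-CHECKSUM
```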
@ -0,0 +1,31 @@
+{
+    "builds": [
+        {
+            "name": "fedora-aws",
+            "builder_type": "hamsterwheel",
+            "build_time": 0,
+            "files": null,
+            "artifact_id": "",
+            "packer_run_uuid": null,
+            "custom_data": {
+                "IMG_SFX": "fedora-aws-i@@@IMPORT_IMG_SFX@@@",
+                "STAGE": "import",
+                "TASK": "@@@CIRRUS_TASK_ID@@@"
+            }
+        },
+        {
+            "name": "fedora-aws-arm64",
+            "builder_type": "hamsterwheel",
+            "build_time": 0,
+            "files": null,
+            "artifact_id": "",
+            "packer_run_uuid": null,
+            "custom_data": {
+                "IMG_SFX": "fedora-aws-arm64-i@@@IMPORT_IMG_SFX@@@",
+                "STAGE": "import",
+                "TASK": "@@@CIRRUS_TASK_ID@@@"
+            }
+        }
+    ],
+    "last_run_uuid": "00000000-0000-0000-0000-000000000000"
+}
@ -0,0 +1,18 @@
+{
+    "Name": "@@@NAME@@@-i@@@IMPORT_IMG_SFX@@@",
+    "VirtualizationType": "hvm",
+    "Architecture": "@@@ARCH@@@",
+    "EnaSupport": true,
+    "RootDeviceName": "/dev/sda1",
+    "BlockDeviceMappings": [
+        {
+            "DeviceName": "/dev/sda1",
+            "Ebs": {
+                "DeleteOnTermination": true,
+                "SnapshotId": "@@@SNAPSHOT_ID@@@",
+                "VolumeSize": 10,
+                "VolumeType": "gp2"
+            }
+        }
+    ]
+}
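Presumably this template is token-substituted and then fed to `aws ec2 register-image`; a sketch under that assumption, where the snapshot ID and file names are placeholders:

```bash
# Sketch only: fill the @@@...@@@ tokens, then register the AMI.
sed -e 's/@@@NAME@@@/fedora-aws/' \
    -e "s/@@@IMPORT_IMG_SFX@@@/$IMPORT_IMG_SFX/" \
    -e 's/@@@ARCH@@@/x86_64/' \
    -e 's/@@@SNAPSHOT_ID@@@/snap-0123456789abcdef0/' \
    register.json.in > /tmp/register.json
aws ec2 register-image --cli-input-json file:///tmp/register.json
```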
@ -0,0 +1,84 @@
+#!/bin/bash
+
+# This script is intended to be called by the main Makefile
+# to wait for and confirm successful import and conversion
+# of an uploaded image object from S3 into EC2. It expects
+# the path to a file containing the import task ID as the
+# first argument.
+#
+# If the import is successful, the snapshot ID is written
+# to stdout. Otherwise, all output goes to stderr, and
+# the script exits non-zero on failure or timeout. On
+# failure, the file containing the import task ID will
+# be removed.
+
+set -eo pipefail
+
+AWS="${AWS:-aws --output json --region us-east-1}"
+
+# The import/conversion process can take a LONG time; more than 10 minutes
+# has been observed on occasion. Normally it takes 2-5 minutes.
+SLEEP_SECONDS=10
+TIMEOUT_SECONDS=720
+
+TASK_ID_FILE="$1"
+
+tmpfile=$(mktemp -p '' tmp.$(basename ${BASH_SOURCE[0]}).XXXX)
+
+die() { echo "ERROR: ${1:-No error message provided}" > /dev/stderr; exit 1; }
+
+msg() { echo "${1:-No error message provided}" > /dev/stderr; }
+
+unset snapshot_id
+handle_exit() {
+    set +e
+    rm -f "$tmpfile" &> /dev/null
+    if [[ -n "$snapshot_id" ]]; then
+        msg "Success ($task_id): $snapshot_id"
+        echo -n "$snapshot_id" > /dev/stdout
+        return 0
+    fi
+    rm -f "$TASK_ID_FILE"
+    die "Timeout or other error reported while waiting for snapshot import"
+}
+trap handle_exit EXIT
+
+[[ -n "$AWS_SHARED_CREDENTIALS_FILE" ]] || \
+    die "\$AWS_SHARED_CREDENTIALS_FILE must not be unset/empty."
+
+[[ -r "$1" ]] || \
+    die "Can't read task id from file '$TASK_ID_FILE'"
+
+task_id=$(<$TASK_ID_FILE)
+
+msg "Waiting up to $TIMEOUT_SECONDS seconds for '$task_id' import. Checking progress every $SLEEP_SECONDS seconds."
+for (( i=$TIMEOUT_SECONDS ; i ; i=i-$SLEEP_SECONDS )); do \
+
+    # Sleep first, to give AWS time to start meaningful work.
+    sleep ${SLEEP_SECONDS}s
+
+    $AWS ec2 describe-import-snapshot-tasks \
+        --import-task-ids $task_id > $tmpfile
+
+    if ! st_msg=$(jq -r -e '.ImportSnapshotTasks[0].SnapshotTaskDetail.StatusMessage?' $tmpfile) && \
+       [[ -n $st_msg ]] && \
+       [[ ! "$st_msg" =~ null ]]
+    then
+        die "Unexpected result: $st_msg"
+    elif grep -Eiq '(error)|(fail)' <<<"$st_msg"; then
+        die "$task_id: $st_msg"
+    fi
+
+    msg "$task_id: $st_msg (${i}s remaining)"
+
+    # Why, AWS, do you use both StatusMessage && Status? Bad names! WHY!?!?!?!
+    if status=$(jq -r -e '.ImportSnapshotTasks[0].SnapshotTaskDetail.Status?' $tmpfile) && \
+       [[ "$status" == "completed" ]] && \
+       snapshot_id=$(jq -r -e '.ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId?' $tmpfile)
+    then
+        msg "Import complete to: $snapshot_id"
+        break
+    else
+        unset snapshot_id
+    fi
+done
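A usage sketch under the stated contract (snapshot ID on stdout, non-zero exit on failure or timeout); the script and file names here are illustrative only:

```bash
# Illustrative only: the file contains an import task ID, as required above.
echo "import-snap-0123456789abcdef0" > /tmp/task_id
if snapshot_id=$(bash ./import_images/wait_import.sh /tmp/task_id); then
    echo "Snapshot ready: $snapshot_id"
fi
```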
38
lib.sh
@ -19,8 +19,9 @@ OS_REL_VER="$OS_RELEASE_ID-$OS_RELEASE_VER"
 # This location is checked by automation in other repos, please do not change.
 PACKAGE_DOWNLOAD_DIR=/var/cache/download

-# N/B: This is managed by renovate
-INSTALL_AUTOMATION_VERSION="5.0.1"
+INSTALL_AUTOMATION_VERSION="4.2.1"
+
+PUSH_LATEST="${PUSH_LATEST:-0}"

 # Mask secrets in show_env_vars() from automation library
 SECRET_ENV_RE='(^PATH$)|(^BASH_FUNC)|(^_.*)|(.*PASSWORD.*)|(.*TOKEN.*)|(.*SECRET.*)|(.*ACCOUNT.*)|(.+_JSON)|(AWS.+)|(.*SSH.*)|(.*GCP.*)'
@ -48,20 +49,12 @@ if [[ "$UID" -ne 0 ]]; then
 fi

 install_automation_tooling() {
-    local version_arg
-    version_arg="$INSTALL_AUTOMATION_VERSION"
-
-    if [[ "$1" == "latest" ]]; then
-        version_arg="latest"
-        shift
-    fi
-
     # This script supports installing all current and previous versions
     local installer_url="https://raw.githubusercontent.com/containers/automation/master/bin/install_automation.sh"
     curl --silent --show-error --location \
         --url "$installer_url" | \
         $SUDO env INSTALL_PREFIX=/usr/share /bin/bash -s - \
-            "$version_arg" "$@"
+            "$INSTALL_AUTOMATION_VERSION" "$@"
     # This defines AUTOMATION_LIB_PATH
     source /usr/share/automation/environment
     #shellcheck disable=SC1090
@ -286,16 +279,6 @@ unmanaged-devices=interface-name:*podman*;interface-name:veth*
 EOF
 }

-# Create a local registry, seed it with remote images
-initialize_local_cache_registry() {
-    msg "Initializing local cache registry"
-    #shellcheck disable=SC2154
-    $SUDO ${SCRIPT_DIRPATH}/local-cache-registry initialize
-
-    msg "du -sh /var/cache/local-registry"
-    du -sh /var/cache/local-registry
-}
-
 common_finalize() {
     set -x  # extra detail is no-longer necessary
     cd /
@ -308,7 +291,7 @@ common_finalize() {
     $SUDO rm -rf /var/lib/cloud/instanc*
     $SUDO rm -rf /root/.ssh/*
     $SUDO rm -rf /etc/ssh/*key*
-    $SUDO rm -rf /tmp/* /var/tmp/automation_images
+    $SUDO rm -rf /tmp/*
     $SUDO rm -rf /tmp/.??*
     echo -n "" | $SUDO tee /etc/machine-id
     $SUDO sync
@ -330,10 +313,7 @@ rh_finalize() {
     # Packaging cache is preserved across builds of container images
     $SUDO rm -f /etc/udev/rules.d/*-persistent-*.rules
     $SUDO touch /.unconfigured  # force firstboot to run
-    echo
-    echo "# PACKAGE LIST"
-    rpm -qa | sort
+    common_finalize
 }

 # Called during VM Image setup, not intended for general use.
@ -349,9 +329,7 @@ debian_finalize() {
     fi
     set -x
     # Packaging cache is preserved across builds of container images
-    # pipe-cat is not a NOP! It prevents using $PAGER and then hanging
-    echo "# PACKAGE LIST"
-    dpkg -l | cat
+    common_finalize
 }

 finalize() {
@ -364,6 +342,4 @@ finalize() {
     else
         die "Unknown/Unsupported Distro '$OS_RELEASE_ID'"
     fi
-
-    common_finalize
 }
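Assuming `lib.sh` is sourced, a sketch of the two call styles the main-branch `install_automation_tooling()` above supports (component names pass through as remaining arguments):

```bash
# Sketch only: both forms forward component names to the installer.
source ./lib.sh
install_automation_tooling cirrus-ci_env          # uses pinned INSTALL_AUTOMATION_VERSION
install_automation_tooling latest cirrus-ci_env   # bypasses the version pin
```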
@ -40,10 +40,8 @@ fi
 # I don't expect there will ever be more than maybe 0-20 instances at any time.
 for instance_index in $(seq 1 $(jq -e 'length'<<<"$simple_inst_list")); do
     instance=$(jq -e ".[$instance_index - 1]"<<<"$simple_inst_list")
-    # aws commands require an instance ID
-    instid=$(jq -r ".ID"<<<"$instance")
     # A Name-tag isn't guaranteed, default to stupid, unreadable, generated ID
-    name=$instid
+    name=$(jq -r ".ID"<<<"$instance")
     if name_tag=$(get_tag_value "Name" "$instance"); then
         # This is MUCH more human-friendly and easier to find in the WebUI.
         # If it was an instance leaked by Cirrus-CI, it may even include the
@ -71,7 +69,6 @@ for instance_index in $(seq 1 $(jq -e 'length'<<<"$simple_inst_list")); do
         continue
     fi

-    # First part of the status line item to append in the e-mail
     line="* VM $name running $age_days days"

     # It would be nice to list all the tags like we do for GCE VMs,
@ -79,39 +76,7 @@ for instance_index in $(seq 1 $(jq -e 'length'<<<"$simple_inst_list")); do
     # Only print this handy-one (set by get_ci_vm) if it's there.
     if inuseby_tag=$(get_tag_value "in-use-by" "$instance"); then
         dbg "Found instance '$name' tagged in-use-by=$inuseby_tag."
-        line+="; likely get_ci_vm, in-use-by=$inuseby_tag"
-    elif ((DRY_RUN==0)); then  # NOT a persistent or a get_ci_vm instance
-        # Around Jun/Jul '23 an annoyingly steady stream of EC2 orphans were
-        # reported to Cirrus-support. They've taken actions to resolve,
-        # but the failure-modes are many and complex. Since most of the EC2
-        # instances are rather expensive to keep needlessly running, and manual
-        # cleanup is annoying, try to terminate them automatically.
-        dbg "Attempting to terminate instance '$name'"
-
-        # Operation runs asynchronously, no error reported for already terminated instance.
-        # Any stdout/stderr here would make the eventual e-mail unreadable.
-        if ! termout=$(aws ec2 terminate-instances --no-paginate --output json --instance-ids "$instid" 2>&1)
-        then
-            echo "::error::Auto-term. of '$instid' failed, 'aws' output: $termout" > /dev/stderr
-
-            # Catch rare TOCTOU race, instance was running, terminated, and pruned while looping.
-            # (terminated instances stick around for a while until purged automatically)
-            if [[ "$termout" =~ InvalidInstanceID ]]; then
-                line+="; auto-term. failed, instance vanished"
-            else  # Something else horrible broke, let the operators know.
-                line+="; auto-term. failed, see GHA workflow log"
-            fi
-        else
-            dbg "Successful term. command output: '$termout'"
-            # At this point, the script could sit around in a poll-loop, waiting to confirm
-            # the `$termout` JSON contains `CurrentState: { Code: 48, Name: terminated }`.
-            # However this could take _minutes_, and there may be a LOT of instances left
-            # to process. Do the next best thing: Hope the termination eventually works,
-            # but also let the operator know an attempt was made.
-            line+="; probably successful auto-termination"
-        fi
-    else  # no in-use-by tag, DRY_RUN==1
-        dbg "DRY_RUN: Would normally have tried to terminate instance '$name' (ID $instid)"
+        line+=" tagged in-use-by=$inuseby_tag"
     fi

     echo "$line" >> "$OUTPUT"
@ -18,9 +18,7 @@ req_env_vars GCPJSON GCPNAME GCPPROJECT GCPPROJECTS AWSINI
 NOW=$(date +%s)
 TOO_OLD='3 days ago'  # Detect Friday Orphans on Monday
 EVERYTHING=${EVERYTHING:-0}  # set to '1' for testing
-DRY_RUN=${DRY_RUN:-0}
 if ((EVERYTHING)); then
-    DRY_RUN=1
     TOO_OLD="3 seconds ago"
 fi
 # Anything older than this is "too old"
@ -15,16 +15,6 @@ ARG PACKER_BUILD_NAME=
 ENV AI_PATH=/usr/src/automation_images \
     CONTAINER=1

-ARG IMG_SFX=
-ARG CIRRUS_TASK_ID=
-ARG GIT_HEAD=
-# Ref: https://github.com/opencontainers/image-spec/blob/main/annotations.md
-LABEL org.opencontainers.image.url="https://cirrus-ci.com/task/${CIRRUS_TASK_ID}"
-LABEL org.opencontainers.image.documentation="https://github.com/containers/automation_images/blob/${GIT_HEAD}/README.md#container-images-overview-step-2"
-LABEL org.opencontainers.image.source="https://github.com/containers/automation_images/blob/${GIT_HEAD}/podman/Containerfile"
-LABEL org.opencontainers.image.version="${IMG_SFX}"
-LABEL org.opencontainers.image.revision="${GIT_HEAD}"
-
 # Only add needed files to avoid invalidating build cache
 ADD /lib.sh "$AI_PATH/"
 ADD /podman/* "$AI_PATH/podman/"
@ -12,6 +12,7 @@ RUN dnf -y update && \
     dnf clean all

 ENV REG_REPO="https://github.com/docker/distribution.git" \
+    REG_COMMIT="b5ca020cfbe998e5af3457fda087444cf5116496" \
     REG_COMMIT_SCHEMA1="ec87e9b6971d831f0eff752ddb54fb64693e51cd" \
     OSO_REPO="https://github.com/openshift/origin.git" \
     OSO_TAG="v1.5.0-alpha.3"
@ -9,6 +9,7 @@ set -e
 declare -a req_vars
 req_vars=(\
     REG_REPO
+    REG_COMMIT
     REG_COMMIT_SCHEMA1
     OSO_REPO
     OSO_TAG
@ -42,6 +43,12 @@ cd "$REG_GOSRC"
 (
     # This is required to be set like this by the build system
     export GOPATH="$PWD/Godeps/_workspace:$GOPATH"
+    # This comes in from the Containerfile
+    # shellcheck disable=SC2154
+    git checkout -q "$REG_COMMIT"
+    go build -o /usr/local/bin/registry-v2 \
+        github.com/docker/distribution/cmd/registry
+
     # This comes in from the Containerfile
     # shellcheck disable=SC2154
     git checkout -q "$REG_COMMIT_SCHEMA1"
@ -61,10 +68,6 @@ sed -i -e 's/\[\[ "\${go_version\[2]}" < "go1.5" ]]/false/' ./hack/common.sh
 # 8 characters long. This can happen if/when systemd-resolved adds 'trust-ad'.
 sed -i '/== "attempts:"/s/ 8 / 9 /' vendor/github.com/miekg/dns/clientconfig.go

-# Backport https://github.com/ugorji/go/commit/8286c2dc986535d23e3fad8d3e816b9dd1e5aea6
-# Go ≥ 1.22 panics with a base64 encoding using duplicated characters.
-sed -i -e 's,"encoding/base64","encoding/base32", ; s,base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__"),base32.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef"),' vendor/github.com/ugorji/go/codec/gen.go
-
 make build
 make all WHAT=cmd/dockerregistry
 cp -a ./_output/local/bin/linux/*/* /usr/local/bin/
@ -12,7 +12,7 @@ if [[ "$UID" -ne 0 ]]; then
     export SUDO="sudo env DEBIAN_FRONTEND=noninteractive"
 fi

-EVIL_UNITS="cron crond atd apt-daily-upgrade apt-daily fstrim motd-news systemd-tmpfiles-clean update-notifier-download mlocate-updatedb plocate-updatedb"
+EVIL_UNITS="cron crond atd apt-daily-upgrade apt-daily fstrim motd-news systemd-tmpfiles-clean update-notifier-download mlocate-updatedb"

 if [[ "$1" == "--list" ]]
 then
@ -1,13 +1,6 @@
-. $PSScriptRoot\win-lib.ps1
-
-# Disable WinRM as a security precaution (cirrus launches an agent from user-data, so we don't need it)
-Set-Service winrm -StartupType Disabled
-# Also disable RDP (can be enabled via user-data manually)
-Set-ItemProperty -Path "HKLM:\System\CurrentControlSet\Control\Terminal Server" -Name "fDenyTSConnections" -Value 1
-Disable-NetFirewallRule -DisplayGroup "Remote Desktop"
-
+$ErrorActionPreference = "stop"
+
 $username = "Administrator"

 # Temporary random password to allow autologon that will be replaced
 # before the instance is put into service.
 $syms = [char[]]([char]'a'..[char]'z' `
@ -35,6 +28,6 @@ Set-ItemProperty `
 # NOTE: For now, we do not run sysprep, since initialization with reboots
 # are exceptionally slow on metal nodes, which these target to run. This
 # will lead to a duplicate machine id, which is not ideal, but allows
-# instances to start quickly. So, instead of sysprep, trigger a reset so
-# that the admin password reset, and activation rerun on boot.
+# instances to start instantly. So, instead of sysprep, trigger a reset so
+# that the admin password reset, and activation rerun on boot
 & 'C:\Program Files\Amazon\EC2Launch\ec2launch' reset --block
@ -1,4 +0,0 @@
-<powershell>
-Set-ItemProperty -Path "HKLM:\System\CurrentControlSet\Control\Terminal Server" -Name "fDenyTSConnections" -Value 0
-Enable-NetFirewallRule -DisplayGroup "Remote Desktop"
-</powershell>
@ -1,50 +0,0 @@
|
||||||
|
|
||||||
$ErrorActionPreference = "stop"
|
|
||||||
|
|
||||||
Set-ExecutionPolicy Bypass -Scope Process -Force
|
|
||||||
|
|
||||||
function Check-Exit {
|
|
||||||
param(
|
|
||||||
[parameter(ValueFromRemainingArguments = $true)]
|
|
||||||
[string[]] $codes = @(0)
|
|
||||||
)
|
|
||||||
if ($LASTEXITCODE -eq $null) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
foreach ($code in $codes) {
|
|
||||||
if ($LASTEXITCODE -eq $code) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Exit $LASTEXITCODE
|
|
||||||
}
|
|
||||||
|
|
||||||
# Retry installation on failure or 5-minute timeout (for all packages)
|
|
||||||
function retryInstall {
|
|
||||||
param([Parameter(ValueFromRemainingArguments)] [string[]] $pkgs)
|
|
||||||
|
|
||||||
foreach ($pkg in $pkgs) {
|
|
||||||
for ($retries = 0; ; $retries++) {
|
|
||||||
if ($retries -gt 5) {
|
|
||||||
throw "Could not install package $pkg"
|
|
||||||
}
|
|
||||||
|
|
||||||
if ($pkg -match '(.[^\@]+)@(.+)') {
|
|
||||||
$pkg = @("--version", $Matches.2, $Matches.1)
|
|
||||||
}
|
|
||||||
|
|
||||||
# Chocolatey best practices as of 2024-04:
|
|
||||||
# https://docs.chocolatey.org/en-us/choco/commands/#scripting-integration-best-practices-style-guide
|
|
||||||
# Some of those are suboptimal, e.g., using "upgrade" to mean "install",
|
|
||||||
# hardcoding a specific API URL. We choose to reject those.
|
|
||||||
choco install $pkg -y --allow-downgrade --execution-timeout=300
|
|
||||||
if ($LASTEXITCODE -eq 0) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
Write-Host "Error installing, waiting before retry..."
|
|
||||||
Start-Sleep -Seconds 6
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
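A usage sketch for the two helpers above (the package names and pinned version are hypothetical; the `name@version` form is rewritten into `--version` arguments by the regex in retryInstall):

    # Hypothetical usage: dot-source the library, then install with retries.
    . $PSScriptRoot\win-lib.ps1
    retryInstall git 'golang@1.21.1'; Check-Exit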
@@ -34,12 +34,7 @@ builders:
     security_group_id: "sg-042c75677872ef81c"
     ami_name: &ami_name '{{build_name}}-c{{user `IMG_SFX`}}'
     ami_description: 'Built in https://cirrus-ci.com/task/{{user `CIRRUS_TASK_ID`}}'
-    launch_block_device_mappings:
-      - device_name: '/dev/sda1'
-        volume_size: 200
-        volume_type: 'gp3'
-        iops: 6000
-        delete_on_termination: true
     # These are critical and used by security-policy to enforce instance launch limits.
     tags: &awstags
       # EC2 expects "Name" to be capitalized
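Since these tags are what the security policy keys on, it can be worth verifying them on a freshly built AMI. A sketch using AWS Tools for PowerShell, where the module name and the `-c` name filter are assumptions, not repo code:

    # Assumes AWS.Tools.EC2 is installed and credentials are configured.
    Import-Module AWS.Tools.EC2
    $ami = Get-EC2Image -Owner self |
        Where-Object { $_.Name -like '*-c*' } |  # ami_name is '{{build_name}}-c<IMG_SFX>'
        Select-Object -First 1
    $ami.Tags | Format-Table Key, Value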
@@ -58,22 +53,18 @@ builders:
 
 provisioners:
   - type: powershell
-    inline:
-      - '$ErrorActionPreference = "stop"'
-      - 'New-Item -Path "c:\" -Name "temp" -ItemType "directory" -Force'
-      - 'New-Item -Path "c:\temp" -Name "automation_images" -ItemType "directory" -Force'
-  - type: 'file'
-    source: '{{ pwd }}/'
-    destination: "c:\\temp\\automation_images\\"
-  - type: powershell
-    inline:
-      - 'c:\temp\automation_images\win_images\win_packaging.ps1'
-  # Several installed items require a reboot, do that now in case it would
-  # cause a problem with final image preparations.
+    script: '{{template_dir}}/win_packaging.ps1'
   - type: windows-restart
   - type: powershell
     inline:
-      - 'c:\temp\automation_images\win_images\win_finalization.ps1'
+      # Disable WinRM as a security precaution (cirrus launches an agent from user-data, so we don't need it)
+      - Set-Service winrm -StartupType Disabled
+      # Also disable RDP (can be enabled via user-data manually)
+      - Set-ItemProperty -Path "HKLM:\System\CurrentControlSet\Control\Terminal Server" -Name "fDenyTSConnections" -Value 1
+      - Disable-NetFirewallRule -DisplayGroup "Remote Desktop"
+  # Set up Autologon and reset; must be last, due to pw change
+  - type: powershell
+    script: '{{template_dir}}/auto_logon.ps1'
 
 
 post-processors:
@@ -84,3 +75,4 @@ post-processors:
         IMG_SFX: '{{ user `IMG_SFX` }}'
         STAGE: cache
         TASK: '{{user `CIRRUS_TASK_ID`}}'
@@ -1,36 +1,36 @@
+function CheckExit {
+    param(
+        [parameter(ValueFromRemainingArguments = $true)]
+        [string[]] $codes = @(0)
+    )
+    if ($LASTEXITCODE -eq $null) {
+        return
+    }
+
+    foreach ($code in $codes) {
+        if ($LASTEXITCODE -eq $code) {
+            return
+        }
+    }
+
+    Exit $LASTEXITCODE
+}
+
-. $PSScriptRoot\win-lib.ps1
 
 # Disables runtime process virus scanning, which is not necessary
 Set-MpPreference -DisableRealtimeMonitoring 1
+$ErrorActionPreference = "stop"
 
+Set-ExecutionPolicy Bypass -Scope Process -Force
 [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072
 iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
 
-# Install basic required tooling.
-# psexec needed to workaround session 0 WSL bug
-retryInstall 7zip git archiver psexec golang mingw StrawberryPerl zstandard; Check-Exit
-
-# Update service is required for dotnet
-Set-Service -Name wuauserv -StartupType "Manual"; Check-Exit
-
-# Install dotnet as that's the best way to install WiX 4+
-# Choco does not support installing anything over WiX 3.14
-Invoke-WebRequest -Uri https://dotnet.microsoft.com/download/dotnet/scripts/v1/dotnet-install.ps1 -OutFile dotnet-install.ps1
-.\dotnet-install.ps1 -InstallDir 'C:\Program Files\dotnet'
-
-# Configure NuGet sources for dotnet to fetch wix (and other packages) from
-& 'C:\Program Files\dotnet\dotnet.exe' nuget add source https://api.nuget.org/v3/index.json -n nuget.org
-
-# Install wix
-& 'C:\Program Files\dotnet\dotnet.exe' tool install --global wix
-
-# Install Hyper-V
-Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V -All -NoRestart
-Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V-Management-PowerShell -All -NoRestart
-Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V-Management-Clients -All -NoRestart
+# Install Git, BZ2 archive support, Go, and the MingW (GCC for Win) compiler for CGO support
+# Add pstools to workaround sess 0 WSL bug
+choco install -y git mingw archiver psexec; CheckExit
+choco install golang --version 1.19.2 -y; CheckExit
 
 # Install WSL, and capture text output which is not normally visible
-$x = wsl --install; Check-Exit 0 1 # wsl returns 1 on reboot required
-Write-Host $x
+$x = wsl --install; CheckExit 0 1 # wsl returns 1 on reboot required
+Write-Output $x
 Exit 0
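The `CheckExit 0 1` call matters because `wsl --install` exits 1 when it merely needs a reboot. A standalone sketch of the same tolerance without the helper (illustrative, not repo code):

    # Accept exit code 1 (reboot required) as success; fail on anything else.
    $x = wsl --install
    if (($LASTEXITCODE -ne 0) -and ($LASTEXITCODE -ne 1)) {
        Exit $LASTEXITCODE
    }
    Write-Output $x
    Exit 0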