Compare commits
338 Commits
Author | SHA1 | Date |
---|---|---|
|
cc7d9b2a26 | |
|
0af8676cb8 | |
|
f55fe34cfb | |
|
987689cc34 | |
|
cb12019fba | |
|
e1231d1520 | |
|
b0959cb192 | |
|
7f213bf685 | |
|
79e68ef97c | |
|
aba42ca8ff | |
|
d805c0c822 | |
|
e83dcfcabf | |
|
7f13540563 | |
|
50c43af45e | |
|
cd259102d4 | |
|
051f0951f1 | |
|
e8a30ae1ea | |
|
a4888b2ce9 | |
|
8faa8b216c | |
|
fd6f70913e | |
|
f3777be65b | |
|
16f757f699 | |
|
26ab1b7744 | |
|
994ba027c2 | |
|
fa70d9e3af | |
|
3e2662f02b | |
|
0f5226e050 | |
|
24800f0f77 | |
|
5ae1659c96 | |
|
3c034bcadc | |
|
7067540a52 | |
|
e3c74c2aa4 | |
|
8c5bb22af7 | |
|
3b33514d26 | |
|
973aa8c2fe | |
|
4d23dd41f0 | |
|
b9186a2b38 | |
|
8b1776b799 | |
|
8218f24c4d | |
|
8f39f4b1af | |
|
99d1c2662e | |
|
32b94cedea | |
|
5ad53bd723 | |
|
24a62a63d3 | |
|
ab1f7624a0 | |
|
35a29e5dfe | |
|
657247095b | |
|
cc18e81abf | |
|
d2e5f7815e | |
|
48c9554a6c | |
|
0a0bc4f395 | |
|
b8969128d0 | |
|
4739c8921c | |
|
34ea41cc7f | |
|
ee5fba7664 | |
|
34e2995cd7 | |
|
51a2c1fbed | |
|
718ecdb04e | |
|
7ae84eb74c | |
|
d81a56f85b | |
|
27f6f9363f | |
|
1b35e0e24d | |
|
2c1ee35362 | |
|
447f70e9c7 | |
|
1809c5b6c0 | |
|
c552d5bba1 | |
|
3568a50f52 | |
|
436dceb68f | |
|
13be11668c | |
|
47a5015b07 | |
|
b0dde0f4fc | |
|
689cfa189c | |
|
bb3343c0c4 | |
|
b1d7d1d447 | |
|
256fefe0dd | |
|
11359412d4 | |
|
378249996e | |
|
12b7b27dda | |
|
720ba14043 | |
|
a69abee410 | |
|
399120c350 | |
|
4302d62c26 | |
|
8204fd5794 | |
|
d0474a3847 | |
|
14fd648920 | |
|
420ed9a467 | |
|
dc21cdf863 | |
|
b813ad7981 | |
|
415e21b68b | |
|
8b9ae348a0 | |
|
663cb85121 | |
|
9c771bf862 | |
|
13aaf6100f | |
|
46d69a3969 | |
|
081b9c3be5 | |
|
e4e0cdbd51 | |
|
ae7f68a9ac | |
|
836d5a7487 | |
|
02d3c0a99c | |
|
f750079c85 | |
|
0eb6675f13 | |
|
3a39b5cafc | |
|
8a0e087c4b | |
|
c910e69c12 | |
|
37e71d45af | |
|
9a8a1a2413 | |
|
2e805276bb | |
|
5d234f1e4a | |
|
badedd4968 | |
|
2cdb0b15ee | |
|
f27c7ae6d9 | |
|
d7a884b8cf | |
|
9336e20516 | |
|
7feb7435c2 | |
|
478b8d9d30 | |
|
1bd2fbdfe3 | |
|
d061d8061e | |
|
13f6c9fb53 | |
|
af1016e668 | |
|
74f8447d45 | |
|
3bf3cfd233 | |
|
428f06ed36 | |
|
b9ce71232f | |
|
36c2bc68e9 | |
|
df5c5e90ac | |
|
11026c20a3 | |
|
1f2ccedbfd | |
|
2c1a0c6c4c | |
|
fb6ba4a224 | |
|
f12157050c | |
|
4353f8c5b1 | |
|
86ddf63ac5 | |
|
948206e893 | |
|
c0112c254c | |
|
86660f745e | |
|
679575c7d1 | |
|
0e328d6db5 | |
|
71ede1b334 | |
|
1f5d6b5691 | |
|
f425d902df | |
|
d4f5d65014 | |
|
0a0d617ee9 | |
|
420d72a42e | |
|
907e840d64 | |
|
a19393dd92 | |
|
72ed4a5532 | |
|
99a94ca880 | |
|
25651a0a31 | |
|
47cf77670e | |
|
7ce27001a4 | |
|
d4314cc954 | |
|
92ed5911d6 | |
|
93455e8a08 | |
|
778e26b27c | |
|
3cd711bba5 | |
|
75c0f0bb47 | |
|
22a0e4db8f | |
|
22fcddc3c2 | |
|
dfdb3ffd29 | |
|
2441295d69 | |
|
d74cf63fb4 | |
|
b182b9ba96 | |
|
a5b7947fed | |
|
cac7b02d4f | |
|
4f066e397d | |
|
f7a85f3a80 | |
|
646016818c | |
|
851d152282 | |
|
9a08aa2aed | |
|
61556ac3e9 | |
|
e8b260f41d | |
|
8d8e12b3dd | |
|
a9eb5b1f12 | |
|
20df1f7904 | |
|
111991e6eb | |
|
67c74ffe7c | |
|
8b968401af | |
|
e368472ce7 | |
|
93962e6cf1 | |
|
32554b55cd | |
|
90da395f0a | |
|
2aea32e1a4 | |
|
3e8e4726f6 | |
|
cc10ff405a | |
|
77f63d7765 | |
|
71622bfde6 | |
|
723fbf1039 | |
|
d1a3503a7f | |
|
3a9c2d4675 | |
|
7244323cef | |
|
d6ec0981eb | |
|
c5b3a9a9e1 | |
|
475167d677 | |
|
d41b3455df | |
|
aba52cf01f | |
|
6abea9345e | |
|
b42bbe547b | |
|
d277f04f02 | |
|
d4fb87ec3c | |
|
6039ae9c96 | |
|
849ff94def | |
|
ac050a015d | |
|
10847d5e03 | |
|
b4b74c0ca9 | |
|
2da3679e46 | |
|
9bee18f881 | |
|
badfb3a09e | |
|
880840c20a | |
|
b6959491e3 | |
|
6dc87f5330 | |
|
0e134f9243 | |
|
ac96839c65 | |
|
8f61a71bf9 | |
|
f95465c2a5 | |
|
a5fb655295 | |
|
a2ccd7e494 | |
|
81fc66e54a | |
|
172a5357a2 | |
|
adda8b1c76 | |
|
983cf6575a | |
|
abcf6f4370 | |
|
62979df383 | |
|
c005bb4c47 | |
|
c0b7e90d1c | |
|
3816822eea | |
|
e37b001fec | |
|
0f199e3379 | |
|
59a21c91f4 | |
|
4583a89895 | |
|
bc78af7371 | |
|
68f51fc116 | |
|
368147bae7 | |
|
27e2dc2bea | |
|
53c909b9de | |
|
f6ffe2b535 | |
|
6e917d6f03 | |
|
08d932a1d4 | |
|
1e0ff5ac17 | |
|
75156208dd | |
|
41795aac2e | |
|
e5417ea731 | |
|
4e6b89ac8b | |
|
bd25741ea3 | |
|
1182675918 | |
|
394eeb9da7 | |
|
7861f60698 | |
|
6c7ab6cd3b | |
|
c038bce8c6 | |
|
4a63655328 | |
|
e419343eb4 | |
|
57f1c46889 | |
|
ac6b0d5ed0 | |
|
646fdac890 | |
|
36af60a819 | |
|
a2c7b99e2e | |
|
bbd4a0a1f2 | |
|
aa4ccb1e98 | |
|
63703d3191 | |
|
088ecd39f7 | |
|
fada0fa488 | |
|
a776353038 | |
|
881ffc3ad5 | |
|
8c9402f8b3 | |
|
8ff4776dfd | |
|
ddd1bae263 | |
|
5cf038f327 | |
|
c1bc95c88b | |
|
cbaa773fc3 | |
|
b62d664926 | |
|
467932a357 | |
|
3e3387fc97 | |
|
8746065b3a | |
|
d16c2bf941 | |
|
9f208b5cd6 | |
|
98ebefeea1 | |
|
9053f79f37 | |
|
62b9196f35 | |
|
ffb31fde7b | |
|
5245367ad4 | |
|
4521139d0f | |
|
169064aef8 | |
|
c8fc0c9247 | |
|
56579d1750 | |
|
96b9192fdc | |
|
bc50f835e5 | |
|
203c9e3b0a | |
|
75862d43aa | |
|
6806a5d8f7 | |
|
0fa6031d53 | |
|
379b197a0c | |
|
c9a8e43c5d | |
|
739eb91b78 | |
|
6b3f5ff3c7 | |
|
d3c8422700 | |
|
2a7f26ad53 | |
|
c4f89407ff | |
|
422ce67d75 | |
|
00f6c29ac2 | |
|
97a8d96277 | |
|
16faedda61 | |
|
4fad69c4be | |
|
5eeb0fe171 | |
|
13d4024e81 | |
|
fd88ae5ae0 | |
|
4d2cb35dfc | |
|
e66e6fafaa | |
|
76e6acc97c | |
|
ff3aab803f | |
|
85a6688a4e | |
|
afa597d2ab | |
|
c87ad16664 | |
|
75e4d3ed4f | |
|
9ea4519afa | |
|
fd707ba823 | |
|
939fe05553 | |
|
7c98d54184 | |
|
cd7a142baf | |
|
6cba956155 | |
|
d9fc524072 | |
|
27b353ce86 | |
|
fccddf1ce0 | |
|
8f15a04151 | |
|
f9a00a0876 | |
|
0b97dd7a6c | |
|
7fa5258631 | |
|
a1010972fb | |
|
3426cb890d | |
|
26f565c564 | |
|
b23f06d916 | |
|
49d322750a | |
|
b21c51cf1f | |
|
f774ca2aa2 | |
|
48ab491cc6 | |
|
25056207c3 | |
|
4ccd41a24a | |
|
52caed19d9 | |
|
ccedf33056 | |
|
d7bf502421 |
42
.cirrus.yml
42
.cirrus.yml
|
@ -7,16 +7,14 @@ env:
|
|||
# Name of the typical destination branch for PRs.
|
||||
DEST_BRANCH: "main"
|
||||
|
||||
|
||||
# Default task runtime environment
|
||||
container:
|
||||
dockerfile: ci/Dockerfile
|
||||
cpu: 1
|
||||
memory: 1
|
||||
|
||||
|
||||
# Execute all unit-tests in the repo
|
||||
cirrus-ci/unit-test_task:
|
||||
only_if: ¬_docs $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
|
||||
# Default task runtime environment
|
||||
container: &ci_container
|
||||
dockerfile: ci/Dockerfile
|
||||
cpu: 1
|
||||
memory: 1
|
||||
env:
|
||||
CIRRUS_CLONE_DEPTH: 0
|
||||
script:
|
||||
|
@ -26,10 +24,21 @@ cirrus-ci/unit-test_task:
|
|||
test_output_artifacts:
|
||||
path: '*.log'
|
||||
|
||||
cirrus-ci/renovate_validation_task:
|
||||
only_if: *not_docs
|
||||
container:
|
||||
image: "ghcr.io/renovatebot/renovate:latest"
|
||||
preset_validate_script:
|
||||
- renovate-config-validator $CIRRUS_WORKING_DIR/renovate/defaults.json5
|
||||
repo_validate_script:
|
||||
- renovate-config-validator $CIRRUS_WORKING_DIR/.github/renovate.json5
|
||||
|
||||
# This is the same setup as used for Buildah CI
|
||||
gcp_credentials: ENCRYPTED[fc95bcc9f4506a3b0d05537b53b182e104d4d3979eedbf41cf54205be6397ca0bce0831d0d47580cf578dae5776548a5]
|
||||
|
||||
cirrus-ci/build-push_test_task:
|
||||
only_if: *not_docs
|
||||
container: *ci_container
|
||||
depends_on:
|
||||
- cirrus-ci/unit-test
|
||||
gce_instance:
|
||||
|
@ -42,10 +51,10 @@ cirrus-ci/build-push_test_task:
|
|||
# only stock, google-managed generic image. This also avoids needing to
|
||||
# update custom-image last-used timestamps.
|
||||
image_project: centos-cloud
|
||||
image_family: centos-stream-8
|
||||
timeout_in: 20
|
||||
image_family: centos-stream-9
|
||||
timeout_in: 30
|
||||
env:
|
||||
CIMG: quay.io/buildah/stable:v1.23.0
|
||||
CIMG: quay.io/buildah/stable:latest
|
||||
TEST_FQIN: quay.io/buildah/do_not_use
|
||||
# Robot account credentials for test-push to
|
||||
# $TEST_FQIN registry by build-push/test/testbuilds.sh
|
||||
|
@ -74,8 +83,11 @@ cirrus-ci/build-push_test_task:
|
|||
|
||||
# Represent primary Cirrus-CI based testing (Required for merge)
|
||||
cirrus-ci/success_task:
|
||||
depends_on:
|
||||
container: *ci_container
|
||||
depends_on: &everything
|
||||
- cirrus-ci/unit-test
|
||||
- cirrus-ci/build-push_test
|
||||
- cirrus-ci/renovate_validation
|
||||
clone_script: mkdir -p "$CIRRUS_WORKING_DIR"
|
||||
script: >-
|
||||
echo "Required for Action Workflow: https://github.com/${CIRRUS_REPO_FULL_NAME}/actions/runs/${GITHUB_CHECK_SUITE_ID}"
|
||||
|
@ -88,15 +100,15 @@ cirrus-ci/success_task:
|
|||
# fire since the manual task has dependencies that cannot be
|
||||
# satisfied.
|
||||
github-actions/success_task:
|
||||
container: *ci_container
|
||||
# Note: ***DO NOT*** manually trigger this task under normal circumstances.
|
||||
# It is triggered automatically by the cirrus-ci_retrospective
|
||||
# Github Action. This action is responsible for testing the PR changes
|
||||
# to the action itself.
|
||||
trigger_type: manual
|
||||
# Only required for PRs, never tag or branch testing
|
||||
only_if: $CIRRUS_PR =~ '.*[0-9]+.*'
|
||||
depends_on:
|
||||
- cirrus-ci/build-push_test
|
||||
only_if: $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' && $CIRRUS_PR != ''
|
||||
depends_on: *everything
|
||||
clone_script: mkdir -p "$CIRRUS_WORKING_DIR"
|
||||
script: >-
|
||||
echo "Triggered by Github Action Workflow: https://github.com/${CIRRUS_REPO_FULL_NAME}/actions/runs/${GITHUB_CHECK_SUITE_ID}"
|
||||
|
|
|
@ -0,0 +1,45 @@
|
|||
/*
|
||||
Renovate is a service similar to GitHub Dependabot, but with
|
||||
(fantastically) more configuration options. So many options
|
||||
in fact, if you're new I recommend glossing over this cheat-sheet
|
||||
prior to the official documentation:
|
||||
|
||||
https://www.augmentedmind.de/2021/07/25/renovate-bot-cheat-sheet
|
||||
|
||||
Configuration Update/Change Procedure:
|
||||
1. Make changes
|
||||
2. Manually validate changes (from repo-root):
|
||||
|
||||
podman run -it \
|
||||
-v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \
|
||||
ghcr.io/renovatebot/renovate:latest \
|
||||
renovate-config-validator
|
||||
3. Commit.
|
||||
|
||||
Configuration Reference:
|
||||
https://docs.renovatebot.com/configuration-options/
|
||||
|
||||
Monitoring Dashboard:
|
||||
https://app.renovatebot.com/dashboard#github/containers
|
||||
|
||||
Note: The Renovate bot will create/manage it's business on
|
||||
branches named 'renovate/*'. Otherwise, and by
|
||||
default, the only the copy of this file that matters
|
||||
is the one on the `main` branch. No other branches
|
||||
will be monitored or touched in any way.
|
||||
*/
|
||||
|
||||
{
|
||||
/*************************************************
|
||||
****** Global/general configuration options *****
|
||||
*************************************************/
|
||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
||||
// Re-use predefined sets of configuration options to DRY
|
||||
"extends": [
|
||||
// https://github.com/containers/automation/blob/main/renovate/defaults.json5
|
||||
"github>containers/automation//renovate/defaults.json5"
|
||||
],
|
||||
/*************************************************
|
||||
*** Repository-specific configuration options ***
|
||||
*************************************************/
|
||||
}
|
|
@ -16,11 +16,11 @@ env:
|
|||
ACTIONS_STEP_DEBUG: '${{ secrets.ACTIONS_STEP_DEBUG }}'
|
||||
|
||||
jobs:
|
||||
unit-test:
|
||||
helper_unit-test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Clone the repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false
|
||||
path: ./
|
||||
|
|
|
@ -44,7 +44,7 @@ jobs:
|
|||
GITHUB_TOKEN: ${{ github.token }}
|
||||
|
||||
- name: Clone latest main branch repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
path: ./main
|
||||
|
@ -64,16 +64,14 @@ jobs:
|
|||
- if: steps.retro.outputs.do_intg == 'true'
|
||||
id: create_pr_comment
|
||||
name: Create a status comment in the PR
|
||||
# Ref: https://github.com/marketplace/actions/comment-action
|
||||
uses: jungwinter/comment@v1
|
||||
uses: thollander/actions-comment-pull-request@v3
|
||||
with:
|
||||
issue_number: '${{ steps.retro.outputs.prn }}'
|
||||
type: 'create'
|
||||
token: '${{ secrets.GITHUB_TOKEN }}'
|
||||
pr-number: '${{ steps.retro.outputs.prn }}'
|
||||
comment-tag: retro
|
||||
# N/B: At the time of this comment, it is not possible to provide
|
||||
# direct links to specific job-steps (here) nor links to artifact
|
||||
# files. There are open RFE's for this capability to be added.
|
||||
body: >-
|
||||
message: >-
|
||||
[Cirrus-CI Retrospective Github
|
||||
Action](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})
|
||||
has started. Running against
|
||||
|
@ -84,7 +82,7 @@ jobs:
|
|||
# block allow direct checkout of PR code.
|
||||
- if: steps.retro.outputs.do_intg == 'true'
|
||||
name: Clone all repository code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
# Get ALL available history to avoid problems during any run of
|
||||
# 'git describe' from any script in the repo.
|
||||
|
@ -119,12 +117,11 @@ jobs:
|
|||
- if: steps.retro.outputs.do_intg == 'true'
|
||||
id: edit_pr_comment_build
|
||||
name: Update status comment on PR
|
||||
uses: jungwinter/comment@v1
|
||||
uses: thollander/actions-comment-pull-request@v3
|
||||
with:
|
||||
type: 'edit'
|
||||
comment_id: '${{ steps.create_pr_comment.outputs.id }}'
|
||||
token: '${{ secrets.GITHUB_TOKEN }}'
|
||||
body: >-
|
||||
pr-number: '${{ steps.retro.outputs.prn }}'
|
||||
comment-tag: retro
|
||||
message: >-
|
||||
Unit-testing passed (`${{ env.HELPER_LIB_TEST }}`)passed.
|
||||
[Cirrus-CI Retrospective Github
|
||||
Action](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})
|
||||
|
@ -135,12 +132,11 @@ jobs:
|
|||
- if: steps.retro.outputs.do_intg == 'true'
|
||||
id: edit_pr_comment_exec
|
||||
name: Update status comment on PR again
|
||||
uses: jungwinter/comment@v1
|
||||
uses: thollander/actions-comment-pull-request@v3
|
||||
with:
|
||||
type: 'edit'
|
||||
comment_id: '${{ steps.edit_pr_comment_build.outputs.id }}'
|
||||
token: '${{ secrets.GITHUB_TOKEN }}'
|
||||
body: >-
|
||||
pr-number: '${{ steps.retro.outputs.prn }}'
|
||||
comment-tag: retro
|
||||
message: >-
|
||||
Smoke testing passed [Cirrus-CI Retrospective Github
|
||||
Action](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})
|
||||
is triggering Cirrus-CI ${{ env.ACTION_TASK }} task.
|
||||
|
@ -154,12 +150,12 @@ jobs:
|
|||
run: |
|
||||
set +x
|
||||
trap "history -c" EXIT
|
||||
curl --request POST \
|
||||
curl --fail-with-body --request POST \
|
||||
--url https://api.cirrus-ci.com/graphql \
|
||||
--header "Authorization: Bearer ${{ secrets.CIRRUS_API_TOKEN }}" \
|
||||
--header 'content-type: application/json' \
|
||||
--data '{"query":"mutation {\n trigger(input: {taskId: \"${{steps.retro.outputs.tid}}\", clientMutationId: \"${{env.UUID}}\"}) {\n clientMutationId\n task {\n name\n }\n }\n}"}' \
|
||||
> ./test_artifacts/action_task_trigger.json
|
||||
| tee ./test_artifacts/action_task_trigger.json
|
||||
|
||||
actual=$(jq --raw-output '.data.trigger.clientMutationId' ./test_artifacts/action_task_trigger.json)
|
||||
echo "Verifying '$UUID' matches returned tracking value '$actual'"
|
||||
|
@ -167,12 +163,11 @@ jobs:
|
|||
|
||||
- if: steps.retro.outputs.do_intg == 'true'
|
||||
name: Update comment on workflow success
|
||||
uses: jungwinter/comment@v1
|
||||
uses: thollander/actions-comment-pull-request@v3
|
||||
with:
|
||||
type: 'edit'
|
||||
comment_id: '${{ steps.edit_pr_comment_exec.outputs.id }}'
|
||||
token: '${{ secrets.GITHUB_TOKEN }}'
|
||||
body: >-
|
||||
pr-number: '${{ steps.retro.outputs.prn }}'
|
||||
comment-tag: retro
|
||||
message: >-
|
||||
Successfully triggered [${{ env.ACTION_TASK }}
|
||||
task](https://cirrus-ci.com/task/${{ steps.retro.outputs.tid }}?command=main#L0)
|
||||
to indicate
|
||||
|
@ -183,12 +178,11 @@ jobs:
|
|||
|
||||
- if: failure() && steps.retro.outputs.do_intg == 'true'
|
||||
name: Update comment on workflow failure
|
||||
uses: jungwinter/comment@v1
|
||||
uses: thollander/actions-comment-pull-request@v3
|
||||
with:
|
||||
type: 'edit'
|
||||
comment_id: '${{ steps.create_pr_comment.outputs.id }}'
|
||||
token: '${{ secrets.GITHUB_TOKEN }}'
|
||||
body: >-
|
||||
pr-number: '${{ steps.retro.outputs.prn }}'
|
||||
comment-tag: retro
|
||||
message: >-
|
||||
Failure running [Cirrus-CI Retrospective Github
|
||||
Action](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})
|
||||
failed against this PR's
|
||||
|
@ -197,24 +191,22 @@ jobs:
|
|||
# This can happen because of --force push, manual cancel button press, or some other cause.
|
||||
- if: cancelled() && steps.retro.outputs.do_intg == 'true'
|
||||
name: Update comment on workflow cancellation
|
||||
uses: jungwinter/comment@v1
|
||||
uses: thollander/actions-comment-pull-request@v3
|
||||
with:
|
||||
type: 'edit'
|
||||
comment_id: '${{ steps.create_pr_comment.outputs.id }}'
|
||||
token: '${{ secrets.GITHUB_TOKEN }}'
|
||||
body: '[Cancelled](https://github.com/${{github.repository}}/pull/${{steps.retro.outputs.prn}}/commits/${{steps.retro.outputs.sha}})'
|
||||
pr-number: '${{ steps.retro.outputs.prn }}'
|
||||
comment-tag: retro
|
||||
message: '[Cancelled](https://github.com/${{github.repository}}/pull/${{steps.retro.outputs.prn}}/commits/${{steps.retro.outputs.sha}})'
|
||||
|
||||
# Abnormal workflow ($ACTION-TASK task already ran / not paused on a PR).
|
||||
- if: steps.retro.outputs.is_pr == 'true' && steps.retro.outputs.do_intg != 'true'
|
||||
id: create_error_pr_comment
|
||||
name: Create an error status comment in the PR
|
||||
# Ref: https://github.com/marketplace/actions/comment-action
|
||||
uses: jungwinter/comment@v1
|
||||
uses: thollander/actions-comment-pull-request@v3
|
||||
with:
|
||||
issue_number: '${{ steps.retro.outputs.prn }}'
|
||||
type: 'create'
|
||||
token: '${{ secrets.GITHUB_TOKEN }}'
|
||||
body: >-
|
||||
pr-number: '${{ steps.retro.outputs.prn }}'
|
||||
comment-tag: error
|
||||
message: >-
|
||||
***ERROR***: [cirrus-ci_retrospective
|
||||
action](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})
|
||||
found `${{ env.ACTION_TASK }}` task with unexpected `${{ steps.retro.outputs.tst }}`
|
||||
|
@ -230,7 +222,7 @@ jobs:
|
|||
# Provide an archive of files for debugging/analysis.
|
||||
- if: always() && steps.retro.outputs.do_intg == 'true'
|
||||
name: Archive event, build, and debugging output
|
||||
uses: actions/upload-artifact@v1.0.0
|
||||
uses: actions/upload-artifact@v4.6.2
|
||||
with:
|
||||
name: pr_${{ steps.retro.outputs.prn }}_debug.zip
|
||||
path: ./test_artifacts
|
||||
|
|
|
@ -28,9 +28,9 @@ jobs:
|
|||
fi
|
||||
|
||||
unit-tests: # N/B: Duplicates `ubuntu_unit_tests.yml` - templating not supported
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
# Testing installer requires a full repo. history
|
||||
fetch-depth: 0
|
||||
|
@ -66,18 +66,18 @@ jobs:
|
|||
# context data.
|
||||
- id: get_tag
|
||||
name: Retrieve the tag name
|
||||
run: printf "::set-output name=TAG_NAME::%s\n" $(basename "$GITHUB_REF")
|
||||
run: printf "TAG_NAME=%s\n" $(basename "$GITHUB_REF") >> $GITHUB_OUTPUT
|
||||
|
||||
- id: create_release # Pre-req for upload-release-asset below
|
||||
name: Create a new Github Release item for tag
|
||||
uses: actions/create-release@v1.0.1
|
||||
uses: actions/create-release@v1.1.4
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
tag_name: ${{ steps.get_tag.outputs.TAG_NAME }}
|
||||
release_name: ${{ steps.get_tag.outputs.TAG_NAME }}
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
path: ./
|
||||
|
@ -102,7 +102,7 @@ jobs:
|
|||
REPO_USER: libpod
|
||||
REPO_NAME: cirrus-ci_retrospective
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
path: ./
|
||||
|
@ -128,7 +128,7 @@ jobs:
|
|||
|
||||
- name: Retrieve the tag name
|
||||
id: get_tag
|
||||
run: printf "::set-output name=TAG_NAME::%s\n" $(basename "$GITHUB_REF" | tee /dev/stderr)
|
||||
run: printf "TAG_NAME=%s\n" $(basename "$GITHUB_REF" | tee /dev/stderr) >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Tag and push cirrus-ci_retrospective container image to registry
|
||||
run: |
|
||||
|
@ -145,7 +145,7 @@ jobs:
|
|||
run: jq --indent 4 --color-output . ${{ github.event_path }}
|
||||
|
||||
- if: always()
|
||||
uses: actions/upload-artifact@v1.0.0
|
||||
uses: actions/upload-artifact@v4.6.2
|
||||
name: Archive triggering event JSON
|
||||
with:
|
||||
name: event.json.zip
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
---
|
||||
|
||||
on: [pull_request]
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
unit-tests:
|
||||
runs-on: ubuntu-20.04
|
||||
automation_unit-tests:
|
||||
runs-on: ubuntu-24.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
persist-credentials: false
|
||||
|
|
|
@ -23,13 +23,13 @@ it may be passed `latest` to install the HEAD of the main branch.
|
|||
|
||||
For example, to install the `v1.1.3` release, run:
|
||||
```bash
|
||||
~# url='https://github.com/containers/automation/releases/latest/download/install_automation.sh'
|
||||
~# url='https://raw.githubusercontent.com/containers/automation/master/bin/install_automation.sh'
|
||||
~# curl -sL "$url" | bash -s 1.1.3
|
||||
```
|
||||
|
||||
To install `latest`, run:
|
||||
```bash
|
||||
~# url='https://github.com/containers/automation/releases/latest/download/install_automation.sh'
|
||||
~# url='https://raw.githubusercontent.com/containers/automation/master/bin/install_automation.sh'
|
||||
~# curl -sL "$url" | bash -s latest
|
||||
```
|
||||
|
||||
|
@ -52,7 +52,7 @@ install system-wide. Available components are simply any subdirectory in the re
|
|||
which contain a `.install.sh` file. For example, to install the latest `build-push` system-wide run:
|
||||
|
||||
```bash
|
||||
~# url='https://github.com/containers/automation/releases/latest/download/install_automation.sh'
|
||||
~# url='https://raw.githubusercontent.com/containers/automation/master/bin/install_automation.sh'
|
||||
~# curl -sL "$url" | bash -s latest build-push
|
||||
```
|
||||
|
||||
|
@ -80,7 +80,7 @@ if [[ -n "$AUTOMATION_LIB_PATH" ]]; then
|
|||
else
|
||||
(
|
||||
echo "WARNING: It doesn't appear containers/automation common was installed."
|
||||
) > /dev/stderr
|
||||
) >> /dev/stderr
|
||||
fi
|
||||
|
||||
...do stuff...
|
||||
|
|
|
@ -36,7 +36,7 @@ INSTALL_PREFIX="${INSTALL_PREFIX%%/}" # Make debugging path problems easier
|
|||
# When installing as root, allow sourcing env. vars. from this file
|
||||
INSTALL_ENV_FILEPATH="${INSTALL_ENV_FILEPATH:-/etc/automation_environment}"
|
||||
# Used internally here and in unit-testing, do not change without a really, really good reason.
|
||||
_ARGS="$@"
|
||||
_ARGS="$*"
|
||||
_MAGIC_JUJU=${_MAGIC_JUJU:-XXXXX}
|
||||
_DEFAULT_MAGIC_JUJU=d41d844b68a14ee7b9e6a6bb88385b4d
|
||||
|
||||
|
@ -109,7 +109,8 @@ install_automation() {
|
|||
fi
|
||||
# Allow re-installing different versions, clean out old version if found
|
||||
if [[ -d "$actual_inst_path" ]] && [[ -r "$actual_inst_path/AUTOMATION_VERSION" ]]; then
|
||||
local installed_version=$(cat "$actual_inst_path/AUTOMATION_VERSION")
|
||||
local installed_version
|
||||
installed_version=$(<"$actual_inst_path/AUTOMATION_VERSION")
|
||||
msg "Warning: Removing existing installed version '$installed_version'"
|
||||
rm -rvf "$actual_inst_path"
|
||||
elif [[ -d "$actual_inst_path" ]]; then
|
||||
|
@ -125,8 +126,8 @@ install_automation() {
|
|||
|
||||
dbg "Configuring environment file $INSTALLATION_SOURCE/environment"
|
||||
cat <<EOF>"$INSTALLATION_SOURCE/environment"
|
||||
# Added on $(date --iso-8601=minutes) by $actual_inst_path/bin/$SCRIPT_FILENAME"
|
||||
# Any manual modifications will be lost upon upgrade or reinstall.
|
||||
# Added on $(date --utc --iso-8601=minutes) by $actual_inst_path/bin/$SCRIPT_FILENAME"
|
||||
# for version '$AUTOMATION_VERSION'. Any manual modifications will be lost upon upgrade or reinstall.
|
||||
export AUTOMATION_LIB_PATH="$actual_inst_path/lib"
|
||||
export PATH="$PATH:$actual_inst_path/bin"
|
||||
EOF
|
||||
|
@ -217,7 +218,7 @@ check_args() {
|
|||
msg " Use version '$MAGIC_LOCAL_VERSION' to install from local source."
|
||||
msg " Use version 'latest' to install from current upstream"
|
||||
exit 2
|
||||
elif ! echo "$AUTOMATION_VERSION" | egrep -q "$arg_rx"; then
|
||||
elif ! echo "$AUTOMATION_VERSION" | grep -E -q "$arg_rx"; then
|
||||
msg "Error: '$AUTOMATION_VERSION' does not appear to be a valid version number"
|
||||
exit 4
|
||||
elif [[ -z "$_ARGS" ]] && [[ "$_MAGIC_JUJU" == "XXXXX" ]]; then
|
||||
|
@ -254,12 +255,14 @@ elif [[ "$_MAGIC_JUJU" == "$_DEFAULT_MAGIC_JUJU" ]]; then
|
|||
CHAIN_TO="$INSTALLATION_SOURCE/$arg/.install.sh"
|
||||
if [[ -r "$CHAIN_TO" ]]; then
|
||||
# Cannot assume common was installed system-wide
|
||||
# AUTOMATION_LIB_PATH defined by anchors.sh
|
||||
# shellcheck disable=SC2154
|
||||
env AUTOMATION_LIB_PATH=$AUTOMATION_LIB_PATH \
|
||||
AUTOMATION_VERSION=$AUTOMATION_VERSION \
|
||||
INSTALLATION_SOURCE=$INSTALLATION_SOURCE \
|
||||
A_DEBUG=$A_DEBUG \
|
||||
MAGIC_JUJU=$_MAGIC_JUJU \
|
||||
/bin/bash $CHAIN_TO
|
||||
$CHAIN_TO
|
||||
msg "##### Installation complete for '$arg' subcomponent"
|
||||
else
|
||||
msg "Warning: Cannot find installer for $CHAIN_TO"
|
||||
|
|
|
@ -20,10 +20,10 @@ runner_script_filename="$(basename $0)"
|
|||
for test_subdir in $(find "$(realpath $(dirname $0)/../)" -type d -name test | sort -r); do
|
||||
test_runner_filepath="$test_subdir/$runner_script_filename"
|
||||
if [[ -x "$test_runner_filepath" ]] && [[ "$test_runner_filepath" != "$this_script_filepath" ]]; then
|
||||
echo -e "\nExecuting $test_runner_filepath..." > /dev/stderr
|
||||
echo -e "\nExecuting $test_runner_filepath..." >> /dev/stderr
|
||||
$test_runner_filepath
|
||||
else
|
||||
echo -e "\nWARNING: Skipping $test_runner_filepath" > /dev/stderr
|
||||
echo -e "\nWARNING: Skipping $test_runner_filepath" >> /dev/stderr
|
||||
fi
|
||||
done
|
||||
|
||||
|
|
|
@ -1,8 +1,11 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Installs 'build-push' script system-wide. NOT intended to be used directly
|
||||
# by humans, should only be used indirectly by running
|
||||
# ../bin/install_automation.sh <ver> build-push
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
source "$AUTOMATION_LIB_PATH/anchors.sh"
|
||||
source "$AUTOMATION_LIB_PATH/console_output.sh"
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@ if [[ ! -r "$AUTOMATION_LIB_PATH/common_lib.sh" ]]; then
|
|||
echo "ERROR: Expecting \$AUTOMATION_LIB_PATH to contain the installation"
|
||||
echo " directory path for the common automation tooling."
|
||||
echo " Please refer to the README.md for installation instructions."
|
||||
) > /dev/stderr
|
||||
) >> /dev/stderr
|
||||
exit 2 # Verified by tests
|
||||
fi
|
||||
|
||||
|
@ -228,7 +228,8 @@ parse_args() {
|
|||
dbg "Grabbing Context parameter: '$arg'."
|
||||
CONTEXT=$(realpath -e -P $arg || die_help "$E_CONTEXT '$arg'")
|
||||
else
|
||||
# Properly handle any embedded special characters
|
||||
# Hack: Allow array addition to handle any embedded special characters
|
||||
# shellcheck disable=SC2207
|
||||
BUILD_ARGS+=($(printf "%q" "$arg"))
|
||||
fi
|
||||
;;
|
||||
|
@ -290,12 +291,12 @@ stage_notice() {
|
|||
# N/B: It would be nice/helpful to resolve any env. vars. in '$@'
|
||||
# for display. Unfortunately this is hard to do safely
|
||||
# with (e.g.) eval echo "$@" :(
|
||||
msg="$@"
|
||||
msg="$*"
|
||||
(
|
||||
echo "############################################################"
|
||||
echo "$msg"
|
||||
echo "############################################################"
|
||||
) > /dev/stderr
|
||||
) >> /dev/stderr
|
||||
}
|
||||
|
||||
BUILTIID="" # populated with the image-id on successful build
|
||||
|
@ -322,22 +323,31 @@ parallel_build() {
|
|||
|
||||
# Keep user-specified BUILD_ARGS near the beginning so errors are easy to spot
|
||||
# Provide a copy of the output in case something goes wrong in a complex build
|
||||
stage_notice "Executing build command: '$RUNTIME build ${BUILD_ARGS[@]} ${_args[@]}'"
|
||||
stage_notice "Executing build command: '$RUNTIME build ${BUILD_ARGS[*]} ${_args[*]}'"
|
||||
"$RUNTIME" build "${BUILD_ARGS[@]}" "${_args[@]}"
|
||||
}
|
||||
|
||||
confirm_arches() {
|
||||
local inspjson
|
||||
local filter=".manifests[].platform.architecture"
|
||||
local arch
|
||||
local maniarches
|
||||
|
||||
dbg "in confirm_arches()"
|
||||
req_env_vars FQIN ARCHES RUNTIME
|
||||
maniarches=$($RUNTIME manifest inspect "containers-storage:$FQIN:latest" | \
|
||||
jq -r "$filter" | \
|
||||
grep -v 'null' | \
|
||||
tr -s '[:space:]' ' ' | \
|
||||
sed -z '$ s/[\n ]$//')
|
||||
if ! inspjson=$($RUNTIME manifest inspect "containers-storage:$FQIN:latest"); then
|
||||
die "Error reading manifest list metadata for 'containers-storage:$FQIN:latest'"
|
||||
fi
|
||||
|
||||
# Convert into space-delimited string for grep error message (below)
|
||||
# TODO: Use an array instead, could be simpler? Would need testing.
|
||||
if ! maniarches=$(jq -r "$filter" <<<"$inspjson" | \
|
||||
grep -v 'null' | \
|
||||
tr -s '[:space:]' ' ' | \
|
||||
sed -z '$ s/[\n ]$//'); then
|
||||
die "Error processing manifest list metadata:
|
||||
$inspjson"
|
||||
fi
|
||||
dbg "Found manifest arches: $maniarches"
|
||||
|
||||
for arch in $ARCHES; do
|
||||
|
@ -357,7 +367,7 @@ registry_login() {
|
|||
$RUNTIME login --username "$NAMESPACE_USERNAME" --password-stdin \
|
||||
"$REGSERVER/$NAMESPACE"
|
||||
LOGGEDIN=1
|
||||
else
|
||||
elif ((PUSH)); then
|
||||
dbg " Already logged in"
|
||||
fi
|
||||
|
||||
|
@ -369,6 +379,8 @@ run_prepmod_cmd() {
|
|||
local kind="$1"
|
||||
shift
|
||||
dbg "Exporting variables '$_CMD_ENV'"
|
||||
# The indirect export is intentional here
|
||||
# shellcheck disable=SC2163
|
||||
export $_CMD_ENV
|
||||
stage_notice "Executing $kind-command: " "$@"
|
||||
bash -c "$@"
|
||||
|
@ -377,51 +389,72 @@ run_prepmod_cmd() {
|
|||
|
||||
# Outputs sorted list of FQIN w/ tags to stdout, silent otherwise
|
||||
get_manifest_tags() {
|
||||
local _json
|
||||
local result_json
|
||||
local fqin_names
|
||||
dbg "in get_manifest_fqins()"
|
||||
|
||||
# At the time of this comment, there is no reliable way to
|
||||
# lookup all tags based solely on inspecting a manifest.
|
||||
# However, since we know $FQIN (remember, value has no tag) we can
|
||||
# use it to search all related names container storage. Unfortunately
|
||||
# use it to search all related names in container storage. Unfortunately
|
||||
# because images can have multiple tags, the `reference` filter
|
||||
# can return names we don't care about. Work around this by
|
||||
# sending the final result back through a grep of $FQIN
|
||||
_json=$($RUNTIME images --json --filter=reference=$FQIN)
|
||||
dbg "Image listing json: $_json"
|
||||
if [[ -n "$_json" ]] && jq --exit-status '.[].names' <<<"$_json" &>/dev/null
|
||||
then
|
||||
jq --raw-output '.[].names[]'<<<"$_json" | grep "$FQIN" | sort
|
||||
# can return names we don't care about. Work around this with a
|
||||
# grep of $FQIN in the results.
|
||||
if ! result_json=$($RUNTIME images --json --filter=reference=$FQIN); then
|
||||
die "Error listing manifest-list images that reference '$FQIN'"
|
||||
fi
|
||||
|
||||
dbg "Image listing json: $result_json"
|
||||
if [[ -n "$result_json" ]]; then # N/B: value could be '[]'
|
||||
# Rely on the caller to handle an empty list, ignore items missing a name key.
|
||||
if ! fqin_names=$(jq -r '.[]? | .names[]?'<<<"$result_json"); then
|
||||
die "Error obtaining image names from '$FQIN' manifest-list search result:
|
||||
$result_json"
|
||||
fi
|
||||
|
||||
dbg "Sorting fqin_names"
|
||||
# Don't emit an empty newline when the list is empty
|
||||
[[ -z "$fqin_names" ]] || \
|
||||
sort <<<"$fqin_names"
|
||||
fi
|
||||
dbg "get_manifest_tags() returning successfully"
|
||||
}
|
||||
|
||||
push_images() {
|
||||
local _fqins
|
||||
local _fqin
|
||||
local fqin_list
|
||||
local fqin
|
||||
dbg "in push_images()"
|
||||
|
||||
# It's possible that --modcmd=* removed all images, make sure
|
||||
# this is known to the caller.
|
||||
_fqins=$(get_manifest_tags)
|
||||
if [[ -z "$_fqins" ]]; then
|
||||
die "No FQIN(s) to be pushed."
|
||||
if ! fqin_list=$(get_manifest_tags); then
|
||||
die "Retrieving set of manifest-list tags to push for '$FQIN'"
|
||||
fi
|
||||
if [[ -z "$fqin_list" ]]; then
|
||||
warn "No FQIN(s) to be pushed."
|
||||
fi
|
||||
|
||||
dbg "Will try to push FQINs: $_fqins"
|
||||
if ((PUSH)); then
|
||||
dbg "Will try to push FQINs: '$fqin_list'"
|
||||
|
||||
registry_login
|
||||
for _fqin in $_fqins; do
|
||||
# Note: --all means push manifest AND images it references
|
||||
msg "Pushing $_fqin"
|
||||
$RUNTIME manifest push --all $_fqin docker://$_fqin
|
||||
done
|
||||
registry_login
|
||||
for fqin in $fqin_list; do
|
||||
# Note: --all means push manifest AND images it references
|
||||
msg "Pushing $fqin"
|
||||
$RUNTIME manifest push --all $fqin docker://$fqin
|
||||
done
|
||||
else
|
||||
# Even if --nopush was specified, be helpful to humans with a lookup of all the
|
||||
# relevant tags for $FQIN that would have been pushed and display them.
|
||||
warn "Option --nopush specified, not pushing: '$fqin_list'"
|
||||
fi
|
||||
}
|
||||
|
||||
##### MAIN() #####
|
||||
|
||||
# Handle requested help first before anything else
|
||||
if grep -q -- '--help' <<<"$@"; then
|
||||
echo "$E_USAGE" > /dev/stdout # allow grep'ing
|
||||
echo "$E_USAGE" >> /dev/stdout # allow grep'ing
|
||||
exit 0
|
||||
fi
|
||||
|
||||
|
@ -433,9 +466,16 @@ if [[ -n "$PREPCMD" ]]; then
|
|||
fi
|
||||
|
||||
parallel_build "$FQIN:latest"
|
||||
|
||||
# If a parallel build or the manifest-list assembly fails, buildah
|
||||
# may still exit successfully. Catch this condition by verifying
|
||||
# all expected arches are present in the manifest list.
|
||||
confirm_arches
|
||||
|
||||
if [[ -n "$MODCMD" ]]; then
|
||||
registry_login
|
||||
run_prepmod_cmd mod "$MODCMD"
|
||||
fi
|
||||
if ((PUSH)); then push_images; fi
|
||||
|
||||
# Handles --nopush internally
|
||||
push_images
|
||||
|
|
|
@ -29,13 +29,15 @@ if [[ "$1" == "build" ]]; then
|
|||
elif [[ "$1" == "manifest" ]]; then
|
||||
# validate json while outputing it
|
||||
jq . $DATF
|
||||
elif [[ "$1" =~ info ]]; then
|
||||
elif [[ "$1" == "info" ]]; then
|
||||
case "$@" in
|
||||
*arch*) echo "amd64" ;;
|
||||
*cpus*) echo "2" ;;
|
||||
*) exit 1 ;;
|
||||
esac
|
||||
elif [[ "$1" == "images" ]]; then
|
||||
echo '[{"names":["localhost/foo/bar:latest"]}]'
|
||||
else
|
||||
echo "ERROR: Unexpected call to fake_buildah.sh"
|
||||
echo "ERROR: Unexpected arg '$1' to fake_buildah.sh" >> /dev/stderr
|
||||
exit 9
|
||||
fi
|
||||
|
|
|
@ -4,22 +4,16 @@
|
|||
|
||||
set -eo pipefail
|
||||
|
||||
# shellcheck disable=SC2154
|
||||
if [[ "$CIRRUS_CI" == "true" ]]; then
|
||||
# Cirrus-CI is setup (see .cirrus.yml) to run tests on CentOS
|
||||
# for simplicity, but it has no native qemu-user-static. For
|
||||
# the benefit of CI testing, cheat and use whatever random
|
||||
# emulators are included in the container image.
|
||||
|
||||
# Workaround silly stupid hub rate-limiting
|
||||
cat >> /etc/containers/registries.conf << EOF
|
||||
[[registry]]
|
||||
prefix="docker.io/library"
|
||||
location="mirror.gcr.io"
|
||||
EOF
|
||||
|
||||
# N/B: THIS IS NOT SAFE FOR PRODUCTION USE!!!!!
|
||||
podman run --rm --privileged \
|
||||
docker.io/multiarch/qemu-user-static:latest \
|
||||
mirror.gcr.io/multiarch/qemu-user-static:latest \
|
||||
--reset -p yes
|
||||
elif [[ -x "/usr/bin/qemu-aarch64-static" ]]; then
|
||||
# TODO: Better way to determine if kernel already setup?
|
||||
|
|
|
@ -8,7 +8,7 @@ source $TEST_SOURCE_DIRPATH/testlib.sh || exit 1
|
|||
SUBJ_FILEPATH="$TEST_DIR/$SUBJ_FILENAME"
|
||||
TEST_CONTEXT="$TEST_SOURCE_DIRPATH/test_context"
|
||||
EMPTY_CONTEXT=$(mktemp -d -p '' .tmp_$(basename ${BASH_SOURCE[0]})_XXXX)
|
||||
export NATIVE_GOARCH=$($RUNTIME info --format='{{.host.arch}}')
|
||||
export NATIVE_GOARCH=$(buildah info --format='{{.host.arch}}')
|
||||
|
||||
test_cmd "Verify error when automation library not found" \
|
||||
2 'ERROR: Expecting \$AUTOMATION_LIB_PATH' \
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
# Any/all other usage is virtually guaranteed to fail and/or cause
|
||||
# harm to the system.
|
||||
|
||||
for varname in RUNTIME TEST_FQIN BUILDAH_USERNAME BUILDAH_PASSWORD; do
|
||||
for varname in RUNTIME SUBJ_FILEPATH TEST_CONTEXT TEST_SOURCE_DIRPATH TEST_FQIN BUILDAH_USERNAME BUILDAH_PASSWORD; do
|
||||
value=${!varname}
|
||||
if [[ -z "$value" ]]; then
|
||||
echo "ERROR: Required \$$varname variable is unset/empty."
|
||||
|
@ -13,6 +13,8 @@ for varname in RUNTIME TEST_FQIN BUILDAH_USERNAME BUILDAH_PASSWORD; do
|
|||
done
|
||||
unset value
|
||||
|
||||
# RUNTIME is defined by caller
|
||||
# shellcheck disable=SC2154
|
||||
$RUNTIME --version
|
||||
test_cmd "Confirm $(basename $RUNTIME) is available" \
|
||||
0 "buildah version .+" \
|
||||
|
@ -23,7 +25,9 @@ test_cmd "Confirm skopeo is available" \
|
|||
0 "skopeo version .+" \
|
||||
skopeo --version
|
||||
|
||||
PREPCMD='echo "SpecialErrorMessage:$REGSERVER" > /dev/stderr && exit 42'
|
||||
PREPCMD='echo "SpecialErrorMessage:$REGSERVER" >> /dev/stderr && exit 42'
|
||||
# SUBJ_FILEPATH and TEST_CONTEXT are defined by caller
|
||||
# shellcheck disable=SC2154
|
||||
test_cmd "Confirm error output and exit(42) from --prepcmd" \
|
||||
42 "SpecialErrorMessage:localhost" \
|
||||
bash -c "$SUBJ_FILEPATH --nopush localhost/foo/bar $TEST_CONTEXT --prepcmd='$PREPCMD' 2>&1"
|
||||
|
@ -53,7 +57,7 @@ test_cmd "Confirm manifest-list can be removed by name" \
|
|||
$RUNTIME manifest rm containers-storage:localhost/foo/bar:latest
|
||||
|
||||
test_cmd "Verify expected partial failure when passing bogus architectures" \
|
||||
125 "error creating build.+architecture staple" \
|
||||
125 "no image found in image index for architecture" \
|
||||
bash -c "A_DEBUG=1 $SUBJ_FILEPATH --arches=correct,horse,battery,staple localhost/foo/bar --nopush $TEST_CONTEXT 2>&1"
|
||||
|
||||
MODCMD='$RUNTIME tag $FQIN:latest $FQIN:9.8.7-testing'
|
||||
|
@ -86,15 +90,12 @@ test_cmd "Verify tagged manifest image digest matches the same in latest" \
|
|||
MODCMD='
|
||||
set -x;
|
||||
$RUNTIME images && \
|
||||
$RUNTIME manifest rm containers-storage:$FQIN:latest && \
|
||||
$RUNTIME manifest rm containers-storage:$FQIN:9.8.7-testing && \
|
||||
$RUNTIME manifest rm $FQIN:latest && \
|
||||
$RUNTIME manifest rm $FQIN:9.8.7-testing && \
|
||||
echo "AllGone";
|
||||
'
|
||||
# TODO: Test fails due to: https://github.com/containers/buildah/issues/3490
|
||||
# for now pretend it should exit(125) which will be caught when bug is fixed
|
||||
# - causing it to exit(0) as it should
|
||||
test_cmd "Verify --modcmd can execute a long string with substitutions" \
|
||||
125 "AllGone" \
|
||||
test_cmd "Verify --modcmd can execute command string that removes all tags" \
|
||||
0 "AllGone.*No FQIN.+to be pushed" \
|
||||
bash -c "A_DEBUG=1 $SUBJ_FILEPATH --modcmd='$MODCMD' localhost/foo/bar --nopush $TEST_CONTEXT 2>&1"
|
||||
|
||||
test_cmd "Verify previous --modcmd removed the 'latest' tagged image" \
|
||||
|
@ -109,6 +110,8 @@ FAKE_VERSION=$RANDOM
|
|||
MODCMD="set -ex;
|
||||
\$RUNTIME tag \$FQIN:latest \$FQIN:$FAKE_VERSION;
|
||||
\$RUNTIME manifest rm \$FQIN:latest;"
|
||||
# TEST_FQIN and TEST_SOURCE_DIRPATH defined by caller
|
||||
# shellcheck disable=SC2154
|
||||
test_cmd "Verify e2e workflow w/ additional build-args" \
|
||||
0 "Pushing $TEST_FQIN:$FAKE_VERSION" \
|
||||
bash -c "env A_DEBUG=1 $SUBJ_FILEPATH \
|
||||
|
@ -121,7 +124,7 @@ test_cmd "Verify e2e workflow w/ additional build-args" \
|
|||
2>&1"
|
||||
|
||||
test_cmd "Verify latest tagged image was not pushed" \
|
||||
1 "(Tag latest was deleted or has expired.)|(manifest unknown: manifest unknown)" \
|
||||
2 'reading manifest latest in quay\.io/buildah/do_not_use: manifest unknown' \
|
||||
skopeo inspect docker://$TEST_FQIN:latest
|
||||
|
||||
test_cmd "Verify architectures can be obtained from manifest list" \
|
||||
|
@ -132,7 +135,7 @@ test_cmd "Verify architectures can be obtained from manifest list" \
|
|||
for arch in amd64 s390x arm64 ppc64le; do
|
||||
test_cmd "Verify $arch architecture present in $TEST_FQIN:$FAKE_VERSION" \
|
||||
0 "" \
|
||||
fgrep -qx "$arch" $TEST_TEMP/maniarches
|
||||
grep -Fqx "$arch" $TEST_TEMP/maniarches
|
||||
done
|
||||
|
||||
test_cmd "Verify pushed image can be removed" \
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
# Podman First-Time Contributor Certificate Generator
|
||||
|
||||
This directory contains a simple web-based certificate generator to celebrate first-time contributors to the Podman project.
|
||||
|
||||
## Files
|
||||
|
||||
- **`certificate_generator.html`** - Interactive web interface for creating certificates
|
||||
- **`certificate_template.html`** - The certificate template used for generation
|
||||
- **`first_pr.png`** - Podman logo/branding image used in certificates
|
||||
|
||||
## Usage
|
||||
|
||||
1. Open `certificate_generator.html` in a web browser
|
||||
2. Fill in the contributor's details:
|
||||
- Name
|
||||
- Pull Request number
|
||||
- Date (defaults to current date)
|
||||
3. Preview the certificate in real-time
|
||||
4. Click "Download Certificate" to save as HTML
|
||||
|
||||
## Purpose
|
||||
|
||||
These certificates are designed to recognize and celebrate community members who make their first contribution to the Podman project. The certificates feature Podman branding and can be customized for each contributor.
|
||||
|
||||
## Contributing
|
||||
|
||||
Feel free to improve the design, add features, or suggest enhancements to make the certificate generator even better for recognizing our amazing contributors!
|
|
@ -0,0 +1,277 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Podman Certificate Generator</title>
|
||||
<style>
|
||||
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');
|
||||
@import url('https://fonts.googleapis.com/css2?family=Merriweather:wght@400;700;900&display=swap');
|
||||
|
||||
body {
|
||||
font-family: 'Inter', sans-serif;
|
||||
background-color: #f0f2f5;
|
||||
margin: 0;
|
||||
padding: 2rem;
|
||||
}
|
||||
.container {
|
||||
display: grid;
|
||||
grid-template-columns: 380px 1fr;
|
||||
gap: 2rem;
|
||||
max-width: 1600px;
|
||||
margin: auto;
|
||||
}
|
||||
.form-panel {
|
||||
background-color: white;
|
||||
padding: 2rem;
|
||||
border-radius: 8px;
|
||||
box-shadow: 0 4px 12px rgba(0,0,0,0.1);
|
||||
height: fit-content;
|
||||
position: sticky;
|
||||
top: 2rem;
|
||||
}
|
||||
.form-panel h2 {
|
||||
margin-top: 0;
|
||||
color: #333;
|
||||
font-family: 'Merriweather', serif;
|
||||
}
|
||||
.form-group {
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
.form-group label {
|
||||
display: block;
|
||||
margin-bottom: 0.5rem;
|
||||
font-weight: 600;
|
||||
color: #555;
|
||||
}
|
||||
.form-group input {
|
||||
width: 100%;
|
||||
padding: 0.75rem;
|
||||
border: 1px solid #ccc;
|
||||
border-radius: 4px;
|
||||
box-sizing: border-box;
|
||||
font-size: 1rem;
|
||||
}
|
||||
.action-buttons {
|
||||
display: flex;
|
||||
gap: 1rem;
|
||||
margin-top: 1.5rem;
|
||||
}
|
||||
.action-buttons button {
|
||||
flex-grow: 1;
|
||||
padding: 0.75rem;
|
||||
border: none;
|
||||
border-radius: 4px;
|
||||
font-size: 1rem;
|
||||
font-weight: 600;
|
||||
cursor: pointer;
|
||||
transition: background-color 0.3s;
|
||||
}
|
||||
#downloadBtn {
|
||||
background-color: #28a745;
|
||||
color: white;
|
||||
}
|
||||
#downloadBtn:hover {
|
||||
background-color: #218838;
|
||||
}
|
||||
.preview-panel {
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: flex-start;
|
||||
}
|
||||
|
||||
/* Certificate Styles (copied from template and scaled) */
|
||||
.certificate {
|
||||
width: 800px;
|
||||
height: 1100px;
|
||||
background: #fdfaf0;
|
||||
border: 2px solid #333;
|
||||
position: relative;
|
||||
box-shadow: 0 10px 30px rgba(0,0,0,0.2);
|
||||
padding: 50px;
|
||||
box-sizing: border-box;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
font-family: 'Merriweather', serif;
|
||||
transform: scale(0.8);
|
||||
transform-origin: top center;
|
||||
}
|
||||
.party-popper { position: absolute; font-size: 40px; }
|
||||
.top-left { top: 40px; left: 40px; }
|
||||
.top-right { top: 40px; right: 40px; }
|
||||
.main-title { font-size: 48px; font-weight: 900; color: #333; text-align: center; margin-top: 60px; line-height: 1.2; text-transform: uppercase; }
|
||||
.subtitle { font-size: 24px; font-weight: 400; color: #333; text-align: center; margin-top: 30px; text-transform: uppercase; letter-spacing: 2px; }
|
||||
.contributor-name { font-size: 56px; font-weight: 700; color: #333; text-align: center; margin: 15px 0 50px; }
|
||||
.mascot-image { width: 450px; height: 450px; background-image: url('first_pr.png'); background-size: contain; background-repeat: no-repeat; background-position: center; margin-top: 20px; -webkit-print-color-adjust: exact; print-color-adjust: exact; }
|
||||
.description { font-size: 22px; color: #333; line-height: 1.6; text-align: center; margin-top: 40px; }
|
||||
.description strong { font-weight: 700; }
|
||||
.footer { width: 100%; margin-top: auto; padding-top: 30px; border-top: 1px solid #ccc; display: flex; justify-content: space-between; align-items: flex-end; font-size: 16px; color: #333; }
|
||||
.pr-info { text-align: left; }
|
||||
.signature { text-align: right; font-style: italic; }
|
||||
|
||||
@media print {
|
||||
body {
|
||||
background: #fff;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
.form-panel, .action-buttons {
|
||||
display: none;
|
||||
}
|
||||
.container {
|
||||
display: block;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
.preview-panel {
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
}
|
||||
.certificate {
|
||||
transform: scale(1);
|
||||
box-shadow: none;
|
||||
width: 100%;
|
||||
height: 100vh;
|
||||
page-break-inside: avoid;
|
||||
}
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container">
|
||||
<div class="form-panel">
|
||||
<h2>Certificate Generator</h2>
|
||||
<div class="form-group">
|
||||
<label for="contributorName">Contributor Name</label>
|
||||
<input type="text" id="contributorName" value="Mike McGrath">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label for="prNumber">PR Number</label>
|
||||
<input type="text" id="prNumber" value="26393">
|
||||
</div>
|
||||
<div class="form-group">
|
||||
<label for="mergeDate">Date</label>
|
||||
<input type="text" id="mergeDate" value="June 13, 2025">
|
||||
</div>
|
||||
<div class="action-buttons">
|
||||
<button id="downloadBtn">Download HTML</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="preview-panel">
|
||||
<div id="certificatePreview">
|
||||
<!-- Certificate HTML will be injected here by script -->
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
const nameInput = document.getElementById('contributorName');
|
||||
const prNumberInput = document.getElementById('prNumber');
|
||||
const dateInput = document.getElementById('mergeDate');
|
||||
const preview = document.getElementById('certificatePreview');
|
||||
|
||||
function generateCertificateHTML(name, prNumber, date) {
|
||||
const prLink = `https://github.com/containers/podman/pull/${prNumber}`;
|
||||
// This is the full, self-contained HTML for the certificate
|
||||
return `
|
||||
<div class="certificate">
|
||||
<div class="party-popper top-left">🎉</div>
|
||||
<div class="party-popper top-right">🎉</div>
|
||||
<div class="main-title">Certificate of<br>Contribution</div>
|
||||
<div class="subtitle">Awarded To</div>
|
||||
<div class="contributor-name">${name}</div>
|
||||
<div class="mascot-image"></div>
|
||||
<div class="description">
|
||||
For successfully submitting and merging their <strong>First Pull Request</strong> to the <strong>Podman project</strong>.<br>
|
||||
Your contribution helps make open source better—one PR at a time!
|
||||
</div>
|
||||
<div class="footer">
|
||||
<div class="pr-info">
|
||||
<div>🔧 Merged PR: <a href="${prLink}" target="_blank">${prLink}</a></div>
|
||||
<div style="margin-top: 5px;">${date}</div>
|
||||
</div>
|
||||
<div class="signature">
|
||||
Keep hacking, keep contributing!<br>
|
||||
– The Podman Community
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
`;
|
||||
}
|
||||
|
||||
function updatePreview() {
|
||||
const name = nameInput.value || '[CONTRIBUTOR_NAME]';
|
||||
const prNumber = prNumberInput.value || '[PR_NUMBER]';
|
||||
const date = dateInput.value || '[DATE]';
|
||||
preview.innerHTML = generateCertificateHTML(name, prNumber, date);
|
||||
}
|
||||
|
||||
document.getElementById('downloadBtn').addEventListener('click', () => {
|
||||
const name = nameInput.value || 'contributor';
|
||||
const prNumber = prNumberInput.value || '00000';
|
||||
const date = dateInput.value || 'Date';
|
||||
|
||||
const certificateHTML = generateCertificateHTML(name, prNumber, date);
|
||||
const fullPageHTML = `
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<title>Certificate for ${name}</title>
|
||||
<style>
|
||||
/* All the CSS from the generator page */
|
||||
@import url('https://fonts.googleapis.com/css2?family=Merriweather:wght@400;700;900&display=swap');
|
||||
body { margin: 20px; font-family: 'Merriweather', serif; background: #e0e0e0; }
|
||||
.certificate {
|
||||
transform: scale(1);
|
||||
box-shadow: none;
|
||||
margin: auto;
|
||||
}
|
||||
/* Paste all certificate-related styles here */
|
||||
.certificate { width: 800px; height: 1100px; background: #fdfaf0; border: 2px solid #333; position: relative; padding: 50px; box-sizing: border-box; display: flex; flex-direction: column; align-items: center; }
|
||||
.party-popper { position: absolute; font-size: 40px; }
|
||||
.top-left { top: 40px; left: 40px; }
|
||||
.top-right { top: 40px; right: 40px; }
|
||||
.main-title { font-size: 48px; font-weight: 900; color: #333; text-align: center; margin-top: 60px; line-height: 1.2; text-transform: uppercase; }
|
||||
.subtitle { font-size: 24px; font-weight: 400; color: #333; text-align: center; margin-top: 30px; text-transform: uppercase; letter-spacing: 2px; }
|
||||
.contributor-name { font-size: 56px; font-weight: 700; color: #333; text-align: center; margin: 15px 0 50px; }
|
||||
.mascot-image { width: 450px; height: 450px; background-image: url('first_pr.png'); background-size: contain; background-repeat: no-repeat; background-position: center; margin-top: 20px; -webkit-print-color-adjust: exact; print-color-adjust: exact; }
|
||||
.description { font-size: 22px; color: #333; line-height: 1.6; text-align: center; margin-top: 40px; }
|
||||
.description strong { font-weight: 700; }
|
||||
.footer { width: 100%; margin-top: auto; padding-top: 30px; border-top: 1px solid #ccc; display: flex; justify-content: space-between; align-items: flex-end; font-size: 16px; color: #333; }
|
||||
.pr-info { text-align: left; }
|
||||
.signature { text-align: right; font-style: italic; }
|
||||
|
||||
@media print {
|
||||
@page { size: A4 portrait; margin: 0; }
|
||||
body, html { width: 100%; height: 100%; margin: 0; padding: 0; }
|
||||
.certificate { width: 100%; height: 100%; box-shadow: none; transform: scale(1); }
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>${certificateHTML}</body>
|
||||
</html>
|
||||
`;
|
||||
|
||||
const blob = new Blob([fullPageHTML], { type: 'text/html' });
|
||||
const url = URL.createObjectURL(blob);
|
||||
const a = document.createElement('a');
|
||||
a.href = url;
|
||||
a.download = `podman-contribution-certificate-${name.toLowerCase().replace(/\s+/g, '-')}.html`;
|
||||
document.body.appendChild(a);
|
||||
a.click();
|
||||
document.body.removeChild(a);
|
||||
URL.revokeObjectURL(url);
|
||||
});
|
||||
|
||||
// Add event listeners to update preview on input change
|
||||
[nameInput, prNumberInput, dateInput].forEach(input => {
|
||||
input.addEventListener('input', updatePreview);
|
||||
});
|
||||
|
||||
// Initial preview generation
|
||||
updatePreview();
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,175 @@
|
|||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>Podman Certificate of Contribution</title>
|
||||
<style>
|
||||
@import url('https://fonts.googleapis.com/css2?family=Merriweather:wght@400;700;900&display=swap');
|
||||
|
||||
body {
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
font-family: 'Merriweather', serif;
|
||||
background: #e0e0e0;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
min-height: 100vh;
|
||||
}
|
||||
|
||||
.certificate {
|
||||
width: 800px;
|
||||
height: 1100px;
|
||||
background: #fdfaf0;
|
||||
border: 2px solid #333;
|
||||
position: relative;
|
||||
box-shadow: 0 10px 30px rgba(0, 0, 0, 0.2);
|
||||
padding: 50px;
|
||||
box-sizing: border-box;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.party-popper {
|
||||
position: absolute;
|
||||
font-size: 40px;
|
||||
}
|
||||
|
||||
.top-left {
|
||||
top: 40px;
|
||||
left: 40px;
|
||||
}
|
||||
|
||||
.top-right {
|
||||
top: 40px;
|
||||
right: 40px;
|
||||
}
|
||||
|
||||
.main-title {
|
||||
font-size: 48px;
|
||||
font-weight: 900;
|
||||
color: #333;
|
||||
text-align: center;
|
||||
margin-top: 60px;
|
||||
line-height: 1.2;
|
||||
text-transform: uppercase;
|
||||
}
|
||||
|
||||
.subtitle {
|
||||
font-size: 24px;
|
||||
font-weight: 400;
|
||||
color: #333;
|
||||
text-align: center;
|
||||
margin-top: 30px;
|
||||
text-transform: uppercase;
|
||||
letter-spacing: 2px;
|
||||
}
|
||||
|
||||
.contributor-name {
|
||||
font-size: 56px;
|
||||
font-weight: 700;
|
||||
color: #333;
|
||||
text-align: center;
|
||||
margin: 15px 0 50px;
|
||||
}
|
||||
|
||||
.mascot-image {
|
||||
width: 450px;
|
||||
height: 450px;
|
||||
background-image: url('first_pr.png');
|
||||
background-size: contain;
|
||||
background-repeat: no-repeat;
|
||||
background-position: center;
|
||||
margin-top: 20px;
|
||||
-webkit-print-color-adjust: exact;
|
||||
print-color-adjust: exact;
|
||||
}
|
||||
|
||||
.description {
|
||||
font-size: 22px;
|
||||
color: #333;
|
||||
line-height: 1.6;
|
||||
text-align: center;
|
||||
margin-top: 40px;
|
||||
}
|
||||
|
||||
.description strong {
|
||||
font-weight: 700;
|
||||
}
|
||||
|
||||
.footer {
|
||||
width: 100%;
|
||||
margin-top: auto;
|
||||
padding-top: 30px;
|
||||
border-top: 1px solid #ccc;
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: flex-end;
|
||||
font-size: 16px;
|
||||
color: #333;
|
||||
}
|
||||
|
||||
.pr-info {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.signature {
|
||||
text-align: right;
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
@media print {
|
||||
@page {
|
||||
size: A4 portrait;
|
||||
margin: 0;
|
||||
}
|
||||
body, html {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
background: #fdfaf0;
|
||||
}
|
||||
.certificate {
|
||||
width: 100%;
|
||||
height: 100vh;
|
||||
box-shadow: none;
|
||||
transform: scale(1);
|
||||
border-radius: 0;
|
||||
page-break-inside: avoid;
|
||||
}
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="certificate">
|
||||
<div class="party-popper top-left">🎉</div>
|
||||
<div class="party-popper top-right">🎉</div>
|
||||
|
||||
<div class="main-title">Certificate of<br>Contribution</div>
|
||||
<div class="subtitle">Awarded To</div>
|
||||
|
||||
<div class="contributor-name">[CONTRIBUTOR_NAME]</div>
|
||||
|
||||
<div class="mascot-image"></div>
|
||||
|
||||
<div class="description">
|
||||
For successfully submitting and merging their <strong>First Pull Request</strong> to the <strong>Podman project</strong>.<br>
|
||||
Your contribution helps make open source better—one PR at a time!
|
||||
</div>
|
||||
|
||||
<div class="footer">
|
||||
<div class="pr-info">
|
||||
<div>🔧 Merged PR: [PR_LINK]</div>
|
||||
<div style="margin-top: 5px;">[DATE]</div>
|
||||
</div>
|
||||
<div class="signature">
|
||||
Keep hacking, keep contributing!<br>
|
||||
– The Podman Community
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
Binary file not shown.
After Width: | Height: | Size: 578 KiB |
Binary file not shown.
After Width: | Height: | Size: 138 KiB |
Binary file not shown.
After Width: | Height: | Size: 138 KiB |
|
@ -6,7 +6,7 @@ RUN microdnf update -y && \
|
|||
perl-Test perl-Test-Simple perl-Test-Differences \
|
||||
perl-YAML-LibYAML perl-FindBin \
|
||||
python3 python3-virtualenv python3-pip gcc python3-devel \
|
||||
python3-flake8 python3-pep8-naming python3-flake8-docstrings python3-flake8-import-order python3-flake8-polyfill python3-mccabe python3-pep8-naming && \
|
||||
python3-flake8 python3-pep8-naming python3-flake8-import-order python3-flake8-polyfill python3-mccabe python3-pep8-naming && \
|
||||
microdnf clean all && \
|
||||
rm -rf /var/cache/dnf
|
||||
# Required by perl
|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Installs cirrus-ci_artifacts and a python virtual environment
|
||||
# to execute with. NOT intended to be used directly
|
||||
# by humans, should only be used indirectly by running
|
||||
# ../bin/install_automation.sh <ver> cirrus-ci_artifacts
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
source "$AUTOMATION_LIB_PATH/anchors.sh"
|
||||
source "$AUTOMATION_LIB_PATH/console_output.sh"
|
||||
|
||||
INSTALL_PREFIX=$(realpath $AUTOMATION_LIB_PATH/../)
|
||||
# Assume the directory this script is in, represents what is being installed
|
||||
INSTALL_NAME=$(basename $(dirname ${BASH_SOURCE[0]}))
|
||||
AUTOMATION_VERSION=$(automation_version)
|
||||
[[ -n "$AUTOMATION_VERSION" ]] || \
|
||||
die "Could not determine version of common automation libs, was 'install_automation.sh' successful?"
|
||||
|
||||
[[ -n "$(type -P virtualenv)" ]] || \
|
||||
die "$INSTALL_NAME requires python3-virtualenv"
|
||||
|
||||
echo "Installing $INSTALL_NAME version $(automation_version) into $INSTALL_PREFIX"
|
||||
|
||||
unset INST_PERM_ARG
|
||||
if [[ $UID -eq 0 ]]; then
|
||||
INST_PERM_ARG="-o root -g root"
|
||||
fi
|
||||
|
||||
cd $(dirname $(realpath "${BASH_SOURCE[0]}"))
|
||||
virtualenv --clear --download \
|
||||
$AUTOMATION_LIB_PATH/ccia.venv
|
||||
(
|
||||
source $AUTOMATION_LIB_PATH/ccia.venv/bin/activate
|
||||
pip3 install --requirement ./requirements.txt
|
||||
deactivate
|
||||
)
|
||||
install -v $INST_PERM_ARG -m '0644' -D -t "$INSTALL_PREFIX/lib/ccia.venv/bin" \
|
||||
./cirrus-ci_artifacts.py
|
||||
install -v $INST_PERM_ARG -D -t "$INSTALL_PREFIX/bin" ./cirrus-ci_artifacts
|
||||
|
||||
# Needed for installer testing
|
||||
echo "Successfully installed $INSTALL_NAME"
|
|
@ -1,17 +1,15 @@
|
|||
# Description
|
||||
|
||||
This is a small script which examines a Cirrus-CI build and downloads
|
||||
available artifacts in parallel, to the current working directory.
|
||||
Optionally, a regex may be provided to download only specific artifacts
|
||||
(by name/path).
|
||||
available artifacts in parallel, into a subdirectory tree corresponding
|
||||
with the Cirrus-CI build ID, followed by the task-name, artifact-name
|
||||
and file-path. Optionally, a regex may be provided to download only
|
||||
specific artifacts matching the subdirectory path.
|
||||
|
||||
The script may be executed from a currently running Cirrus-CI build
|
||||
(utilizing `$CIRRUS_BUILD_ID`), but only previously uploaded artifacts
|
||||
will be downloaded.
|
||||
|
||||
It is assumed that GCP is used as the back-end for the Cirrus-CI build,
|
||||
and the name of the (repository-specific) google-storage bucket is
|
||||
known.
|
||||
will be downloaded, and the task must have a `depends_on` statement
|
||||
to synchronize with tasks providing expected artifacts.
|
||||
|
||||
# Installation
|
||||
|
||||
|
@ -24,9 +22,12 @@ $ pip3 install --user --requirement ./requirements.txt
|
|||
|
||||
# Usage
|
||||
|
||||
Create and change to the directory where artifacts should be downloaded.
|
||||
Call the script, passing in the following arguments:
|
||||
Create and change to the directory where artifact tree should be
|
||||
created. Call the script, passing in the following arguments:
|
||||
|
||||
1. The Repository owner/name *e.g. `"containers/podman"`*
|
||||
2. The GCS bucket name *e.g. `"cirrus-ci-6707778565701632-fcae48"`*
|
||||
3. Optionally, a filter regex *e.g. `"runner_stats/.*fedora"`*
|
||||
1. Optional, `--verbose` prints out artifacts as they are
|
||||
downloaded or skipped.
|
||||
2. The Cirrus-CI build id (required) to retrieve (doesn't need to be
|
||||
finished running).
|
||||
3. Optional, a filter regex e.g. `'runner_stats/.*fedora.*'` to
|
||||
only download artifacts matching `<task>/<artifact>/<file-path>`
|
||||
|
|
|
@ -0,0 +1,24 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This script wrapps cirrus-ci_artifacts.sh inside a python
|
||||
# virtual environment setup at install time. It should not
|
||||
# be executed prior to installation.
|
||||
|
||||
set -e
|
||||
|
||||
# This is a convenience for callers that don't separately source this first
|
||||
# in their automation setup.
|
||||
if [[ -z "$AUTOMATION_LIB_PATH" ]] && [[ -r /etc/automation_environment ]]; then
|
||||
source /etc/automation_environment
|
||||
fi
|
||||
|
||||
if [[ -z "$AUTOMATION_LIB_PATH" ]]; then
|
||||
(
|
||||
echo "ERROR: Expecting \$AUTOMATION_LIB_PATH to be defined with the"
|
||||
echo " installation directory of automation tooling."
|
||||
) >> /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
|
||||
source $AUTOMATION_LIB_PATH/ccia.venv/bin/activate
|
||||
exec python3 $AUTOMATION_LIB_PATH/ccia.venv/bin/cirrus-ci_artifacts.py "$@"
|
|
@ -1,52 +1,57 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Download all artifacts from a Cirrus-CI Build into $PWD
|
||||
Download all artifacts from a Cirrus-CI Build into a subdirectory tree.
|
||||
|
||||
Subdirectory naming format: <build ID>/<task-name>/<artifact-name>/<file-path>
|
||||
|
||||
Input arguments (in order):
|
||||
Repo owner/name - string, from github.
|
||||
e.g. "containers/podman"
|
||||
Bucket - Name of the GCS bucket storing Cirrus-CI logs/artifacts.
|
||||
e.g. "cirrus-ci-6707778565701632-fcae48" for podman
|
||||
Build ID - string, the build containing tasks w/ artifacts to download
|
||||
e.g. "5790771712360448"
|
||||
Path RX - Optional, regular expression to include, matched against path
|
||||
format as: task_name/artifact_name/file_name
|
||||
Path RX - Optional, regular expression to match against subdirectory
|
||||
tree naming format.
|
||||
"""
|
||||
|
||||
import sys
|
||||
from urllib.parse import quote, unquote
|
||||
from os import makedirs
|
||||
from os.path import basename, dirname, join
|
||||
import asyncio
|
||||
import re
|
||||
import aiohttp
|
||||
import sys
|
||||
from argparse import ArgumentParser
|
||||
from os import makedirs
|
||||
from os.path import split
|
||||
from urllib.parse import quote, unquote
|
||||
|
||||
import requests
|
||||
# Ref: https://docs.aiohttp.org/en/stable/http_request_lifecycle.html
|
||||
from aiohttp import ClientSession
|
||||
# Ref: https://gql.readthedocs.io/en/latest/index.html
|
||||
# pip3 install --user --requirement ./requirements.txt
|
||||
# (and/or in a python virtual environment)
|
||||
|
||||
from gql import Client as GQLClient
|
||||
from gql import gql
|
||||
from gql.transport.requests import RequestsHTTPTransport
|
||||
from gql import Client as GQLClient
|
||||
|
||||
# Ref: https://docs.aiohttp.org/en/stable/http_request_lifecycle.html
|
||||
import asyncio
|
||||
|
||||
# Base URL for accessing google cloud storage buckets
|
||||
GCS_URL_BASE = "https://storage.googleapis.com"
|
||||
|
||||
# GraphQL API URL for Cirrus-CI
|
||||
CCI_GQL_URL = "https://api.cirrus-ci.com/graphql"
|
||||
|
||||
# Artifact download base-URL for Cirrus-CI.
|
||||
# Download URL will be formed by appending:
|
||||
# "/<CIRRUS_BUILD_ID>/<TASK NAME OR ALIAS>/<ARTIFACTS_NAME>/<PATH>"
|
||||
CCI_ART_URL = "https://api.cirrus-ci.com/v1/artifact/build"
|
||||
|
||||
def get_raw_taskinfo(gqlclient, build_id):
|
||||
"""Given a build ID, return a list of task objects"""
|
||||
# Set True when --verbose is first argument
|
||||
VERBOSE = False
|
||||
|
||||
def get_tasks(gqlclient, buildId): # noqa N803
|
||||
"""Given a build ID, return a list of task objects."""
|
||||
# Ref: https://cirrus-ci.org/api/
|
||||
query = gql('''
|
||||
query tasksByBuildID($build_id: ID!) {
|
||||
build(id: $build_id) {
|
||||
query tasksByBuildId($buildId: ID!) {
|
||||
build(id: $buildId) {
|
||||
tasks {
|
||||
name,
|
||||
id,
|
||||
buildId,
|
||||
artifacts {
|
||||
name,
|
||||
files {
|
||||
|
@ -57,110 +62,100 @@ def get_raw_taskinfo(gqlclient, build_id):
|
|||
}
|
||||
}
|
||||
''')
|
||||
query_vars = dict(build_id=build_id)
|
||||
result = gqlclient.execute(query, variable_values=query_vars)
|
||||
if "build" in result and result["build"]:
|
||||
result = result["build"]
|
||||
if "tasks" in result and len(result["tasks"]):
|
||||
return result["tasks"]
|
||||
else:
|
||||
raise RuntimeError(f"No tasks found for build with id {build_id}")
|
||||
else:
|
||||
raise RuntimeError(f"No Cirrus-CI build found with id {build_id}")
|
||||
query_vars = {"buildId": buildId}
|
||||
tasks = gqlclient.execute(query, variable_values=query_vars)
|
||||
if "build" in tasks and tasks["build"]:
|
||||
b = tasks["build"]
|
||||
if "tasks" in b and len(b["tasks"]):
|
||||
return b["tasks"]
|
||||
raise RuntimeError(f"No tasks found for build with ID {buildId}")
|
||||
raise RuntimeError(f"No Cirrus-CI build found with ID {buildId}")
|
||||
|
||||
|
||||
def art_to_url(tid, artifacts, repo, bucket):
|
||||
"""Given an list of artifacts from a task object, return tuple of names and urls"""
|
||||
if "/" not in repo:
|
||||
raise RuntimeError(f"Expecting slash sep. repo. owner and name: '{repo}'")
|
||||
def task_art_url_sfxs(task):
|
||||
"""Given a task dict return list CCI_ART_URL suffixes for all artifacts."""
|
||||
result = []
|
||||
# N/B: Structure comes from query in get_raw_taskinfo()
|
||||
for art in artifacts:
|
||||
try:
|
||||
key="name" # Also used by exception
|
||||
art_name = quote(art[key]) # Safe use as URL component
|
||||
key="files"
|
||||
art_files = art[key]
|
||||
except KeyError:
|
||||
# Invalid artifact for some reason, skip it with warning.
|
||||
sys.stderr.write(f"Warning: Encountered malformed artifact for TID {tid}, missing expected key '{key}'")
|
||||
continue
|
||||
for art_file in art_files:
|
||||
art_path = quote(art_file["path"]) # NOT AN ACTUAL DIRECTORY STRUCTURE
|
||||
url = f"{GCS_URL_BASE}/{bucket}/artifacts/{repo}/{tid}/{art_name}/{art_path}"
|
||||
# Prevent clashes if/when same file/path (part of URL) is contained
|
||||
# in several named artifacts.
|
||||
result.append((art_name, url))
|
||||
bid = task["buildId"]
|
||||
tname = quote(task["name"]) # Make safe for URLs
|
||||
for art in task["artifacts"]:
|
||||
aname = quote(art["name"])
|
||||
for _file in art["files"]:
|
||||
fpath = quote(_file["path"])
|
||||
result.append(f"{bid}/{tname}/{aname}/{fpath}")
|
||||
return result
|
||||
|
||||
def get_task_art_map(gqlclient, repo, bucket, build_id):
|
||||
"""Rreturn map of task name/artifact name to list of artifact URLs"""
|
||||
tid_map = {}
|
||||
for task in get_raw_taskinfo(gqlclient, build_id):
|
||||
tid = task["id"]
|
||||
artifacts = task["artifacts"]
|
||||
art_names_urls = art_to_url(tid, artifacts, repo, bucket)
|
||||
if len(art_names_urls):
|
||||
tid_map[task["name"]] = art_names_urls
|
||||
return tid_map
|
||||
|
||||
async def download_artifact(session, art_url):
|
||||
"""Asynchronous download contents of art_url as a byte-stream"""
|
||||
async with session.get(art_url) as response:
|
||||
# ref: https://docs.aiohttp.org/en/stable/client_reference.html#aiohttp.ClientResponse.read
|
||||
return await response.read()
|
||||
async def download_artifact(session, dest_path, dl_url):
|
||||
"""Asynchronous download contents of art_url as a byte-stream."""
|
||||
# Last path component assumed to be the filename
|
||||
makedirs(split(dest_path)[0], exist_ok=True) # os.path.split
|
||||
async with session.get(dl_url) as response:
|
||||
with open(dest_path, "wb") as dest_file:
|
||||
dest_file.write(await response.read())
|
||||
|
||||
async def download_artifacts(task_name, art_names_urls, path_rx=None):
|
||||
"""Download artifact if path_rx unset or matches dest. path into CWD subdirs"""
|
||||
async with aiohttp.ClientSession() as session:
|
||||
for art_name, art_url in art_names_urls:
|
||||
# Cirrus-CI Always/Only archives artifacts path one-level deep
|
||||
# (i.e. no subdirectories). The artifact name and filename were
|
||||
# are part of the URL, so must decode them. See art_to_url() above
|
||||
dest_path = join(task_name, unquote(art_name), basename(unquote(art_url)))
|
||||
|
||||
async def download_artifacts(task, path_rx=None):
|
||||
"""Given a task dict, download all artifacts or matches to path_rx."""
|
||||
downloaded = []
|
||||
skipped = []
|
||||
async with ClientSession() as session:
|
||||
for art_url_sfx in task_art_url_sfxs(task):
|
||||
dest_path = unquote(art_url_sfx) # Strip off URL encoding
|
||||
dl_url = f"{CCI_ART_URL}/{dest_path}"
|
||||
if path_rx is None or bool(path_rx.search(dest_path)):
|
||||
print(f"Downloading '{dest_path}'")
|
||||
sys.stderr.flush()
|
||||
makedirs(dirname(dest_path), exist_ok=True)
|
||||
with open(dest_path, "wb") as dest_file:
|
||||
data = await download_artifact(session, art_url)
|
||||
dest_file.write(data)
|
||||
if VERBOSE:
|
||||
print(f" Downloading '{dest_path}'")
|
||||
sys.stdout.flush()
|
||||
await download_artifact(session, dest_path, dl_url)
|
||||
downloaded.append(dest_path)
|
||||
else:
|
||||
if VERBOSE:
|
||||
print(f" Skipping '{dest_path}'")
|
||||
skipped.append(dest_path)
|
||||
return {"downloaded": downloaded, "skipped": skipped}
|
||||
|
||||
|
||||
def get_arg(index, name):
|
||||
"""Return the value of command-line argument, raise error ref: name if empty"""
|
||||
err_msg=f"Error: Missing/empty {name} argument\n\nUsage: {sys.argv[0]} <repo. owner/name> <bucket> <build ID> [path rx]"
|
||||
try:
|
||||
result=sys.argv[index]
|
||||
if bool(result):
|
||||
return result
|
||||
else:
|
||||
raise ValueError(err_msg)
|
||||
except IndexError:
|
||||
sys.stderr.write(f'{err_msg}\n')
|
||||
sys.exit(1)
|
||||
def get_args(argv):
|
||||
"""Return parsed argument namespace object."""
|
||||
parser = ArgumentParser(prog="cirrus-ci_artifacts",
|
||||
description=('Download Cirrus-CI artifacts by Build ID'
|
||||
' number, into a subdirectory of the form'
|
||||
' <Build ID>/<Task Name>/<Artifact Name>'
|
||||
'/<File Path>'))
|
||||
parser.add_argument('-v', '--verbose',
|
||||
dest='verbose', action='store_true', default=False,
|
||||
help='Show "Downloaded" | "Skipped" + relative artifact file-path.')
|
||||
parser.add_argument('buildId', nargs=1, metavar='<Build ID>', type=int,
|
||||
help="A Cirrus-CI Build ID number.")
|
||||
parser.add_argument('path_rx', nargs='?', default=None, metavar='[Reg. Exp.]',
|
||||
help="Reg. exp. include only <task>/<artifact>/<file-path> matches.")
|
||||
return parser.parse_args(args=argv[1:])
|
||||
|
||||
|
||||
async def download(tasks, path_rx=None):
|
||||
"""Return results from all async operations."""
|
||||
# Python docs say to retain a reference to all tasks so they aren't
|
||||
# "garbage-collected" while still active.
|
||||
results = []
|
||||
for task in tasks:
|
||||
if len(task["artifacts"]):
|
||||
results.append(asyncio.create_task(download_artifacts(task, path_rx)))
|
||||
await asyncio.gather(*results)
|
||||
return results
|
||||
|
||||
|
||||
def main(buildId, path_rx=None): # noqa: N803,D103
|
||||
if path_rx is not None:
|
||||
path_rx = re.compile(path_rx)
|
||||
transport = RequestsHTTPTransport(url=CCI_GQL_URL, verify=True, retries=3)
|
||||
with GQLClient(transport=transport, fetch_schema_from_transport=True) as gqlclient:
|
||||
tasks = get_tasks(gqlclient, buildId)
|
||||
transport.close()
|
||||
async_results = asyncio.run(download(tasks, path_rx))
|
||||
return [r.result() for r in async_results]
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
repo = get_arg(1, "repo. owner/name")
|
||||
bucket = get_arg(2, "bucket")
|
||||
build_id = get_arg(3, "build ID")
|
||||
path_rx = None
|
||||
if len(sys.argv) >= 5:
|
||||
path_rx = re.compile(get_arg(4, "path rx"))
|
||||
|
||||
# Ref: https://cirrus-ci.org/api/
|
||||
cirrus_graphql_xport = RequestsHTTPTransport(
|
||||
url=CCI_GQL_URL,
|
||||
verify=True,
|
||||
retries=3)
|
||||
gqlclient = GQLClient(transport=cirrus_graphql_xport,
|
||||
fetch_schema_from_transport=True)
|
||||
|
||||
task_art_map = get_task_art_map(gqlclient, repo, bucket, build_id)
|
||||
loop = asyncio.get_event_loop()
|
||||
download_tasks = []
|
||||
for task_name, art_names_urls in task_art_map.items():
|
||||
download_tasks.append(loop.create_task(
|
||||
download_artifacts(task_name, art_names_urls, path_rx)))
|
||||
loop.run_until_complete(asyncio.gather(*download_tasks))
|
||||
args = get_args(sys.argv)
|
||||
VERBOSE = args.verbose
|
||||
main(args.buildId[0], args.path_rx)
|
||||
|
|
|
@ -1,16 +1,19 @@
|
|||
aiohttp>=3.8.1
|
||||
aiosignal>=1.2.0
|
||||
async-timeout>=4.0.2
|
||||
attrs>=21.4.0
|
||||
certifi>=2021.10.8
|
||||
charset-normalizer>=2.0.11
|
||||
frozenlist>=1.3.0
|
||||
gql>=3.0.0
|
||||
graphql-core>=3.2.0
|
||||
idna>=3.3
|
||||
multidict>=6.0.2
|
||||
PyYAML>=6.0
|
||||
requests>=2.27.1
|
||||
requests-toolbelt>=0.9.1
|
||||
urllib3>=1.26.8
|
||||
yarl>=1.7.2
|
||||
# Producing this list was done using the following process:
|
||||
# 1. Create a temporary `req.txt` file containing only the basic
|
||||
# non-distribution provided packages, e.g. `aiohttp[speedups]`,
|
||||
# `PyYAML`, `gql[requests]`, `requests` (see cirrus-ci_artifacts.py,
|
||||
# actual requirements may have changed)
|
||||
# 2. From a Fedora:latest container, install python3 & python3-virtualenv
|
||||
# 3. Setup & activate a temporary virtual environment
|
||||
# 4. Execute `pip3 install --requirements req.txt`
|
||||
# 5. Run pip3 freeze
|
||||
# 6. Edit `requirements.txt`, add the `~=` specifier to each line along
|
||||
# with the correct two-component version number (from freeze output)
|
||||
# 7. In a fresh container, confirm the automation installer
|
||||
# functions with the cirrus-ci_artifacts component (see main README
|
||||
# for installer instructions)
|
||||
PyYAML~=6.0
|
||||
aiohttp[speedups]~=3.8
|
||||
gql[requests]~=3.3
|
||||
requests>=2,<3
|
||||
urllib3<2.5.1
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
../cirrus-ci_artifacts.py
|
|
@ -1,21 +1,29 @@
|
|||
#!/bin/bash
|
||||
|
||||
if [[ "$CIRRUS_CI" != "true" ]]; then
|
||||
echo -e "\nSkipping: Test must be executed under Cirrus-CI\n"
|
||||
exit 0
|
||||
fi
|
||||
set -e
|
||||
|
||||
TESTDIR=$(dirname ${BASH_SOURCE[0]})
|
||||
|
||||
cd "$TESTDIR/../"
|
||||
virtualenv testvenv
|
||||
if [[ "$GITHUB_ACTIONS" == "true" ]]; then
|
||||
echo "Lint/Style checking not supported under github actions: Skipping"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
set -a
|
||||
source testvenv/bin/activate
|
||||
set +a
|
||||
if [[ -x $(type -P flake8-3) ]]; then
|
||||
cd "$TESTDIR"
|
||||
set -a
|
||||
virtualenv testvenv
|
||||
source testvenv/bin/activate
|
||||
testvenv/bin/python -m pip install --upgrade pip
|
||||
pip3 install --requirement ../requirements.txt
|
||||
set +a
|
||||
|
||||
testvenv/bin/python -m pip install --upgrade pip
|
||||
pip3 install --requirement ./requirements.txt
|
||||
./test_cirrus-ci_artifacts.py -v
|
||||
|
||||
cd "$TESTDIR"
|
||||
./test_cirrus-ci_artifacts.py
|
||||
cd ..
|
||||
flake8-3 --max-line-length=100 ./cirrus-ci_artifacts.py
|
||||
flake8-3 --max-line-length=100 --extend-ignore=D101,D102,D103,D105 test/test_cirrus-ci_artifacts.py
|
||||
else
|
||||
echo "Can't find flake-8-3 binary, is script executing inside CI container?"
|
||||
exit 1
|
||||
fi
|
||||
|
|
|
@ -1,45 +1,52 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Verify contents of .cirrus.yml meet specific expectations
|
||||
"""
|
||||
"""Verify contents of .cirrus.yml meet specific expectations."""
|
||||
|
||||
import sys
|
||||
import asyncio
|
||||
import os
|
||||
from io import StringIO
|
||||
import re
|
||||
import unittest
|
||||
import importlib.util
|
||||
from contextlib import redirect_stderr, redirect_stdout
|
||||
from unittest.mock import Mock, patch
|
||||
from urllib.parse import quote
|
||||
from io import StringIO
|
||||
from tempfile import TemporaryDirectory
|
||||
from unittest.mock import MagicMock, mock_open, patch
|
||||
|
||||
import ccia
|
||||
|
||||
import yaml
|
||||
|
||||
# Assumes directory structure of this file relative to repo.
|
||||
TEST_DIRPATH = os.path.dirname(os.path.realpath(__file__))
|
||||
SCRIPT_FILENAME = os.path.basename(__file__).replace('test_','')
|
||||
SCRIPT_DIRPATH = os.path.realpath(os.path.join(TEST_DIRPATH, '..', SCRIPT_FILENAME))
|
||||
|
||||
# Script otherwise not intended to be loaded as a module
|
||||
spec = importlib.util.spec_from_file_location("cci_arts", SCRIPT_DIRPATH)
|
||||
cci_arts = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(cci_arts)
|
||||
def fake_makedirs(*args, **dargs):
|
||||
return None
|
||||
|
||||
|
||||
# Needed for testing asyncio functions and calls
|
||||
# ref: https://agariinc.medium.com/strategies-for-testing-async-code-in-python-c52163f2deab
|
||||
class AsyncMock(MagicMock):
|
||||
|
||||
async def __call__(self, *args, **dargs):
|
||||
return super().__call__(*args, **dargs)
|
||||
|
||||
|
||||
class AsyncContextManager(MagicMock):
|
||||
|
||||
async def __aenter__(self, *args, **dargs):
|
||||
return self.__enter__(*args, **dargs)
|
||||
|
||||
async def __aexit__(self, *args, **dargs):
|
||||
return self.__exit__(*args, **dargs)
|
||||
|
||||
|
||||
class TestBase(unittest.TestCase):
|
||||
|
||||
FAKE_GCS = "ftp://foo.bar"
|
||||
FAKE_CCI = "sql://sna.fu"
|
||||
|
||||
ORIGINAL_GCS = cci_arts.GCS_URL_BASE
|
||||
ORIGINAL_CCI = cci_arts.CCI_GQL_URL
|
||||
FAKE_CCI = "sql://fake.url.invalid/graphql"
|
||||
FAKE_API = "smb://fake.url.invalid/artifact"
|
||||
|
||||
def setUp(self):
|
||||
cci_arts.GCS_URL_BASE = self.FAKE_GCS
|
||||
cci_arts.CCI_GQL_URL = self.FAKE_CCI
|
||||
|
||||
def tearDown(self):
|
||||
cci_arts.GCS_URL_BASE = self.ORIGINAL_GCS
|
||||
cci_arts.CCI_GQL_URL = self.ORIGINAL_CCI
|
||||
ccia.VERBOSE = True
|
||||
patch('ccia.CCI_GQL_URL', new=self.FAKE_CCI).start()
|
||||
patch('ccia.CCI_ART_URL', new=self.FAKE_API).start()
|
||||
self.addCleanup(patch.stopall)
|
||||
|
||||
|
||||
class TestUtils(TestBase):
|
||||
|
@ -47,99 +54,140 @@ class TestUtils(TestBase):
|
|||
# YAML is easier on human eyeballs
|
||||
# Ref: https://github.com/cirruslabs/cirrus-ci-web/blob/master/schema.graphql
|
||||
# type Artifacts and ArtifactFileInfo
|
||||
TEST_ARTIFACTS_YAML = """
|
||||
- name: test_art-0
|
||||
type: test_type-0
|
||||
format: art_format-0
|
||||
files:
|
||||
- path: path/test/art/0
|
||||
size: 0
|
||||
- name: test_art-1
|
||||
type: test_type-1
|
||||
format: art_format-1
|
||||
files:
|
||||
- path: path/test/art/1
|
||||
size: 1
|
||||
- name: test_art-2
|
||||
type: test_type-2
|
||||
format: art_format-2
|
||||
files:
|
||||
- path: path/test/art/2
|
||||
size: 2
|
||||
TEST_TASK_YAML = """
|
||||
- &test_task
|
||||
name: task_1
|
||||
id: 1
|
||||
buildId: 0987654321
|
||||
artifacts:
|
||||
- name: test_art-0
|
||||
type: test_type-0
|
||||
format: art_format-0
|
||||
files:
|
||||
- path: path/test/art/0
|
||||
size: 0
|
||||
- name: test_art-1
|
||||
type: test_type-1
|
||||
format: art_format-1
|
||||
files:
|
||||
- path: path/test/art/1
|
||||
size: 1
|
||||
- path: path/test/art/2
|
||||
size: 2
|
||||
- name: test_art-2
|
||||
type: test_type-2
|
||||
format: art_format-2
|
||||
files:
|
||||
- path: path/test/art/3
|
||||
size: 3
|
||||
- path: path/test/art/4
|
||||
size: 4
|
||||
- path: path/test/art/5
|
||||
size: 5
|
||||
- path: path/test/art/6
|
||||
size: 6
|
||||
- <<: *test_task
|
||||
name: task_2
|
||||
id: 2
|
||||
"""
|
||||
TEST_ARTIFACTS = yaml.safe_load(TEST_ARTIFACTS_YAML)
|
||||
TEST_TASKS = yaml.safe_load(TEST_TASK_YAML)
|
||||
TEST_URL_RX = re.compile(r"987654321/task_.+/test_art-.+/path/test/art/.+")
|
||||
|
||||
def test_task_art_url_sfxs(self):
|
||||
for test_task in self.TEST_TASKS:
|
||||
actual = ccia.task_art_url_sfxs(test_task)
|
||||
with self.subTest(test_task=test_task):
|
||||
for url in actual:
|
||||
with self.subTest(url=url):
|
||||
self.assertRegex(url, self.TEST_URL_RX)
|
||||
|
||||
# N/B: The ClientSession mock causes a (probably) harmless warning:
|
||||
# ResourceWarning: unclosed transport <_SelectorSocketTransport fd=7>
|
||||
# I have no idea how to fix or hide this, leaving it as-is.
|
||||
def test_download_artifacts_all(self):
|
||||
for test_task in self.TEST_TASKS:
|
||||
with self.subTest(test_task=test_task), \
|
||||
patch('ccia.download_artifact', new_callable=AsyncMock), \
|
||||
patch('ccia.ClientSession', new_callable=AsyncContextManager), \
|
||||
patch('ccia.makedirs', new=fake_makedirs), \
|
||||
patch('ccia.open', new=mock_open()):
|
||||
|
||||
# N/B: This makes debugging VERY difficult, comment out for pdb use
|
||||
fake_stdout = StringIO()
|
||||
fake_stderr = StringIO()
|
||||
with redirect_stderr(fake_stderr), redirect_stdout(fake_stdout):
|
||||
asyncio.run(ccia.download_artifacts(test_task))
|
||||
self.assertEqual(fake_stderr.getvalue(), '')
|
||||
for line in fake_stdout.getvalue().splitlines():
|
||||
with self.subTest(line=line):
|
||||
self.assertRegex(line.strip(), self.TEST_URL_RX)
|
||||
|
||||
|
||||
class TestMain(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
ccia.VERBOSE = True
|
||||
try:
|
||||
self.bid = os.environ["CIRRUS_BUILD_ID"]
|
||||
except KeyError:
|
||||
self.skipTest("Requires running under Cirrus-CI")
|
||||
self.tmp = TemporaryDirectory(prefix="test_ccia_tmp")
|
||||
self.cwd = os.getcwd()
|
||||
os.chdir(self.tmp.name)
|
||||
|
||||
def test_get_arg(self):
|
||||
argv=('test0', 'test1', 'test2', 'test3', 'test4', 'test5')
|
||||
with patch('sys.argv', new=argv):
|
||||
for arg_n in range(0,6):
|
||||
with self.subTest(arg_n=arg_n):
|
||||
expected = f"test{arg_n}"
|
||||
self.assertEqual(
|
||||
cci_arts.get_arg(arg_n, "foobar"),
|
||||
expected)
|
||||
def tearDown(self):
|
||||
os.chdir(self.cwd)
|
||||
self.tmp.cleanup()
|
||||
|
||||
def test_empty_get_arg(self):
|
||||
argv=('test1', '')
|
||||
with patch('sys.argv', new=argv):
|
||||
self.assertRaisesRegex(ValueError, f"Usage: {argv[0]}",
|
||||
cci_arts.get_arg, 1, "empty")
|
||||
def main_result_has(self, results, stdout_filepath, action="downloaded"):
|
||||
for result in results:
|
||||
for action_filepath in result[action]:
|
||||
if action_filepath == stdout_filepath:
|
||||
exists = os.path.isfile(os.path.join(self.tmp.name, action_filepath))
|
||||
if "downloaded" in action:
|
||||
self.assertTrue(exists,
|
||||
msg=f"Downloaded not found: '{action_filepath}'")
|
||||
return
|
||||
# action==skipped
|
||||
self.assertFalse(exists,
|
||||
msg=f"Skipped file found: '{action_filepath}'")
|
||||
return
|
||||
self.fail(f"Expecting to find {action_filepath} entry in main()'s {action} results")
|
||||
|
||||
def test_empty_get_arg(self):
|
||||
argv=('test2', '')
|
||||
fake_exit = Mock()
|
||||
def test_cirrus_ci_download_all(self):
|
||||
expect_rx = re.compile(f".+'{self.bid}/[^/]+/[^/]+/.+'")
|
||||
# N/B: This makes debugging VERY difficult, comment out for pdb use
|
||||
fake_stdout = StringIO()
|
||||
fake_stderr = StringIO()
|
||||
with patch('sys.argv', new=argv), patch('sys.exit', new=fake_exit):
|
||||
# N/B: This makes debugging VERY difficult
|
||||
with redirect_stderr(fake_stderr), redirect_stdout(fake_stdout):
|
||||
cci_arts.get_arg(2, "unset")
|
||||
self.assertEqual(fake_stdout.getvalue(), '')
|
||||
self.assertRegex(fake_stderr.getvalue(), r'Error: Missing')
|
||||
fake_exit.assert_called_with(1)
|
||||
|
||||
def test_art_to_url(self, test_arts=TEST_ARTIFACTS):
|
||||
exp_tid=1234
|
||||
exp_repo="foo/bar"
|
||||
exp_bucket="snafu"
|
||||
args = (exp_tid, test_arts, exp_repo, exp_bucket)
|
||||
actual = cci_arts.art_to_url(*args)
|
||||
for art_n, act_name_url in enumerate(actual):
|
||||
exp_name = f"test_art-{art_n}"
|
||||
act_name = act_name_url[0]
|
||||
with self.subTest(exp_name=exp_name, act_name=act_name):
|
||||
self.assertEqual(exp_name, act_name)
|
||||
|
||||
# Name and path must be url-encoded
|
||||
exp_q_name = quote(exp_name)
|
||||
exp_q_path = quote(test_arts[art_n]["files"][0]["path"])
|
||||
# No shortcut here other than duplicating the well-established format
|
||||
exp_url = f"{self.FAKE_GCS}/{exp_bucket}/artifacts/{exp_repo}/{exp_tid}/{exp_q_name}/{exp_q_path}"
|
||||
act_url = act_name_url[1]
|
||||
with self.subTest(exp_url=exp_url, act_url=act_url):
|
||||
self.assertEqual(exp_url, act_url)
|
||||
|
||||
def test_bad_art_to_url(self):
|
||||
broken_artifacts = yaml.safe_load(TestUtils.TEST_ARTIFACTS_YAML)
|
||||
del broken_artifacts[0]["files"] # Ref #1 (below)
|
||||
broken_artifacts[1]["files"] = {}
|
||||
broken_artifacts[2] = {} # Ref #2 (below)
|
||||
fake_stdout = StringIO()
|
||||
fake_stderr = StringIO()
|
||||
# N/B: debugging VERY difficult
|
||||
with redirect_stderr(fake_stderr), redirect_stdout(fake_stdout):
|
||||
self.test_art_to_url(test_arts=broken_artifacts)
|
||||
import warnings
|
||||
warnings.filterwarnings("ignore", category=DeprecationWarning)
|
||||
results = ccia.main(self.bid)
|
||||
self.assertEqual(fake_stderr.getvalue(), '')
|
||||
for line in fake_stdout.getvalue().splitlines():
|
||||
with self.subTest(line=line):
|
||||
s_line = line.lower().strip()
|
||||
filepath = line.split(sep="'", maxsplit=3)[1]
|
||||
self.assertRegex(s_line, expect_rx)
|
||||
if s_line.startswith("download"):
|
||||
self.main_result_has(results, filepath)
|
||||
elif s_line.startswith("skip"):
|
||||
self.main_result_has(results, filepath, "skipped")
|
||||
else:
|
||||
self.fail(f"Unexpected stdout line: '{s_line}'")
|
||||
|
||||
stderr = fake_stderr.getvalue()
|
||||
stdout = fake_stdout.getvalue()
|
||||
self.assertEqual(stdout, '')
|
||||
# Ref #1 (above)
|
||||
self.assertRegex(stderr, r"Warning:.+TID 1234.+key 'files'")
|
||||
# Ref #2 (above)
|
||||
self.assertRegex(stderr, r"Warning:.+TID 1234.+key 'name'")
|
||||
def test_cirrus_ci_download_none(self):
|
||||
# N/B: This makes debugging VERY difficult, comment out for pdb use
|
||||
fake_stdout = StringIO()
|
||||
fake_stderr = StringIO()
|
||||
with redirect_stderr(fake_stderr), redirect_stdout(fake_stdout):
|
||||
results = ccia.main(self.bid, r"this-will-match-nothing")
|
||||
for line in fake_stdout.getvalue().splitlines():
|
||||
with self.subTest(line=line):
|
||||
s_line = line.lower().strip()
|
||||
filepath = line.split(sep="'", maxsplit=3)[1]
|
||||
self.assertRegex(s_line, r"skipping")
|
||||
self.main_result_has(results, filepath, "skipped")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
|
|
@ -1,8 +1,11 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Installs cirrus-ci_env system-wide. NOT intended to be used directly
|
||||
# by humans, should only be used indirectly by running
|
||||
# ../bin/install_automation.sh <ver> cirrus-ci_env
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
source "$AUTOMATION_LIB_PATH/anchors.sh"
|
||||
source "$AUTOMATION_LIB_PATH/console_output.sh"
|
||||
|
||||
|
|
|
@ -102,18 +102,25 @@ class CirrusCfg:
|
|||
for k, v in env.items():
|
||||
if "ENCRYPTED" in str(v):
|
||||
continue
|
||||
elif k == "PATH":
|
||||
# Handled specially by Cirrus, preserve value as-is.
|
||||
def_fmt[k] = str(v)
|
||||
continue
|
||||
_ = def_fmt.dollarcurly_env_var.sub(rep, str(v))
|
||||
def_fmt[k] = def_fmt.dollar_env_var.sub(rep, _)
|
||||
out = dict()
|
||||
for k, v in def_fmt.items():
|
||||
if k in env: # Don't unnecessarily duplicate globals
|
||||
if k == "PATH":
|
||||
out[k] = str(v)
|
||||
continue
|
||||
try:
|
||||
out[k] = str(v).format_map(def_fmt)
|
||||
except ValueError as xcpt:
|
||||
if k == 'matrix':
|
||||
err(f"Unsupported '{k}' key encountered in"
|
||||
f" 'env' attribute of '{CirrusCfg._working}' task")
|
||||
raise(xcpt)
|
||||
raise xcpt
|
||||
return out
|
||||
|
||||
def render_tasks(self, tasks: Mapping[str, Any]) -> Mapping[str, Any]:
|
||||
|
@ -190,18 +197,22 @@ class CirrusCfg:
|
|||
# Order is significant, VMs always override containers
|
||||
if "gce_instance" in item:
|
||||
return "gcevm", item["gce_instance"].get("image_name", default_image)
|
||||
if "ec2_instance" in item:
|
||||
return "ec2vm", item["ec2_instance"].get("image", default_image)
|
||||
elif "osx_instance" in item or "macos_instance" in item:
|
||||
_ = item.get("osx_instance", item.get("macos_instance"))
|
||||
return "osx", _.get("image", default_image)
|
||||
elif "image" in item.get("windows_container", ""):
|
||||
return "wincntnr", item["windows_container"].get("image", default_image)
|
||||
elif "image" in item.get("container", ""):
|
||||
return "container", item["container"].get("image", default_image)
|
||||
elif "dockerfile" in item.get("container", ""):
|
||||
return "dockerfile", item["container"].get("dockerfile", default_image)
|
||||
else:
|
||||
inst_type = None
|
||||
inst_type = "unsupported"
|
||||
if self.global_type is not None:
|
||||
inst_type = default_type
|
||||
inst_image = None
|
||||
inst_image = "unknown"
|
||||
if self.global_image is not None:
|
||||
inst_image = default_image
|
||||
return inst_type, inst_image
|
||||
|
|
|
@ -31,6 +31,7 @@ env:
|
|||
|
||||
# Google-cloud VM Images
|
||||
IMAGE_SUFFIX: "c6524344056676352"
|
||||
FEDORA_AMI_ID: "ami-04f37091c3ec43890"
|
||||
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
|
||||
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
|
||||
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
|
||||
|
@ -61,6 +62,8 @@ timeout_in: 60m
|
|||
|
||||
gcp_credentials: ENCRYPTED[a28959877b2c9c36f151781b0a05407218cda646c7d047fc556e42f55e097e897ab63ee78369dae141dcf0b46a9d0cdd]
|
||||
|
||||
aws_credentials: ENCRYPTED[4ca070bffe28eb9b27d63c568b52970dd46f119c3a83b8e443241e895dbf1737580b4d84eed27a311a2b74287ef9f79f]
|
||||
|
||||
|
||||
# Default/small container image to execute tasks with
|
||||
container: &smallcontainer
|
||||
|
@ -555,6 +558,33 @@ rootless_integration_test_task:
|
|||
always: *int_logs_artifacts
|
||||
|
||||
|
||||
podman_machine_task:
|
||||
name: *std_name_fmt
|
||||
alias: podman_machine
|
||||
# FIXME: Added for speedy-testing
|
||||
only_if: $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*'
|
||||
depends_on:
|
||||
- build
|
||||
- local_integration_test
|
||||
- remote_integration_test
|
||||
- container_integration_test
|
||||
- rootless_integration_test
|
||||
ec2_instance:
|
||||
image: "${VM_IMAGE_NAME}"
|
||||
type: m5zn.metal # Bare-metal instance is required
|
||||
region: us-east-1
|
||||
env:
|
||||
TEST_FLAVOR: "machine"
|
||||
PRIV_NAME: "rootless" # intended use-case
|
||||
DISTRO_NV: "${FEDORA_NAME}"
|
||||
VM_IMAGE_NAME: "${FEDORA_AMI_ID}"
|
||||
clone_script: *noop # Comes from cache
|
||||
gopath_cache: *ro_gopath_cache
|
||||
setup_script: *setup
|
||||
main_script: *main
|
||||
always: *int_logs_artifacts
|
||||
|
||||
|
||||
# Always run subsequent to integration tests. While parallelism is lost
|
||||
# with runtime, debugging system-test failures can be more challenging
|
||||
# for some golang developers. Otherwise the following tasks run across
|
||||
|
@ -690,6 +720,7 @@ success_task:
|
|||
- remote_integration_test
|
||||
- rootless_integration_test
|
||||
- container_integration_test
|
||||
- podman_machine
|
||||
- local_system_test
|
||||
- remote_system_test
|
||||
- rootless_system_test
|
||||
|
@ -701,6 +732,22 @@ success_task:
|
|||
clone_script: *noop
|
||||
script: /bin/true
|
||||
|
||||
win_installer_task:
|
||||
name: "Verify Win Installer Build"
|
||||
alias: win_installer
|
||||
# Don't run for multiarch container image cirrus-cron job.
|
||||
only_if: $CIRRUS_CRON != 'multiarch'
|
||||
depends_on:
|
||||
- alt_build
|
||||
windows_container:
|
||||
image: "cirrusci/windowsservercore:2019"
|
||||
env:
|
||||
PATH: "${PATH};C:\\ProgramData\\chocolatey\\bin"
|
||||
CIRRUS_SHELL: powershell
|
||||
# Fake version, we are only testing the installer functions, so version doesn't matter
|
||||
WIN_INST_VER: 9.9.9
|
||||
install_script: '.\contrib\cirrus\win-installer-install.ps1'
|
||||
main_script: '.\contrib\cirrus\win-installer-main.ps1'
|
||||
|
||||
# When a new tag is pushed, confirm that the code and commits
|
||||
# meet criteria for an official release.
|
||||
|
|
|
@ -25,6 +25,7 @@ Upgrade test: from v2.1.1
|
|||
VM img. keepalive
|
||||
Validate fedora-33 Build
|
||||
Verify Release
|
||||
Verify Win Installer Build
|
||||
Windows Cross
|
||||
compose test on fedora-33
|
||||
int podman fedora-33 root container
|
||||
|
@ -35,6 +36,7 @@ int podman ubuntu-2010 root host
|
|||
int remote fedora-33 root host
|
||||
int remote ubuntu-2004 root host
|
||||
int remote ubuntu-2010 root host
|
||||
machine podman fedora-33 rootless host
|
||||
sys podman fedora-33 root host
|
||||
sys podman fedora-33 rootless host
|
||||
sys podman ubuntu-2004 root host
|
||||
|
|
|
@ -259,6 +259,12 @@ tasks:
|
|||
TEST_FLAVOR: release
|
||||
VM_IMAGE_NAME: fedora-c6524344056676352
|
||||
_BUILD_CACHE_HANDLE: fedora-33-build-${CIRRUS_BUILD_ID}
|
||||
Verify Win Installer Build:
|
||||
alias: win_installer
|
||||
env:
|
||||
PATH: "${PATH};C:\\ProgramData\\chocolatey\\bin"
|
||||
CIRRUS_SHELL: powershell
|
||||
WIN_INST_VER: 9.9.9
|
||||
Windows Cross:
|
||||
alias: alt_build
|
||||
env:
|
||||
|
@ -345,6 +351,14 @@ tasks:
|
|||
TEST_FLAVOR: int
|
||||
VM_IMAGE_NAME: ubuntu-c6524344056676352
|
||||
_BUILD_CACHE_HANDLE: ubuntu-2010-build-${CIRRUS_BUILD_ID}
|
||||
machine podman fedora-33 rootless host:
|
||||
alias: podman_machine
|
||||
env:
|
||||
CTR_FQIN: quay.io/libpod/fedora_podman:c6524344056676352
|
||||
DISTRO_NV: fedora-33
|
||||
TEST_FLAVOR: machine
|
||||
PRIV_NAME: rootless
|
||||
VM_IMAGE_NAME: ami-04f37091c3ec43890
|
||||
sys podman fedora-33 root host:
|
||||
alias: local_system_test
|
||||
env:
|
||||
|
|
|
@ -80,6 +80,9 @@ Validate fedora-33 Build:
|
|||
Verify Release:
|
||||
- gcevm
|
||||
- fedora-c6524344056676352
|
||||
Verify Win Installer Build:
|
||||
- wincntnr
|
||||
- cirrusci/windowsservercore:2019
|
||||
Windows Cross:
|
||||
- gcevm
|
||||
- fedora-c6524344056676352
|
||||
|
@ -110,6 +113,9 @@ int remote ubuntu-2004 root host:
|
|||
int remote ubuntu-2010 root host:
|
||||
- gcevm
|
||||
- ubuntu-c6524344056676352
|
||||
machine podman fedora-33 rootless host:
|
||||
- ec2vm
|
||||
- ami-04f37091c3ec43890
|
||||
sys podman fedora-33 root host:
|
||||
- gcevm
|
||||
- fedora-c6524344056676352
|
||||
|
|
|
@ -290,6 +290,7 @@ class TestCirrusCfg(TestBase):
|
|||
self.assertEqual(len(actual_cfg.tasks), len(expected_ti))
|
||||
actual_ti = {k: [v["inst_type"], v["inst_image"]]
|
||||
for (k, v) in actual_cfg.tasks.items()}
|
||||
self.maxDiff = None # show the full dif
|
||||
self.assertDictEqual(actual_ti, expected_ti)
|
||||
|
||||
|
||||
|
|
|
@ -1,8 +1,11 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Installs cirrus-ci_retrospective system-wide. NOT intended to be used directly
|
||||
# by humans, should only be used indirectly by running
|
||||
# ../bin/install_automation.sh <ver> cirrus-ci_retrospective
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
source "$AUTOMATION_LIB_PATH/anchors.sh"
|
||||
source "$AUTOMATION_LIB_PATH/console_output.sh"
|
||||
|
||||
|
|
|
@ -64,7 +64,7 @@ curl_post() {
|
|||
die "Expecting non-empty data argument"
|
||||
|
||||
[[ -n "$token" ]] || \
|
||||
dbg "### Warning: \$GITHUB_TOKEN is empty, performing unauthenticated query" > /dev/stderr
|
||||
dbg "### Warning: \$GITHUB_TOKEN is empty, performing unauthenticated query" >> /dev/stderr
|
||||
# Don't expose secrets on any command-line
|
||||
local headers_tmpf
|
||||
local headers_tmpf=$(tmpfile headers)
|
||||
|
@ -81,7 +81,7 @@ EOF
|
|||
local curl_cmd="$CURL --silent --request POST --url $url --header @$headers_tmpf --data @$data_tmpf"
|
||||
dbg "### Executing '$curl_cmd'"
|
||||
local ret="0"
|
||||
$curl_cmd > /dev/stdout || ret=$?
|
||||
$curl_cmd >> /dev/stdout || ret=$?
|
||||
|
||||
# Don't leave secrets lying around in files
|
||||
rm -f "$headers_tmpf" "$data_tmpf" &> /dev/null
|
||||
|
@ -99,7 +99,7 @@ filter_json() {
|
|||
dbg "### Validating JSON in '$json_file'"
|
||||
# Confirm input json is valid and make filter problems easier to debug (below)
|
||||
local tmp_json_file=$(tmpfile json)
|
||||
if ! jq . < "$json_file" > "$tmp_json_file"; then
|
||||
if ! jq -e . < "$json_file" > "$tmp_json_file"; then
|
||||
rm -f "$tmp_json_file"
|
||||
# JQ has already shown an error message
|
||||
die "Error from jq relating to JSON: $(cat $json_file)"
|
||||
|
@ -147,11 +147,6 @@ url_query_filter_test() {
|
|||
[[ "$ret" -eq "0" ]] || \
|
||||
die "Curl command exited with non-zero code: $ret"
|
||||
|
||||
if grep -q "error" "$curl_outputf"; then
|
||||
# Barely passable attempt to catch GraphQL query errors
|
||||
die "Found the word 'error' in curl output: $(cat $curl_outputf)"
|
||||
fi
|
||||
|
||||
# Validates both JSON and filter, updates $curl_outputf
|
||||
filter_json "$filter" "$curl_outputf"
|
||||
if [[ -n "$test_args" ]]; then
|
||||
|
|
|
@ -12,7 +12,7 @@ test_cmd "Verify cirrus-ci_retrospective can be installed under $TEMPDIR" \
|
|||
env INSTALL_PREFIX=$TEMPDIR $INSTALL_SCRIPT 0.0.0 github cirrus-ci_retrospective
|
||||
|
||||
test_cmd "Verify executing cirrus-ci_retrospective.sh gives 'Expecting' error message" \
|
||||
2 '::error::.+Expecting' \
|
||||
2 '::error.+Expecting' \
|
||||
env AUTOMATION_LIB_PATH=$TEMPDIR/automation/lib $TEMPDIR/automation/bin/cirrus-ci_retrospective.sh
|
||||
|
||||
trap "rm -rf $TEMPDIR" EXIT
|
||||
|
|
|
@ -45,7 +45,7 @@ for required_var in ${req_env_vars[@]}; do
|
|||
export $required_var="$invalid_value"
|
||||
test_cmd \
|
||||
"Verify exeuction w/ \$$required_var='$invalid_value' (instead of '$valid_value') fails with helpful error message." \
|
||||
2 "::error::.+\\\$$required_var.+'$invalid_value'" \
|
||||
2 "::error.+\\\$$required_var.+'$invalid_value'" \
|
||||
$SUBJ_FILEPATH
|
||||
export $required_var="$valid_value"
|
||||
done
|
||||
|
@ -61,21 +61,21 @@ EOF
|
|||
export GITHUB_EVENT_PATH=$MOCK_EVENT_JSON_FILEPATH
|
||||
|
||||
test_cmd "Verify expected error when fed empty mock event JSON file" \
|
||||
1 "::error::.+check_suite.+key" \
|
||||
1 "::error.+check_suite.+key" \
|
||||
$SUBJ_FILEPATH
|
||||
|
||||
cat << EOF > "$MOCK_EVENT_JSON_FILEPATH"
|
||||
{"check_suite":{}}
|
||||
EOF
|
||||
test_cmd "Verify expected error when fed invalid check_suite value in mock event JSON file" \
|
||||
1 "::error::.+check_suite.+type.+null" \
|
||||
1 "::error.+check_suite.+type.+null" \
|
||||
$SUBJ_FILEPATH
|
||||
|
||||
cat << EOF > "$MOCK_EVENT_JSON_FILEPATH"
|
||||
{"check_suite": {}, "action": "foobar"}
|
||||
EOF
|
||||
test_cmd "Verify error and message containing incorrect value from mock event JSON file" \
|
||||
1 "::error::.+check_suite.+foobar" \
|
||||
1 "::error.+check_suite.+foobar" \
|
||||
$SUBJ_FILEPATH
|
||||
|
||||
cat << EOF > "$MOCK_EVENT_JSON_FILEPATH"
|
||||
|
@ -89,7 +89,7 @@ cat << EOF > "$MOCK_EVENT_JSON_FILEPATH"
|
|||
{"check_suite": {"app":{"id":null}}, "action": "completed"}
|
||||
EOF
|
||||
test_cmd "Verify expected error when 'app' id is wrong type in mock event JSON file" \
|
||||
1 "::error::.+integer.+null" \
|
||||
1 "::error.+integer.+null" \
|
||||
$SUBJ_FILEPATH
|
||||
|
||||
# Must always happen last
|
||||
|
|
|
@ -29,13 +29,34 @@ our $Default_Yml = '.cirrus.yml';
|
|||
# Try to leave one or two greens at the end: these will be used
|
||||
# for terminal nodes (e.g. "success")
|
||||
our @Colors = qw(
|
||||
blue orange red darkgoldenrod firebrick1 orangered4
|
||||
orange red darkgoldenrod firebrick1 orangered4
|
||||
darkturquoise deeppink deepskyblue3 coral dodgerblue
|
||||
bisque2 indigo darkorchid1 palevioletred2 slateblue4
|
||||
cornsilk4 deepskyblue4 navajowhite2
|
||||
slateblue1 yellow4 brown chartreuse seagreen3 darkgreen
|
||||
);
|
||||
|
||||
# Color overrides: use sys/int/etc colors from github-ci-highlight Greasemonkey
|
||||
#
|
||||
# https://github.com/edsantiago/greasemonkey/tree/master/github-ci-highlight
|
||||
#
|
||||
# No sane way to fetch colors automatically, so, just duplicate.
|
||||
our %Color_Override = (
|
||||
# FG BG
|
||||
apiv2 => 'fff:c0c',
|
||||
bud => '000:fc0',
|
||||
compose => '660:fff',
|
||||
integration => '000:960',
|
||||
system => '000:cf9',
|
||||
unit => '000:f99',
|
||||
upgrade => 'f0c:fff',
|
||||
'(?<!image.)build' => '00f:fff',
|
||||
'image.build' => 'f85:fff',
|
||||
validate => '0c0:fff',
|
||||
machine => '330:0ff',
|
||||
success => '000:0f0',
|
||||
);
|
||||
|
||||
# END user-customizable section
|
||||
###############################################################################
|
||||
|
||||
|
@ -176,11 +197,12 @@ sub write_img {
|
|||
|
||||
# Annotate: add signature line at lower left
|
||||
# FIXME: include git repo info?
|
||||
if (grep { -x "$_/convert" } split(":", $ENV{PATH})) {
|
||||
if (grep { -x "$_/magick" } split(":", $ENV{PATH})) {
|
||||
unlink $img_out_tmp;
|
||||
my $signature = strftime("Generated %Y-%m-%dT%H:%M:%S%z by $ME v$VERSION", localtime);
|
||||
my @cmd = (
|
||||
"convert",
|
||||
"magick",
|
||||
$img_out,
|
||||
'-family' => 'Courier',
|
||||
'-pointsize' => '12',
|
||||
# '-style' => 'Normal', # Argh! This gives us Bold!?
|
||||
|
@ -188,7 +210,7 @@ sub write_img {
|
|||
'-fill' => '#000',
|
||||
'-gravity' => 'SouthWest',
|
||||
"-annotate", "+5+5", $signature,
|
||||
"$img_out" => "$img_out_tmp"
|
||||
$img_out_tmp
|
||||
);
|
||||
if (system(@cmd) == 0) {
|
||||
rename $img_out_tmp => $img_out;
|
||||
|
@ -403,18 +425,44 @@ sub _size {
|
|||
}
|
||||
|
||||
##############
|
||||
# _by_size # sort helper, for putting big nodes at bottom
|
||||
# _by_type # sort helper, for clustering int/sys/machine tests
|
||||
##############
|
||||
sub _by_size {
|
||||
_size($a) <=> _size($b) ||
|
||||
$a->{name} cmp $b->{name};
|
||||
sub _by_type {
|
||||
my $ax = $a->{name};
|
||||
my $bx = $b->{name};
|
||||
|
||||
# The big test types, in the order we want to show them
|
||||
my @types = qw(integration system bud machine);
|
||||
my %type_order = map { $types[$_] => $_ } (0..$#types);
|
||||
my $type_re = join('|', @types);
|
||||
|
||||
if ($ax =~ /($type_re)/) {
|
||||
my $a_type = $1;
|
||||
if ($bx =~ /($type_re)/) {
|
||||
my $b_type = $1;
|
||||
|
||||
return $type_order{$a_type} <=> $type_order{$b_type}
|
||||
|| $ax cmp $bx;
|
||||
}
|
||||
else {
|
||||
# e.g., $b is "win installer", $a is in @types, $b < $a
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
elsif ($bx =~ /($type_re)/) {
|
||||
# e.g., $a is "win installer", $b is in @types, $a < $b
|
||||
return -1;
|
||||
}
|
||||
|
||||
# Neither a nor b is in @types
|
||||
$ax cmp $bx;
|
||||
}
|
||||
|
||||
sub depended_on_by {
|
||||
my $self = shift;
|
||||
|
||||
if (my $d = $self->{_depended_on_by}) {
|
||||
my @d = sort _by_size map { $self->{_tasklist}->find($_) } @$d;
|
||||
my @d = sort _by_type map { $self->{_tasklist}->find($_) } @$d;
|
||||
return @d;
|
||||
}
|
||||
return;
|
||||
|
@ -427,10 +475,20 @@ sub subtasks {
|
|||
my @subtasks;
|
||||
if (my $m = $self->{yml}{matrix}) {
|
||||
for my $item (@$m) {
|
||||
my $name = $self->_expand_matrix_name( $item );
|
||||
my $name = $self->_expand_name( $item );
|
||||
push @subtasks, "- " . $name . '\l';
|
||||
}
|
||||
}
|
||||
elsif (my $name = $self->{yml}{name}) {
|
||||
if ($name =~ /\$/) {
|
||||
# A name with dollars, like "$TEST_FLAVOR $PODBIN $DISTRO_NV etc",
|
||||
# is worth a box entry showing that expansion. This will be only
|
||||
# one line (as opposed to one or more for matrix stanzas) but
|
||||
# the expansion is still useful so reader can know what arch
|
||||
# and OS this is running on.
|
||||
push @subtasks, '= ' . $self->_expand_name( $name ) . '\l';
|
||||
}
|
||||
}
|
||||
|
||||
return @subtasks;
|
||||
}
|
||||
|
@ -463,9 +521,13 @@ sub env_matrix {
|
|||
}
|
||||
|
||||
|
||||
sub _expand_matrix_name {
|
||||
##################
|
||||
# _expand_name # Iteratively expand $FOO or ${FOO} or a matrix name
|
||||
##################
|
||||
sub _expand_name {
|
||||
my $self = shift;
|
||||
my $matrix_item = shift;
|
||||
my $item = shift;
|
||||
my $name;
|
||||
|
||||
# Environment: start with top-level env defined for entire yml file
|
||||
my %env;
|
||||
|
@ -478,18 +540,26 @@ sub _expand_matrix_name {
|
|||
%env = (%env, %$env);
|
||||
}
|
||||
|
||||
# ...then finally with env in the matrix
|
||||
if (my $m_env = $matrix_item->{env}) {
|
||||
%env = (%env, %$m_env);
|
||||
# ...then finally, if this is a matrix item, with its env
|
||||
if ((ref($item)||'') eq 'HASH') {
|
||||
if (my $m_env = $item->{env}) {
|
||||
%env = (%env, %$m_env);
|
||||
}
|
||||
$name = $item->{name};
|
||||
}
|
||||
|
||||
my $name = $matrix_item->{name} || $self->{yml}{name} || $self->name || '?';
|
||||
$name //= $self->{yml}{name} || $self->name || '?';
|
||||
|
||||
# FIXME: need to clean this up!
|
||||
$name =~ s/\$\{(.*?)\}/$env{$1} || "\$$1"/ge;
|
||||
$name =~ s/\$([A-Z_]+)/$env{$1} || "\$$1"/ge;
|
||||
$name =~ s/\$\{(.*?)\}/$env{$1} || "\$$1"/ge; # and again with curlies
|
||||
$name =~ s/\$([A-Z_]+)/$env{$1} || "\$$1"/ge; # and again without
|
||||
while ($name =~ /\$/) {
|
||||
my $name_old = $name;
|
||||
|
||||
$name =~ s/\$\{(.*?)\}/$env{$1} || "\$$1"/ge;
|
||||
$name =~ s/\$([A-Z_]+)/$env{$1} || "\$$1"/ge;
|
||||
|
||||
# Don't infinite-loop
|
||||
last if $name_old eq $name;
|
||||
print "$name_old -> $name\n" if $debug;
|
||||
}
|
||||
|
||||
return $name;
|
||||
}
|
||||
|
@ -668,13 +738,27 @@ sub _draw_boxes {
|
|||
my $node = $task->{name};
|
||||
return if $self->{_gv}{done}{$node}++;
|
||||
|
||||
# Terminal nodes: pop from the end of the color list (expect greens)
|
||||
my $color;
|
||||
if (! $task->depended_on_by) {
|
||||
$color = pop @{$self->{_gv}{colors}};
|
||||
my $fill = '';
|
||||
for my $term (sort keys %Color_Override) {
|
||||
if ($node =~ /(^|_)${term}(_|$)/) {
|
||||
my ($fg, $bg) = split ':', $Color_Override{$term};
|
||||
$fg =~ s/(.)/${1}0/g;
|
||||
$bg =~ s/(.)/${1}0/g;
|
||||
$color = qq{"#$fg\"};
|
||||
$fill = qq{ fillcolor="#$bg" style=filled};
|
||||
last;
|
||||
}
|
||||
}
|
||||
else {
|
||||
$color = shift @{$self->{_gv}{colors}};
|
||||
|
||||
# Terminal nodes: pop from the end of the color list (expect greens)
|
||||
if (! $color) {
|
||||
if (! $task->depended_on_by) {
|
||||
$color = pop @{$self->{_gv}{colors}};
|
||||
}
|
||||
else {
|
||||
$color = shift @{$self->{_gv}{colors}};
|
||||
}
|
||||
}
|
||||
if (! $color) {
|
||||
warn "$ME: Ran out of colors\n";
|
||||
|
@ -698,12 +782,16 @@ sub _draw_boxes {
|
|||
if (my $only_if = $task->{yml}{only_if}) {
|
||||
$shape = 'record';
|
||||
$label .= '|' if $label;
|
||||
if ($only_if =~ /CI:DOCS.*CI:BUILD/) {
|
||||
$label .= "[SKIP: CI:BUILD]\\l[SKIP: CI:DOCS]\\l";
|
||||
}
|
||||
elsif ($only_if =~ /CI:DOCS/) {
|
||||
$label .= "[SKIP: CI:DOCS]\\l";
|
||||
|
||||
# Collapse whitespace, and remove leading/trailing
|
||||
$only_if =~ s/[\s\n]+/ /g;
|
||||
$only_if =~ s/^\s+|\s+$//g;
|
||||
|
||||
# 2024-06-18 Paul CI skips
|
||||
if ($only_if =~ m{\$CIRRUS_PR\s+==\s+''\s+.*\$CIRRUS_CHANGE_TITLE.*CI:ALL.*changesInclude.*test}) {
|
||||
$label .= "[SKIP if not needed]";
|
||||
}
|
||||
|
||||
# 2020-10 used in automation_images repo
|
||||
elsif ($only_if eq q{$CIRRUS_PR != ''}) {
|
||||
$label .= "[only if PR]";
|
||||
|
@ -712,6 +800,20 @@ sub _draw_boxes {
|
|||
elsif ($only_if eq q{$CIRRUS_PR == '' && $CIRRUS_CRON != ''}) {
|
||||
$label .= "[only if cron]";
|
||||
}
|
||||
# 2022-09
|
||||
elsif ($only_if eq q{$CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*'}) {
|
||||
$label .= "[only if PR + CI:BUILD]";
|
||||
}
|
||||
elsif ($only_if eq q{${CIRRUS_CRON} == 'main'}) {
|
||||
$label .= "[only if cron on main]";
|
||||
}
|
||||
# 2022-09
|
||||
elsif ($only_if eq q{$CIRRUS_CRON == 'multiarch'}) {
|
||||
$label .= "[only if cron multiarch]";
|
||||
}
|
||||
elsif ($only_if eq q{$CIRRUS_CRON != 'multiarch'}) {
|
||||
$label .= "[SKIP: cron multiarch]";
|
||||
}
|
||||
# used in podman
|
||||
elsif ($only_if eq q{$CIRRUS_TAG != ''}) {
|
||||
$label .= "[only if tag]";
|
||||
|
@ -720,19 +822,75 @@ sub _draw_boxes {
|
|||
elsif ($only_if =~ /CIRRUS_CHANGE.*release.*bump/i) {
|
||||
$label .= "[only on release PR]";
|
||||
}
|
||||
# swagger
|
||||
elsif ($only_if =~ /CIRRUS_CHANGE_TITLE.*CI:BUILD.*CIRRUS_CRON.*multiarch/) {
|
||||
$label .= "[SKIP: CI:BUILD or cron-multiarch]";
|
||||
}
|
||||
# buildah-bud rootless is only run in nightly treadmill
|
||||
elsif ($only_if =~ /\$CIRRUS_CRON\s+==\s+'treadmill'/) {
|
||||
$label .= "[only on cron treadmill]";
|
||||
}
|
||||
# "bench stuff" job: Only run on merge and never for cirrus-cron.
|
||||
elsif ($only_if =~ /CIRRUS_BRANCH\s+==\s+'main'\s+&&\s+\$CIRRUS_CRON\s+==\s+''/) {
|
||||
$label .= "[only on merge]";
|
||||
}
|
||||
elsif ($only_if =~ /CIRRUS_BRANCH\s+!=~\s+'v.*-rhel'\s+&&\s+\$CIRRUS_BASE_BRANCH\s+!=~\s+'v.*-rhel'/) {
|
||||
$label .= "[only if no RHEL release]";
|
||||
}
|
||||
elsif ($only_if =~ /CIRRUS_CHANGE_TITLE.*CI:BUILD.*CIRRUS_CHANGE_TITLE.*CI:MACHINE/s) {
|
||||
$label .= "[SKIP: CI:BUILD or CI:MACHINE]";
|
||||
}
|
||||
elsif ($only_if =~ /CIRRUS_CHANGE_TITLE\s+!=.*CI:MACHINE.*CIRRUS_BRANCH.*main.*CIRRUS_BASE_BRANCH.*main.*\)/s) {
|
||||
$label .= "[only if: main]";
|
||||
}
|
||||
|
||||
# automation_images
|
||||
elsif ($only_if eq q{$CIRRUS_CRON == '' && $CIRRUS_BRANCH == $CIRRUS_DEFAULT_BRANCH}) {
|
||||
$label .= "[only if DEFAULT_BRANCH and not cron]";
|
||||
}
|
||||
elsif ($only_if eq q{$CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*no_build-push.*"}) {
|
||||
$label .= "[only if PR, but not no_build-push]";
|
||||
}
|
||||
elsif ($only_if eq q{$CIRRUS_CRON == 'lifecycle'}) {
|
||||
$label .= "[only on cron=lifecycle]";
|
||||
}
|
||||
else {
|
||||
warn "$ME: unexpected only_if: $only_if\n";
|
||||
$label .= "[only if: $only_if]";
|
||||
}
|
||||
}
|
||||
|
||||
# Special case for manual (or other??) trigger type
|
||||
my $trigger = '';
|
||||
if (my $t = $task->{yml}{trigger_type}) {
|
||||
$trigger = "\\l(TRIGGER: " . uc($t) . ")";
|
||||
}
|
||||
|
||||
# Special cases (also hardcoded) for tasks that are skipped.
|
||||
if (my $skip = $task->{yml}{skip}) {
|
||||
$shape = 'record';
|
||||
$label .= '|' if $label && $label !~ /SKIP/;
|
||||
|
||||
# Collapse whitespace, and remove leading/trailing
|
||||
$skip =~ s/[\s\n]+/ /g;
|
||||
$skip =~ s/^\s+|\s+$//g;
|
||||
|
||||
my @reasons;
|
||||
push @reasons, 'BRANCH','TAG' if $skip =~ /CIRRUS_PR.*CIRRUS_TAG/;
|
||||
push @reasons, 'TAG' if $skip eq q{$CIRRUS_TAG != ''};
|
||||
push @reasons, 'CI:DOCS' if $skip =~ /CI:DOCS/;
|
||||
|
||||
# automation_images
|
||||
if ($skip eq q{$CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*' || $CIRRUS_CHANGE_TITLE =~ '.*CI:TOOLING.*'}) {
|
||||
push @reasons, "CI:DOCS or CI:TOOLING";
|
||||
}
|
||||
elsif ($skip eq q{$CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*'}) {
|
||||
push @reasons, "CI:DOCS";
|
||||
}
|
||||
elsif ($skip eq '$CI == $CI') {
|
||||
push @reasons, "DISABLED MANUALLY";
|
||||
}
|
||||
elsif ($skip) {
|
||||
warn "$ME: unexpected skip '$skip'\n";
|
||||
}
|
||||
|
||||
if (@reasons) {
|
||||
$label .= join('', map { "[SKIP: $_]\\l" } @reasons);
|
||||
}
|
||||
|
@ -741,12 +899,23 @@ sub _draw_boxes {
|
|||
}
|
||||
}
|
||||
|
||||
$self->{_gv}{dot} .= " \"$node\" [shape=$shape style=bold color=$color fontcolor=$color";
|
||||
$self->{_gv}{dot} .= " label=\"$node\\l\|$label\"" if $label;
|
||||
$self->{_gv}{dot} .= " \"$node\" [shape=$shape style=bold color=$color$fill fontcolor=$color";
|
||||
if ($label) {
|
||||
(my $nodename = $node) =~ s/_/ /g;
|
||||
$self->{_gv}{dot} .= " label=\"$nodename$trigger\\l\|$label\"";
|
||||
}
|
||||
$self->{_gv}{dot} .= "]\n";
|
||||
|
||||
for my $dep ($task->depended_on_by) {
|
||||
$self->{_gv}{dot} .= " \"$node\" -> \"$dep->{name}\" [color=$color]\n";
|
||||
my $c = $color;
|
||||
# For custom-override boxes, when FG is black or very light, use
|
||||
# background color for arrow.
|
||||
if ($c =~ /000000/ || $c =~ /f.f.f./) {
|
||||
if ($fill =~ /\"#([0-9a-f]{6})\"/) {
|
||||
$c = qq{"#$1"};
|
||||
}
|
||||
}
|
||||
$self->{_gv}{dot} .= " \"$node\" -> \"$dep->{name}\" [color=$c]\n";
|
||||
$self->_draw_boxes($dep);
|
||||
}
|
||||
}
|
||||
|
@ -798,7 +967,11 @@ sub _mergekeys
|
|||
foreach my $inherit (@$inherits)
|
||||
{
|
||||
$inherit = _mergekeys($inherit, $resolveStack);
|
||||
%$ref = (%$inherit, %$ref);
|
||||
|
||||
# ** changed by esm **: shallow hash merge fails for
|
||||
# remote_sys_aarch64 (as of 2022-11) because it just <<'s
|
||||
# the entire local_sys_aarch64 including its env hash
|
||||
deepmerge($ref, $inherit);
|
||||
}
|
||||
delete $ref->{'<<'};
|
||||
}
|
||||
|
@ -816,6 +989,79 @@ sub _mergekeys
|
|||
return $ref;
|
||||
}
|
||||
|
||||
|
||||
###############
|
||||
# deepmerge # deep recursive merge for hashes; needed for cirrus matrices
|
||||
###############
|
||||
sub deepmerge {
|
||||
my ($ref, $inherit) = @_;
|
||||
|
||||
for my $k (keys %$inherit) {
|
||||
my $r_ref = ref($ref->{$k}) || '';
|
||||
my $i_ref = ref($inherit->{$k}) || '';
|
||||
|
||||
if ($i_ref eq 'HASH') {
|
||||
# Two hashes
|
||||
deepmerge($ref->{$k}, $inherit->{$k});
|
||||
}
|
||||
elsif ($i_ref eq 'ARRAY') {
|
||||
# Two arrays; this is how .cirrus.yml does matrix env settings
|
||||
$ref->{$k} //= [];
|
||||
for my $element (@{$inherit->{$k}}) {
|
||||
my $e_ref = ref($element) || '';
|
||||
if ($e_ref eq 'HASH') {
|
||||
# The only situation we handle is a hashref with one
|
||||
# key named 'env', whose value is a hash. If that ever
|
||||
# changes, deal with it then.
|
||||
my $e_formatted = format_env($element);
|
||||
|
||||
my $found;
|
||||
for my $in_k (@{$ref->{$k}}) {
|
||||
$found ||= (format_env($in_k) eq $e_formatted);
|
||||
}
|
||||
push @{$ref->{$k}}, $element unless $found;
|
||||
}
|
||||
elsif ($e_ref eq 'ARRAY') {
|
||||
die "FIXME, deepmerge cannot handle arrays of arrays";
|
||||
}
|
||||
elsif (! grep { $_ eq $element } @{$ref->{$k}}) {
|
||||
# ref is an array, but element is a scalar
|
||||
push @{$ref->{$k}}, $element;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
# i is scalar
|
||||
# 2023-04-23 do not override existing values! Anchors are used
|
||||
# only for filling in defaults. Anything explicitly set in
|
||||
# the YAML block is what we really want.
|
||||
$ref->{$k} //= $inherit->{$k};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
################
|
||||
# format_env # Return an easily-compared string based on a hashref
|
||||
################
|
||||
sub format_env {
|
||||
my $href = shift;
|
||||
|
||||
# href must be: { env => { foo => "bar", ... } }
|
||||
ref($href) eq 'HASH'
|
||||
or die "$ME: Internal error: format_env(): arg is not a hash";
|
||||
exists $href->{env}
|
||||
or die "$ME: Internal error: format_env(): arg does not have 'env' key";
|
||||
ref($href->{env}) eq 'HASH'
|
||||
or die "$ME: Internal error: format_env(): arg->{env} is not a hash";
|
||||
keys(%{$href}) == 1
|
||||
or die "$ME: Internal error: format_env(): %{arg} has too many keys";
|
||||
|
||||
join("--", map {
|
||||
sprintf("%s=%s", $_, $href->{env}{$_})
|
||||
} sort keys %{$href->{env}});
|
||||
}
|
||||
|
||||
|
||||
# END omg kludge for dealing with anchors
|
||||
###############################################################################
|
||||
|
||||
|
|
|
@ -90,14 +90,14 @@ end_task:
|
|||
- "middle_2"
|
||||
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
||||
"real_name_of_initial" [shape=ellipse style=bold color=a fontcolor=a]
|
||||
"real_name_of_initial" -> "end" [color=a]
|
||||
"end" [shape=ellipse style=bold color=z fontcolor=z]
|
||||
"real_name_of_initial" -> "middle_1" [color=a]
|
||||
"middle_1" [shape=ellipse style=bold color=b fontcolor=b]
|
||||
"middle_1" -> "end" [color=b]
|
||||
"end" [shape=ellipse style=bold color=z fontcolor=z]
|
||||
"real_name_of_initial" -> "middle_2" [color=a]
|
||||
"middle_2" [shape=ellipse style=bold color=c fontcolor=c]
|
||||
"middle_2" -> "end" [color=c]
|
||||
"real_name_of_initial" -> "end" [color=a]
|
||||
|
||||
<<<<<<<<<<<<<<<<<< env interpolation 1
|
||||
env:
|
||||
|
@ -510,41 +510,41 @@ success_task:
|
|||
|
||||
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
||||
"automation" [shape=ellipse style=bold color=a fontcolor=a]
|
||||
"automation" -> "success" [color=a]
|
||||
"success" [shape=ellipse style=bold color=z fontcolor=z]
|
||||
"automation" -> "build" [color=a]
|
||||
"build" [shape=record style=bold color=b fontcolor=b label="build\l|- Build for fedora-32\l- Build for fedora-31\l- Build for ubuntu-20\l- Build for ubuntu-19\l"]
|
||||
"build" -> "bindings" [color=b]
|
||||
"bindings" [shape=ellipse style=bold color=c fontcolor=c]
|
||||
"bindings" -> "success" [color=c]
|
||||
"build" -> "docker-py_test" [color=b]
|
||||
"docker-py_test" [shape=ellipse style=bold color=d fontcolor=d]
|
||||
"docker-py_test" -> "success" [color=d]
|
||||
"build" -> "endpoint" [color=b]
|
||||
"endpoint" [shape=ellipse style=bold color=e fontcolor=e]
|
||||
"endpoint" -> "success" [color=e]
|
||||
"build" -> "osx_cross" [color=b]
|
||||
"osx_cross" [shape=ellipse style=bold color=f fontcolor=f]
|
||||
"osx_cross" -> "success" [color=f]
|
||||
"build" -> "swagger" [color=b]
|
||||
"swagger" [shape=ellipse style=bold color=g fontcolor=g]
|
||||
"swagger" -> "success" [color=g]
|
||||
"build" -> "validate" [color=b]
|
||||
"validate" [shape=ellipse style=bold color=h fontcolor=h]
|
||||
"validate" -> "success" [color=h]
|
||||
"build" -> "vendor" [color=b]
|
||||
"vendor" [shape=ellipse style=bold color=i fontcolor=i]
|
||||
"vendor" -> "success" [color=i]
|
||||
"build" -> "unit_test" [color=b]
|
||||
"unit_test" [shape=record style=bold color=j fontcolor=j label="unit_test\l|- Unit tests on fedora-32\l- Unit tests on fedora-31\l- Unit tests on ubuntu-20\l- Unit tests on ubuntu-19\l"]
|
||||
"unit_test" -> "success" [color=j]
|
||||
"build" -> "alt_build" [color=b]
|
||||
"alt_build" [shape=record style=bold color=k fontcolor=k label="alt_build\l|- Build Each Commit\l- Windows Cross\l- Build Without CGO\l- Build varlink API\l- Static build\l- Test build RPM\l"]
|
||||
"alt_build" -> "success" [color=k]
|
||||
"build" -> "success" [color=b]
|
||||
"ext_svc_check" [shape=ellipse style=bold color=l fontcolor=l]
|
||||
"ext_svc_check" -> "success" [color=l]
|
||||
"ext_svc_check" -> "build" [color=l]
|
||||
"smoke" [shape=ellipse style=bold color=m fontcolor=m]
|
||||
"smoke" -> "success" [color=m]
|
||||
"smoke" -> "build" [color=m]
|
||||
"build" [shape=record style=bold color="#0000f0" fillcolor="#f0f0f0" style=filled fontcolor="#0000f0" label="build\l|- Build for fedora-32\l- Build for fedora-31\l- Build for ubuntu-20\l- Build for ubuntu-19\l"]
|
||||
"build" -> "alt_build" [color="#0000f0"]
|
||||
"alt_build" [shape=record style=bold color="#0000f0" fillcolor="#f0f0f0" style=filled fontcolor="#0000f0" label="alt build\l|- Build Each Commit\l- Windows Cross\l- Build Without CGO\l- Build varlink API\l- Static build\l- Test build RPM\l"]
|
||||
"alt_build" -> "success" [color="#0000f0"]
|
||||
"success" [shape=ellipse style=bold color="#000000" fillcolor="#00f000" style=filled fontcolor="#000000"]
|
||||
"build" -> "bindings" [color="#0000f0"]
|
||||
"bindings" [shape=ellipse style=bold color=b fontcolor=b]
|
||||
"bindings" -> "success" [color=b]
|
||||
"build" -> "docker-py_test" [color="#0000f0"]
|
||||
"docker-py_test" [shape=ellipse style=bold color=c fontcolor=c]
|
||||
"docker-py_test" -> "success" [color=c]
|
||||
"build" -> "endpoint" [color="#0000f0"]
|
||||
"endpoint" [shape=ellipse style=bold color=d fontcolor=d]
|
||||
"endpoint" -> "success" [color=d]
|
||||
"build" -> "osx_cross" [color="#0000f0"]
|
||||
"osx_cross" [shape=ellipse style=bold color=e fontcolor=e]
|
||||
"osx_cross" -> "success" [color=e]
|
||||
"build" -> "success" [color="#0000f0"]
|
||||
"build" -> "swagger" [color="#0000f0"]
|
||||
"swagger" [shape=ellipse style=bold color=f fontcolor=f]
|
||||
"swagger" -> "success" [color=f]
|
||||
"build" -> "unit_test" [color="#0000f0"]
|
||||
"unit_test" [shape=record style=bold color="#000000" fillcolor="#f09090" style=filled fontcolor="#000000" label="unit test\l|- Unit tests on fedora-32\l- Unit tests on fedora-31\l- Unit tests on ubuntu-20\l- Unit tests on ubuntu-19\l"]
|
||||
"unit_test" -> "success" [color="#f09090"]
|
||||
"build" -> "validate" [color="#0000f0"]
|
||||
"validate" [shape=record style=bold color="#00c000" fillcolor="#f0f0f0" style=filled fontcolor="#00c000" label="validate\l|= Validate fedora-32 Build\l"]
|
||||
"validate" -> "success" [color="#00c000"]
|
||||
"build" -> "vendor" [color="#0000f0"]
|
||||
"vendor" [shape=ellipse style=bold color=g fontcolor=g]
|
||||
"vendor" -> "success" [color=g]
|
||||
"automation" -> "success" [color=a]
|
||||
"ext_svc_check" [shape=ellipse style=bold color=h fontcolor=h]
|
||||
"ext_svc_check" -> "build" [color=h]
|
||||
"ext_svc_check" -> "success" [color=h]
|
||||
"smoke" [shape=ellipse style=bold color=i fontcolor=i]
|
||||
"smoke" -> "build" [color=i]
|
||||
"smoke" -> "success" [color=i]
|
||||
|
|
|
@ -10,7 +10,7 @@ set -eo pipefail
|
|||
SCRIPT_BASEDIR="$(basename $0)"
|
||||
|
||||
badusage() {
|
||||
echo "Incorrect usage: $SCRIPT_BASEDIR) <command> [options]" > /dev/stderr
|
||||
echo "Incorrect usage: $SCRIPT_BASEDIR) <command> [options]" >> /dev/stderr
|
||||
echo "ERROR: $1"
|
||||
exit 121
|
||||
}
|
||||
|
|
|
@ -28,7 +28,7 @@ automation_version() {
|
|||
if [[ -n "$_avcache" ]]; then
|
||||
echo "$_avcache"
|
||||
else
|
||||
echo "Error determining version number" > /dev/stderr
|
||||
echo "Error determining version number" >> /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
# A Library of contextual console output-related operations.
|
||||
# Intended for use by other scripts, not to be executed directly.
|
||||
|
||||
# shellcheck source=common/lib/defaults.sh
|
||||
source $(dirname $(realpath "${BASH_SOURCE[0]}"))/defaults.sh
|
||||
|
||||
# helper, not intended for use outside this file
|
||||
|
@ -10,10 +11,11 @@ _rel_path() {
|
|||
if [[ -z "$1" ]]; then
|
||||
echo "<stdin>"
|
||||
else
|
||||
local abs_path=$(realpath "$1")
|
||||
local rel_path=$(realpath --relative-to=. $abs_path)
|
||||
local abs_path_len=${#abs_path}
|
||||
local rel_path_len=${#rel_path}
|
||||
local abs_path rel_path abs_path_len rel_path_len
|
||||
abs_path=$(realpath "$1")
|
||||
rel_path=$(realpath --relative-to=. $abs_path)
|
||||
abs_path_len=${#abs_path}
|
||||
rel_path_len=${#rel_path}
|
||||
if ((abs_path_len <= rel_path_len)); then
|
||||
echo "$abs_path"
|
||||
else
|
||||
|
@ -24,9 +26,10 @@ _rel_path() {
|
|||
|
||||
# helper, not intended for use outside this file
|
||||
_ctx() {
|
||||
local shortest_source_path grandparent_func
|
||||
# Caller's caller details
|
||||
local shortest_source_path=$(_rel_path "${BASH_SOURCE[3]}")
|
||||
local grandparent_func="${FUNCNAME[2]}"
|
||||
shortest_source_path=$(_rel_path "${BASH_SOURCE[3]}")
|
||||
grandparent_func="${FUNCNAME[2]}"
|
||||
[[ -n "$grandparent_func" ]] || \
|
||||
grandparent_func="main"
|
||||
echo "$shortest_source_path:${BASH_LINENO[2]} in ${FUNCNAME[3]}()"
|
||||
|
@ -34,9 +37,10 @@ _ctx() {
|
|||
|
||||
# helper, not intended for use outside this file.
|
||||
_fmt_ctx() {
|
||||
local stars="************************************************"
|
||||
local prefix="${1:-no prefix given}"
|
||||
local message="${2:-no message given}"
|
||||
local stars prefix message
|
||||
stars="************************************************"
|
||||
prefix="${1:-no prefix given}"
|
||||
message="${2:-no message given}"
|
||||
echo "$stars"
|
||||
echo "$prefix ($(_ctx))"
|
||||
echo "$stars"
|
||||
|
@ -44,37 +48,40 @@ _fmt_ctx() {
|
|||
|
||||
# Print a highly-visible message to stderr. Usage: warn <msg>
|
||||
warn() {
|
||||
_fmt_ctx "$WARNING_MSG_PREFIX ${1:-no warning message given}" > /dev/stderr
|
||||
_fmt_ctx "$WARNING_MSG_PREFIX ${1:-no warning message given}" >> /dev/stderr
|
||||
}
|
||||
|
||||
# Same as warn() but exit non-zero or with given exit code
|
||||
# usage: die <msg> [exit-code]
|
||||
die() {
|
||||
_fmt_ctx "$ERROR_MSG_PREFIX ${1:-no error message given}" > /dev/stderr
|
||||
_fmt_ctx "$ERROR_MSG_PREFIX ${1:-no error message given}" >> /dev/stderr
|
||||
local exit_code=${2:-1}
|
||||
((exit_code==0)) || \
|
||||
exit $exit_code
|
||||
}
|
||||
|
||||
dbg() {
|
||||
local shortest_source_path
|
||||
if ((A_DEBUG)); then
|
||||
local shortest_source_path=$(_rel_path "${BASH_SOURCE[1]}")
|
||||
shortest_source_path=$(_rel_path "${BASH_SOURCE[1]}")
|
||||
(
|
||||
echo
|
||||
echo "$DEBUG_MSG_PREFIX ${1:-No debugging message given} ($shortest_source_path:${BASH_LINENO[0]} in ${FUNCNAME[1]}())"
|
||||
) > /dev/stderr
|
||||
) >> /dev/stderr
|
||||
fi
|
||||
}
|
||||
|
||||
msg() {
|
||||
echo "${1:-No message specified}" &> /dev/stderr
|
||||
echo "${1:-No message specified}" &>> /dev/stderr
|
||||
}
|
||||
|
||||
# Mimic set +x for a single command, along with calling location and line.
|
||||
showrun() {
|
||||
local -a context
|
||||
# Tried using readarray, it broke tests for some reason, too lazy to investigate.
|
||||
# shellcheck disable=SC2207
|
||||
context=($(caller 0))
|
||||
echo "+ $@ # ${context[2]}:${context[0]} in ${context[1]}()" > /dev/stderr
|
||||
echo "+ $* # ${context[2]}:${context[0]} in ${context[1]}()" >> /dev/stderr
|
||||
"$@"
|
||||
}
|
||||
|
||||
|
@ -109,7 +116,7 @@ show_env_vars() {
|
|||
warn "The \$SECRET_ENV_RE var. unset/empty: Not filtering sensitive names!"
|
||||
fi
|
||||
|
||||
for env_var_name in $(awk 'BEGIN{for(v in ENVIRON) print v}' | grep -Eiv "$filter_rx" | sort -u); do
|
||||
for env_var_name in $(awk 'BEGIN{for(v in ENVIRON) print v}' | grep -Eiv "$filter_rx" | sort); do
|
||||
|
||||
line="${env_var_name}=${!env_var_name}"
|
||||
msg " $line"
|
||||
|
|
|
@ -16,8 +16,3 @@ A_DEBUG=${A_DEBUG:-0}
|
|||
DEBUG_MSG_PREFIX="${DEBUG_MSG_PREFIX:-DEBUG:}"
|
||||
WARNING_MSG_PREFIX="${WARNING_MSG_PREFIX:-WARNING:}"
|
||||
ERROR_MSG_PREFIX="${ERROR_MSG_PREFIX:-ERROR:}"
|
||||
|
||||
# When non-empty, should contain a regular expression that matches
|
||||
# any known or potential env. vars containing secrests or other
|
||||
# sensitive values. For example `(.+PASSWORD.*)|(.+SECRET.*)|(.+TOKEN.*)`
|
||||
SECRET_ENV_RE=''
|
||||
|
|
|
@ -2,16 +2,94 @@
|
|||
# Library of os/platform related definitions and functions
|
||||
# Not intended to be executed directly
|
||||
|
||||
OS_RELEASE_VER="$(source /etc/os-release; echo $VERSION_ID | tr -d '.')"
|
||||
OS_RELEASE_ID="$(source /etc/os-release; echo $ID)"
|
||||
OS_REL_VER="$OS_RELEASE_ID-$OS_RELEASE_VER"
|
||||
OS_RELEASE_VER="${OS_RELEASE_VER:-$(source /etc/os-release; echo $VERSION_ID | tr -d '.')}"
|
||||
OS_RELEASE_ID="${OS_RELEASE_ID:-$(source /etc/os-release; echo $ID)}"
|
||||
OS_REL_VER="${OS_REL_VER:-$OS_RELEASE_ID-$OS_RELEASE_VER}"
|
||||
|
||||
SUDO=""
|
||||
if [[ "$UID" -ne 0 ]]; then
|
||||
SUDO="sudo"
|
||||
# Ensure no user-input prompts in an automation context
|
||||
export DEBIAN_FRONTEND="${DEBIAN_FRONTEND:-noninteractive}"
|
||||
# _TEST_UID only needed for unit-testing
|
||||
# shellcheck disable=SC2154
|
||||
if ((UID)) || ((_TEST_UID)); then
|
||||
SUDO="${SUDO:-sudo}"
|
||||
if [[ "$OS_RELEASE_ID" =~ (ubuntu)|(debian) ]]; then
|
||||
if [[ ! "$SUDO" =~ noninteractive ]]; then
|
||||
SUDO="$SUDO env DEBIAN_FRONTEND=$DEBIAN_FRONTEND"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
# Regex defining all CI-related env. vars. necessary for all possible
|
||||
# testing operations on all platforms and versions. This is necessary
|
||||
# to avoid needlessly passing through global/system values across
|
||||
# contexts, such as host->container or root->rootless user
|
||||
#
|
||||
# List of envariables which must be EXACT matches
|
||||
PASSTHROUGH_ENV_EXACT="${PASSTHROUGH_ENV_EXACT:-DEST_BRANCH|IMAGE_SUFFIX|DISTRO_NV|SCRIPT_BASE}"
|
||||
|
||||
if [[ "$OS_RELEASE_ID" == "ubuntu" ]]; then
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
SUDO="$SUDO env DEBIAN_FRONTEND=$DEBIAN_FRONTEND"
|
||||
fi
|
||||
# List of envariable patterns which must match AT THE BEGINNING of the name.
|
||||
PASSTHROUGH_ENV_ATSTART="${PASSTHROUGH_ENV_ATSTART:-CI|TEST}"
|
||||
|
||||
# List of envariable patterns which can match ANYWHERE in the name
|
||||
PASSTHROUGH_ENV_ANYWHERE="${PASSTHROUGH_ENV_ANYWHERE:-_NAME|_FQIN}"
|
||||
|
||||
# List of expressions to exclude env. vars for security reasons
|
||||
SECRET_ENV_RE="${SECRET_ENV_RE:-(^PATH$)|(^BASH_FUNC)|(^_.*)|(.*PASSWORD.*)|(.*TOKEN.*)|(.*SECRET.*)}"
|
||||
|
||||
# Return a list of environment variables that should be passed through
|
||||
# to lower levels (tests in containers, or via ssh to rootless).
|
||||
# We return the variable names only, not their values. It is up to our
|
||||
# caller to reference values.
|
||||
passthrough_envars() {
|
||||
local passthrough_env_re="(^($PASSTHROUGH_ENV_EXACT)\$)|(^($PASSTHROUGH_ENV_ATSTART))|($PASSTHROUGH_ENV_ANYWHERE)"
|
||||
local envar
|
||||
|
||||
for envar in SECRET_ENV_RE PASSTHROUGH_ENV_EXACT PASSTHROUGH_ENV_ATSTART PASSTHROUGH_ENV_ANYWHERE passthrough_env_re; do
|
||||
if [[ -z "${!envar}" ]]; then
|
||||
echo "Error: Required env. var. \$$envar is unset or empty in call to passthrough_envars()" >> /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Warning: Will pass env. vars. matching the following regex:
|
||||
$passthrough_env_re" >> /dev/stderr
|
||||
|
||||
compgen -A variable | grep -Ev "$SECRET_ENV_RE" | grep -E "$passthrough_env_re"
|
||||
}
|
||||
|
||||
# On more occasions than we'd like, it's necessary to put temporary
|
||||
# platform-specific workarounds in place. To help ensure they'll
|
||||
# actually be temporary, it's useful to place a time limit on them.
|
||||
# This function accepts two arguments:
|
||||
# - A (required) future date of the form YYYYMMDD (UTC based).
|
||||
# - An (optional) message string to display upon expiry of the timebomb.
|
||||
timebomb() {
|
||||
local expire="$1"
|
||||
|
||||
if ! expr "$expire" : '[0-9]\{8\}$' > /dev/null; then
|
||||
echo "timebomb: '$expire' must be UTC-based and of the form YYYYMMDD"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $(date -u +%Y%m%d) -lt $(date -u -d "$expire" +%Y%m%d) ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
declare -a frame
|
||||
read -a frame < <(caller)
|
||||
|
||||
cat << EOF >> /dev/stderr
|
||||
***********************************************************
|
||||
* TIME BOMB EXPIRED!
|
||||
*
|
||||
* >> ${frame[1]}:${frame[0]}: ${2:-No reason given, tsk tsk}
|
||||
*
|
||||
* Temporary workaround expired on ${expire:0:4}-${expire:4:2}-${expire:6:2}.
|
||||
*
|
||||
* Please review the above source file and either remove the
|
||||
* workaround or, if absolutely necessary, extend it.
|
||||
*
|
||||
* Please also check for other timebombs while you're at it.
|
||||
***********************************************************
|
||||
EOF
|
||||
exit 1
|
||||
}
|
||||
|
|
|
@ -6,6 +6,6 @@ set -e
|
|||
|
||||
cd $(dirname $0)
|
||||
for testscript in test???-*.sh; do
|
||||
echo -e "\nExecuting $testscript..." > /dev/stderr
|
||||
echo -e "\nExecuting $testscript..." >> /dev/stderr
|
||||
./$testscript
|
||||
done
|
||||
|
|
|
@ -3,9 +3,14 @@
|
|||
# Unit-tests for library script in the current directory
|
||||
# Also verifies test script is derived from library filename
|
||||
|
||||
# shellcheck source-path=./
|
||||
source $(dirname ${BASH_SOURCE[0]})/testlib.sh || exit 1
|
||||
# Must be statically defined, 'source-path' directive can't work here.
|
||||
# shellcheck source=../lib/platform.sh disable=SC2154
|
||||
source "$TEST_DIR/$SUBJ_FILENAME" || exit 2
|
||||
|
||||
# For whatever reason, SCRIPT_PATH cannot be resolved.
|
||||
# shellcheck disable=SC2154
|
||||
test_cmd "Library $SUBJ_FILENAME is not executable" \
|
||||
0 "" \
|
||||
test ! -x "$SCRIPT_PATH/$SUBJ_FILENAME"
|
||||
|
@ -23,5 +28,73 @@ for var in OS_RELEASE_VER OS_REL_VER; do
|
|||
test "$NODOT" == "${!var}"
|
||||
done
|
||||
|
||||
for OS_RELEASE_ID in 'debian' 'ubuntu'; do
|
||||
(
|
||||
export _TEST_UID=$RANDOM # Normally $UID is read-only
|
||||
# Must be statically defined, 'source-path' directive can't work here.
|
||||
# shellcheck source=../lib/platform.sh disable=SC2154
|
||||
source "$TEST_DIR/$SUBJ_FILENAME" || exit 2
|
||||
|
||||
# The point of this test is to confirm it's defined
|
||||
# shellcheck disable=SC2154
|
||||
test_cmd "The '\$SUDO' env. var. is non-empty when \$_TEST_UID is non-zero" \
|
||||
0 "" \
|
||||
test -n "$SUDO"
|
||||
|
||||
test_cmd "The '\$SUDO' env. var. contains 'noninteractive' when '\$_TEST_UID' is non-zero" \
|
||||
0 "noninteractive" \
|
||||
echo "$SUDO"
|
||||
)
|
||||
done
|
||||
|
||||
test_cmd "The passthrough_envars() func. has output by default." \
|
||||
0 ".+" \
|
||||
passthrough_envars
|
||||
|
||||
(
|
||||
# Confirm defaults may be overriden
|
||||
PASSTHROUGH_ENV_EXACT="FOOBARBAZ"
|
||||
PASSTHROUGH_ENV_ATSTART="FOO"
|
||||
PASSTHROUGH_ENV_ANYWHERE="BAR"
|
||||
export FOOBARBAZ="testing"
|
||||
|
||||
test_cmd "The passthrough_envars() func. w/ overriden expr. only prints name of test variable." \
|
||||
0 "FOOBARBAZ" \
|
||||
passthrough_envars
|
||||
)
|
||||
|
||||
# Test from a mostly empty environment to limit possibility of expr mismatch flakes
|
||||
declare -a printed_envs
|
||||
readarray -t printed_envs <<<$(env --ignore-environment PATH="$PATH" FOOBARBAZ="testing" \
|
||||
SECRET_ENV_RE="(^PATH$)|(^BASH_FUNC)|(^_.*)|(FOOBARBAZ)|(SECRET_ENV_RE)" \
|
||||
CI="true" AUTOMATION_LIB_PATH="/path/to/some/place" \
|
||||
bash -c "source $TEST_DIR/$SUBJ_FILENAME && passthrough_envars")
|
||||
|
||||
test_cmd "The passthrough_envars() func. w/ overriden \$SECRET_ENV_RE hides test variable." \
|
||||
1 "0" \
|
||||
expr match "${printed_envs[*]}" '.*FOOBARBAZ.*'
|
||||
|
||||
test_cmd "The passthrough_envars() func. w/ overriden \$SECRET_ENV_RE returns CI variable." \
|
||||
0 "[1-9]+[0-9]*" \
|
||||
expr match "${printed_envs[*]}" '.*CI.*'
|
||||
|
||||
test_cmd "timebomb() function requires at least one argument" \
|
||||
1 "must be UTC-based and of the form YYYYMMDD" \
|
||||
timebomb
|
||||
|
||||
TZ=UTC12 \
|
||||
test_cmd "timebomb() function ignores TZ and compares < UTC-forced current date" \
|
||||
1 "TIME BOMB EXPIRED" \
|
||||
timebomb $(TZ=UTC date +%Y%m%d)
|
||||
|
||||
test_cmd "timebomb() alerts user when no description given" \
|
||||
1 "No reason given" \
|
||||
timebomb 00010101
|
||||
|
||||
EXPECTED_REASON="test${RANDOM}test"
|
||||
test_cmd "timebomb() gives reason when one was provided" \
|
||||
1 "$EXPECTED_REASON" \
|
||||
timebomb 00010101 "$EXPECTED_REASON"
|
||||
|
||||
# Must be last call
|
||||
exit_with_status
|
||||
|
|
|
@ -88,7 +88,7 @@ test_cmd() {
|
|||
echo "# $@" > /dev/stderr
|
||||
fi
|
||||
|
||||
# Using egrep vs file safer than shell builtin test
|
||||
# Using grep vs file safer than shell builtin test
|
||||
local a_out_f=$(mktemp -p '' "tmp_${FUNCNAME[0]}_XXXXXXXX")
|
||||
local a_exit=0
|
||||
|
||||
|
@ -108,7 +108,7 @@ test_cmd() {
|
|||
if ((TEST_DEBUG)); then
|
||||
echo "Received $(wc -l $a_out_f | awk '{print $1}') output lines of $(wc -c $a_out_f | awk '{print $1}') bytes total"
|
||||
fi
|
||||
if egrep -q "$e_out_re" "${a_out_f}.oneline"; then
|
||||
if grep -Eq "$e_out_re" "${a_out_f}.oneline"; then
|
||||
_test_report "Command $1 exited as expected with expected output" "0" "$a_out_f"
|
||||
else
|
||||
_test_report "Expecting regex '$e_out_re' match to (whitespace-squashed) output" "1" "$a_out_f"
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
{
|
||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
||||
"extends": [
|
||||
"github>containers/automation//renovate/defaults.json5"
|
||||
]
|
||||
}
|
|
@ -1,8 +1,11 @@
|
|||
#!/bin/bash
|
||||
|
||||
# Installs common Github Action utilities system-wide. NOT intended to be used directly
|
||||
# by humans, should only be used indirectly by running
|
||||
# ../bin/install_automation.sh <ver> github
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
source "$AUTOMATION_LIB_PATH/anchors.sh"
|
||||
source "$AUTOMATION_LIB_PATH/console_output.sh"
|
||||
|
||||
|
@ -21,7 +24,6 @@ if [[ $UID -eq 0 ]]; then
|
|||
fi
|
||||
|
||||
cd $(dirname $(realpath "${BASH_SOURCE[0]}"))
|
||||
install -v $INST_PERM_ARG -D -t "$INSTALL_PREFIX/bin" ./bin/*
|
||||
install -v $INST_PERM_ARG -D -t "$INSTALL_PREFIX/lib" ./lib/*
|
||||
|
||||
# Needed for installer testing
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
# This file is intended for sourcing by the cirrus-ci_retrospective workflow
|
||||
# It should not be used under any other context.
|
||||
|
||||
source $(dirname $BASH_SOURCE[0])/github_common.sh || exit 1
|
||||
source $(dirname ${BASH_SOURCE[0]})/github_common.sh || exit 1
|
||||
|
||||
# Cirrus-CI Build status codes that represent completion
|
||||
COMPLETE_STATUS_RE='FAILED|COMPLETED|ABORTED|ERRORED'
|
||||
|
@ -63,7 +63,7 @@ load_ccir() {
|
|||
was_pr='true'
|
||||
# Don't race vs another cirrus-ci build triggered _after_ GH action workflow started
|
||||
# since both may share the same check_suite. e.g. task re-run or manual-trigger
|
||||
if echo "$bst" | egrep -q "$COMPLETE_STATUS_RE"; then
|
||||
if echo "$bst" | grep -E -q "$COMPLETE_STATUS_RE"; then
|
||||
if [[ -n "$tst" ]] && [[ "$tst" == "PAUSED" ]]; then
|
||||
dbg "Detected action status $tst"
|
||||
do_intg='true'
|
||||
|
|
|
@ -5,22 +5,50 @@
|
|||
# Important paths defined here
|
||||
AUTOMATION_LIB_PATH="${AUTOMATION_LIB_PATH:-$(realpath $(dirname ${BASH_SOURCE[0]})/../../common/lib)}"
|
||||
|
||||
# Override default library message prefixes to those consumed by Github Actions
|
||||
# https://help.github.com/en/actions/reference/workflow-commands-for-github-actions
|
||||
# Doesn't work properly w/o $ACTIONS_STEP_DEBUG=true
|
||||
DEBUG_MSG_PREFIX="::debug::"
|
||||
# Translation to usage throughout common-library
|
||||
if [[ "${ACTIONS_STEP_DEBUG:-false}" == 'true' ]]; then
|
||||
DEBUG=1
|
||||
fi
|
||||
# Highlight these messages in the Github Action WebUI
|
||||
WARNING_MSG_PREFIX="::warning::"
|
||||
ERROR_MSG_PREFIX="::error::"
|
||||
|
||||
source $AUTOMATION_LIB_PATH/common_lib.sh || exit 1
|
||||
|
||||
# Wrap the die() function to add github-action sugar that identifies file
|
||||
# & line number within the UI, before exiting non-zero.
|
||||
rename_function die _die
|
||||
die() {
|
||||
# https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-error-message
|
||||
local ERROR_MSG_PREFIX
|
||||
ERROR_MSG_PREFIX="::error file=${BASH_SOURCE[1]},line=${BASH_LINENO[0]}::"
|
||||
_die "$@"
|
||||
}
|
||||
|
||||
# Wrap the warn() function to add github-action sugar that identifies file
|
||||
# & line number within the UI.
|
||||
rename_function warn _warn
|
||||
warn() {
|
||||
local WARNING_MSG_PREFIX
|
||||
# https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-warning-message
|
||||
WARNING_MSG_PREFIX="::warning file=${BASH_SOURCE[1]},line=${BASH_LINENO[0]}::"
|
||||
_warn "$@"
|
||||
}
|
||||
|
||||
# Idomatic debug messages in github-actions are worse than useless. They do
|
||||
# not embed file/line information. They are completely hidden unless
|
||||
# the $ACTIONS_STEP_DEBUG step or job variable is set 'true'. If setting
|
||||
# this variable as a secret, can have unintended conseuqences:
|
||||
# https://docs.github.com/en/actions/monitoring-and-troubleshooting-workflows/using-workflow-run-logs#viewing-logs-to-diagnose-failures
|
||||
# Wrap the dbg() function to add github-action sugar at the "notice" level
|
||||
# so that it may be observed in output by regular users without danger.
|
||||
rename_function dbg _dbg
|
||||
dbg() {
|
||||
# When set true, simply enable automation library debugging.
|
||||
if [[ "${ACTIONS_STEP_DEBUG:-false}" == 'true' ]]; then export A_DEBUG=1; fi
|
||||
|
||||
# notice-level messages actually show up in the UI use them for debugging
|
||||
# https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-a-notice-message
|
||||
local DEBUG_MSG_PREFIX
|
||||
DEBUG_MSG_PREFIX="::notice file=${BASH_SOURCE[1]},line=${BASH_LINENO[0]}::"
|
||||
_dbg "$@"
|
||||
}
|
||||
|
||||
# usage: set_out_var <name> [value...]
|
||||
set_out_var() {
|
||||
A_DEBUG=0 req_env_vars GITHUB_OUTPUT
|
||||
name=$1
|
||||
shift
|
||||
value="$@"
|
||||
|
@ -28,5 +56,6 @@ set_out_var() {
|
|||
die "Expecting first parameter to be non-empty value for the output variable name"
|
||||
dbg "Setting Github Action step output variable '$name' to '$value'"
|
||||
# Special string recognized by Github Actions
|
||||
printf "\n::set-output name=$name::%s\n" "$value"
|
||||
# Ref: https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-output-parameter
|
||||
echo "$name=$value" >> $GITHUB_OUTPUT
|
||||
}
|
||||
|
|
|
@ -24,8 +24,8 @@ test_cmd 'Default shell variables are initialized empty/false' \
|
|||
# Remaining tests all require debugging output to be enabled
|
||||
A_DEBUG=1
|
||||
|
||||
test_cmd 'The debugging function does not throw any errors and uses special debug output' \
|
||||
0 '::debug::' \
|
||||
test_cmd 'The debugging function does not throw any errors and redirects to notice-level output' \
|
||||
0 '::notice' \
|
||||
dbg_ccir
|
||||
|
||||
test_cmd "The \$MONITOR_TASK variable is defined an non-empty" \
|
||||
|
|
|
@ -3,39 +3,61 @@
|
|||
source $(dirname $BASH_SOURCE[0])/testlib.sh
|
||||
|
||||
# This is necessary when executing from a Github Action workflow so it ignores
|
||||
# all magic output tokens
|
||||
echo "::stop-commands::TESTING"
|
||||
trap "echo '::TESTING::'" EXIT
|
||||
|
||||
test_cmd "The library $TEST_DIR/$SUBJ_FILENAME loads" \
|
||||
0 '' \
|
||||
source $TEST_DIR/$SUBJ_FILENAME
|
||||
|
||||
A_DEBUG=1
|
||||
ACTIONS_STEP_DEBUG=true
|
||||
# Should update $A_DEBUG value
|
||||
source $TEST_DIR/$SUBJ_FILENAME || exit 1 # can't continue w/o loaded library
|
||||
|
||||
test_cmd "The debug message prefix is compatible with github actions commands" \
|
||||
0 '::debug:: This is a test debug message' \
|
||||
dbg 'This is a test debug message'
|
||||
# all magic output sugar.
|
||||
_MAGICTOKEN="TEST${RANDOM}TEST" # must be randomly generated / unguessable
|
||||
echo "::stop-commands::$_MAGICTOKEN"
|
||||
trap "echo '::$_MAGICTOKEN::'" EXIT
|
||||
|
||||
unset ACTIONS_STEP_DEBUG
|
||||
unset A_DEBUG
|
||||
# Should update $A_DEBUG value
|
||||
source $TEST_DIR/$SUBJ_FILENAME
|
||||
source $TEST_DIR/$SUBJ_FILENAME || exit 1 # can't continue w/o loaded library
|
||||
|
||||
test_cmd "No debug message shows when ACTIONS_STEP_DEBUG is undefined" \
|
||||
test_cmd "No debug message shows when A_DEBUG and ACTIONS_STEP_DEBUG are undefined" \
|
||||
0 '' \
|
||||
dbg 'This debug message should not appear'
|
||||
|
||||
test_cmd "The warning message prefix is compatible with github actions commands" \
|
||||
0 '::warning:: This is a test warning message' \
|
||||
export A_DEBUG=1
|
||||
test_cmd "A debug notice message shows when A_DEBUG is true" \
|
||||
0 '::notice file=.+,line=.+:: This is a debug message' \
|
||||
dbg "This is a debug message"
|
||||
unset A_DEBUG
|
||||
|
||||
export ACTIONS_STEP_DEBUG="true"
|
||||
test_cmd "A debug notice message shows when ACTIONS_STEP_DEBUG is true" \
|
||||
0 '::notice file=.+,line=.+:: This is also a debug message' \
|
||||
dbg "This is also a debug message"
|
||||
unset ACTIONS_STEP_DEBUG
|
||||
unset A_DEBUG
|
||||
|
||||
test_cmd "Warning messages contain github-action sugar." \
|
||||
0 '::warning file=.+,line=.+:: This is a test warning message' \
|
||||
warn 'This is a test warning message'
|
||||
|
||||
test_cmd "The github actions command for setting output parameter is formatted as expected" \
|
||||
0 '::set-output name=TESTING_NAME::TESTING VALUE' \
|
||||
test_cmd "Error messages contain github-action sugar." \
|
||||
0 '::error file=.+,line=.+:: This is a test error message' \
|
||||
die 'This is a test error message' 0
|
||||
|
||||
unset GITHUB_OUTPUT_FUDGED
|
||||
if [[ -z "$GITHUB_OUTPUT" ]]; then
|
||||
# Not executing under github-actions
|
||||
GITHUB_OUTPUT=$(mktemp -p '' tmp_$(basename ${BASH_SOURCE[0]})_XXXX)
|
||||
GITHUB_OUTPUT_FUDGED=1
|
||||
fi
|
||||
|
||||
test_cmd "The set_out_var function normally produces no output" \
|
||||
0 '' \
|
||||
set_out_var TESTING_NAME TESTING VALUE
|
||||
|
||||
# Must be the last command in this file
|
||||
export A_DEBUG=1
|
||||
test_cmd "The set_out_var function is debugable" \
|
||||
0 "::notice file=.+line=.+:: Setting Github.+'DEBUG_TESTING_NAME' to 'DEBUGGING TESTING VALUE'" \
|
||||
set_out_var DEBUG_TESTING_NAME DEBUGGING TESTING VALUE
|
||||
unset A_DEBUG
|
||||
|
||||
test_cmd "Previous set_out_var function properly sets a step-output value" \
|
||||
0 'TESTING_NAME=TESTING VALUE' \
|
||||
cat $GITHUB_OUTPUT
|
||||
|
||||
# Must be the last commands in this file
|
||||
if ((GITHUB_OUTPUT_FUDGED)); then rm -f "$GITHUB_OUTPUT"; fi
|
||||
exit_with_status
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
/Cron.log
|
||||
/utilization.csv
|
||||
/dh_status.txt*
|
||||
/pw_status.txt*
|
||||
/html/utilization.png*
|
|
@ -0,0 +1,200 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This script is intended for use by humans to allocate a dedicated-host
|
||||
# and create an instance on it for testing purposes. When executed,
|
||||
# it will create a temporary clone of the repository with the necessary
|
||||
# modifications to manipulate the test host. It's the user's responsibility
|
||||
# to cleanup this directory after manually removing the instance (see below).
|
||||
#
|
||||
# **Note**: Due to Apple/Amazon restrictions on the removal of these
|
||||
# resources, cleanup must be done manually. You will need to shutdown and
|
||||
# terminate the instance, then wait 24-hours before releasing the
|
||||
# dedicated-host. The hosts cost money w/n an instance is running.
|
||||
#
|
||||
# The script assumes:
|
||||
#
|
||||
# * The current $USER value reflects your actual identity such that
|
||||
# the test instance may be labeled appropriatly for auditing.
|
||||
# * The `aws` CLI tool is installed on $PATH.
|
||||
# * Appropriate `~/.aws/credentials` credentials are setup.
|
||||
# * The us-east-1 region is selected in `~/.aws/config`.
|
||||
# * The $POOLTOKEN env. var. is set to value available from
|
||||
# https://cirrus-ci.com/pool/1cf8c7f7d7db0b56aecd89759721d2e710778c523a8c91c7c3aaee5b15b48d05
|
||||
# * The local ssh-agent is able to supply the appropriate private key (stored in BW).
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
# shellcheck source-path=SCRIPTDIR
|
||||
source $(dirname ${BASH_SOURCE[0]})/pw_lib.sh
|
||||
|
||||
# Support debugging all mac_pw_pool scripts or only this one
|
||||
I_DEBUG="${I_DEBUG:0}"
|
||||
if ((I_DEBUG)); then
|
||||
X_DEBUG=1
|
||||
warn "Debugging enabled."
|
||||
fi
|
||||
|
||||
dbg "\$USER=$USER"
|
||||
|
||||
[[ -n "$USER" ]] || \
|
||||
die "The variable \$USER must not be empty"
|
||||
|
||||
[[ -n "$POOLTOKEN" ]] || \
|
||||
die "The variable \$POOLTOKEN must not be empty"
|
||||
|
||||
INST_NAME="${USER}Testing"
|
||||
LIB_DIRNAME=$(realpath --relative-to=$REPO_DIRPATH $LIB_DIRPATH)
|
||||
# /tmp is usually a tmpfs, don't let an accidental reboot ruin
|
||||
# access to a test DH/instance for a developer.
|
||||
TMP_CLONE_DIRPATH="/var/tmp/${LIB_DIRNAME}_${INST_NAME}"
|
||||
|
||||
dbg "\$TMP_CLONE_DIRPATH=$TMP_CLONE_DIRPATH"
|
||||
|
||||
if [[ -d "$TMP_CLONE_DIRPATH" ]]; then
|
||||
die "Found existing '$TMP_CLONE_DIRPATH', assuming in-use/relevant; If not, manual cleanup is required."
|
||||
fi
|
||||
|
||||
msg "Creating temporary clone dir and transfering any uncommited files."
|
||||
|
||||
git clone --no-local --no-hardlinks --depth 1 --single-branch --no-tags --quiet "file://$REPO_DIRPATH" "$TMP_CLONE_DIRPATH"
|
||||
declare -a uncommited_filepaths
|
||||
readarray -t uncommited_filepaths <<<$(
|
||||
pushd "$REPO_DIRPATH" &> /dev/null
|
||||
# Obtaining uncommited relative staged filepaths
|
||||
git diff --name-only HEAD
|
||||
# Obtaining uncommited relative unstaged filepaths
|
||||
git ls-files . --exclude-standard --others
|
||||
popd &> /dev/null
|
||||
)
|
||||
|
||||
dbg "Copying \$uncommited_filepaths[*]=${uncommited_filepaths[*]}"
|
||||
|
||||
for uncommited_file in "${uncommited_filepaths[@]}"; do
|
||||
uncommited_file_src="$REPO_DIRPATH/$uncommited_file"
|
||||
uncommited_file_dest="$TMP_CLONE_DIRPATH/$uncommited_file"
|
||||
uncommited_file_dest_parent=$(dirname "$uncommited_file_dest")
|
||||
#dbg "Working on uncommited file '$uncommited_file_src'"
|
||||
if [[ -r "$uncommited_file_src" ]]; then
|
||||
mkdir -p "$uncommited_file_dest_parent"
|
||||
#dbg "$uncommited_file_src -> $uncommited_file_dest"
|
||||
cp -a "$uncommited_file_src" "$uncommited_file_dest"
|
||||
fi
|
||||
done
|
||||
|
||||
declare -a modargs
|
||||
# Format: <pw_lib.sh var name> <new value> <old value>
|
||||
modargs=(
|
||||
# Necessary to prevent in-production macs from trying to use testing instance
|
||||
"DH_REQ_VAL $INST_NAME $DH_REQ_VAL"
|
||||
# Necessary to make test dedicated host stand out when auditing the set in the console
|
||||
"DH_PFX $INST_NAME $DH_PFX"
|
||||
# The default launch template name includes $DH_PFX, ensure the production template name is used.
|
||||
# N/B: The old/unmodified pw_lib.sh is still loaded for the running script
|
||||
"TEMPLATE_NAME $TEMPLATE_NAME Cirrus${DH_PFX}PWinstance"
|
||||
# Permit developer to use instance for up to 3 days max (orphan vm cleaning process will nail it after that).
|
||||
"PW_MAX_HOURS 72 $PW_MAX_HOURS"
|
||||
# Permit developer to execute as many Cirrus-CI tasks as they want w/o automatic shutdown.
|
||||
"PW_MAX_TASKS 9999 $PW_MAX_TASKS"
|
||||
)
|
||||
|
||||
for modarg in "${modargs[@]}"; do
|
||||
set -- $modarg # Convert the "tuple" into the param args $1 $2...
|
||||
dbg "Modifying pw_lib.sh \$$1 definition to '$2' (was '$3')"
|
||||
sed -i -r -e "s/^$1=.*/$1=\"$2\"/" "$TMP_CLONE_DIRPATH/$LIB_DIRNAME/pw_lib.sh"
|
||||
# Ensure future script invocations use the new values
|
||||
unset $1
|
||||
done
|
||||
|
||||
cd "$TMP_CLONE_DIRPATH/$LIB_DIRNAME"
|
||||
source ./pw_lib.sh
|
||||
|
||||
# Before going any further, make sure there isn't an existing
|
||||
# dedicated-host named ${INST_NAME}-0. If there is, it can
|
||||
# be re-used instead of failing the script outright.
|
||||
existing_dh_json=$(mktemp -p "." dh_allocate_XXXXX.json)
|
||||
$AWS ec2 describe-hosts --filter "Name=tag:Name,Values=${INST_NAME}-0" --query 'Hosts[].HostId' > "$existing_dh_json"
|
||||
if grep -Fqx '[]' "$existing_dh_json"; then
|
||||
|
||||
msg "Creating the dedicated host '${INST_NAME}-0'"
|
||||
declare dh_allocate_json
|
||||
dh_allocate_json=$(mktemp -p "." dh_allocate_XXXXX.json)
|
||||
|
||||
declare -a awsargs
|
||||
# Word-splitting of $AWS is desireable
|
||||
# shellcheck disable=SC2206
|
||||
awsargs=(
|
||||
$AWS
|
||||
ec2 allocate-hosts
|
||||
--availability-zone us-east-1a
|
||||
--instance-type mac2.metal
|
||||
--auto-placement off
|
||||
--host-recovery off
|
||||
--host-maintenance off
|
||||
--quantity 1
|
||||
--tag-specifications
|
||||
"ResourceType=dedicated-host,Tags=[{Key=Name,Value=${INST_NAME}-0},{Key=$DH_REQ_TAG,Value=$DH_REQ_VAL},{Key=PWPoolReady,Value=true},{Key=automation,Value=false}]"
|
||||
)
|
||||
|
||||
# N/B: Apple/Amazon require min allocation time of 24hours!
|
||||
dbg "Executing: ${awsargs[*]}"
|
||||
"${awsargs[@]}" > "$dh_allocate_json" || \
|
||||
die "Provisioning new dedicated host $INST_NAME failed. Manual debugging & cleanup required."
|
||||
|
||||
dbg $(jq . "$dh_allocate_json")
|
||||
dhid=$(jq -r -e '.HostIds[0]' "$dh_allocate_json")
|
||||
[[ -n "$dhid" ]] || \
|
||||
die "Obtaining DH ID of new host. Manual debugging & cleanup required."
|
||||
|
||||
# There's a small delay between allocating the dedicated host and LaunchInstances.sh
|
||||
# being able to interact with it. There's no sensible way to monitor for this state :(
|
||||
sleep 3s
|
||||
else # A dedicated host already exists
|
||||
dhid=$(jq -r -e '.[0]' "$existing_dh_json")
|
||||
fi
|
||||
|
||||
# Normally allocation is fairly instant, but not always. Confirm we're able to actually
|
||||
# launch a mac instance onto the dedicated host.
|
||||
for ((attempt=1 ; attempt < 11 ; attempt++)); do
|
||||
msg "Attempt #$attempt launching a new instance on dedicated host"
|
||||
./LaunchInstances.sh --force
|
||||
if grep -E "^${INST_NAME}-0 i-" dh_status.txt; then
|
||||
attempt=-1 # signal success
|
||||
break
|
||||
fi
|
||||
sleep 1s
|
||||
done
|
||||
|
||||
[[ "$attempt" -eq -1 ]] || \
|
||||
die "Failed to use LaunchInstances.sh. Manual debugging & cleanup required."
|
||||
|
||||
# At this point the script could call SetupInstances.sh in another loop
|
||||
# but it takes about 20-minutes to complete. Also, the developer may
|
||||
# not need it, they may simply want to ssh into the instance to poke
|
||||
# around. i.e. they don't need to run any Cirrus-CI jobs on the test
|
||||
# instance.
|
||||
warn "---"
|
||||
warn "NOT copying/running setup.sh to new instance (in case manual activities are desired)."
|
||||
warn "---"
|
||||
|
||||
w="PLEASE REMEMBER TO terminate instance, wait two hours, then
|
||||
remove the dedicated-host in the web console, or run
|
||||
'aws ec2 release-hosts --host-ids=$dhid'."
|
||||
|
||||
msg "---"
|
||||
msg "Dropping you into a shell inside a temp. repo clone:
|
||||
($TMP_CLONE_DIRPATH/$LIB_DIRNAME)"
|
||||
msg "---"
|
||||
msg "Once it finishes booting (5m), you may use './InstanceSSH.sh ${INST_NAME}-0'
|
||||
to access it. Otherwise to fully setup the instance for Cirrus-CI, you need
|
||||
to execute './SetupInstances.sh' repeatedly until the ${INST_NAME}-0 line in
|
||||
'pw_status.txt' includes the text 'complete alive'. That process can take 20+
|
||||
minutes. Once alive, you may then use Cirrus-CI to test against this specific
|
||||
instance with any 'persistent_worker' task having a label of
|
||||
'$DH_REQ_TAG=$DH_REQ_VAL' set."
|
||||
msg "---"
|
||||
warn "$w"
|
||||
|
||||
export POOLTOKEN # ensure availability in sub-shell
|
||||
bash -l
|
||||
|
||||
warn "$w"
|
|
@ -0,0 +1,70 @@
|
|||
#!/bin/bash

# Intended to be run from $HOME/devel/automation/mac_pw_pool/
# using a crontab like:

# # Every date/timestamp in PW Pool management is UTC-relative
# # make cron do the same for consistency.
# CRON_TZ=UTC
#
# PATH=/home/shared/.local/bin:/home/shared/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin
#
# # Keep log from filling up disk & make sure webserver is running
# # (5am UTC is during CI-activity lull)
# 59 4 * * * $HOME/devel/automation/mac_pw_pool/nightly_maintenance.sh &>> $CRONLOG
#
# # PW Pool management (usage drop-off from 03:00-15:00 UTC)
# POOLTOKEN=<from https://cirrus-ci.com/pool/1cf8c7f7d7db0b56aecd89759721d2e710778c523a8c91c7c3aaee5b15b48d05>
# CRONLOG=/home/shared/devel/automation/mac_pw_pool/Cron.log
# */5 * * * * /home/shared/devel/automation/mac_pw_pool/Cron.sh &>> $CRONLOG

# Take an exclusive lock on this script file so overlapping cron runs
# queue up (max 300s) instead of racing on the shared state files.
# shellcheck disable=SC2154
[ "${FLOCKER}" != "$0" ] && exec env FLOCKER="$0" flock -e -w 300 "$0" "$0" "$@" || :

# shellcheck source=./pw_lib.sh
source "$(dirname "${BASH_SOURCE[0]}")/pw_lib.sh"

cd "$SCRIPT_DIRPATH" || die "Cannot enter '$SCRIPT_DIRPATH'"

# SSH agent required to provide key for accessing workers
# Started with `ssh-agent -s > /run/user/$UID/ssh-agent.env`
# followed by adding/unlocking the necessary keys.
# shellcheck disable=SC1090
source "/run/user/$UID/ssh-agent.env"

date -u -Iminutes
now_minutes=$(date -u +%M)

# Launching instances is slow/stateful; only attempt it on 10-minute
# boundaries while instance setup (below) runs on every invocation.
if ((now_minutes % 10 == 0)); then
    "$SCRIPT_DIRPATH/LaunchInstances.sh"
    echo "Exit: $?"
fi

"$SCRIPT_DIRPATH/SetupInstances.sh"
echo "Exit: $?"

[[ -r "$PWSTATE" ]] || \
    die "Can't read $PWSTATE to generate utilization data."

uzn_file="$SCRIPT_DIRPATH/utilization.csv"
# Run input through `date` to validate values are usable timestamps
timestamp=$(date -u -Iseconds -d \
            "$(grep -E '^# SetupInstances\.sh run ' "$PWSTATE" | \
               awk '{print $4}')")
# Drop blank lines, comment lines, and indented continuation lines.
pw_state=$(grep -E -v '^($|#+| +)' "$PWSTATE")
# grep -c exits non-zero on zero matches; that's a valid count here.
n_workers=$(grep -c 'complete alive' <<<"$pw_state") || true
n_tasks=$(awk "BEGIN{B=0} /${DH_PFX}-[0-9]+ complete alive/{B+=\$4} END{print B}" <<<"$pw_state")
n_taskf=$(awk "BEGIN{E=0} /${DH_PFX}-[0-9]+ complete alive/{E+=\$5} END{print E}" <<<"$pw_state")
printf "%s,%i,%i,%i\n" "$timestamp" "$n_workers" "$n_tasks" "$n_taskf" | tee -a "$uzn_file"

# Prevent uncontrolled growth of utilization.csv.  Assume this script
# runs every $interval_minutes minutes, keep only $history_hours worth of data.
interval_minutes=5
history_hours=36
lines_per_hour=$((60 / interval_minutes))
max_uzn_lines=$((history_hours * lines_per_hour))
tail -n $max_uzn_lines "$uzn_file" > "${uzn_file}.tmp"
mv "${uzn_file}.tmp" "$uzn_file"

# If possible, generate the webpage utilization graph
gnuplot -c Utilization.gnuplot || true
|
|
@ -0,0 +1,39 @@
|
|||
#!/bin/bash

set -eo pipefail

# Helper for humans to access an existing instance.  It depends on:
#
# * You know the instance-id or name.
# * All requirements listed in the top `LaunchInstances.sh` comment.
# * The local ssh-agent is able to supply the appropriate private key.

# shellcheck source-path=SCRIPTDIR
source "$(dirname "${BASH_SOURCE[0]}")/pw_lib.sh"

SSH="ssh $SSH_ARGS"  # N/B: library default nulls stdin
if nc -z localhost 5900; then
    # Enable access to VNC if it's running
    # ref: https://repost.aws/knowledge-center/ec2-mac-instance-gui-access
    SSH+=" -L 5900:localhost:5900"
fi

[[ -n "$1" ]] || \
    die "Must provide EC2 instance ID as first argument"

# Save the identifier before shifting, so error messages below report
# the instance argument rather than the first pass-through ssh argument.
inst_arg="$1"
shift

case "$inst_arg" in
    i-*)
        inst_json=$($AWS ec2 describe-instances --instance-ids "$inst_arg") ;;
    *)
        inst_json=$($AWS ec2 describe-instances --filter "Name=tag:Name,Values=$inst_arg") ;;
esac

pub_dns=$(jq -r -e '.Reservations?[0]?.Instances?[0]?.PublicDnsName?' <<<"$inst_json")
if [[ -z "$pub_dns" ]] || [[ "$pub_dns" == "null" ]]; then
    die "Instance '$inst_arg' does not exist, or have a public DNS address allocated (yet)."
fi

echo "+ $SSH ec2-user@$pub_dns $*" >> /dev/stderr
# $SSH intentionally unquoted: it carries multiple ssh arguments.
exec $SSH ec2-user@"$pub_dns" "$@"
|
|
@ -0,0 +1,310 @@
|
|||
#!/bin/bash

set -eo pipefail

# Script intended to be executed by humans (and eventually automation) to
# ensure instances are launched from the current template version, on all
# available Cirrus-CI Persistent Worker M1 Mac dedicated hosts.  These
# dedicated host (slots) are selected at runtime based on their possessing a
# 'true' value for their `PWPoolReady` tag.  The script assumes:
#
# * The `aws` CLI tool is installed on $PATH.
# * Appropriate `~/.aws/credentials` credentials are setup.
# * The us-east-1 region is selected in `~/.aws/config`.
#
# N/B: Dedicated Host names and instance names are assumed to be identical,
# only the IDs differ.

# shellcheck source-path=SCRIPTDIR
source "$(dirname "${BASH_SOURCE[0]}")/pw_lib.sh"

# Default to non-debug when unset.  N/B: the original `${L_DEBUG:0}` was a
# substring expansion and never actually supplied the `0` default.
L_DEBUG="${L_DEBUG:-0}"
if ((L_DEBUG)); then
    X_DEBUG=1
    warn "Debugging enabled - temp. dir will not be cleaned up '$TEMPDIR' $(ctx 0)."
    trap - EXIT  # clear the cleanup trap so $TEMPDIR survives for inspection
fi

# Helper intended for use inside `name_hostid` loop.
# arg1 either "INST" or "HOST"
# arg2: Brief failure message
# arg3: Failure message details
handle_failure() {
    [[ -n "$inststate" ]] || die "Expecting \$inststate to be set $(ctx 2)"
    [[ -n "$name" ]] || die "Expecting \$name to be set $(ctx 2)"
    if [[ "$1" != "INST" ]] && [[ "$1" != "HOST" ]]; then
        die "Expecting either INST or HOST as argument $(ctx 2)"
    fi
    [[ -n "$2" ]] || die "Expecting brief failure message $(ctx 2)"
    [[ -n "$3" ]] || die "Expecting detailed failure message $(ctx 2)"

    warn "$2 $(ctx 2)"
    (
        # Script is sensitive to this first-line format
        echo "# $name $1 ERROR: $2"
        # Make it obvious which host/instance the details pertain to
        awk -e '{print "# "$0}'<<<"$3"
    ) > "$inststate"
}

# Wrapper around handle_failure() for dedicated-host problems
host_failure() {
    [[ -r "$hostoutput" ]] || die "Expecting readable $hostoutput file $(ctx)"
    handle_failure HOST "$1" "aws CLI output: $(<$hostoutput)"
}

# Wrapper around handle_failure() for instance problems
inst_failure() {
    [[ -r "$instoutput" ]] || die "Expecting readable $instoutput file $(ctx)"
    handle_failure INST "$1" "aws CLI output: $(<$instoutput)"
}

# Find dedicated hosts to operate on.
dh_name_flt="Name=tag:Name,Values=${DH_PFX}-*"
dh_tag_flt="Name=tag:$DH_REQ_TAG,Values=$DH_REQ_VAL"
dh_qry='Hosts[].{HostID:HostId, Name:[Tags[?Key==`Name`].Value][] | [0]}'
dh_searchout="$TEMPDIR/hosts.output"  # JSON or error message
if ! $AWS ec2 describe-hosts --filter "$dh_name_flt" "$dh_tag_flt" --query "$dh_qry" &> "$dh_searchout"; then
    die "Searching for dedicated hosts $(ctx 0):
$(<$dh_searchout)"
fi

# Array item format: "<Name> <ID>"
dh_fmt='.[] | .Name +" "+ .HostID'
# Avoid always processing hosts in the same alpha-sorted order, as that would
# mean hosts at the end of the list consistently wait the longest for new
# instances to be created (see creation-stagger code below).
if ! readarray -t NAME2HOSTID <<<$(json_query "$dh_fmt" "$dh_searchout" | sort --random-sort); then
    die "Extracting dedicated host 'Name' and 'HostID' fields $(ctx 0):
$(<$dh_searchout)"
fi

n_dh=0
n_dh_total=${#NAME2HOSTID[@]}
if [[ -z "${NAME2HOSTID[*]}" ]] || ! ((n_dh_total)); then
    msg "No dedicated hosts found"
    exit 0
fi

latest_launched="1970-01-01T00:00+00:00"  # in case $DHSTATE is missing
dcmpfmt="+%Y%m%d%H%M"  # date comparison format compatible with numeric 'test'
# To find the latest instance launch time, script can't rely on reading
# $DHSTATE or $PWSTATE because they may not exist or be out of date.
# Search for all running instances by name and running state, returning
# their launch timestamps.
declare -a pw_filts
pw_filts=(
    "Name=tag:Name,Values=${DH_PFX}-*"
    'Name=tag:PWPoolReady,Values=true'
    "Name=tag:$DH_REQ_TAG,Values=$DH_REQ_VAL"
    'Name=instance-state-name,Values=running'
)
pw_query='Reservations[].Instances[].LaunchTime'
inst_lt_f=$TEMPDIR/inst_launch_times
dbg "Obtaining launch times for all running ${DH_PFX}-* instances"
dbg "$AWS ec2 describe-instances --filters '${pw_filts[*]}' --query '$pw_query' &> '$inst_lt_f'"
if ! $AWS ec2 describe-instances --filters "${pw_filts[@]}" --query "$pw_query" &> "$inst_lt_f"; then
    die "Can not query instances:
$(<$inst_lt_f)"
else
    declare -a launchtimes
    if ! readarray -t launchtimes<<<$(json_query '.[]?' "$inst_lt_f") ||
       [[ "${#launchtimes[@]}" -eq 0 ]] ||
       [[ "${launchtimes[0]}" == "" ]]; then
        warn "Found no running instances, this should not happen."
    else
        dbg "launchtimes=[${launchtimes[*]}]"
        for launch_time in "${launchtimes[@]}"; do
            if [[ "$launch_time" == "" ]] || [[ "$launch_time" == "null" ]]; then
                warn "Ignoring empty/null instance launch time."
                continue
            fi
            # Assume launch_time is never malformed
            launched_hour=$(date -u -d "$launch_time" "$dcmpfmt")
            latest_launched_hour=$(date -u -d "$latest_launched" "$dcmpfmt")
            dbg "instance launched on $launched_hour; latest launch hour: $latest_launched_hour"
            if [[ $launched_hour -gt $latest_launched_hour ]]; then
                dbg "Updating latest launched timestamp"
                latest_launched="$launch_time"
            fi
        done
    fi
fi

# Increase readability for humans by always ensuring the two important
# date stamps line up regardless of the length of $n_dh_total.
# N/B: The inner $(seq ...) was missing in the original, which made printf
# emit exactly three spaces (one per literal word) regardless of width.
_n_dh_sp=$(printf ' %.0s' $(seq 1 ${#n_dh_total}))
msg "Operating on $n_dh_total dedicated hosts at $(date -u -Iseconds)"
msg "  ${_n_dh_sp}Last instance launch on $latest_launched"
echo -e "# $(basename ${BASH_SOURCE[0]}) run $(date -u -Iseconds)\n#" > "$TEMPDIR/$(basename $DHSTATE)"

# When initializing a new pool of workers, it would take many hours
# to wait for the staggered creation mechanism on each host.  This
# would negatively impact worker utilization.  Provide a workaround.
force=0
# shellcheck disable=SC2199
if [[ "$@" =~ --force ]]; then
    warn "Forcing instance creation: Ignoring staggered creation limits."
    force=1
fi

for name_hostid in "${NAME2HOSTID[@]}"; do
    n_dh=$(($n_dh+1))
    _I="    "
    msg " "  # make output easier to read

    read -r name hostid junk<<<"$name_hostid"
    msg "Working on Dedicated Host #$n_dh/$n_dh_total '$name' for HostID '$hostid'."

    hostoutput="$TEMPDIR/${name}_host.output"  # JSON or error message from aws describe-hosts
    instoutput="$TEMPDIR/${name}_inst.output"  # JSON or error message from aws describe-instance or run-instance
    inststate="$TEMPDIR/${name}_inst.state"    # Line to append to $DHSTATE

    if ! $AWS ec2 describe-hosts --host-ids $hostid &> "$hostoutput"; then
        host_failure "Failed to look up dedicated host."
        continue
    # Allow hosts to be taken out of service easily/manually by editing its tags.
    # Also detect any JSON parsing problems in the output.
    elif ! PWPoolReady=$(json_query '.Hosts?[0]?.Tags? | map(select(.Key == "PWPoolReady")) | .[].Value' "$hostoutput"); then
        host_failure "Empty/null/failed JSON query of PWPoolReady tag."
        continue
    elif [[ "$PWPoolReady" != "true" ]]; then
        msg "Dedicated host tag 'PWPoolReady' == '$PWPoolReady' != 'true'."
        echo "# $name HOST DISABLED: PWPoolReady==$PWPoolReady" > "$inststate"
        continue
    fi

    if ! hoststate=$(json_query '.Hosts?[0]?.State?' "$hostoutput"); then
        host_failure "Empty/null/failed JSON query of dedicated host state."
        continue
    fi

    if [[ "$hoststate" == "pending" ]] || \
       [[ "$hoststate" == "under-assessment" ]] || \
       [[ "$hoststate" == "released" ]]
    then
        # When an instance is terminated, its dedicated host goes into an unusable state
        # for about 1-1/2 hours.  There's absolutely nothing that can be done to avoid
        # this or work around it.  Ignore hosts in this state, assuming a later run of the
        # script will start an instance on the (hopefully) available host.
        #
        # I have no idea what 'under-assessment' means, and it doesn't last as long as 'pending',
        # but functionally it behaves the same.
        #
        # Hosts in 'released' state are about to go away, hopefully due to operator action.
        # Don't treat this as an error.
        msg "Dedicated host is untouchable due to '$hoststate' state."
        # Reference the actual output text, in case of false-match or unexpected contents.
        echo "# $name HOST BUSY: $hoststate" > "$inststate"
        continue
    elif [[ "$hoststate" != "available" ]]; then
        # The "available" state means the host is ready for zero or more instances to be created.
        # Detect all other states (they should be extremely rare).
        host_failure "Unsupported dedicated host state '$hoststate'."
        continue
    fi

    # Counter-intuitively, dedicated hosts can support more than one running instance.  Except
    # for Mac instances, but this is not reflected anywhere in the JSON.  Trying to start a new
    # Mac instance on an already occupied host is bound to fail.  Inconveniently this error
    # will look an awful lot like many other types of errors, confusing any human examining
    # $DHSTATE.  Detect dedicated-hosts with existing instances.
    InstanceId=$(set +e; jq -r '.Hosts?[0]?.Instances?[0].InstanceId?' "$hostoutput")
    dbg "InstanceId='$InstanceId'"

    # Stagger creation of instances by $CREATE_STAGGER_HOURS
    launch_new=0
    if [[ "$InstanceId" == "null" ]] || [[ "$InstanceId" == "" ]]; then
        launch_threshold=$(date -u -Iseconds -d "$latest_launched + $CREATE_STAGGER_HOURS hours")
        launch_threshold_hour=$(date -u -d "$launch_threshold" "$dcmpfmt")
        now_hour=$(date -u "$dcmpfmt")
        dbg "launch_threshold_hour=$launch_threshold_hour"
        dbg "          now_hour=$now_hour"
        if [[ "$force" -eq 0 ]] && [[ $now_hour -lt $launch_threshold_hour ]]; then
            msg "Cannot launch new instance until $launch_threshold"
            echo "# $name HOST THROTTLE: Inst. creation delayed until $launch_threshold" > "$inststate"
            continue
        else
            launch_new=1
        fi
    fi

    if ((launch_new)); then
        msg "Creating new $name instance on $name host."
        if ! $AWS ec2 run-instances \
                  --launch-template LaunchTemplateName=${TEMPLATE_NAME} \
                  --tag-specifications \
                  "ResourceType=instance,Tags=[{Key=Name,Value=$name},{Key=$DH_REQ_TAG,Value=$DH_REQ_VAL},{Key=PWPoolReady,Value=true},{Key=automation,Value=true}]" \
                  --placement "HostId=$hostid" &> "$instoutput"; then
            inst_failure "Failed to create new instance on available host."
            continue
        else
            # Block further launches (assumes script is running in a 10m while loop).
            latest_launched=$(date -u -Iseconds)
            msg "Successfully created new instance; Waiting for 'running' state (~1m typical)..."
            # N/B: New Mac instances take ~5-10m to actually become ssh-able
            if ! InstanceId=$(json_query '.Instances?[0]?.InstanceId' "$instoutput"); then
                inst_failure "Empty/null/failed JSON query of brand-new InstanceId"
                continue
            fi
            # Instance "running" status is good enough for this script, and since network
            # accessibility can take 5-20m post creation.
            # Polls 40 times with 15-second delay (non-configurable).
            if ! $AWS ec2 wait instance-running \
                      --instance-ids $InstanceId &> "${instoutput}.wait"; then
                # inst_failure() would include unhelpful $instoutput detail
                (
                    echo "# $name INST ERROR: Running-state timeout."
                    awk -e '{print "# "$0}' "${instoutput}.wait"
                ) > "$inststate"
                continue
            fi
        fi
    fi

    # If an instance was created, $instoutput contents are already obsolete.
    # If an existing instance, $instoutput doesn't exist.
    if ! $AWS ec2 describe-instances --instance-ids $InstanceId &> "$instoutput"; then
        inst_failure "Failed to describe host instance."
        continue
    fi

    # Describe-instance has unnecessarily complex structure, simplify it.
    if ! json_query '.Reservations?[0]?.Instances?[0]?' "$instoutput" > "${instoutput}.simple"; then
        inst_failure "Empty/null/failed JSON simplification of describe-instances."
    fi
    mv "$instoutput" "${instoutput}.describe"  # leave for debugging
    mv "${instoutput}.simple" "${instoutput}"

    msg "Parsing new or existing instance ($InstanceId) details."
    if ! InstanceId=$(json_query '.InstanceId' $instoutput); then
        inst_failure "Empty/null/failed JSON query of InstanceId"
        continue
    elif ! InstName=$(json_query '.Tags | map(select(.Key == "Name")) | .[].Value' $instoutput) || \
         [[ "$InstName" != "$name" ]]; then
        inst_failure "Inst. name '$InstName' != DH name '$name'"
    elif ! LaunchTime=$(json_query '.LaunchTime' $instoutput); then
        inst_failure "Empty/null/failed JSON query of LaunchTime"
        continue
    fi

    echo "$name $InstanceId $LaunchTime" > "$inststate"
done

_I=""
msg " "
msg "Processing all dedicated host and instance states."
# Consuming state file in alpha-order is easier on human eyes
readarray -t NAME2HOSTID <<<$(json_query "$dh_fmt" "$dh_searchout" | sort)
for name_hostid in "${NAME2HOSTID[@]}"; do
    read -r name hostid<<<"$name_hostid"
    inststate="$TEMPDIR/${name}_inst.state"
    [[ -r "$inststate" ]] || \
        die "Expecting to find instance-state file $inststate for host '$name' $(ctx 0)."
    cat "$inststate" >> "$TEMPDIR/$(basename $DHSTATE)"
done

dbg "Creating/updating state file"
if [[ -r "$DHSTATE" ]]; then
    cp "$DHSTATE" "${DHSTATE}~"
fi
mv "$TEMPDIR/$(basename $DHSTATE)" "$DHSTATE"
|
|
@ -0,0 +1,138 @@
|
|||
# Cirrus-CI persistent worker maintenance
|
||||
|
||||
These scripts are intended to be used from a repository clone,
|
||||
by cron, on an always-on cloud machine. They make a lot of
|
||||
other assumptions, some of which may not be well documented.
|
||||
Please see the comments at the top of each script for more
|
||||
detailed/specific information.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
* The `aws` binary present somewhere on `$PATH`.
|
||||
* Standard AWS `credentials` and `config` files exist under `~/.aws`
|
||||
and set the region to `us-east-1`.
|
||||
* A copy of the ssh-key referenced by `CirrusMacM1PWinstance` launch template
|
||||
under "Assumptions" below.
|
||||
* The ssh-key has been added to a running ssh-agent.
|
||||
* The running ssh-agent sh-compatible env. vars. are stored in
|
||||
`/run/user/$UID/ssh-agent.env`
|
||||
* The env. var. `POOLTOKEN` is set to the Cirrus-CI persistent worker pool
|
||||
token value.
|
||||
|
||||
## Assumptions
|
||||
|
||||
* You've read all scripts in this directory, generally follow
|
||||
their purpose, and meet any requirements stated within the
|
||||
header comment.
|
||||
* You've read the [private documentation](https://docs.google.com/document/d/1PX6UyqDDq8S72Ko9qe_K3zoV2XZNRQjGxPiWEkFmQQ4/edit)
|
||||
and understand the safety/security section.
|
||||
* You have permissions to access all referenced AWS resources.
|
||||
* There are one or more dedicated hosts allocated and have set:
|
||||
* A name tag like `MacM1-<some number>` (NO SPACES!)
|
||||
* The `mac2` instance family
|
||||
* The `mac2.metal` instance type
|
||||
* Disabled "Instance auto-placement", "Host recovery", and "Host maintenance"
|
||||
* Quantity: 1
|
||||
* Tags: `automation=false`, `purpose=prod`, and `PWPoolReady=true`
|
||||
* The EC2 `CirrusMacM1PWinstance` instance-template exists and sets:
|
||||
* Shutdown-behavior: terminate
|
||||
* Same "key pair" referenced under `Prerequisites`
|
||||
* All other required instance parameters complete
|
||||
* A user-data script that shuts down the instance after 2 days.
|
||||
|
||||
## Operation (Theory)
|
||||
|
||||
The goal is to maintain sufficient alive/running/working instances
|
||||
to service most Cirrus-CI tasks pointing at the pool. This is
|
||||
best achieved with slower maintenance of hosts compared to setup
|
||||
of ready instances. This is because hosts can be inaccessible for
|
||||
up to 2 hours, but instances come up in ~10-20m, ready to run tasks.
|
||||
|
||||
Either hosts and/or instances may be removed from management by
|
||||
setting "false" or removing their `PWPoolReady=true` tag. Otherwise,
|
||||
the pool should be maintained by installing the crontab lines
|
||||
indicated in the `Cron.sh` script.
|
||||
|
||||
Cirrus-CI will assign tasks (specially) targeted at the pool, to an
|
||||
instance with a running listener (`cirrus worker run` process). If
|
||||
there are none, the task will queue forever (there might be a 24-hour
|
||||
timeout, I can't remember). From a PR perspective, there is little
|
||||
control over which instance you get. It could easily be one where
|
||||
a previous task barfed all over and rendered unusable.
|
||||
|
||||
## Initialization
|
||||
|
||||
It is assumed that neither the `Cron.sh` nor any related maintenance
|
||||
scripts are installed (in crontab) or currently running.
|
||||
|
||||
Once several dedicated hosts have been manually created, they
|
||||
should initially have no instances on them. If left alone, the
|
||||
maintenance scripts will eventually bring them all up, however
|
||||
complete creation and setup will take many hours. This may be
|
||||
bypassed by *manually* running `LaunchInstances.sh --force`.
|
||||
|
||||
In order to prevent all the instances from being recycled at the same
|
||||
(future) time, the shutdown time installed by `SetupInstances.sh` also
|
||||
needs to be adjusted. The operator should first wait about 20 minutes
|
||||
for all new instances to fully boot. Followed by a call to
|
||||
`SetupInstances.sh --force`.
|
||||
|
||||
Now the `Cron.sh` cron-job may be installed, enabled and started.
|
||||
|
||||
## Manual Testing
|
||||
|
||||
Verifying changes to these scripts / cron-job must be done manually.
|
||||
To support this, every dedicated host and instance has a `purpose`
|
||||
tag, which must correspond to the value indicated in `pw_lib.sh`
|
||||
and in the target repo `.cirrus.yml`. To test script and/or
|
||||
CI changes:
|
||||
|
||||
1. Make sure you have locally met all requirements spelled out in the
|
||||
header-comment of `AllocateTestDH.sh`.
|
||||
1. Execute `AllocateTestDH.sh`. It will operate out of a temporary
|
||||
clone of the repository to prevent pushing required test-modifications
|
||||
upstream.
|
||||
1. Repeatedly execute `SetupInstances.sh`. It will update `pw_status.txt`
|
||||
with any warnings/errors. When successful, lines will include
|
||||
the host name, "complete", and "alive" status strings.
|
||||
1. If instance debugging is needed, the `InstanceSSH.sh` script may be
|
||||
used. Simply pass the name of the host you want to access. Every
|
||||
instance should have a `setup.log` file in the `ec2-user` homedir. There
|
||||
should also be `/private/tmp/<name>-worker.log` with entries from the
|
||||
pool listener process.
|
||||
1. To test CI changes against the test instance(s), push a PR that includes
|
||||
`.cirrus.yml` changes to the task's `persistent_worker` dictionary's
|
||||
`purpose` attribute. Set the value the same as the tag in step 1.
|
||||
1. When you're done with all testing, terminate the instance. Then wait
|
||||
a full 24-hours before "releasing" the dedicated host. Both operations
|
||||
can be performed using the AWS EC2 WebUI. Please remember to do the
|
||||
release step, as the $-clock continues to run while it's allocated.
|
||||
|
||||
Note: Instances are set to auto-terminate on shutdown. They should
|
||||
self shutdown after 24-hours automatically. After termination for
|
||||
any cause, there's about a 2-hour waiting period before a new instance
|
||||
can be allocated. The `LaunchInstances.sh` script is able to deal with this
|
||||
properly.
|
||||
|
||||
|
||||
## Script Debugging Hints
|
||||
|
||||
* On each MacOS instance:
|
||||
* The pool listener process (running as the worker user) keeps a log under `/private/tmp`. The
|
||||
file includes the registered name of the worker. For example, on MacM1-7 you would find `/private/tmp/MacM1-7-worker.log`.
|
||||
This log shows tasks taken on, completed, and any errors reported back from Cirrus-CI internals.
|
||||
* In the ec2-user's home directory is a `setup.log` file. This stores the output from executing
|
||||
`setup.sh`. It also contains any warnings/errors from the (very important) `service_pool.sh` script - which should
|
||||
_always_ be running in the background.
|
||||
* There are several drop-files in the `ec2-user` home directory which are checked by `SetupInstances.sh`
|
||||
to record state. If removed, along with `setup.log`, the script will re-execute (a possibly newer version of) `setup.sh`.
|
||||
* On the management host:
|
||||
* Automated operations are setup and run by `Cron.sh`, and logged to `Cron.log`. When running scripts manually, `Cron.sh`
|
||||
can serve as a template for the intended order of operations.
|
||||
* Critical operations are protected by a mandatory, exclusive file lock on `mac_pw_pool/Cron.sh`. Should
|
||||
there be a deadlock, management of the pool (by `Cron.sh`) will stop. However the effects of this will not be observed
|
||||
until workers begin hitting their lifetime and/or task limits.
|
||||
* Without intervention, the `nightly_maintenance.sh` script will update the containers/automation repo clone on the
|
||||
management VM. This happens if the repo becomes out of sync by more than 7 days (or as defined in the script).
|
||||
When the repo is updated, the `pw_pool_web` container will be restarted. The container will also be restarted if its
|
||||
found to not be running.
|
|
@ -0,0 +1,463 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
# Script intended to be executed by humans (and eventually automation)
|
||||
# to provision any/all accessible Cirrus-CI Persistent Worker instances
|
||||
# as they become available. This is intended to operate independently
|
||||
# from `LaunchInstances.sh` so as to "hide" the nearly 2-hours of cumulative
|
||||
# startup and termination wait times. This script depends on:
|
||||
#
|
||||
# * All requirements listed in the top `LaunchInstances.sh` comment.
|
||||
# * The $DHSTATE file created/updated by `LaunchInstances.sh`.
|
||||
# * The $POOLTOKEN env. var. is defined
|
||||
# * The local ssh-agent is able to supply the appropriate private key.
|
||||
|
||||
# shellcheck source-path=SCRIPTDIR
|
||||
source $(dirname ${BASH_SOURCE[0]})/pw_lib.sh
|
||||
|
||||
# Update temporary-dir status file for instance $name
|
||||
# status type $1 and value $2. Where status type is
|
||||
# 'setup', 'listener', 'tasks', 'taskf' or 'comment'.
|
||||
# Record status value $2 for status type $1 in the per-instance file
# $TEMPDIR/$name.$1.  Status type must be one of 'setup', 'listener',
# 'tasks' (started), 'taskf' (finished) or 'comment'.
# Requires $name (instance name) and $TEMPDIR (from pw_lib.sh) to be set.
set_pw_status() {
    [[ -n "$name" ]] || \
        die "Expecting \$name to be set"
    case $1 in
        setup) ;;
        listener) ;;
        tasks) ;; # started
        taskf) ;; # finished
        # NOTE(review): 'ftasks' is absent from the documented/advertised set
        # above and below; presumed legacy alias - verify callers before removal.
        ftasks) ;;
        comment) ;;
        *) die "Status type must be 'setup', 'listener', 'tasks', 'taskf' or 'comment'"
    esac
    # Only the 'comment' type may carry an empty value.
    if [[ "$1" != "comment" ]] && [[ -z "$2" ]]; then
        die "Expecting a non-empty status value (argument 2) for status type '$1'."
    fi
    echo -n "$2" > $TEMPDIR/${name}.$1
}
|
||||
|
||||
# Wrapper around msg() and warn() which also set_pw_status() comment.
|
||||
pwst_msg() { set_pw_status comment "$1"; msg "$1"; }
|
||||
pwst_warn() { set_pw_status comment "$1"; warn "$1"; }
|
||||
|
||||
# Attempt to signal $SPOOL_SCRIPT to stop picking up new CI tasks but
|
||||
# support PWPoolReady being reset to 'true' in the future to signal
|
||||
# a new $SETUP_SCRIPT run. Cancel future $SHDWN_SCRIPT action.
|
||||
# Requires both $pub_dns and $name are set
|
||||
# Signal $SPOOL_SCRIPT to stop picking up new CI tasks while still allowing
# a future PWPoolReady=true to trigger a fresh $SETUP_SCRIPT run; also cancel
# any pending $SHDWN_SCRIPT action.  Requires $pub_dns and $name to be set.
stop_listener(){
    local -a remote_paths
    dbg "Attempting to stop pool listener and reset setup state"
    # Local quoting keeps the glob literal; it expands on the remote host.
    remote_paths=(
        "/private/tmp/${name}_cfg_*"
        "./.setup.done"
        "./.setup.started"
        "/var/tmp/shutdown.sh"
    )
    $SSH ec2-user@$pub_dns rm -f "${remote_paths[@]}"
}
|
||||
|
||||
# Forcibly shutdown an instance immediately, printing warning and status
|
||||
# comment from first argument. Requires $name, $instance_id, and $pub_dns
|
||||
# to be set.
|
||||
# Forcibly shutdown an instance immediately, printing warning and status
# comment from first argument. Requires $name, $instance_id, and $pub_dns
# to be set.  Every step after validation is best-effort ('|| true'):
# the instance may already be broken or mid-termination.
force_term(){
    local varname
    local termoutput
    # Capture file for AWS terminate-instances output (kept in $TEMPDIR).
    termoutput="$TEMPDIR/${name}_term.output"
    local term_msg
    term_msg="${1:-no inst_panic() message provided} Terminating immediately! $(ctx)"

    # Fail fast if any required context variable is unset/empty.
    for varname in name instance_id pub_dns; do
        [[ -n "${!varname}" ]] || \
            die "Expecting \$$varname to be set/non-empty."
    done

    # Broadcast the reason to any logged-in users via 'wall'.  Uses plain
    # 'ssh' (not $SSH) because $SSH nulls stdin via its built-in -n, which
    # would swallow the piped message.
    # $SSH has built-in -n; ignore failure, inst may be in broken state already
    echo "$term_msg" | ssh $SSH_ARGS ec2-user@$pub_dns sudo wall || true
    # Set status and print warning message
    pwst_warn "$term_msg"

    # Instance is going to be terminated, immediately stop any attempts to
    # restart listening for jobs. Ignore failure if unreachable for any reason -
    # we/something else could have already started termination previously
    stop_listener || true

    # Termination can take a few minutes, block further use of instance immediately.
    $AWS ec2 create-tags --resources $instance_id --tags "Key=PWPoolReady,Value=false" || true

    # Prefer possibly recovering a broken pool over debug-ability.
    if ! $AWS ec2 terminate-instances --instance-ids $instance_id &> "$termoutput"; then
        # Possible if the instance recently/previously started termination process.
        warn "Could not terminate instance $instance_id $(ctx 0):
$(<$termoutput)"
    fi
}
|
||||
|
||||
# Set non-zero to enable debugging / prevent removal of temp. dir.
# Fix: was "${S_DEBUG:0}" - substring expansion from offset 0, which never
# supplies a default.  ":-0" is the default-value form (matches how
# X_DEBUG is defaulted in pw_lib.sh).
S_DEBUG="${S_DEBUG:-0}"
if ((S_DEBUG)); then
    X_DEBUG=1
    warn "Debugging enabled - temp. dir will not be cleaned up '$TEMPDIR' $(ctx 0)."
    # A bare 'trap EXIT' resets the EXIT trap to default, canceling the
    # temp-dir cleanup installed by pw_lib.sh so $TEMPDIR survives.
    trap EXIT
fi
|
||||
|
||||
[[ -n "$POOLTOKEN" ]] || \
|
||||
die "Expecting \$POOLTOKEN to be defined/non-empty $(ctx 0)."
|
||||
|
||||
[[ -r "$DHSTATE" ]] || \
|
||||
die "Can't read from state file: $DHSTATE"
|
||||
|
||||
if [[ -z "$SSH_AUTH_SOCK" ]] || [[ -z "$SSH_AGENT_PID" ]]; then
|
||||
die "Cannot access an ssh-agent. Please run 'ssh-agent -s > /run/user/$UID/ssh-agent.env' and 'ssh-add /path/to/required/key'."
|
||||
fi
|
||||
|
||||
declare -a _dhstate
|
||||
readarray -t _dhstate <<<$(grep -E -v '^($|#+| +)' "$DHSTATE" | sort)
|
||||
n_inst=0
|
||||
n_inst_total="${#_dhstate[@]}"
|
||||
if [[ -z "${_dhstate[*]}" ]] || ! ((n_inst_total)); then
|
||||
msg "No operable hosts found in $DHSTATE:
|
||||
$(<$DHSTATE)"
|
||||
# Assume this script is running in a loop, and unf. there are
|
||||
# simply no dedicated-hosts in 'available' state.
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# N/B: Assumes $DHSTATE represents reality
|
||||
msg "Operating on $n_inst_total instances from $(head -1 $DHSTATE)"
|
||||
echo -e "# $(basename ${BASH_SOURCE[0]}) run $(date -u -Iseconds)\n#" > "$TEMPDIR/$(basename $PWSTATE)"
|
||||
|
||||
# Previous instance state needed for some optional checks
declare -a _pwstate
n_pw_total=0
if [[ -r "$PWSTATE" ]]; then
    # Load non-blank, non-comment lines, sorted for stable iteration order.
    readarray -t _pwstate <<<$(grep -E -v '^($|#+| +)' "$PWSTATE" | sort)
    n_pw_total="${#_pwstate[@]}"
    # Handle single empty-item array: readarray over empty input still
    # yields one empty element which must not count as a worker.
    if [[ -z "${_pwstate[*]}" ]] || ! ((n_pw_total)); then
        _pwstate=()
        n_pw_total=0  # Fix: was '_n_pw_total=0', leaving the real counter stale
    fi
fi
|
||||
|
||||
# Assuming the `--force` option was used to initialize a new pool of
|
||||
# workers, then instances need to be configured with a staggered
|
||||
# self-termination shutdown delay. This prevents all the instances
|
||||
# from being terminated at the same time, potentially impacting
|
||||
# CI usage.
|
||||
runtime_hours_reduction=0
|
||||
# shellcheck disable=SC2199
|
||||
if [[ "$@" =~ --force ]]; then
|
||||
warn "Forcing instance creation w/ staggered existence limits."
|
||||
runtime_hours_reduction=$CREATE_STAGGER_HOURS
|
||||
fi
|
||||
|
||||
for _dhentry in "${_dhstate[@]}"; do
|
||||
read -r name instance_id launch_time junk<<<"$_dhentry"
|
||||
_I=" "
|
||||
msg " "
|
||||
n_inst=$(($n_inst+1))
|
||||
msg "Working on Instance #$n_inst/$n_inst_total '$name' with ID '$instance_id'."
|
||||
|
||||
# Clear buffers used for updating status files
|
||||
n_started_tasks=0
|
||||
n_finished_tasks=0
|
||||
|
||||
instoutput="$TEMPDIR/${name}_inst.output"
|
||||
ncoutput="$TEMPDIR/${name}_nc.output"
|
||||
logoutput="$TEMPDIR/${name}_log.output"
|
||||
|
||||
# Most operations below 'continue' looping on error. Ensure status files match.
|
||||
set_pw_status tasks 0
|
||||
set_pw_status taskf 0
|
||||
set_pw_status setup error
|
||||
set_pw_status listener error
|
||||
set_pw_status comment ""
|
||||
|
||||
if ! $AWS ec2 describe-instances --instance-ids $instance_id &> "$instoutput"; then
|
||||
pwst_warn "Could not query instance $instance_id $(ctx 0)."
|
||||
continue
|
||||
fi
|
||||
|
||||
dbg "Verifying required $DH_REQ_TAG=$DH_REQ_VAL"
|
||||
tagq=".Reservations?[0]?.Instances?[0]?.Tags | map(select(.Key == \"$DH_REQ_TAG\")) | .[].Value"
|
||||
if ! inst_tag=$(json_query "$tagq" "$instoutput"); then
|
||||
pwst_warn "Could not look up instance $DH_REQ_TAG tag"
|
||||
continue
|
||||
fi
|
||||
|
||||
if [[ "$inst_tag" != "$DH_REQ_VAL" ]]; then
|
||||
pwst_warn "Required inst. '$DH_REQ_TAG' tag != '$DH_REQ_VAL'"
|
||||
continue
|
||||
fi
|
||||
|
||||
dbg "Looking up instance name"
|
||||
nameq='.Reservations?[0]?.Instances?[0]?.Tags | map(select(.Key == "Name")) | .[].Value'
|
||||
if ! inst_name=$(json_query "$nameq" "$instoutput"); then
|
||||
pwst_warn "Could not look up instance Name tag"
|
||||
continue
|
||||
fi
|
||||
|
||||
if [[ "$inst_name" != "$name" ]]; then
|
||||
pwst_warn "Inst. name '$inst_name' != DH name '$name'"
|
||||
continue
|
||||
fi
|
||||
|
||||
dbg "Looking up public DNS"
|
||||
if ! pub_dns=$(json_query '.Reservations?[0]?.Instances?[0]?.PublicDnsName?' "$instoutput"); then
|
||||
pwst_warn "Could not lookup of public DNS for instance $instance_id $(ctx 0)"
|
||||
continue
|
||||
fi
|
||||
|
||||
# It's really important that instances have a defined and risk-relative
|
||||
# short lifespan. Multiple mechanisms are in place to assist, but none
|
||||
# are perfect. Ensure instances running for an excessive time are forcefully
|
||||
# terminated as soon as possible from this script.
|
||||
launch_epoch=$(date -u -d "$launch_time" +%s)
|
||||
now_epoch=$(date -u +%s)
|
||||
age_sec=$((now_epoch-launch_epoch))
|
||||
hard_max_sec=$((PW_MAX_HOURS*60*60*2)) # double PW_MAX_HOURS
|
||||
dbg "launch_epoch=$launch_epoch"
|
||||
dbg " now_epoch=$now_epoch"
|
||||
dbg " age_sec=$age_sec"
|
||||
dbg "hard_max_sec=$hard_max_sec"
|
||||
# Soft time limit is enforced via 'sleep $PW_MAX_HOURS && shutdown' started during instance setup (below).
|
||||
msg "Instance alive for $((age_sec/60/60)) hours (soft max: $PW_MAX_HOURS hard: $((hard_max_sec/60/60)))"
|
||||
if [[ $age_sec -gt $hard_max_sec ]]; then
|
||||
force_term "Excess instance lifetime; $(((age_sec - hard_max_sec)/60))m past hard max limit."
|
||||
continue
|
||||
elif [[ $age_sec -gt $((PW_MAX_HOURS*60*60)) ]]; then
|
||||
pwst_warn "Instance alive longer than soft max. Investigation recommended."
|
||||
fi
|
||||
|
||||
dbg "Attempting to contact '$name' at $pub_dns"
|
||||
if ! nc -z -w 13 $pub_dns 22 &> "$ncoutput"; then
|
||||
pwst_warn "Could not connect to port 22 on '$pub_dns' $(ctx 0)."
|
||||
continue
|
||||
fi
|
||||
|
||||
if ! $SSH ec2-user@$pub_dns true; then
|
||||
pwst_warn "Could not ssh to 'ec2-user@$pub_dns' $(ctx 0)."
|
||||
continue
|
||||
fi
|
||||
|
||||
dbg "Check if instance should be managed"
|
||||
if ! PWPoolReady=$(json_query '.Reservations?[0]?.Instances?[0]?.Tags? | map(select(.Key == "PWPoolReady")) | .[].Value' "$instoutput"); then
|
||||
pwst_warn "Instance does not have a PWPoolReady tag"
|
||||
PWPoolReady="absent"
|
||||
fi
|
||||
|
||||
# Mechanism for a developer to manually debug operations w/o fear of new tasks or instance shutdown.
|
||||
if [[ "$PWPoolReady" != "true" ]]; then
|
||||
pwst_msg "Instance disabled via tag 'PWPoolReady' == '$PWPoolReady'."
|
||||
set_pw_status setup disabled
|
||||
set_pw_status listener disabled
|
||||
(
|
||||
set +e # All commands below are best-effort only!
|
||||
dbg "Attempting to stop any pending shutdowns"
|
||||
$SSH ec2-user@$pub_dns sudo pkill shutdown
|
||||
|
||||
stop_listener
|
||||
|
||||
dbg "Attempting to stop shutdown sleep "
|
||||
$SSH ec2-user@$pub_dns pkill -u ec2-user -f "'bash -c sleep'"
|
||||
|
||||
if $SSH ec2-user@$pub_dns pgrep -u ec2-user -f service_pool.sh; then
|
||||
sleep 10s # Allow service_pool to exit gracefully
|
||||
fi
|
||||
|
||||
# N/B: This will not stop any currently running CI tasks.
|
||||
dbg "Guarantee pool listener is dead"
|
||||
$SSH ec2-user@$pub_dns sudo pkill -u ${name}-worker -f "'cirrus worker run'"
|
||||
)
|
||||
continue
|
||||
fi
|
||||
|
||||
if ! $SSH ec2-user@$pub_dns test -r .setup.done; then
|
||||
|
||||
if ! $SSH ec2-user@$pub_dns test -r .setup.started; then
|
||||
if $SSH ec2-user@$pub_dns test -r setup.log; then
|
||||
# Can be caused by operator flipping PWPoolReady value on instance for debugging
|
||||
pwst_warn "Setup log found, prior executions may have failed $(ctx 0)."
|
||||
fi
|
||||
|
||||
pwst_msg "Setting up new instance"
|
||||
|
||||
# Ensure bash used for consistency && some ssh commands below
|
||||
# don't play nicely with zsh.
|
||||
$SSH ec2-user@$pub_dns sudo chsh -s /bin/bash ec2-user &> /dev/null
|
||||
|
||||
if ! $SCP $SETUP_SCRIPT $SPOOL_SCRIPT $SHDWN_SCRIPT ec2-user@$pub_dns:/var/tmp/; then
|
||||
pwst_warn "Could not scp scripts to instance $(ctx 0)."
|
||||
continue # try again next loop
|
||||
fi
|
||||
|
||||
if ! $SCP $CIENV_SCRIPT ec2-user@$pub_dns:./; then
|
||||
pwst_warn "Could not scp CI Env. script to instance $(ctx 0)."
|
||||
continue # try again next loop
|
||||
fi
|
||||
|
||||
if ! $SSH ec2-user@$pub_dns chmod +x "/var/tmp/*.sh" "./ci_env.sh"; then
|
||||
pwst_warn "Could not chmod scripts $(ctx 0)."
|
||||
continue # try again next loop
|
||||
fi
|
||||
|
||||
# Keep runtime_hours_reduction w/in sensible, positive bounds.
|
||||
if [[ $runtime_hours_reduction -ge $((PW_MAX_HOURS - CREATE_STAGGER_HOURS)) ]]; then
|
||||
runtime_hours_reduction=$CREATE_STAGGER_HOURS
|
||||
fi
|
||||
|
||||
shutdown_seconds=$((60*60*PW_MAX_HOURS - 60*60*runtime_hours_reduction))
|
||||
[[ $shutdown_seconds -gt $((60*60*CREATE_STAGGER_HOURS)) ]] || \
|
||||
die "Detected unacceptably short \$shutdown_seconds ($shutdown_seconds) value."
|
||||
pwst_msg "Starting automatic instance recycling in $((shutdown_seconds/60/60)) hours"
|
||||
# Darwin is really weird WRT active terminals and the shutdown
|
||||
# command. Instead of installing a future shutdown, stick an
|
||||
# immediate shutdown at the end of a long sleep. This is the
|
||||
# simplest workaround I could find :S
|
||||
# Darwin sleep only accepts seconds.
|
||||
if ! $SSH ec2-user@$pub_dns bash -c \
|
||||
"'sleep $shutdown_seconds && /var/tmp/shutdown.sh' </dev/null >>setup.log 2>&1 &"; then
|
||||
pwst_warn "Could not start automatic instance recycling."
|
||||
continue # try again next loop
|
||||
fi
|
||||
|
||||
pwst_msg "Executing setup script."
|
||||
# Run setup script in background b/c it takes ~10m to complete.
|
||||
# N/B: This drops .setup.started and eventually (hopefully) .setup.done
|
||||
if ! $SSH ec2-user@$pub_dns \
|
||||
env POOLTOKEN=$POOLTOKEN \
|
||||
bash -c "'/var/tmp/setup.sh $DH_REQ_TAG:\ $DH_REQ_VAL' </dev/null >>setup.log 2>&1 &"; then
|
||||
# This is critical, no easy way to determine what broke.
|
||||
force_term "Failed to start background setup script"
|
||||
continue
|
||||
fi
|
||||
|
||||
msg "Setup script started."
|
||||
set_pw_status setup started
|
||||
|
||||
# No sense in incrementing if there was a failure running setup
|
||||
# shellcheck disable=SC2199
|
||||
if [[ "$@" =~ --force ]]; then
|
||||
runtime_hours_reduction=$((runtime_hours_reduction + CREATE_STAGGER_HOURS))
|
||||
fi
|
||||
|
||||
# Let setup run in the background
|
||||
continue
|
||||
fi
|
||||
|
||||
# Setup started in previous loop. Set to epoch on error.
|
||||
since_timestamp=$($SSH ec2-user@$pub_dns tail -1 .setup.started || echo "@0")
|
||||
since_epoch=$(date -u -d "$since_timestamp" +%s)
|
||||
running_seconds=$((now_epoch-since_epoch))
|
||||
# Be helpful to human monitors, show the last few lines from the log to help
|
||||
# track progress and/or any errors/warnings.
|
||||
pwst_msg "Setup incomplete; Running for $((running_seconds/60)) minutes (~10 typical)"
|
||||
msg "setup.log tail: $($SSH ec2-user@$pub_dns tail -n 1 setup.log)"
|
||||
if [[ $running_seconds -gt $SETUP_MAX_SECONDS ]]; then
|
||||
force_term "Setup running for ${running_seconds}s, max ${SETUP_MAX_SECONDS}s."
|
||||
fi
|
||||
continue
|
||||
fi
|
||||
|
||||
dbg "Instance setup has completed"
|
||||
set_pw_status setup complete
|
||||
|
||||
# Spawned by setup.sh
|
||||
dbg "Checking service_pool.sh script"
|
||||
if ! $SSH ec2-user@$pub_dns pgrep -u ec2-user -q -f service_pool.sh; then
|
||||
# This should not happen at this stage; Nefarious or uncontrolled activity?
|
||||
force_term "Pool servicing script (service_pool.sh) is not running."
|
||||
continue
|
||||
fi
|
||||
|
||||
dbg "Checking cirrus listener"
|
||||
state_fault=0
|
||||
if ! $SSH ec2-user@$pub_dns pgrep -u "${name}-worker" -q -f "'cirrus worker run'"; then
|
||||
# Don't try to examine prior state if there was none.
|
||||
if ((n_pw_total)); then
|
||||
for _pwentry in "${_pwstate[@]}"; do
|
||||
read -r _name _setup_state _listener_state _tasks _taskf _junk <<<"$_pwentry"
|
||||
dbg "Examining pw_state.txt entry '$_name' with listener state '$_listener_state'"
|
||||
if [[ "$_name" == "$name" ]] && [[ "$_listener_state" != "alive" ]]; then
|
||||
# service_pool.sh did not restart listener since last loop
|
||||
# and node is not in maintenance mode (PWPoolReady == 'true')
|
||||
force_term "Pool listener '$_listener_state' state fault."
|
||||
state_fault=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# The instance is in the process of shutting-down/terminating, move on to next instance.
|
||||
if ((state_fault)); then
|
||||
continue
|
||||
fi
|
||||
|
||||
# Previous state didn't exist, or listener status was 'alive'.
|
||||
# Process may have simply crashed, allow service_pool.sh time to restart it.
|
||||
pwst_warn "Cirrus worker listener process NOT running, will recheck again $(ctx 0)."
|
||||
# service_pool.sh should catch this and restart the listener. If not, the next time
|
||||
# through this loop will force_term() the instance.
|
||||
set_pw_status listener dead # service_pool.sh should restart listener
|
||||
continue
|
||||
else
|
||||
set_pw_status listener alive
|
||||
fi
|
||||
|
||||
dbg "Checking worker log"
|
||||
logpath="/private/tmp/${name}-worker.log" # set in setup.sh
|
||||
if ! $SSH ec2-user@$pub_dns cat "'$logpath'" &> "$logoutput"; then
|
||||
# The "${name}-worker" user has write access to this log
|
||||
force_term "Missing worker log $logpath."
|
||||
continue
|
||||
fi
|
||||
|
||||
dbg "Checking worker registration"
|
||||
# First lines of log should always match this
|
||||
if ! head -10 "$logoutput" | grep -q 'worker successfully registered'; then
|
||||
# This could signal log manipulation by worker user, or it could be harmless.
|
||||
pwst_warn "Missing registration log entry"
|
||||
fi
|
||||
|
||||
# The CI user has write-access to this log file on the instance,
|
||||
# make this known to humans in case they care.
|
||||
n_started_tasks=$(grep -Ei 'started task [0-9]+' "$logoutput" | wc -l) || true
|
||||
n_finished_tasks=$(grep -Ei 'task [0-9]+ completed' "$logoutput" | wc -l) || true
|
||||
set_pw_status tasks $n_started_tasks
|
||||
set_pw_status taskf $n_finished_tasks
|
||||
|
||||
msg "Apparent tasks started/finished/running: $n_started_tasks $n_finished_tasks $((n_started_tasks-n_finished_tasks)) (max $PW_MAX_TASKS)"
|
||||
|
||||
dbg "Checking apparent task limit"
|
||||
# N/B: This is only enforced based on the _previous_ run of this script worker-count.
|
||||
# Doing this on the _current_ alive worker count would add a lot of complexity.
|
||||
if [[ "$n_finished_tasks" -gt $PW_MAX_TASKS ]] && [[ $n_pw_total -gt $PW_MIN_ALIVE ]]; then
|
||||
# N/B: Termination based on _finished_ tasks, so if a task happens to be currently running
|
||||
# it will very likely have _just_ started in the last few seconds. Cirrus will retry
|
||||
# automatically on another worker.
|
||||
force_term "Instance exceeded $PW_MAX_TASKS apparent tasks."
|
||||
elif [[ $n_pw_total -le $PW_MIN_ALIVE ]]; then
|
||||
pwst_warn "Not enforcing max-tasks limit, only $n_pw_total workers online last run."
|
||||
fi
|
||||
done
|
||||
|
||||
_I=""
|
||||
msg " "
|
||||
msg "Processing all persistent worker states."
|
||||
for _dhentry in "${_dhstate[@]}"; do
|
||||
read -r name otherstuff<<<"$_dhentry"
|
||||
_f1=$name
|
||||
_f2=$(<$TEMPDIR/${name}.setup)
|
||||
_f3=$(<$TEMPDIR/${name}.listener)
|
||||
_f4=$(<$TEMPDIR/${name}.tasks)
|
||||
_f5=$(<$TEMPDIR/${name}.taskf)
|
||||
_f6=$(<$TEMPDIR/${name}.comment)
|
||||
[[ -z "$_f6" ]] || _f6=" # $_f6"
|
||||
|
||||
printf '%s %s %s %s %s%s\n' \
|
||||
"$_f1" "$_f2" "$_f3" "$_f4" "$_f5" "$_f6" >> "$TEMPDIR/$(basename $PWSTATE)"
|
||||
done
|
||||
|
||||
dbg "Creating/updating state file"
|
||||
if [[ -r "$PWSTATE" ]]; then
|
||||
cp "$PWSTATE" "${PWSTATE}~"
|
||||
fi
|
||||
mv "$TEMPDIR/$(basename $PWSTATE)" "$PWSTATE"
|
|
@ -0,0 +1,32 @@
|
|||
|
||||
# Intended to be run like: `gnuplot -p -c Utilization.gnuplot`
|
||||
# Requires a file named `utilization.csv` produced by commands
|
||||
# in `Cron.sh`.
|
||||
#
|
||||
# Format Ref: http://gnuplot.info/docs_5.5/Overview.html
|
||||
|
||||
set terminal png enhanced rounded size 1400,800 nocrop
|
||||
set output 'html/utilization.png'
|
||||
|
||||
set title "Persistent Workers & Utilization"
|
||||
|
||||
set xdata time
|
||||
set timefmt "%Y-%m-%dT%H:%M:%S+00:00"
|
||||
set xtics nomirror rotate timedate
|
||||
set xlabel "time/date"
|
||||
set xrange [(system("date -u -Iseconds -d '26 hours ago'")):(system("date -u -Iseconds"))]
|
||||
|
||||
set ylabel "Workers Online"
|
||||
set ytics border nomirror numeric
|
||||
# Not practical to lookup $DH_PFX from pw_lib.sh
|
||||
set yrange [0:(system("grep -E '^[a-zA-Z0-9]+-[0-9]' dh_status.txt | wc -l") * 1.5)]
|
||||
|
||||
set y2label "Worker Utilization"
|
||||
set y2tics border nomirror numeric
|
||||
set y2range [0:100]
|
||||
|
||||
set datafile separator comma
|
||||
set grid
|
||||
|
||||
plot 'utilization.csv' using 1:2 axis x1y1 title "Workers" pt 7 ps 2, \
|
||||
'' using 1:((($3-$4)/$2)*100) axis x1y2 title "Utilization" with lines lw 2
|
|
@ -0,0 +1,50 @@
|
|||
#!/bin/bash
|
||||
|
||||
# This script drops the caller into a bash shell inside an environment
|
||||
# substantially similar to a Cirrus-CI task running on this host.
|
||||
# The envars below may require adjustment to better fit them to
|
||||
# current/ongoing development in podman's .cirrus.yml
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
# Not running as the pool worker user yet: copy this script into the
# worker's home and re-exec it as that user.
if [[ "$USER" == "ec2-user" ]]; then
    # The instance's Name tag doubles as the worker's base username.
    PWINST=$(curl -sSLf http://instance-data/latest/meta-data/tags/instance/Name)
    PWUSER=$PWINST-worker

    if [[ ! -d "/Users/$PWUSER" ]]; then
        # Fix: message typo "Warnin" -> "Warning".
        echo "Warning: Instance hasn't been setup. Assuming caller will tend to this."
        sudo sysadminctl -addUser $PWUSER
    fi

    sudo install -o $PWUSER "${BASH_SOURCE[0]}" "/Users/$PWUSER/"
    exec sudo su -c "/Users/$PWUSER/$(basename ${BASH_SOURCE[0]})" - $PWUSER
fi
|
||||
|
||||
# Export all CI-critical envars defined below
|
||||
set -a
|
||||
|
||||
CIRRUS_SHELL="/bin/bash"
|
||||
CIRRUS_TASK_ID="0123456789"
|
||||
CIRRUS_WORKING_DIR="$HOME/ci/task-${CIRRUS_TASK_ID}"
|
||||
|
||||
GOPATH="$CIRRUS_WORKING_DIR/.go"
|
||||
GOCACHE="$CIRRUS_WORKING_DIR/.go/cache"
|
||||
GOENV="$CIRRUS_WORKING_DIR/.go/support"
|
||||
|
||||
CONTAINERS_MACHINE_PROVIDER="applehv"
|
||||
|
||||
MACHINE_IMAGE="https://fedorapeople.org/groups/podman/testing/applehv/arm64/fedora-coreos-38.20230925.dev.0-applehv.aarch64.raw.gz"
|
||||
|
||||
GINKGO_TAGS="remote exclude_graphdriver_btrfs btrfs_noversion exclude_graphdriver_devicemapper containers_image_openpgp remote"
|
||||
|
||||
DEBUG_MACHINE="1"
|
||||
|
||||
ORIGINAL_HOME="$HOME"
|
||||
HOME="$HOME/ci"
|
||||
TMPDIR="/private/tmp/ci"
|
||||
mkdir -p "$TMPDIR" "$CIRRUS_WORKING_DIR"
|
||||
|
||||
# Drop caller into the CI-like environment
|
||||
cd "$CIRRUS_WORKING_DIR"
|
||||
bash -il
|
|
@ -0,0 +1,20 @@
|
|||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<title>Cirrus-CI Persistent Workers</title>
|
||||
</head>
|
||||
<body>
|
||||
<center>
|
||||
<a href="https://cirrus-ci.com/pool/1cf8c7f7d7db0b56aecd89759721d2e710778c523a8c91c7c3aaee5b15b48d05">
|
||||
<img src="utilization.png">
|
||||
</a>
|
||||
<p>
|
||||
<h3>
|
||||
<a href="https://docs.google.com/document/d/1PX6UyqDDq8S72Ko9qe_K3zoV2XZNRQjGxPiWEkFmQQ4/edit">
|
||||
Documentation
|
||||
</a>
|
||||
</h3>
|
||||
</center>
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,69 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
cd $(dirname "${BASH_SOURCE[0]}")
|
||||
|
||||
SCRIPTNAME="$(basename ${BASH_SOURCE[0]})"
|
||||
WEB_IMG="docker.io/library/nginx:latest"
|
||||
CRONLOG="Cron.log"
|
||||
CRONSCRIPT="Cron.sh"
|
||||
KEEP_LINES=10000
|
||||
REFRESH_REPO_EVERY=7 # days
|
||||
|
||||
# Do not use, these are needed to control script execution.
|
||||
_CNTNAME=pw_pool_web
|
||||
_FLOCKER="${_FLOCKER:-notlocked}"
|
||||
_RESTARTED_SCRIPT="${_RESTARTED_SCRIPT:-0}"
|
||||
|
||||
if [[ ! -r "$CRONLOG" ]] || [[ ! -r "$CRONSCRIPT" ]] || [[ ! -d "../.git" ]]; then
|
||||
echo "ERROR: $SCRIPTNAME not executing from correct directory" >> /dev/stderr
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# (Re)start the nginx container serving the pool status page.
# Runs podman inside a subshell so FD manipulation and 'set -x'
# do not leak into the caller's environment.
relaunch_web_container() {
    # Assume code change or image update, restart container.
    (
        # Prevent podman and/or sub-processes from inheriting the lock FD.
        # This would deadlock all future runs of this script or Cron.sh
        # Can't use `flock --close ...` here because it "hangs" in this context.
        # Close every FD >= 3; 0-2 (stdin/out/err) are left intact.
        for fd_nr in $(/bin/ls /proc/self/fd/); do
            [[ $fd_nr -ge 3 ]] || \
                continue
            # Bash doesn't allow direct substitution of the FD number
            eval "exec $fd_nr>&-"
        done

        # Trace the podman invocation into the log for debugging.
        set -x

        # --replace removes any stale container of the same name;
        # --pull=newer refreshes the image when the registry has a newer one.
        podman run --replace --name "$_CNTNAME" -d --rm --pull=newer -p 8080:80 \
            -v $HOME/devel/automation/mac_pw_pool/html:/usr/share/nginx/html:ro,Z \
            $WEB_IMG
    )
    echo "$SCRIPTNAME restarted pw_poolweb container"
}
|
||||
|
||||
# Don't perform maintenance while $CRONSCRIPT is running
|
||||
[[ "${_FLOCKER}" != "$CRONSCRIPT" ]] && exec env _FLOCKER="$CRONSCRIPT" flock -e -w 300 "$CRONSCRIPT" "$0" "$@" || :
|
||||
echo "$SCRIPTNAME running at $(date -u -Iseconds)"
|
||||
|
||||
# On the first (non-restarted) pass only: periodically refresh the repo
# clone and re-exec this script so the updated code runs under the same flock.
if ! ((_RESTARTED_SCRIPT)); then
    today=$(date -u +%d)
    # NOTE(review): this condition is true whenever the day-of-month is NOT
    # a multiple of $REFRESH_REPO_EVERY; confirm whether '== 0' was intended.
    if ((today%REFRESH_REPO_EVERY)); then
        git remote update && git reset --hard origin/main
        # maintain the same flock
        # Fix: message typo "updatedd" -> "updated".
        echo "$SCRIPTNAME updated code after $REFRESH_REPO_EVERY days, restarting script..."
        env _RESTARTED_SCRIPT=1 _FLOCKER=$_FLOCKER "$0" "$@"
        exit $? # all done
    fi
fi
|
||||
|
||||
tail -n $KEEP_LINES $CRONLOG > ${CRONLOG}.tmp && mv ${CRONLOG}.tmp $CRONLOG
|
||||
echo "$SCRIPTNAME rotated log"
|
||||
|
||||
# Always restart web-container when code changes, otherwise only if required
|
||||
if ((_RESTARTED_SCRIPT)); then
|
||||
relaunch_web_container
|
||||
else
|
||||
podman container exists "$_CNTNAME" || relaunch_web_container
|
||||
fi
|
|
@ -0,0 +1,126 @@
|
|||
|
||||
# This library is intended to be sourced by other scripts inside this
|
||||
# directory. All other usage contexts may lead to unintended outcomes.
|
||||
# only the IDs differ. Assumes the sourcing script defines a `dbg()`
|
||||
# function.
|
||||
|
||||
SCRIPT_FILENAME=$(basename "$0") # N/B: Caller's arg0, not this library file path.
|
||||
SCRIPT_DIRPATH=$(dirname "$0")
|
||||
LIB_DIRPATH=$(dirname "${BASH_SOURCE[0]}")
|
||||
REPO_DIRPATH=$(realpath "$LIB_DIRPATH/../")
|
||||
TEMPDIR=$(mktemp -d -p '' "${SCRIPT_FILENAME}_XXXXX.tmp")
|
||||
trap "rm -rf '$TEMPDIR'" EXIT
|
||||
|
||||
# Dedicated host name prefix; Actual name will have a "-<X>" (number) appended.
|
||||
# N/B: ${DH_PFX}-<X> _MUST_ match dedicated host names as listed in dh_status.txt
|
||||
# using the regex ^[a-zA-Z0-9]+-[0-9] (see Utilization.gnuplot)
|
||||
DH_PFX="MacM1"
|
||||
|
||||
# Only manage dedicated hosts with the following tag & value
|
||||
DH_REQ_TAG="purpose"
|
||||
DH_REQ_VAL="prod"
|
||||
|
||||
# Path to file recording the most recent state of each dedicated host.
|
||||
# Format is simply one line per dedicated host, with its name, instance id, start
|
||||
# date/time separated by a space. Exceptional conditions are recorded as comments
|
||||
# with the name and details. File is refreshed/overwritten each time script runs
|
||||
# without any fatal/uncaught command-errors. Intended for reference by humans
|
||||
# and/or other tooling.
|
||||
DHSTATE="${PWSTATE:-$LIB_DIRPATH/dh_status.txt}"
|
||||
|
||||
# Similar to $DHSTATE but records the status of each instance. Format is
|
||||
# instance name, setup status, listener status, # started tasks, # finished tasks,
|
||||
# or the word 'error' indicating a fault accessing the remote worker logfile.
|
||||
# Optionally, there may be a final comment field, beginning with a # and text
|
||||
# suggesting where there may be a fault.
|
||||
# Possible status field values are as follows:
|
||||
# setup - started, complete, disabled, error
|
||||
# listener - alive, dead, disabled, error
|
||||
PWSTATE="${PWSTATE:-$LIB_DIRPATH/pw_status.txt}"
|
||||
|
||||
# At maximum possible creation-speed, there's aprox. 2-hours of time between
|
||||
# an instance going down, until another can be up and running again. Since
|
||||
# instances are all on shutdown/terminated on pre-set timers, it would hurt
|
||||
# pool availability if multiple instances all went down at the same time.
|
||||
# Therefore, host and instance creations will be staggered according
|
||||
# to this interval.
|
||||
CREATE_STAGGER_HOURS=2
|
||||
|
||||
# Instance shutdown controls (assumes terminate-on-shutdown behavior)
|
||||
PW_MAX_HOURS=24 # Since successful configuration
|
||||
PW_MAX_TASKS=24 # Logged by listener (N/B: Log can be manipulated by tasks!)
|
||||
PW_MIN_ALIVE=3 # Bypass enforcement of $PW_MAX_TASKS if <= alive/operating workers
|
||||
|
||||
# How long to wait for setup.sh to finish running (drop a .setup.done file)
|
||||
# before forcibly terminating.
|
||||
SETUP_MAX_SECONDS=2400 # Typical time ~10 minutes, use 2x safety-factor.
|
||||
|
||||
# Name of launch template. Current/default version will be used.
|
||||
# https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#LaunchTemplates:
|
||||
TEMPLATE_NAME="${TEMPLATE_NAME:-Cirrus${DH_PFX}PWinstance}"
|
||||
|
||||
# Path to scripts to copy/execute on Darwin instances
|
||||
SETUP_SCRIPT="$LIB_DIRPATH/setup.sh"
|
||||
SPOOL_SCRIPT="$LIB_DIRPATH/service_pool.sh"
|
||||
SHDWN_SCRIPT="$LIB_DIRPATH/shutdown.sh"
|
||||
CIENV_SCRIPT="$LIB_DIRPATH/ci_env.sh"
|
||||
|
||||
# Set to 1 to enable debugging
|
||||
X_DEBUG="${X_DEBUG:-0}"
|
||||
|
||||
# AWS CLI command and general args
|
||||
AWS="aws --no-paginate --output=json --color=off --no-cli-pager --no-cli-auto-prompt"
|
||||
|
||||
# Common ssh/scp arguments
|
||||
SSH_ARGS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o CheckHostIP=no -F /dev/null -o LogLevel=ERROR -o ConnectTimeout=13"
|
||||
# ssh/scp commands to run w/ arguments
|
||||
SSH="${SSH:-ssh -n $SSH_ARGS}" # N/B: default nulls stdin
|
||||
SCP="${SCP:-scp -q $SSH_ARGS}"
|
||||
|
||||
# Indentation to prefix msg/warn/die messages with to assist humans understanding context.
|
||||
_I="${_I:-}"
|
||||
|
||||
# Print details $1 (defaults to 1) calls above the caller in the stack.
|
||||
# usage e.x. $(ctx 0) - print details about current function
|
||||
# $(ctx) - print details about current function's caller
|
||||
# $(ctx 2) - print details about current functions's caller's caller.
|
||||
# Print "(script:function():line)" context for the stack frame $1 levels
# above the caller ($1 defaults to 1, i.e. the caller's caller).
ctx() {
    # Fix: 'script' was previously undeclared, leaking a global variable
    # from this library helper into every sourcing script.
    local above level script
    above=${1:-1}
    # +1 accounts for this ctx() frame itself.
    level=$((1+$above))
    script=$(basename ${BASH_SOURCE[$level]})
    echo "($script:${FUNCNAME[$level]}():${BASH_LINENO[$above]})"
}
|
||||
|
||||
msg() { echo "${_I}${1:-No text message provided}"; }
|
||||
warn() { echo "${1:-No warning message provided}" | awk -e '{print "'"${_I}"'WARNING: "$0}' >> /dev/stderr; }
|
||||
die() { echo "${1:-No error message provided}" | awk -e '{print "'"${_I}"'ERROR: "$0}' >> /dev/stderr; exit 1; }
|
||||
dbg() {
|
||||
if ((X_DEBUG)); then
|
||||
msg "${1:-No debug message provided} $(ctx 1)" | awk -e '{print "'"${_I}"'DEBUG: "$0}' >> /dev/stderr
|
||||
fi
|
||||
}
|
||||
|
||||
# Obtain a JSON string value by running the provided query filter (arg 1) on
|
||||
# JSON file (arg 2). Return non-zero on jq error (1), or if value is empty
|
||||
# or null (2). Otherwise print value and return 0.
|
||||
jq_errf="$TEMPDIR/jq_error.output"
|
||||
json_query() {
|
||||
local value
|
||||
local indent=" "
|
||||
dbg "jq filter $1
|
||||
$indent on $(basename $2) $(ctx)"
|
||||
if ! value=$(jq -r "$1" "$2" 2>"$jq_errf"); then
|
||||
dbg "$indent error: $(<$jq_errf)"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [[ -z "$value" ]] || [[ "$value" == "null" ]]; then
|
||||
dbg "$indent result: Empty or null"
|
||||
return 2
|
||||
fi
|
||||
|
||||
dbg "$indent result: '$value'"
|
||||
echo "$value"
|
||||
return 0
|
||||
}
|
|
@ -0,0 +1,79 @@
|
|||
#!/bin/bash

# Launch Cirrus-CI PW Pool listener & manager process.
# Intended to be called once from setup.sh on M1 Macs.
# Expects configuration filepath to be passed as the first argument.
# Expects the number of hours until shutdown (and self-termination)
# as the second argument.

set -o pipefail

msg() { echo "##### ${1:-No message provided}"; }
die() { echo "ERROR: ${1:-No error message provided}"; exit 1; }

# Fail fast if any required env. var. is unset or empty.
for varname in PWCFG PWUSER PWREADYURL PWREADY; do
    varval="${!varname}"
    [[ -n "$varval" ]] || \
        die "Env. var. \$$varname is unset/empty."
done

[[ "$USER" == "ec2-user" ]] || \
    die "Expecting to execute as 'ec2-user'."

# All operations assume this CWD
cd "$HOME" || die "Could not cd to \$HOME"

# For whatever reason, when this script is run through ssh, the default
# environment isn't loaded automatically.
. /etc/profile

# This can be leftover under certain conditions
# shellcheck disable=SC2154
sudo pkill -u "$PWUSER" -f "cirrus worker run" || true

# Configuring a launchd agent to run the worker process is a major
# PITA and seems to require rebooting the instance.  Work around
# this with a really hacky loop masquerading as a system service.
# envar exported to us
# shellcheck disable=SC2154
while [[ -r "$PWCFG" ]] && [[ "$PWREADY" == "true" ]]; do  # Remove file or change tag to shutdown this "service"
    # The $PWUSER has access to kill its own listener, or it could crash.
    if ! pgrep -u "$PWUSER" -f -q "cirrus worker run"; then
        # FIXME: CI Tasks will execute as $PWUSER and ordinarily would have
        # read access to $PWCFG file containing $POOLTOKEN.  While not
        # disastrous, it's desirable to not leak potentially sensitive
        # values.  Work around this by keeping the file unreadable by
        # $PWUSER except for a brief period while starting up.
        sudo chmod 0644 "$PWCFG"
        msg "$(date -u -Iseconds) Starting PW pool listener as $PWUSER"
        # This is intended for user's setup.log
        # shellcheck disable=SC2024
        sudo su -l "$PWUSER" -c "/opt/homebrew/bin/cirrus worker run --file $PWCFG &" >>setup.log 2>&1 &
        sleep 10  # eek!
        sudo chmod 0600 "$PWCFG"
    fi

    # This can fail on occasion for some reason
    # envar exported to us
    # shellcheck disable=SC2154
    if ! PWREADY=$(curl -sSLf "$PWREADYURL"); then
        PWREADY="recheck"
    fi

    # Avoid re-launch busy-wait
    sleep 10

    # Second-chance
    if [[ "$PWREADY" == "recheck" ]] && ! PWREADY=$(curl -sSLf "$PWREADYURL"); then
        msg "Failed twice to obtain PWPoolReady instance tag. Disabling listener."
        rm -f "$PWCFG"
        break
    fi
done

set +e

msg "Configuration file not readable; PWPoolReady tag '$PWREADY'."
msg "Terminating $PWUSER PW pool listener process"
# N/B: This will _not_ stop the cirrus agent (i.e. a running task)
sudo pkill -u "$PWUSER" -f "cirrus worker run"
|
|
@ -0,0 +1,251 @@
|
|||
#!/bin/bash

# Setup and launch Cirrus-CI PW Pool node. It must be called
# with the env. var. `$POOLTOKEN` set. It is assumed to be
# running on a fresh AWS EC2 mac2.metal instance as `ec2-user`
# The instance must have both "metadata" and "Allow tags in
# metadata" options enabled. The instance must set the
# "terminate" option for "shutdown behavior".
#
# This script should be called with a single argument string,
# of the label YAML to configure. For example "purpose: prod"
#
# N/B: Under special circumstances, this script (possibly with modifications)
# can be executed more than once. All operations which modify state/config.
# must be wrapped in conditional checks.

set -eo pipefail

GVPROXY_RELEASE_URL="https://github.com/containers/gvisor-tap-vsock/releases/latest/download/gvproxy-darwin"
STARTED_FILE="$HOME/.setup.started"
COMPLETION_FILE="$HOME/.setup.done"

# Ref: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
PWNAME=$(curl -sSLf http://instance-data/latest/meta-data/tags/instance/Name)
PWREADYURL="http://instance-data/latest/meta-data/tags/instance/PWPoolReady"
PWREADY=$(curl -sSLf $PWREADYURL)

PWUSER=$PWNAME-worker
# Clear any stale worker-config tempfiles from prior (failed) runs.
rm -f /private/tmp/*_cfg_*
PWCFG=$(mktemp /private/tmp/${PWNAME}_cfg_XXXXXXXX)
PWLOG="/private/tmp/${PWUSER}.log"

msg() { echo "##### ${1:-No message message provided}"; }
die() { echo "ERROR: ${1:-No error message provided}"; exit 1; }

# Die with a helpful hint when a required instance tag came back empty.
die_if_empty() {
    local tagname
    tagname="$1"
    [[ -n "$tagname" ]] || \
        die "Unexpectedly empty instance '$tagname' tag, is metadata tag access enabled?"
}

[[ -n "$POOLTOKEN" ]] || \
    die "Must be called with non-empty \$POOLTOKEN set."

[[ "$#" -ge 1 ]] || \
    die "Must be called with a 'label: value' string argument"

echo "$1" | grep -i -q -E '^[a-z0-9]+:[ ]?[a-z0-9]+' || \
    die "First argument must be a string in the format 'name: value'. Not: '$1'"

msg "Configuring pool worker for '$1' tasks."

[[ ! -r "$COMPLETION_FILE" ]] || \
    die "Appears setup script already ran at '$(cat $COMPLETION_FILE)'"

[[ "$USER" == "ec2-user" ]] || \
    die "Expecting to execute as 'ec2-user'."

die_if_empty PWNAME
die_if_empty PWREADY

[[ "$PWREADY" == "true" ]] || \
    die "Found PWPoolReady tag not set 'true', aborting setup."

# All operations assume this CWD
cd $HOME

# Checked by instance launch script to monitor setup status & progress
msg $(date -u -Iseconds | tee "$STARTED_FILE")

msg "Configuring paths"
grep -q homebrew /etc/paths || \
    echo -e "/opt/homebrew/bin\n/opt/homebrew/opt/coreutils/libexec/gnubin\n$(cat /etc/paths)" \
        | sudo tee /etc/paths > /dev/null

# For whatever reason, when this script is run through ssh, the default
# environment isn't loaded automatically.
. /etc/profile

msg "Installing podman-machine, testing, and CI deps. (~5-10m install time)"
if [[ ! -x /usr/local/bin/gvproxy ]]; then
    declare -a brew_taps
    declare -a brew_formulas

    brew_taps=(
        # Required to use upstream vfkit
        cfergeau/crc

        # Required to use upstream krunkit
        slp/krunkit
    )

    brew_formulas=(
        # Necessary for worker-pool participation + task execution
        cirruslabs/cli/cirrus

        # Necessary for building podman|buildah|skopeo
        go go-md2man coreutils pkg-config pstree gpgme

        # Necessary to compress the podman repo tar
        zstd

        # Necessary for testing podman-machine
        vfkit

        # Necessary for podman-machine libkrun CI testing
        krunkit
    )

    # msg() includes a ##### prefix, ensure this text is simply
    # associated with the prior msg() output.
    echo " Adding taps[] ${brew_taps[*]}"
    echo " before installing formulas[] ${brew_formulas[*]}"

    for brew_tap in "${brew_taps[@]}"; do
        brew tap $brew_tap
    done

    brew install "${brew_formulas[@]}"

    # Normally gvproxy is installed along with "podman" brew. CI Tasks
    # on this instance will be running from source builds, so gvproxy must
    # be installed from upstream release.
    curl -sSLfO "$GVPROXY_RELEASE_URL"
    sudo install -o root -g staff -m 0755 gvproxy-darwin /usr/local/bin/gvproxy
    rm gvproxy-darwin
fi

msg "Setting up hostname"
# Make host easier to identify from CI logs (default is some
# random internal EC2 dns name).
if [[ "$(uname -n)" != "$PWNAME" ]]; then
    sudo hostname $PWNAME
    sudo scutil --set HostName $PWNAME
    sudo scutil --set ComputerName $PWNAME
fi

msg "Adding/Configuring PW User"
if ! id "$PWUSER" &> /dev/null; then
    sudo sysadminctl -addUser $PWUSER
fi

msg "Setting up local storage volume for PW User"
if ! mount | grep -q "$PWUSER"; then
    # User can't remove own pre-existing homedir crap during cleanup
    sudo rm -rf /Users/$PWUSER/*
    sudo rm -rf /Users/$PWUSER/.??*

    # This is really clunky, but seems the best that Apple Inc. can support.
    # Show what is being worked with to assist debugging
    diskutil list virtual
    # NOTE(review): assumes the first synthesized /dev/diskN near an
    # "InternalDisk" entry is the local storage device — confirm on new
    # instance types.
    local_storage_volume=$(diskutil list virtual | \
                           grep -m 1 -B 5 "InternalDisk" | \
                           grep -m 1 -E '^/dev/disk[0-9].+synthesized' | \
                           awk '{print $1}')
    (
        set -x

        # Fail hard if $local_storage_volume is invalid, otherwise show details to assist debugging
        diskutil info "$local_storage_volume"

        # CI $TEMPDIR - critical for podman-machine storage performance
        ci_tempdir="/private/tmp/ci"
        mkdir -p "$ci_tempdir"
        sudo diskutil apfs addVolume "$local_storage_volume" APFS "ci_tempdir" -mountpoint "$ci_tempdir"
        sudo chown $PWUSER:staff "$ci_tempdir"
        sudo chmod 1770 "$ci_tempdir"

        # CI-user's $HOME - not critical but might as well make it fast while we're
        # adding filesystems anyway.
        ci_homedir="/Users/$PWUSER"
        sudo diskutil apfs addVolume "$local_storage_volume" APFS "ci_homedir" -mountpoint "$ci_homedir"
        sudo chown $PWUSER:staff "$ci_homedir"
        sudo chmod 0750 "$ci_homedir"

        df -h
    )

    # Disk indexing is useless on a CI system, and creates un-deletable
    # files wherever $TEMPDIR happens to be pointing. Ignore any
    # individual volume failures that have an unknown state.
    sudo mdutil -a -i off || true

    # User likely has pre-existing system processes trying to use
    # the (now) over-mounted home directory.
    sudo pkill -u $PWUSER || true
fi

msg "Setting up Rosetta"
# Rosetta 2 enables arm64 Mac to use Intel Apps. Only install if not present.
if ! arch -arch x86_64 /usr/bin/uname -m; then
    sudo softwareupdate --install-rosetta --agree-to-license
    echo -n "Confirming rosetta is functional"
    if ! arch -arch x86_64 /usr/bin/uname -m; then
        die "Rosetta installed but non-functional, see setup log for details."
    fi
fi

msg "Restricting appstore/software install to admin-only"
# Abuse the symlink existence as a condition for running `sudo defaults write ...`
# since checking the state of those values is complex.
if [[ ! -L /usr/local/bin/softwareupdate ]]; then
    # Ref: https://developer.apple.com/documentation/devicemanagement/softwareupdate
    sudo defaults write com.apple.SoftwareUpdate restrict-software-update-require-admin-to-install -bool true
    sudo defaults write com.apple.appstore restrict-store-require-admin-to-install -bool true

    # Unf. interacting with the rosetta installer seems to bypass both of the
    # above settings, even when run as a regular non-admin user. However, it's
    # also desirable to limit use of the utility in a CI environment generally.
    # Since /usr/sbin is read-only, but /usr/local is read-write and appears first
    # in $PATH, deploy a really fragile hack as an imperfect workaround.
    sudo ln -sf /usr/bin/false /usr/local/bin/softwareupdate
fi

# FIXME: Semi-secret POOLTOKEN value should not be in this file.
# ref: https://github.com/cirruslabs/cirrus-cli/discussions/662
cat << EOF | sudo tee $PWCFG > /dev/null
---
name: "$PWNAME"
token: "$POOLTOKEN"
labels:
  $1
log:
  file: "${PWLOG}"
security:
  allowed-isolations:
    none: {}
EOF
sudo chown ${USER}:staff $PWCFG

# Monitored by instance launch script
echo "# Log created $(date -u -Iseconds) - do not manually remove or modify!" > $PWLOG
sudo chown ${USER}:staff $PWLOG
sudo chmod g+rw $PWLOG

if ! pgrep -q -f service_pool.sh; then
    # Allow service_pool.sh access to these values
    export PWCFG
    export PWUSER
    export PWREADYURL
    export PWREADY
    msg "Spawning listener supervisor process."
    /var/tmp/service_pool.sh </dev/null >>setup.log 2>&1 &
    disown %-1
else
    msg "Warning: Listener supervisor already running"
fi

# Monitored by instance launch script
date -u -Iseconds >> "$COMPLETION_FILE"
|
|
@ -0,0 +1,38 @@
|
|||
#!/bin/bash

# Script intended to be called by automation only.
# Should never be called from any other context.

# Log on the off-chance it somehow helps somebody debug something one day
(

    echo "Starting ${BASH_SOURCE[0]} at $(date -u -Iseconds)"

    # Worker username is derived from the hostname (see setup.sh).
    PWNAME=$(uname -n)
    PWUSER=$PWNAME-worker

    if id -u "$PWUSER" &> /dev/null; then
        # Try to not reboot while a CI task is running.
        # Cirrus-CI imposes a hard-timeout of 2-hours.
        now=$(date -u +%s)
        timeout_at=$((now+60*60*2))
        echo "Waiting up to 2 hours for any pre-existing cirrus agent (i.e. running task)"
        while pgrep -u $PWUSER -q -f "cirrus-ci-agent"; do
            if [[ $(date -u +%s) -gt $timeout_at ]]; then
                echo "Timeout waiting for cirrus-ci-agent to terminate"
                break
            fi
            echo "Found cirrus-ci-agent still running, waiting..."
            sleep 60
        done
    fi

    echo "Initiating shutdown at $(date -u -Iseconds)"

    # This script is run with a sleep in front of it
    # as a workaround for darwin's shutdown-command
    # terminal weirdness.

    sudo shutdown -h now "Automatic instance recycling"

) < /dev/null >> setup.log 2>&1
|
|
@ -0,0 +1,229 @@
|
|||
/*
|
||||
|
||||
Validate this file before committing with (from repository root):
|
||||
|
||||
podman run -it \
|
||||
-v ./renovate/defaults.json5:/usr/src/app/renovate.json5:z \
|
||||
ghcr.io/renovatebot/renovate:latest \
|
||||
renovate-config-validator
|
||||
|
||||
and/or use the pre-commit hook: https://github.com/renovatebot/pre-commit-hooks
|
||||
*/
|
||||
|
||||
{
|
||||
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
|
||||
"description": "This is a basic preset intended\
|
||||
for reuse to reduce the amount of boiler-plate\
|
||||
configuration that otherwise would need to be\
|
||||
duplicated. It should be referenced from other\
|
||||
repositories renovate config under the 'extends'\
|
||||
section as: github>containers/automation//renovate/defaults.json5\
|
||||
(optionally with a '#X.Y.Z' version-tag suffix).",
|
||||
|
||||
/*************************************************
|
||||
****** Global/general configuration options *****
|
||||
*************************************************/
|
||||
|
||||
// Re-use predefined sets of configuration options to DRY
|
||||
"extends": [
|
||||
// https://docs.renovatebot.com/presets-config/#configbase
|
||||
"config:recommended",
|
||||
|
||||
// https://docs.renovatebot.com/presets-default/#gitsignoff
|
||||
":gitSignOff",
|
||||
|
||||
// Always rebase dep. update PRs from `main` when PR is stale
|
||||
":rebaseStalePrs"
|
||||
],
|
||||
|
||||
// The default setting is ambiguous, explicitly base schedules on UTC
|
||||
"timezone": "UTC",
|
||||
|
||||
// Don't swamp CI, rate-limit opening of PRs w/in schedule limits.
|
||||
"prHourlyLimit": 1,
|
||||
|
||||
// Make renovate PRs stand out from the crowd
|
||||
"labels": ["dependencies"],
|
||||
|
||||
// Default setting is an "empty" schedule. Explicitly set this
|
||||
// such that security-alert PRs may be opened immediately.
|
||||
"vulnerabilityAlerts": {
|
||||
// Distinguish PRs from regular dependency updates
|
||||
"labels": ["dependencies", "security"],
|
||||
|
||||
// Force-enable renovate management of deps. which are otherwise
|
||||
// disabled. Note: Does not apply to any "ignorePaths" list, nor
|
||||
// any deps. disabled via `packageRules` in this block
|
||||
// (last-match wins rule).
|
||||
"enabled": true,
|
||||
|
||||
// Note: As of 2024-06-25 indirect golang dependency handling is
|
||||
// broken in Renovate, and disabled by default. This affects
|
||||
// vulnerabilityAlerts in that if the dep is 'indirect' no PR
|
||||
// will ever open, it must be handled manually. Attempting
|
||||
// to enable indirect deps (for golang) in this section will
|
||||
// not work, it will always be overridden by the global golang
|
||||
// indirect dep. setting.
|
||||
},
|
||||
|
||||
// On a busy repo, automatic-rebasing will swamp the CI system.
|
||||
// Turn it off here, then allow individual repos. to override/enable
|
||||
// it as appropriate.
|
||||
"rebaseWhen": "never",
|
||||
|
||||
/**************************************************
|
||||
***** Manager-specific configuration options *****
|
||||
**************************************************/
|
||||
|
||||
"customManagers": [
|
||||
// Track the latest CI VM images by tag on the containers/automation_images
|
||||
// repo. Propose updates when newer tag available compared to what is
|
||||
// referenced in a repo's .cirrus.yml file.
|
||||
{
|
||||
"customType": "regex",
|
||||
"fileMatch": "^.cirrus.yml$",
|
||||
// Expected version format: c<automation_images IMG_SFX value>
|
||||
// For example `c20230120t152650z-f37f36u2204`
|
||||
"matchStrings": ["c(?<currentValue>20\\d{6}t\\d{6}z-\\w+)"],
|
||||
"depNameTemplate": "containers/automation_images",
|
||||
"datasourceTemplate": "github-tags",
|
||||
"versioningTemplate": "loose",
|
||||
"autoReplaceStringTemplate": "c{{{newVersion}}}"
|
||||
},
|
||||
|
||||
// For skopeo and podman, manage the golangci-lint version as
|
||||
// referenced in their Makefile.
|
||||
{
|
||||
"customType": "regex",
|
||||
"fileMatch": "^Makefile$",
|
||||
// make ignores whitespace around the value, make renovate do the same.
|
||||
"matchStrings": [
|
||||
"GOLANGCI_LINT_VERSION\\s+:=\\s+(?<currentValue>.+)\\s*"
|
||||
],
|
||||
"depNameTemplate": "golangci/golangci-lint",
|
||||
"datasourceTemplate": "github-releases",
|
||||
"versioningTemplate": "semver-coerced",
|
||||
// Podman's installer script will puke if there's a 'v' prefix, as represented
|
||||
// in upstream golangci/golangci-lint releases.
|
||||
"extractVersionTemplate": "v(?<version>.+)"
|
||||
}
|
||||
],
|
||||
|
||||
/*************************************************
|
||||
***** Language-specific configuration options ****
|
||||
**************************************************/
|
||||
|
||||
// ***** ATTENTION WARNING CAUTION DANGER ***** //
|
||||
// Go versions 1.21 and later will AUTO-UPDATE based on _module_
|
||||
// _requirements_. ref: https://go.dev/doc/toolchain Because
|
||||
// many different projects covered by this config, build under
|
||||
// different distros and distro-versions, golang version consistency
|
||||
// is desireable across build outputs. In golang 1.21 and later,
|
||||
// it's possible to pin the version in each project using the
|
||||
// toolchain go.mod directive. This should be done to prevent
|
||||
// unwanted auto-updates.
|
||||
// Ref: Upstream discussion https://github.com/golang/go/issues/65847
|
||||
"constraints": {"go": "1.23"},
|
||||
|
||||
// N/B: LAST MATCHING RULE WINS, match statements are ANDed together.
|
||||
// https://docs.renovatebot.com/configuration-options/#packagerules
|
||||
"packageRules": [
|
||||
/*************************************************
|
||||
****** Rust-specific configuration options *******
|
||||
**************************************************/
|
||||
{
|
||||
"matchCategories": ["rust"],
|
||||
// Update both Cargo.toml and Cargo.lock when possible
|
||||
// i.e. bump the range even if the new version satisfies the existing range.
|
||||
// https://docs.renovatebot.com/configuration-options/#rangestrategy
|
||||
"rangeStrategy": "bump"
|
||||
},
|
||||
|
||||
{
|
||||
"matchCategories": ["rust"],
|
||||
"matchPackageNames": ["serde", "clap"],
|
||||
// Update both Cargo.toml and Cargo.lock when possible
|
||||
"rangeStrategy": "bump",
|
||||
// These packages roll updates far too often, slow them down.
|
||||
// Ref: https://github.com/containers/netavark/issues/772
|
||||
"schedule": ["after 1am and before 11am on the first day of the month"]
|
||||
},
|
||||
|
||||
/*************************************************
|
||||
****** Python-specific configuration options *****
|
||||
**************************************************/
|
||||
{
|
||||
"matchCategories": ["python"],
|
||||
// Preserve (but continue to upgrade) any existing SemVer ranges.
|
||||
"rangeStrategy": "replace"
|
||||
},
|
||||
|
||||
/*************************************************
|
||||
****** Golang-specific configuration options *****
|
||||
**************************************************/
|
||||
{
|
||||
"matchCategories": ["golang"],
|
||||
// disabled by default, safe to enable since "tidy" enforced by CI.
|
||||
"postUpdateOptions": ["gomodTidy"],
|
||||
// In case a version in use is retracted, allow going backwards.
|
||||
// N/B: This is NOT compatible with pseudo versions, see below.
|
||||
"rollbackPrs": false,
|
||||
// Preserve (but continue to upgrade) any existing SemVer ranges.
|
||||
"rangeStrategy": "replace"
|
||||
},
|
||||
|
||||
// Golang pseudo-version packages will spam with every Commit ID change.
|
||||
// Limit update frequency.
|
||||
{
|
||||
"matchCategories": ["golang"],
|
||||
"matchUpdateTypes": ["digest"],
|
||||
"schedule": ["after 1am and before 11am on the first day of the month"]
|
||||
},
|
||||
|
||||
// Package version retraction (https://go.dev/ref/mod#go-mod-file-retract)
|
||||
// is broken in Renovate. And no repo should use these retracted versions.
|
||||
// ref: https://github.com/renovatebot/renovate/issues/13012
|
||||
{
|
||||
"matchCategories": ["golang"],
|
||||
"matchPackageNames": ["github.com/containers/common"],
|
||||
// Both v1.0.0 and v1.0.1 should be ignored.
|
||||
"allowedVersions": "!/v((1.0.0)|(1.0.1))$/"
|
||||
},
|
||||
|
||||
// Skip updating the go.mod toolchain directive, humans will manage this.
|
||||
{
|
||||
"matchCategories": ["golang"],
|
||||
"matchDepTypes": ["toolchain"],
|
||||
"enabled": false
|
||||
},
|
||||
|
||||
/*************************************************
|
||||
************ CI configuration options ************
|
||||
**************************************************/
|
||||
|
||||
// Github-action updates cannot consistently be tested in a PR.
|
||||
// This is caused by an unfixable architecture-flaw: Execution
|
||||
// context always depends on trigger, and we (obviously) can't know
|
||||
// that ahead of time for all workflows. Abandon all hope and
|
||||
// mark github-action dep. update PRs '[skip-ci]'
|
||||
{
|
||||
"matchManagers": ["github-actions"],
|
||||
"matchDepTypes": ["action"],
|
||||
"commitMessagePrefix": "[skip-ci]"
|
||||
},
|
||||
|
||||
// Group together all CI VM image updates into a single PR. This is needed
|
||||
// to handle the case where an IMG_SFX is mentioned in a comment. For
|
||||
// example, flagging an important TODO or FIXME item. Or, where CI VM
|
||||
// images are split across multiple IMG_SFX values that all need to be updated.
|
||||
{
|
||||
"matchManagers": ["custom.regex"],
|
||||
"matchFileNames": [".cirrus.yml"],
|
||||
"groupName": "CI VM Image",
|
||||
// Somebody(s) need to check image update PRs as soon as they open.
|
||||
"reviewers": ["Luap99"],
|
||||
// Don't wait, roll out CI VM Updates immediately
|
||||
"schedule": ["at any time"]
|
||||
},
|
||||
]
|
||||
}
|
Loading…
Reference in New Issue