Compare commits
135 Commits
| Author | SHA1 | Date |
| --- | --- | --- |
| | cc7d9b2a26 | |
| | 0af8676cb8 | |
| | f55fe34cfb | |
| | 987689cc34 | |
| | cb12019fba | |
| | e1231d1520 | |
| | b0959cb192 | |
| | 7f213bf685 | |
| | 79e68ef97c | |
| | aba42ca8ff | |
| | d805c0c822 | |
| | e83dcfcabf | |
| | 7f13540563 | |
| | 50c43af45e | |
| | cd259102d4 | |
| | 051f0951f1 | |
| | e8a30ae1ea | |
| | a4888b2ce9 | |
| | 8faa8b216c | |
| | fd6f70913e | |
| | f3777be65b | |
| | 16f757f699 | |
| | 26ab1b7744 | |
| | 994ba027c2 | |
| | fa70d9e3af | |
| | 3e2662f02b | |
| | 0f5226e050 | |
| | 24800f0f77 | |
| | 5ae1659c96 | |
| | 3c034bcadc | |
| | 7067540a52 | |
| | e3c74c2aa4 | |
| | 8c5bb22af7 | |
| | 3b33514d26 | |
| | 973aa8c2fe | |
| | 4d23dd41f0 | |
| | b9186a2b38 | |
| | 8b1776b799 | |
| | 8218f24c4d | |
| | 8f39f4b1af | |
| | 99d1c2662e | |
| | 32b94cedea | |
| | 5ad53bd723 | |
| | 24a62a63d3 | |
| | ab1f7624a0 | |
| | 35a29e5dfe | |
| | 657247095b | |
| | cc18e81abf | |
| | d2e5f7815e | |
| | 48c9554a6c | |
| | 0a0bc4f395 | |
| | b8969128d0 | |
| | 4739c8921c | |
| | 34ea41cc7f | |
| | ee5fba7664 | |
| | 34e2995cd7 | |
| | 51a2c1fbed | |
| | 718ecdb04e | |
| | 7ae84eb74c | |
| | d81a56f85b | |
| | 27f6f9363f | |
| | 1b35e0e24d | |
| | 2c1ee35362 | |
| | 447f70e9c7 | |
| | 1809c5b6c0 | |
| | c552d5bba1 | |
| | 3568a50f52 | |
| | 436dceb68f | |
| | 13be11668c | |
| | 47a5015b07 | |
| | b0dde0f4fc | |
| | 689cfa189c | |
| | bb3343c0c4 | |
| | b1d7d1d447 | |
| | 256fefe0dd | |
| | 11359412d4 | |
| | 378249996e | |
| | 12b7b27dda | |
| | 720ba14043 | |
| | a69abee410 | |
| | 399120c350 | |
| | 4302d62c26 | |
| | 8204fd5794 | |
| | d0474a3847 | |
| | 14fd648920 | |
| | 420ed9a467 | |
| | dc21cdf863 | |
| | b813ad7981 | |
| | 415e21b68b | |
| | 8b9ae348a0 | |
| | 663cb85121 | |
| | 9c771bf862 | |
| | 13aaf6100f | |
| | 46d69a3969 | |
| | 081b9c3be5 | |
| | e4e0cdbd51 | |
| | ae7f68a9ac | |
| | 836d5a7487 | |
| | 02d3c0a99c | |
| | f750079c85 | |
| | 0eb6675f13 | |
| | 3a39b5cafc | |
| | 8a0e087c4b | |
| | c910e69c12 | |
| | 37e71d45af | |
| | 9a8a1a2413 | |
| | 2e805276bb | |
| | 5d234f1e4a | |
| | badedd4968 | |
| | 2cdb0b15ee | |
| | f27c7ae6d9 | |
| | d7a884b8cf | |
| | 9336e20516 | |
| | 7feb7435c2 | |
| | 478b8d9d30 | |
| | 1bd2fbdfe3 | |
| | d061d8061e | |
| | 13f6c9fb53 | |
| | af1016e668 | |
| | 74f8447d45 | |
| | 3bf3cfd233 | |
| | 428f06ed36 | |
| | b9ce71232f | |
| | 36c2bc68e9 | |
| | df5c5e90ac | |
| | 11026c20a3 | |
| | 1f2ccedbfd | |
| | 2c1a0c6c4c | |
| | fb6ba4a224 | |
| | f12157050c | |
| | 4353f8c5b1 | |
| | 86ddf63ac5 | |
| | 948206e893 | |
| | c0112c254c | |
| | 71ede1b334 | |
.cirrus.yml (12 changes)

```diff
@@ -27,13 +27,11 @@ cirrus-ci/unit-test_task:
 cirrus-ci/renovate_validation_task:
 only_if: *not_docs
 container:
-image: docker.io/renovate/renovate:latest
+image: "ghcr.io/renovatebot/renovate:latest"
-env:
-RCV: /usr/local/bin/renovate-config-validator
 preset_validate_script:
-- $RCV $CIRRUS_WORKING_DIR/renovate/defaults.json5
+- renovate-config-validator $CIRRUS_WORKING_DIR/renovate/defaults.json5
 repo_validate_script:
-- $RCV $CIRRUS_WORKING_DIR/.github/renovate.json5
+- renovate-config-validator $CIRRUS_WORKING_DIR/.github/renovate.json5

 # This is the same setup as used for Buildah CI
 gcp_credentials: ENCRYPTED[fc95bcc9f4506a3b0d05537b53b182e104d4d3979eedbf41cf54205be6397ca0bce0831d0d47580cf578dae5776548a5]

@@ -53,10 +51,10 @@ cirrus-ci/build-push_test_task:
 # only stock, google-managed generic image. This also avoids needing to
 # update custom-image last-used timestamps.
 image_project: centos-cloud
-image_family: centos-stream-8
+image_family: centos-stream-9
 timeout_in: 30
 env:
-CIMG: quay.io/buildah/stable:v1.23.0
+CIMG: quay.io/buildah/stable:latest
 TEST_FQIN: quay.io/buildah/do_not_use
 # Robot account credentials for test-push to
 # $TEST_FQIN registry by build-push/test/testbuilds.sh
```
```diff
@@ -12,7 +12,7 @@

 podman run -it \
 -v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \
-docker.io/renovate/renovate:latest \
+ghcr.io/renovatebot/renovate:latest \
 renovate-config-validator
 3. Commit.

```
```diff
@@ -42,6 +42,4 @@
 /*************************************************
 *** Repository-specific configuration options ***
 *************************************************/
-// Don't leave dep. update. PRs "hanging", assign them to people.
-"assignees": ["cevich"],
 }
```
```diff
@@ -64,16 +64,14 @@ jobs:
 - if: steps.retro.outputs.do_intg == 'true'
 id: create_pr_comment
 name: Create a status comment in the PR
-# Ref: https://github.com/marketplace/actions/comment-action
-uses: jungwinter/comment@v1
+uses: thollander/actions-comment-pull-request@v3
 with:
-issue_number: '${{ steps.retro.outputs.prn }}'
-type: 'create'
-token: '${{ secrets.GITHUB_TOKEN }}'
+pr-number: '${{ steps.retro.outputs.prn }}'
+comment-tag: retro
 # N/B: At the time of this comment, it is not possible to provide
 # direct links to specific job-steps (here) nor links to artifact
 # files. There are open RFE's for this capability to be added.
-body: >-
+message: >-
 [Cirrus-CI Retrospective Github
 Action](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})
 has started. Running against
```
```diff
@@ -119,12 +117,11 @@ jobs:
 - if: steps.retro.outputs.do_intg == 'true'
 id: edit_pr_comment_build
 name: Update status comment on PR
-uses: jungwinter/comment@v1
+uses: thollander/actions-comment-pull-request@v3
 with:
-type: 'edit'
-comment_id: '${{ steps.create_pr_comment.outputs.id }}'
-token: '${{ secrets.GITHUB_TOKEN }}'
-body: >-
+pr-number: '${{ steps.retro.outputs.prn }}'
+comment-tag: retro
+message: >-
 Unit-testing passed (`${{ env.HELPER_LIB_TEST }}`)passed.
 [Cirrus-CI Retrospective Github
 Action](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})
```
```diff
@@ -135,12 +132,11 @@ jobs:
 - if: steps.retro.outputs.do_intg == 'true'
 id: edit_pr_comment_exec
 name: Update status comment on PR again
-uses: jungwinter/comment@v1
+uses: thollander/actions-comment-pull-request@v3
 with:
-type: 'edit'
-comment_id: '${{ steps.edit_pr_comment_build.outputs.id }}'
-token: '${{ secrets.GITHUB_TOKEN }}'
-body: >-
+pr-number: '${{ steps.retro.outputs.prn }}'
+comment-tag: retro
+message: >-
 Smoke testing passed [Cirrus-CI Retrospective Github
 Action](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})
 is triggering Cirrus-CI ${{ env.ACTION_TASK }} task.
```
```diff
@@ -154,12 +150,12 @@ jobs:
 run: |
 set +x
 trap "history -c" EXIT
-curl --request POST \
+curl --fail-with-body --request POST \
 --url https://api.cirrus-ci.com/graphql \
 --header "Authorization: Bearer ${{ secrets.CIRRUS_API_TOKEN }}" \
 --header 'content-type: application/json' \
 --data '{"query":"mutation {\n trigger(input: {taskId: \"${{steps.retro.outputs.tid}}\", clientMutationId: \"${{env.UUID}}\"}) {\n clientMutationId\n task {\n name\n }\n }\n}"}' \
-> ./test_artifacts/action_task_trigger.json
+| tee ./test_artifacts/action_task_trigger.json

 actual=$(jq --raw-output '.data.trigger.clientMutationId' ./test_artifacts/action_task_trigger.json)
 echo "Verifying '$UUID' matches returned tracking value '$actual'"
```
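For readability only (not part of the diff): the escaped `--data` payload in the curl command above carries a single GraphQL mutation against the Cirrus-CI API. A minimal sketch of the same request, with stand-in values for what the workflow actually takes from its step outputs and secrets:

```bash
#!/usr/bin/env bash
# Sketch only: the trigger mutation from the hunk above, unescaped for readability.
# TID, UUID and CIRRUS_API_TOKEN are hypothetical stand-ins for the workflow's inputs.
TID="<cirrus task id>"
UUID="<client mutation id>"
QUERY="mutation { trigger(input: {taskId: \"$TID\", clientMutationId: \"$UUID\"}) { clientMutationId task { name } } }"
curl --fail-with-body --request POST \
    --url https://api.cirrus-ci.com/graphql \
    --header "Authorization: Bearer $CIRRUS_API_TOKEN" \
    --header 'content-type: application/json' \
    --data "$(jq -n --arg q "$QUERY" '{query: $q}')"
```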
```diff
@@ -167,12 +163,11 @@ jobs:

 - if: steps.retro.outputs.do_intg == 'true'
 name: Update comment on workflow success
-uses: jungwinter/comment@v1
+uses: thollander/actions-comment-pull-request@v3
 with:
-type: 'edit'
-comment_id: '${{ steps.edit_pr_comment_exec.outputs.id }}'
-token: '${{ secrets.GITHUB_TOKEN }}'
-body: >-
+pr-number: '${{ steps.retro.outputs.prn }}'
+comment-tag: retro
+message: >-
 Successfully triggered [${{ env.ACTION_TASK }}
 task](https://cirrus-ci.com/task/${{ steps.retro.outputs.tid }}?command=main#L0)
 to indicate
```
```diff
@@ -183,12 +178,11 @@ jobs:

 - if: failure() && steps.retro.outputs.do_intg == 'true'
 name: Update comment on workflow failure
-uses: jungwinter/comment@v1
+uses: thollander/actions-comment-pull-request@v3
 with:
-type: 'edit'
-comment_id: '${{ steps.create_pr_comment.outputs.id }}'
-token: '${{ secrets.GITHUB_TOKEN }}'
-body: >-
+pr-number: '${{ steps.retro.outputs.prn }}'
+comment-tag: retro
+message: >-
 Failure running [Cirrus-CI Retrospective Github
 Action](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})
 failed against this PR's
```
```diff
@@ -197,24 +191,22 @@ jobs:
 # This can happen because of --force push, manual cancel button press, or some other cause.
 - if: cancelled() && steps.retro.outputs.do_intg == 'true'
 name: Update comment on workflow cancellation
-uses: jungwinter/comment@v1
+uses: thollander/actions-comment-pull-request@v3
 with:
-type: 'edit'
-comment_id: '${{ steps.create_pr_comment.outputs.id }}'
-token: '${{ secrets.GITHUB_TOKEN }}'
-body: '[Cancelled](https://github.com/${{github.repository}}/pull/${{steps.retro.outputs.prn}}/commits/${{steps.retro.outputs.sha}})'
+pr-number: '${{ steps.retro.outputs.prn }}'
+comment-tag: retro
+message: '[Cancelled](https://github.com/${{github.repository}}/pull/${{steps.retro.outputs.prn}}/commits/${{steps.retro.outputs.sha}})'

 # Abnormal workflow ($ACTION-TASK task already ran / not paused on a PR).
 - if: steps.retro.outputs.is_pr == 'true' && steps.retro.outputs.do_intg != 'true'
 id: create_error_pr_comment
 name: Create an error status comment in the PR
 # Ref: https://github.com/marketplace/actions/comment-action
-uses: jungwinter/comment@v1
+uses: thollander/actions-comment-pull-request@v3
 with:
-issue_number: '${{ steps.retro.outputs.prn }}'
-type: 'create'
-token: '${{ secrets.GITHUB_TOKEN }}'
-body: >-
+pr-number: '${{ steps.retro.outputs.prn }}'
+comment-tag: error
+message: >-
 ***ERROR***: [cirrus-ci_retrospective
 action](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})
 found `${{ env.ACTION_TASK }}` task with unexpected `${{ steps.retro.outputs.tst }}`
```
```diff
@@ -230,7 +222,7 @@ jobs:
 # Provide an archive of files for debugging/analysis.
 - if: always() && steps.retro.outputs.do_intg == 'true'
 name: Archive event, build, and debugging output
-uses: actions/upload-artifact@v4.3.1
+uses: actions/upload-artifact@v4.6.2
 with:
 name: pr_${{ steps.retro.outputs.prn }}_debug.zip
 path: ./test_artifacts
```
```diff
@@ -28,7 +28,7 @@ jobs:
 fi

 unit-tests: # N/B: Duplicates `ubuntu_unit_tests.yml` - templating not supported
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 steps:
 - uses: actions/checkout@v4
 with:
```
```diff
@@ -145,7 +145,7 @@ jobs:
 run: jq --indent 4 --color-output . ${{ github.event_path }}

 - if: always()
-uses: actions/upload-artifact@v4.3.1
+uses: actions/upload-artifact@v4.6.2
 name: Archive triggering event JSON
 with:
 name: event.json.zip
```
```diff
@@ -4,7 +4,7 @@ on: [push, pull_request]

 jobs:
 automation_unit-tests:
-runs-on: ubuntu-22.04
+runs-on: ubuntu-24.04
 steps:
 - uses: actions/checkout@v4
 with:
```
```diff
@@ -36,7 +36,7 @@ INSTALL_PREFIX="${INSTALL_PREFIX%%/}" # Make debugging path problems easier
 # When installing as root, allow sourcing env. vars. from this file
 INSTALL_ENV_FILEPATH="${INSTALL_ENV_FILEPATH:-/etc/automation_environment}"
 # Used internally here and in unit-testing, do not change without a really, really good reason.
-_ARGS="$@"
+_ARGS="$*"
 _MAGIC_JUJU=${_MAGIC_JUJU:-XXXXX}
 _DEFAULT_MAGIC_JUJU=d41d844b68a14ee7b9e6a6bb88385b4d

```
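Context for the `_ARGS="$@"` to `_ARGS="$*"` change above (a minimal sketch, not taken from the repository): assigning to a scalar joins the arguments either way, but `"$*"` makes the join explicit, which is what shellcheck recommends; `"$@"` is for the cases where the separate words must survive.

```bash
#!/usr/bin/env bash
# Sketch: "$*" joins all arguments into one string; "$@" keeps them as separate words.
set -- one "two words" three
joined="$*"            # -> "one two words three" (single string, joined on IFS)
echo "joined: $joined"
words=("$@")           # an array preserves the original word boundaries
printf 'word: %s\n' "${words[@]}"
```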
```diff
@@ -109,7 +109,8 @@ install_automation() {
 fi
 # Allow re-installing different versions, clean out old version if found
 if [[ -d "$actual_inst_path" ]] && [[ -r "$actual_inst_path/AUTOMATION_VERSION" ]]; then
-local installed_version=$(cat "$actual_inst_path/AUTOMATION_VERSION")
+local installed_version
+installed_version=$(<"$actual_inst_path/AUTOMATION_VERSION")
 msg "Warning: Removing existing installed version '$installed_version'"
 rm -rvf "$actual_inst_path"
 elif [[ -d "$actual_inst_path" ]]; then
```
```diff
@@ -217,7 +218,7 @@ check_args() {
 msg " Use version '$MAGIC_LOCAL_VERSION' to install from local source."
 msg " Use version 'latest' to install from current upstream"
 exit 2
-elif ! echo "$AUTOMATION_VERSION" | egrep -q "$arg_rx"; then
+elif ! echo "$AUTOMATION_VERSION" | grep -E -q "$arg_rx"; then
 msg "Error: '$AUTOMATION_VERSION' does not appear to be a valid version number"
 exit 4
 elif [[ -z "$_ARGS" ]] && [[ "$_MAGIC_JUJU" == "XXXXX" ]]; then
```
```diff
@@ -254,6 +255,8 @@ elif [[ "$_MAGIC_JUJU" == "$_DEFAULT_MAGIC_JUJU" ]]; then
 CHAIN_TO="$INSTALLATION_SOURCE/$arg/.install.sh"
 if [[ -r "$CHAIN_TO" ]]; then
 # Cannot assume common was installed system-wide
+# AUTOMATION_LIB_PATH defined by anchors.sh
+# shellcheck disable=SC2154
 env AUTOMATION_LIB_PATH=$AUTOMATION_LIB_PATH \
 AUTOMATION_VERSION=$AUTOMATION_VERSION \
 INSTALLATION_SOURCE=$INSTALLATION_SOURCE \
```
```diff
@@ -228,7 +228,8 @@ parse_args() {
 dbg "Grabbing Context parameter: '$arg'."
 CONTEXT=$(realpath -e -P $arg || die_help "$E_CONTEXT '$arg'")
 else
-# Properly handle any embedded special characters
+# Hack: Allow array addition to handle any embedded special characters
+# shellcheck disable=SC2207
 BUILD_ARGS+=($(printf "%q" "$arg"))
 fi
 ;;
```
```diff
@@ -290,7 +291,7 @@ stage_notice() {
 # N/B: It would be nice/helpful to resolve any env. vars. in '$@'
 # for display. Unfortunately this is hard to do safely
 # with (e.g.) eval echo "$@" :(
-msg="$@"
+msg="$*"
 (
 echo "############################################################"
 echo "$msg"
```
```diff
@@ -322,7 +323,7 @@ parallel_build() {

 # Keep user-specified BUILD_ARGS near the beginning so errors are easy to spot
 # Provide a copy of the output in case something goes wrong in a complex build
-stage_notice "Executing build command: '$RUNTIME build ${BUILD_ARGS[@]} ${_args[@]}'"
+stage_notice "Executing build command: '$RUNTIME build ${BUILD_ARGS[*]} ${_args[*]}'"
 "$RUNTIME" build "${BUILD_ARGS[@]}" "${_args[@]}"
 }

```
```diff
@@ -378,6 +379,8 @@ run_prepmod_cmd() {
 local kind="$1"
 shift
 dbg "Exporting variables '$_CMD_ENV'"
+# The indirect export is intentional here
+# shellcheck disable=SC2163
 export $_CMD_ENV
 stage_notice "Executing $kind-command: " "$@"
 bash -c "$@"
```
```diff
@@ -402,14 +405,19 @@ get_manifest_tags() {
 fi

 dbg "Image listing json: $result_json"
-if [[ -n "$result_json" ]]; then
+if [[ -n "$result_json" ]]; then # N/B: value could be '[]'
 # Rely on the caller to handle an empty list, ignore items missing a name key.
 if ! fqin_names=$(jq -r '.[]? | .names[]?'<<<"$result_json"); then
 die "Error obtaining image names from '$FQIN' manifest-list search result:
 $result_json"
 fi
-grep "$FQIN"<<<"$fqin_names" | sort
+dbg "Sorting fqin_names"
+# Don't emit an empty newline when the list is empty
+[[ -z "$fqin_names" ]] || \
+sort <<<"$fqin_names"
 fi
+dbg "get_manifest_tags() returning successfully"
 }

 push_images() {
```
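To illustrate the jq filter retained in `get_manifest_tags()` above (a sketch with made-up input, not data from the test suite): `.[]? | .names[]?` walks the listing array and emits every entry of each `names` array, silently skipping items that have no `names` key.

```bash
#!/usr/bin/env bash
# Sketch: the same filter run against a hypothetical image-listing JSON document.
result_json='[{"names":["quay.io/foo/bar:latest","quay.io/foo/bar:1.0"]},{"id":"deadbeef"}]'
jq -r '.[]? | .names[]?' <<<"$result_json"
# Prints:
#   quay.io/foo/bar:latest
#   quay.io/foo/bar:1.0
```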
```diff
@@ -420,10 +428,10 @@ push_images() {
 # It's possible that --modcmd=* removed all images, make sure
 # this is known to the caller.
 if ! fqin_list=$(get_manifest_tags); then
-die "Error retrieving set of manifest-list tags to push for '$FQIN'"
+die "Retrieving set of manifest-list tags to push for '$FQIN'"
 fi
 if [[ -z "$fqin_list" ]]; then
-die "No FQIN(s) to be pushed."
+warn "No FQIN(s) to be pushed."
 fi

 if ((PUSH)); then
```
```diff
@@ -4,22 +4,16 @@

 set -eo pipefail

+# shellcheck disable=SC2154
 if [[ "$CIRRUS_CI" == "true" ]]; then
 # Cirrus-CI is setup (see .cirrus.yml) to run tests on CentOS
 # for simplicity, but it has no native qemu-user-static. For
 # the benefit of CI testing, cheat and use whatever random
 # emulators are included in the container image.

-# Workaround silly stupid hub rate-limiting
-cat >> /etc/containers/registries.conf << EOF
-[[registry]]
-prefix="docker.io/library"
-location="mirror.gcr.io"
-EOF
-
 # N/B: THIS IS NOT SAFE FOR PRODUCTION USE!!!!!
 podman run --rm --privileged \
-docker.io/multiarch/qemu-user-static:latest \
+mirror.gcr.io/multiarch/qemu-user-static:latest \
 --reset -p yes
 elif [[ -x "/usr/bin/qemu-aarch64-static" ]]; then
 # TODO: Better way to determine if kernel already setup?
```
```diff
@@ -4,7 +4,7 @@
 # Any/all other usage is virtually guaranteed to fail and/or cause
 # harm to the system.

-for varname in RUNTIME TEST_FQIN BUILDAH_USERNAME BUILDAH_PASSWORD; do
+for varname in RUNTIME SUBJ_FILEPATH TEST_CONTEXT TEST_SOURCE_DIRPATH TEST_FQIN BUILDAH_USERNAME BUILDAH_PASSWORD; do
 value=${!varname}
 if [[ -z "$value" ]]; then
 echo "ERROR: Required \$$varname variable is unset/empty."
```
```diff
@@ -13,6 +13,8 @@ for varname in RUNTIME TEST_FQIN BUILDAH_USERNAME BUILDAH_PASSWORD; do
 done
 unset value

+# RUNTIME is defined by caller
+# shellcheck disable=SC2154
 $RUNTIME --version
 test_cmd "Confirm $(basename $RUNTIME) is available" \
 0 "buildah version .+" \
```
```diff
@@ -24,6 +26,8 @@ test_cmd "Confirm skopeo is available" \
 skopeo --version

 PREPCMD='echo "SpecialErrorMessage:$REGSERVER" >> /dev/stderr && exit 42'
+# SUBJ_FILEPATH and TEST_CONTEXT are defined by caller
+# shellcheck disable=SC2154
 test_cmd "Confirm error output and exit(42) from --prepcmd" \
 42 "SpecialErrorMessage:localhost" \
 bash -c "$SUBJ_FILEPATH --nopush localhost/foo/bar $TEST_CONTEXT --prepcmd='$PREPCMD' 2>&1"
```
```diff
@@ -53,7 +57,7 @@ test_cmd "Confirm manifest-list can be removed by name" \
 $RUNTIME manifest rm containers-storage:localhost/foo/bar:latest

 test_cmd "Verify expected partial failure when passing bogus architectures" \
-125 "error creating build.+architecture staple" \
+125 "no image found in image index for architecture" \
 bash -c "A_DEBUG=1 $SUBJ_FILEPATH --arches=correct,horse,battery,staple localhost/foo/bar --nopush $TEST_CONTEXT 2>&1"

 MODCMD='$RUNTIME tag $FQIN:latest $FQIN:9.8.7-testing'
```
```diff
@@ -86,15 +90,12 @@ test_cmd "Verify tagged manifest image digest matches the same in latest" \
 MODCMD='
 set -x;
 $RUNTIME images && \
-$RUNTIME manifest rm containers-storage:$FQIN:latest && \
-$RUNTIME manifest rm containers-storage:$FQIN:9.8.7-testing && \
+$RUNTIME manifest rm $FQIN:latest && \
+$RUNTIME manifest rm $FQIN:9.8.7-testing && \
 echo "AllGone";
 '
-# TODO: Test fails due to: https://github.com/containers/buildah/issues/3490
-# for now pretend it should exit(125) which will be caught when bug is fixed
-# - causing it to exit(0) as it should
-test_cmd "Verify --modcmd can execute a long string with substitutions" \
-125 "AllGone" \
+test_cmd "Verify --modcmd can execute command string that removes all tags" \
+0 "AllGone.*No FQIN.+to be pushed" \
 bash -c "A_DEBUG=1 $SUBJ_FILEPATH --modcmd='$MODCMD' localhost/foo/bar --nopush $TEST_CONTEXT 2>&1"

 test_cmd "Verify previous --modcmd removed the 'latest' tagged image" \
```
```diff
@@ -109,6 +110,8 @@ FAKE_VERSION=$RANDOM
 MODCMD="set -ex;
 \$RUNTIME tag \$FQIN:latest \$FQIN:$FAKE_VERSION;
 \$RUNTIME manifest rm \$FQIN:latest;"
+# TEST_FQIN and TEST_SOURCE_DIRPATH defined by caller
+# shellcheck disable=SC2154
 test_cmd "Verify e2e workflow w/ additional build-args" \
 0 "Pushing $TEST_FQIN:$FAKE_VERSION" \
 bash -c "env A_DEBUG=1 $SUBJ_FILEPATH \
```
```diff
@@ -121,7 +124,7 @@ test_cmd "Verify e2e workflow w/ additional build-args" \
 2>&1"

 test_cmd "Verify latest tagged image was not pushed" \
-1 "(Tag latest was deleted or has expired.)|(manifest unknown: manifest unknown)" \
+2 'reading manifest latest in quay\.io/buildah/do_not_use: manifest unknown' \
 skopeo inspect docker://$TEST_FQIN:latest

 test_cmd "Verify architectures can be obtained from manifest list" \
```
```diff
@@ -132,7 +135,7 @@ test_cmd "Verify architectures can be obtained from manifest list" \
 for arch in amd64 s390x arm64 ppc64le; do
 test_cmd "Verify $arch architecture present in $TEST_FQIN:$FAKE_VERSION" \
 0 "" \
-fgrep -qx "$arch" $TEST_TEMP/maniarches
+grep -Fqx "$arch" $TEST_TEMP/maniarches
 done

 test_cmd "Verify pushed image can be removed" \
```
```diff
@@ -0,0 +1,27 @@
+# Podman First-Time Contributor Certificate Generator
+
+This directory contains a simple web-based certificate generator to celebrate first-time contributors to the Podman project.
+
+## Files
+
+- **`certificate_generator.html`** - Interactive web interface for creating certificates
+- **`certificate_template.html`** - The certificate template used for generation
+- **`first_pr.png`** - Podman logo/branding image used in certificates
+
+## Usage
+
+1. Open `certificate_generator.html` in a web browser
+2. Fill in the contributor's details:
+   - Name
+   - Pull Request number
+   - Date (defaults to current date)
+3. Preview the certificate in real-time
+4. Click "Download Certificate" to save as HTML
+
+## Purpose
+
+These certificates are designed to recognize and celebrate community members who make their first contribution to the Podman project. The certificates feature Podman branding and can be customized for each contributor.
+
+## Contributing
+
+Feel free to improve the design, add features, or suggest enhancements to make the certificate generator even better for recognizing our amazing contributors!
```
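One possible way to turn a downloaded certificate into a printable PDF (an assumption on my part, not something the README above documents; the template's `@media print` rules target A4):

```bash
#!/usr/bin/env bash
# Sketch: render the saved HTML certificate to PDF with headless Chromium.
# The input file name is hypothetical; use whatever the generator downloaded.
chromium --headless --disable-gpu \
    --print-to-pdf=podman-contribution-certificate.pdf \
    podman-contribution-certificate-jane-doe.html
```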
```diff
@@ -0,0 +1,277 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="UTF-8">
+<meta name="viewport" content="width=device-width, initial-scale=1.0">
+<title>Podman Certificate Generator</title>
+<style>
+@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap');
+@import url('https://fonts.googleapis.com/css2?family=Merriweather:wght@400;700;900&display=swap');
+
+body {
+font-family: 'Inter', sans-serif;
+background-color: #f0f2f5;
+margin: 0;
+padding: 2rem;
+}
+.container {
+display: grid;
+grid-template-columns: 380px 1fr;
+gap: 2rem;
+max-width: 1600px;
+margin: auto;
+}
+.form-panel {
+background-color: white;
+padding: 2rem;
+border-radius: 8px;
+box-shadow: 0 4px 12px rgba(0,0,0,0.1);
+height: fit-content;
+position: sticky;
+top: 2rem;
+}
+.form-panel h2 {
+margin-top: 0;
+color: #333;
+font-family: 'Merriweather', serif;
+}
+.form-group {
+margin-bottom: 1.5rem;
+}
+.form-group label {
+display: block;
+margin-bottom: 0.5rem;
+font-weight: 600;
+color: #555;
+}
+.form-group input {
+width: 100%;
+padding: 0.75rem;
+border: 1px solid #ccc;
+border-radius: 4px;
+box-sizing: border-box;
+font-size: 1rem;
+}
+.action-buttons {
+display: flex;
+gap: 1rem;
+margin-top: 1.5rem;
+}
+.action-buttons button {
+flex-grow: 1;
+padding: 0.75rem;
+border: none;
+border-radius: 4px;
+font-size: 1rem;
+font-weight: 600;
+cursor: pointer;
+transition: background-color 0.3s;
+}
+#downloadBtn {
+background-color: #28a745;
+color: white;
+}
+#downloadBtn:hover {
+background-color: #218838;
+}
+.preview-panel {
+display: flex;
+justify-content: center;
+align-items: flex-start;
+}
+
+/* Certificate Styles (copied from template and scaled) */
+.certificate {
+width: 800px;
+height: 1100px;
+background: #fdfaf0;
+border: 2px solid #333;
+position: relative;
+box-shadow: 0 10px 30px rgba(0,0,0,0.2);
+padding: 50px;
+box-sizing: border-box;
+display: flex;
+flex-direction: column;
+align-items: center;
+font-family: 'Merriweather', serif;
+transform: scale(0.8);
+transform-origin: top center;
+}
+.party-popper { position: absolute; font-size: 40px; }
+.top-left { top: 40px; left: 40px; }
+.top-right { top: 40px; right: 40px; }
+.main-title { font-size: 48px; font-weight: 900; color: #333; text-align: center; margin-top: 60px; line-height: 1.2; text-transform: uppercase; }
+.subtitle { font-size: 24px; font-weight: 400; color: #333; text-align: center; margin-top: 30px; text-transform: uppercase; letter-spacing: 2px; }
+.contributor-name { font-size: 56px; font-weight: 700; color: #333; text-align: center; margin: 15px 0 50px; }
+.mascot-image { width: 450px; height: 450px; background-image: url('first_pr.png'); background-size: contain; background-repeat: no-repeat; background-position: center; margin-top: 20px; -webkit-print-color-adjust: exact; print-color-adjust: exact; }
+.description { font-size: 22px; color: #333; line-height: 1.6; text-align: center; margin-top: 40px; }
+.description strong { font-weight: 700; }
+.footer { width: 100%; margin-top: auto; padding-top: 30px; border-top: 1px solid #ccc; display: flex; justify-content: space-between; align-items: flex-end; font-size: 16px; color: #333; }
+.pr-info { text-align: left; }
+.signature { text-align: right; font-style: italic; }
+
+@media print {
+body {
+background: #fff;
+margin: 0;
+padding: 0;
+}
+.form-panel, .action-buttons {
+display: none;
+}
+.container {
+display: block;
+margin: 0;
+padding: 0;
+}
+.preview-panel {
+padding: 0;
+margin: 0;
+}
+.certificate {
+transform: scale(1);
+box-shadow: none;
+width: 100%;
+height: 100vh;
+page-break-inside: avoid;
+}
+}
+</style>
+</head>
+<body>
+<div class="container">
+<div class="form-panel">
+<h2>Certificate Generator</h2>
+<div class="form-group">
+<label for="contributorName">Contributor Name</label>
+<input type="text" id="contributorName" value="Mike McGrath">
+</div>
+<div class="form-group">
+<label for="prNumber">PR Number</label>
+<input type="text" id="prNumber" value="26393">
+</div>
+<div class="form-group">
+<label for="mergeDate">Date</label>
+<input type="text" id="mergeDate" value="June 13, 2025">
+</div>
+<div class="action-buttons">
+<button id="downloadBtn">Download HTML</button>
+</div>
+</div>
+<div class="preview-panel">
+<div id="certificatePreview">
+<!-- Certificate HTML will be injected here by script -->
+</div>
+</div>
+</div>
+
+<script>
+const nameInput = document.getElementById('contributorName');
+const prNumberInput = document.getElementById('prNumber');
+const dateInput = document.getElementById('mergeDate');
+const preview = document.getElementById('certificatePreview');
+
+function generateCertificateHTML(name, prNumber, date) {
+const prLink = `https://github.com/containers/podman/pull/${prNumber}`;
+// This is the full, self-contained HTML for the certificate
+return `
+<div class="certificate">
+<div class="party-popper top-left">🎉</div>
+<div class="party-popper top-right">🎉</div>
+<div class="main-title">Certificate of<br>Contribution</div>
+<div class="subtitle">Awarded To</div>
+<div class="contributor-name">${name}</div>
+<div class="mascot-image"></div>
+<div class="description">
+For successfully submitting and merging their <strong>First Pull Request</strong> to the <strong>Podman project</strong>.<br>
+Your contribution helps make open source better—one PR at a time!
+</div>
+<div class="footer">
+<div class="pr-info">
+<div>🔧 Merged PR: <a href="${prLink}" target="_blank">${prLink}</a></div>
+<div style="margin-top: 5px;">${date}</div>
+</div>
+<div class="signature">
+Keep hacking, keep contributing!<br>
+– The Podman Community
+</div>
+</div>
+</div>
+`;
+}
+
+function updatePreview() {
+const name = nameInput.value || '[CONTRIBUTOR_NAME]';
+const prNumber = prNumberInput.value || '[PR_NUMBER]';
+const date = dateInput.value || '[DATE]';
+preview.innerHTML = generateCertificateHTML(name, prNumber, date);
+}
+
+document.getElementById('downloadBtn').addEventListener('click', () => {
+const name = nameInput.value || 'contributor';
+const prNumber = prNumberInput.value || '00000';
+const date = dateInput.value || 'Date';
+
+const certificateHTML = generateCertificateHTML(name, prNumber, date);
+const fullPageHTML = `
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="UTF-8">
+<title>Certificate for ${name}</title>
+<style>
+/* All the CSS from the generator page */
+@import url('https://fonts.googleapis.com/css2?family=Merriweather:wght@400;700;900&display=swap');
+body { margin: 20px; font-family: 'Merriweather', serif; background: #e0e0e0; }
+.certificate {
+transform: scale(1);
+box-shadow: none;
+margin: auto;
+}
+/* Paste all certificate-related styles here */
+.certificate { width: 800px; height: 1100px; background: #fdfaf0; border: 2px solid #333; position: relative; padding: 50px; box-sizing: border-box; display: flex; flex-direction: column; align-items: center; }
+.party-popper { position: absolute; font-size: 40px; }
+.top-left { top: 40px; left: 40px; }
+.top-right { top: 40px; right: 40px; }
+.main-title { font-size: 48px; font-weight: 900; color: #333; text-align: center; margin-top: 60px; line-height: 1.2; text-transform: uppercase; }
+.subtitle { font-size: 24px; font-weight: 400; color: #333; text-align: center; margin-top: 30px; text-transform: uppercase; letter-spacing: 2px; }
+.contributor-name { font-size: 56px; font-weight: 700; color: #333; text-align: center; margin: 15px 0 50px; }
+.mascot-image { width: 450px; height: 450px; background-image: url('first_pr.png'); background-size: contain; background-repeat: no-repeat; background-position: center; margin-top: 20px; -webkit-print-color-adjust: exact; print-color-adjust: exact; }
+.description { font-size: 22px; color: #333; line-height: 1.6; text-align: center; margin-top: 40px; }
+.description strong { font-weight: 700; }
+.footer { width: 100%; margin-top: auto; padding-top: 30px; border-top: 1px solid #ccc; display: flex; justify-content: space-between; align-items: flex-end; font-size: 16px; color: #333; }
+.pr-info { text-align: left; }
+.signature { text-align: right; font-style: italic; }
+
+@media print {
+@page { size: A4 portrait; margin: 0; }
+body, html { width: 100%; height: 100%; margin: 0; padding: 0; }
+.certificate { width: 100%; height: 100%; box-shadow: none; transform: scale(1); }
+}
+</style>
+</head>
+<body>${certificateHTML}</body>
+</html>
+`;
+
+const blob = new Blob([fullPageHTML], { type: 'text/html' });
+const url = URL.createObjectURL(blob);
+const a = document.createElement('a');
+a.href = url;
+a.download = `podman-contribution-certificate-${name.toLowerCase().replace(/\s+/g, '-')}.html`;
+document.body.appendChild(a);
+a.click();
+document.body.removeChild(a);
+URL.revokeObjectURL(url);
+});
+
+// Add event listeners to update preview on input change
+[nameInput, prNumberInput, dateInput].forEach(input => {
+input.addEventListener('input', updatePreview);
+});
+
+// Initial preview generation
+updatePreview();
+</script>
+</body>
+</html>
```
```diff
@@ -0,0 +1,175 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+<meta charset="UTF-8">
+<meta name="viewport" content="width=device-width, initial-scale=1.0">
+<title>Podman Certificate of Contribution</title>
+<style>
+@import url('https://fonts.googleapis.com/css2?family=Merriweather:wght@400;700;900&display=swap');
+
+body {
+margin: 0;
+padding: 20px;
+font-family: 'Merriweather', serif;
+background: #e0e0e0;
+display: flex;
+justify-content: center;
+align-items: center;
+min-height: 100vh;
+}
+
+.certificate {
+width: 800px;
+height: 1100px;
+background: #fdfaf0;
+border: 2px solid #333;
+position: relative;
+box-shadow: 0 10px 30px rgba(0, 0, 0, 0.2);
+padding: 50px;
+box-sizing: border-box;
+display: flex;
+flex-direction: column;
+align-items: center;
+}
+
+.party-popper {
+position: absolute;
+font-size: 40px;
+}
+
+.top-left {
+top: 40px;
+left: 40px;
+}
+
+.top-right {
+top: 40px;
+right: 40px;
+}
+
+.main-title {
+font-size: 48px;
+font-weight: 900;
+color: #333;
+text-align: center;
+margin-top: 60px;
+line-height: 1.2;
+text-transform: uppercase;
+}
+
+.subtitle {
+font-size: 24px;
+font-weight: 400;
+color: #333;
+text-align: center;
+margin-top: 30px;
+text-transform: uppercase;
+letter-spacing: 2px;
+}
+
+.contributor-name {
+font-size: 56px;
+font-weight: 700;
+color: #333;
+text-align: center;
+margin: 15px 0 50px;
+}
+
+.mascot-image {
+width: 450px;
+height: 450px;
+background-image: url('first_pr.png');
+background-size: contain;
+background-repeat: no-repeat;
+background-position: center;
+margin-top: 20px;
+-webkit-print-color-adjust: exact;
+print-color-adjust: exact;
+}
+
+.description {
+font-size: 22px;
+color: #333;
+line-height: 1.6;
+text-align: center;
+margin-top: 40px;
+}
+
+.description strong {
+font-weight: 700;
+}
+
+.footer {
+width: 100%;
+margin-top: auto;
+padding-top: 30px;
+border-top: 1px solid #ccc;
+display: flex;
+justify-content: space-between;
+align-items: flex-end;
+font-size: 16px;
+color: #333;
+}
+
+.pr-info {
+text-align: left;
+}
+
+.signature {
+text-align: right;
+font-style: italic;
+}
+
+@media print {
+@page {
+size: A4 portrait;
+margin: 0;
+}
+body, html {
+width: 100%;
+height: 100%;
+margin: 0;
+padding: 0;
+background: #fdfaf0;
+}
+.certificate {
+width: 100%;
+height: 100vh;
+box-shadow: none;
+transform: scale(1);
+border-radius: 0;
+page-break-inside: avoid;
+}
+}
+</style>
+</head>
+<body>
+<div class="certificate">
+<div class="party-popper top-left">🎉</div>
+<div class="party-popper top-right">🎉</div>
+
+<div class="main-title">Certificate of<br>Contribution</div>
+<div class="subtitle">Awarded To</div>
+
+<div class="contributor-name">[CONTRIBUTOR_NAME]</div>
+
+<div class="mascot-image"></div>
+
+<div class="description">
+For successfully submitting and merging their <strong>First Pull Request</strong> to the <strong>Podman project</strong>.<br>
+Your contribution helps make open source better—one PR at a time!
+</div>
+
+<div class="footer">
+<div class="pr-info">
+<div>🔧 Merged PR: [PR_LINK]</div>
+<div style="margin-top: 5px;">[DATE]</div>
+</div>
+<div class="signature">
+Keep hacking, keep contributing!<br>
+– The Podman Community
+</div>
+</div>
+</div>
+</body>
+</html>
```
Binary file not shown (image, 578 KiB).
Binary file not shown (image, 138 KiB).
Binary file not shown (image, 138 KiB).
```diff
@@ -6,7 +6,7 @@ RUN microdnf update -y && \
 perl-Test perl-Test-Simple perl-Test-Differences \
 perl-YAML-LibYAML perl-FindBin \
 python3 python3-virtualenv python3-pip gcc python3-devel \
-python3-flake8 python3-pep8-naming python3-flake8-docstrings python3-flake8-import-order python3-flake8-polyfill python3-mccabe python3-pep8-naming && \
+python3-flake8 python3-pep8-naming python3-flake8-import-order python3-flake8-polyfill python3-mccabe python3-pep8-naming && \
 microdnf clean all && \
 rm -rf /var/cache/dnf
 # Required by perl
```
```diff
@@ -16,4 +16,4 @@ PyYAML~=6.0
 aiohttp[speedups]~=3.8
 gql[requests]~=3.3
 requests>=2,<3
-urllib3<2.0.0
+urllib3<2.5.1
```
```diff
@@ -197,11 +197,12 @@ sub write_img {

 # Annotate: add signature line at lower left
 # FIXME: include git repo info?
-if (grep { -x "$_/convert" } split(":", $ENV{PATH})) {
+if (grep { -x "$_/magick" } split(":", $ENV{PATH})) {
 unlink $img_out_tmp;
 my $signature = strftime("Generated %Y-%m-%dT%H:%M:%S%z by $ME v$VERSION", localtime);
 my @cmd = (
-"convert",
+"magick",
+$img_out,
 '-family' => 'Courier',
 '-pointsize' => '12',
 # '-style' => 'Normal', # Argh! This gives us Bold!?
```
```diff
@@ -209,7 +210,7 @@ sub write_img {
 '-fill' => '#000',
 '-gravity' => 'SouthWest',
 "-annotate", "+5+5", $signature,
-"$img_out" => "$img_out_tmp"
+$img_out_tmp
 );
 if (system(@cmd) == 0) {
 rename $img_out_tmp => $img_out;
```
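For reference (an illustration, not code from the repository): the argument list the Perl code now builds corresponds to an ImageMagick 7 invocation of roughly this shape, reading the rendered graph image and writing an annotated temporary copy.

```bash
#!/usr/bin/env bash
# Sketch: shell equivalent of the @cmd list assembled above.
# img.png and img.tmp.png are hypothetical stand-ins for $img_out and $img_out_tmp.
magick img.png \
    -family Courier -pointsize 12 \
    -fill '#000' -gravity SouthWest \
    -annotate +5+5 "Generated <timestamp> by <script> v<version>" \
    img.tmp.png
```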
```diff
@@ -424,18 +425,44 @@ sub _size {
 }

 ##############
-# _by_size # sort helper, for putting big nodes at bottom
+# _by_type # sort helper, for clustering int/sys/machine tests
 ##############
-sub _by_size {
-_size($a) <=> _size($b) ||
-$a->{name} cmp $b->{name};
+sub _by_type {
+my $ax = $a->{name};
+my $bx = $b->{name};
+
+# The big test types, in the order we want to show them
+my @types = qw(integration system bud machine);
+my %type_order = map { $types[$_] => $_ } (0..$#types);
+my $type_re = join('|', @types);
+
+if ($ax =~ /($type_re)/) {
+my $a_type = $1;
+if ($bx =~ /($type_re)/) {
+my $b_type = $1;
+
+return $type_order{$a_type} <=> $type_order{$b_type}
+|| $ax cmp $bx;
+}
+else {
+# e.g., $b is "win installer", $a is in @types, $b < $a
+return 1;
+}
+}
+elsif ($bx =~ /($type_re)/) {
+# e.g., $a is "win installer", $b is in @types, $a < $b
+return -1;
+}
+
+# Neither a nor b is in @types
+$ax cmp $bx;
 }

 sub depended_on_by {
 my $self = shift;

 if (my $d = $self->{_depended_on_by}) {
-my @d = sort _by_size map { $self->{_tasklist}->find($_) } @$d;
+my @d = sort _by_type map { $self->{_tasklist}->find($_) } @$d;
 return @d;
 }
 return;
```
```diff
@@ -755,12 +782,16 @@ sub _draw_boxes {
 if (my $only_if = $task->{yml}{only_if}) {
 $shape = 'record';
 $label .= '|' if $label;
-if ($only_if =~ /CI:DOCS.*CI:BUILD/) {
-$label .= "[SKIP: CI:BUILD]\\l[SKIP: CI:DOCS]\\l";
-}
-elsif ($only_if =~ /CI:DOCS/) {
-$label .= "[SKIP: CI:DOCS]\\l";
+
+# Collapse whitespace, and remove leading/trailing
+$only_if =~ s/[\s\n]+/ /g;
+$only_if =~ s/^\s+|\s+$//g;
+
+# 2024-06-18 Paul CI skips
+if ($only_if =~ m{\$CIRRUS_PR\s+==\s+''\s+.*\$CIRRUS_CHANGE_TITLE.*CI:ALL.*changesInclude.*test}) {
+$label .= "[SKIP if not needed]";
 }
+
 # 2020-10 used in automation_images repo
 elsif ($only_if eq q{$CIRRUS_PR != ''}) {
 $label .= "[only if PR]";
```
```diff
@@ -803,7 +834,28 @@ sub _draw_boxes {
 elsif ($only_if =~ /CIRRUS_BRANCH\s+==\s+'main'\s+&&\s+\$CIRRUS_CRON\s+==\s+''/) {
 $label .= "[only on merge]";
 }
+elsif ($only_if =~ /CIRRUS_BRANCH\s+!=~\s+'v.*-rhel'\s+&&\s+\$CIRRUS_BASE_BRANCH\s+!=~\s+'v.*-rhel'/) {
+$label .= "[only if no RHEL release]";
+}
+elsif ($only_if =~ /CIRRUS_CHANGE_TITLE.*CI:BUILD.*CIRRUS_CHANGE_TITLE.*CI:MACHINE/s) {
+$label .= "[SKIP: CI:BUILD or CI:MACHINE]";
+}
+elsif ($only_if =~ /CIRRUS_CHANGE_TITLE\s+!=.*CI:MACHINE.*CIRRUS_BRANCH.*main.*CIRRUS_BASE_BRANCH.*main.*\)/s) {
+$label .= "[only if: main]";
+}
+
+# automation_images
+elsif ($only_if eq q{$CIRRUS_CRON == '' && $CIRRUS_BRANCH == $CIRRUS_DEFAULT_BRANCH}) {
+$label .= "[only if DEFAULT_BRANCH and not cron]";
+}
+elsif ($only_if eq q{$CIRRUS_PR != '' && $CIRRUS_PR_LABELS !=~ ".*no_build-push.*"}) {
+$label .= "[only if PR, but not no_build-push]";
+}
+elsif ($only_if eq q{$CIRRUS_CRON == 'lifecycle'}) {
+$label .= "[only on cron=lifecycle]";
+}
 else {
+warn "$ME: unexpected only_if: $only_if\n";
 $label .= "[only if: $only_if]";
 }
 }
```
@ -818,10 +870,27 @@ sub _draw_boxes {
|
||||||
if (my $skip = $task->{yml}{skip}) {
|
if (my $skip = $task->{yml}{skip}) {
|
||||||
$shape = 'record';
|
$shape = 'record';
|
||||||
$label .= '|' if $label && $label !~ /SKIP/;
|
$label .= '|' if $label && $label !~ /SKIP/;
|
||||||
|
|
||||||
|
# Collapse whitespace, and remove leading/trailing
|
||||||
|
$skip =~ s/[\s\n]+/ /g;
|
||||||
|
$skip =~ s/^\s+|\s+$//g;
|
||||||
|
|
||||||
my @reasons;
|
my @reasons;
|
||||||
push @reasons, 'BRANCH','TAG' if $skip =~ /CIRRUS_PR.*CIRRUS_TAG/;
|
|
||||||
push @reasons, 'TAG' if $skip eq q{$CIRRUS_TAG != ''};
|
# automation_images
|
||||||
push @reasons, 'CI:DOCS' if $skip =~ /CI:DOCS/;
|
if ($skip eq q{$CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*' || $CIRRUS_CHANGE_TITLE =~ '.*CI:TOOLING.*'}) {
|
||||||
|
push @reasons, "CI:DOCS or CI:TOOLING";
|
||||||
|
}
|
||||||
|
elsif ($skip eq q{$CIRRUS_CHANGE_TITLE =~ '.*CI:DOCS.*'}) {
|
||||||
|
push @reasons, "CI:DOCS";
|
||||||
|
}
|
||||||
|
elsif ($skip eq '$CI == $CI') {
|
||||||
|
push @reasons, "DISABLED MANUALLY";
|
||||||
|
}
|
||||||
|
elsif ($skip) {
|
||||||
|
warn "$ME: unexpected skip '$skip'\n";
|
||||||
|
}
|
||||||
|
|
||||||
if (@reasons) {
|
if (@reasons) {
|
||||||
$label .= join('', map { "[SKIP: $_]\\l" } @reasons);
|
$label .= join('', map { "[SKIP: $_]\\l" } @reasons);
|
||||||
}
|
}
|
||||||
|
|
|
@ -90,14 +90,14 @@ end_task:
|
||||||
- "middle_2"
|
- "middle_2"
|
||||||
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
||||||
"real_name_of_initial" [shape=ellipse style=bold color=a fontcolor=a]
|
"real_name_of_initial" [shape=ellipse style=bold color=a fontcolor=a]
|
||||||
|
"real_name_of_initial" -> "end" [color=a]
|
||||||
|
"end" [shape=ellipse style=bold color=z fontcolor=z]
|
||||||
"real_name_of_initial" -> "middle_1" [color=a]
|
"real_name_of_initial" -> "middle_1" [color=a]
|
||||||
"middle_1" [shape=ellipse style=bold color=b fontcolor=b]
|
"middle_1" [shape=ellipse style=bold color=b fontcolor=b]
|
||||||
"middle_1" -> "end" [color=b]
|
"middle_1" -> "end" [color=b]
|
||||||
"end" [shape=ellipse style=bold color=z fontcolor=z]
|
|
||||||
"real_name_of_initial" -> "middle_2" [color=a]
|
"real_name_of_initial" -> "middle_2" [color=a]
|
||||||
"middle_2" [shape=ellipse style=bold color=c fontcolor=c]
|
"middle_2" [shape=ellipse style=bold color=c fontcolor=c]
|
||||||
"middle_2" -> "end" [color=c]
|
"middle_2" -> "end" [color=c]
|
||||||
"real_name_of_initial" -> "end" [color=a]
|
|
||||||
|
|
||||||
<<<<<<<<<<<<<<<<<< env interpolation 1
|
<<<<<<<<<<<<<<<<<< env interpolation 1
|
||||||
env:
|
env:
|
||||||
|
@ -510,10 +510,12 @@ success_task:
|
||||||
|
|
||||||
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
||||||
"automation" [shape=ellipse style=bold color=a fontcolor=a]
|
"automation" [shape=ellipse style=bold color=a fontcolor=a]
|
||||||
"automation" -> "success" [color=a]
|
|
||||||
"success" [shape=ellipse style=bold color="#000000" fillcolor="#00f000" style=filled fontcolor="#000000"]
|
|
||||||
"automation" -> "build" [color=a]
|
"automation" -> "build" [color=a]
|
||||||
"build" [shape=record style=bold color="#0000f0" fillcolor="#f0f0f0" style=filled fontcolor="#0000f0" label="build\l|- Build for fedora-32\l- Build for fedora-31\l- Build for ubuntu-20\l- Build for ubuntu-19\l"]
|
"build" [shape=record style=bold color="#0000f0" fillcolor="#f0f0f0" style=filled fontcolor="#0000f0" label="build\l|- Build for fedora-32\l- Build for fedora-31\l- Build for ubuntu-20\l- Build for ubuntu-19\l"]
|
||||||
|
"build" -> "alt_build" [color="#0000f0"]
|
||||||
|
"alt_build" [shape=record style=bold color="#0000f0" fillcolor="#f0f0f0" style=filled fontcolor="#0000f0" label="alt build\l|- Build Each Commit\l- Windows Cross\l- Build Without CGO\l- Build varlink API\l- Static build\l- Test build RPM\l"]
|
||||||
|
"alt_build" -> "success" [color="#0000f0"]
|
||||||
|
"success" [shape=ellipse style=bold color="#000000" fillcolor="#00f000" style=filled fontcolor="#000000"]
|
||||||
"build" -> "bindings" [color="#0000f0"]
|
"build" -> "bindings" [color="#0000f0"]
|
||||||
"bindings" [shape=ellipse style=bold color=b fontcolor=b]
|
"bindings" [shape=ellipse style=bold color=b fontcolor=b]
|
||||||
"bindings" -> "success" [color=b]
|
"bindings" -> "success" [color=b]
|
||||||
|
@ -526,25 +528,23 @@ success_task:
|
||||||
"build" -> "osx_cross" [color="#0000f0"]
|
"build" -> "osx_cross" [color="#0000f0"]
|
||||||
"osx_cross" [shape=ellipse style=bold color=e fontcolor=e]
|
"osx_cross" [shape=ellipse style=bold color=e fontcolor=e]
|
||||||
"osx_cross" -> "success" [color=e]
|
"osx_cross" -> "success" [color=e]
|
||||||
|
"build" -> "success" [color="#0000f0"]
|
||||||
"build" -> "swagger" [color="#0000f0"]
|
"build" -> "swagger" [color="#0000f0"]
|
||||||
"swagger" [shape=ellipse style=bold color=f fontcolor=f]
|
"swagger" [shape=ellipse style=bold color=f fontcolor=f]
|
||||||
"swagger" -> "success" [color=f]
|
"swagger" -> "success" [color=f]
|
||||||
|
"build" -> "unit_test" [color="#0000f0"]
|
||||||
|
"unit_test" [shape=record style=bold color="#000000" fillcolor="#f09090" style=filled fontcolor="#000000" label="unit test\l|- Unit tests on fedora-32\l- Unit tests on fedora-31\l- Unit tests on ubuntu-20\l- Unit tests on ubuntu-19\l"]
|
||||||
|
"unit_test" -> "success" [color="#f09090"]
|
||||||
"build" -> "validate" [color="#0000f0"]
|
"build" -> "validate" [color="#0000f0"]
|
||||||
"validate" [shape=record style=bold color="#00c000" fillcolor="#f0f0f0" style=filled fontcolor="#00c000" label="validate\l|= Validate fedora-32 Build\l"]
|
"validate" [shape=record style=bold color="#00c000" fillcolor="#f0f0f0" style=filled fontcolor="#00c000" label="validate\l|= Validate fedora-32 Build\l"]
|
||||||
"validate" -> "success" [color="#00c000"]
|
"validate" -> "success" [color="#00c000"]
|
||||||
"build" -> "vendor" [color="#0000f0"]
|
"build" -> "vendor" [color="#0000f0"]
|
||||||
"vendor" [shape=ellipse style=bold color=g fontcolor=g]
|
"vendor" [shape=ellipse style=bold color=g fontcolor=g]
|
||||||
"vendor" -> "success" [color=g]
|
"vendor" -> "success" [color=g]
|
||||||
"build" -> "unit_test" [color="#0000f0"]
|
"automation" -> "success" [color=a]
|
||||||
"unit_test" [shape=record style=bold color="#000000" fillcolor="#f09090" style=filled fontcolor="#000000" label="unit test\l|- Unit tests on fedora-32\l- Unit tests on fedora-31\l- Unit tests on ubuntu-20\l- Unit tests on ubuntu-19\l"]
|
|
||||||
"unit_test" -> "success" [color="#f09090"]
|
|
||||||
"build" -> "alt_build" [color="#0000f0"]
|
|
||||||
"alt_build" [shape=record style=bold color="#0000f0" fillcolor="#f0f0f0" style=filled fontcolor="#0000f0" label="alt build\l|- Build Each Commit\l- Windows Cross\l- Build Without CGO\l- Build varlink API\l- Static build\l- Test build RPM\l"]
|
|
||||||
"alt_build" -> "success" [color="#0000f0"]
|
|
||||||
"build" -> "success" [color="#0000f0"]
|
|
||||||
"ext_svc_check" [shape=ellipse style=bold color=h fontcolor=h]
|
"ext_svc_check" [shape=ellipse style=bold color=h fontcolor=h]
|
||||||
"ext_svc_check" -> "success" [color=h]
|
|
||||||
"ext_svc_check" -> "build" [color=h]
|
"ext_svc_check" -> "build" [color=h]
|
||||||
|
"ext_svc_check" -> "success" [color=h]
|
||||||
"smoke" [shape=ellipse style=bold color=i fontcolor=i]
|
"smoke" [shape=ellipse style=bold color=i fontcolor=i]
|
||||||
"smoke" -> "success" [color=i]
|
|
||||||
"smoke" -> "build" [color=i]
|
"smoke" -> "build" [color=i]
|
||||||
|
"smoke" -> "success" [color=i]
|
||||||
|
|
|
@ -82,11 +82,6 @@ test_cmd "timebomb() function requires at least one argument" \
|
||||||
1 "must be UTC-based and of the form YYYYMMDD" \
|
1 "must be UTC-based and of the form YYYYMMDD" \
|
||||||
timebomb
|
timebomb
|
||||||
|
|
||||||
TZ=UTC12 \
|
|
||||||
test_cmd "timebomb() function ignores TZ envar and forces UTC" \
|
|
||||||
0 "" \
|
|
||||||
timebomb $(TZ=UTC date -d "+11 hours" +%Y%m%d)
|
|
||||||
|
|
||||||
TZ=UTC12 \
|
TZ=UTC12 \
|
||||||
test_cmd "timebomb() function ignores TZ and compares < UTC-forced current date" \
|
test_cmd "timebomb() function ignores TZ and compares < UTC-forced current date" \
|
||||||
1 "TIME BOMB EXPIRED" \
|
1 "TIME BOMB EXPIRED" \
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
# This file is intended for sourcing by the cirrus-ci_retrospective workflow
|
# This file is intended for sourcing by the cirrus-ci_retrospective workflow
|
||||||
# It should not be used under any other context.
|
# It should not be used under any other context.
|
||||||
|
|
||||||
source $(dirname $BASH_SOURCE[0])/github_common.sh || exit 1
|
source $(dirname ${BASH_SOURCE[0]})/github_common.sh || exit 1
|
||||||
|
|
||||||
# Cirrus-CI Build status codes that represent completion
|
# Cirrus-CI Build status codes that represent completion
|
||||||
COMPLETE_STATUS_RE='FAILED|COMPLETED|ABORTED|ERRORED'
|
COMPLETE_STATUS_RE='FAILED|COMPLETED|ABORTED|ERRORED'
|
||||||
|
@ -63,7 +63,7 @@ load_ccir() {
|
||||||
was_pr='true'
|
was_pr='true'
|
||||||
# Don't race vs another cirrus-ci build triggered _after_ GH action workflow started
|
# Don't race vs another cirrus-ci build triggered _after_ GH action workflow started
|
||||||
# since both may share the same check_suite. e.g. task re-run or manual-trigger
|
# since both may share the same check_suite. e.g. task re-run or manual-trigger
|
||||||
if echo "$bst" | egrep -q "$COMPLETE_STATUS_RE"; then
|
if echo "$bst" | grep -E -q "$COMPLETE_STATUS_RE"; then
|
||||||
if [[ -n "$tst" ]] && [[ "$tst" == "PAUSED" ]]; then
|
if [[ -n "$tst" ]] && [[ "$tst" == "PAUSED" ]]; then
|
||||||
dbg "Detected action status $tst"
|
dbg "Detected action status $tst"
|
||||||
do_intg='true'
|
do_intg='true'
|
||||||
|
|
|
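Both fixes in the two hunks above are mechanical correctness/portability changes: without braces, `$BASH_SOURCE[0]` expands the array's first element and then appends a literal `[0]` (the old form only worked because `dirname` discards the basename, stray suffix and all), and `grep -E` is the current spelling of the obsolescent `egrep`. A minimal, self-contained sketch of both points (hypothetical script, not part of the diff):

    #!/bin/bash
    # Array indexing: braces are required for BASH_SOURCE[0].
    echo "unbraced: $BASH_SOURCE[0]"     # prints e.g. ./demo.sh[0]
    echo "braced:   ${BASH_SOURCE[0]}"   # prints e.g. ./demo.sh

    # grep -E replaces the deprecated egrep.
    COMPLETE_STATUS_RE='FAILED|COMPLETED|ABORTED|ERRORED'
    bst="COMPLETED"   # hypothetical build status value
    if echo "$bst" | grep -E -q "$COMPLETE_STATUS_RE"; then
        echo "build reached a completion state"
    fi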
@@ -0,0 +1,200 @@
+#!/bin/bash
+
+# This script is intended for use by humans to allocate a dedicated-host
+# and create an instance on it for testing purposes. When executed,
+# it will create a temporary clone of the repository with the necessary
+# modifications to manipulate the test host. It's the user's responsibility
+# to cleanup this directory after manually removing the instance (see below).
+#
+# **Note**: Due to Apple/Amazon restrictions on the removal of these
+# resources, cleanup must be done manually. You will need to shutdown and
+# terminate the instance, then wait 24-hours before releasing the
+# dedicated-host. The hosts cost money w/n an instance is running.
+#
+# The script assumes:
+#
+# * The current $USER value reflects your actual identity such that
+#   the test instance may be labeled appropriatly for auditing.
+# * The `aws` CLI tool is installed on $PATH.
+# * Appropriate `~/.aws/credentials` credentials are setup.
+# * The us-east-1 region is selected in `~/.aws/config`.
+# * The $POOLTOKEN env. var. is set to value available from
+#   https://cirrus-ci.com/pool/1cf8c7f7d7db0b56aecd89759721d2e710778c523a8c91c7c3aaee5b15b48d05
+# * The local ssh-agent is able to supply the appropriate private key (stored in BW).
+
+set -eo pipefail
+
+# shellcheck source-path=SCRIPTDIR
+source $(dirname ${BASH_SOURCE[0]})/pw_lib.sh
+
+# Support debugging all mac_pw_pool scripts or only this one
+I_DEBUG="${I_DEBUG:0}"
+if ((I_DEBUG)); then
+    X_DEBUG=1
+    warn "Debugging enabled."
+fi
+
+dbg "\$USER=$USER"
+
+[[ -n "$USER" ]] || \
+    die "The variable \$USER must not be empty"
+
+[[ -n "$POOLTOKEN" ]] || \
+    die "The variable \$POOLTOKEN must not be empty"
+
+INST_NAME="${USER}Testing"
+LIB_DIRNAME=$(realpath --relative-to=$REPO_DIRPATH $LIB_DIRPATH)
+# /tmp is usually a tmpfs, don't let an accidental reboot ruin
+# access to a test DH/instance for a developer.
+TMP_CLONE_DIRPATH="/var/tmp/${LIB_DIRNAME}_${INST_NAME}"
+
+dbg "\$TMP_CLONE_DIRPATH=$TMP_CLONE_DIRPATH"
+
+if [[ -d "$TMP_CLONE_DIRPATH" ]]; then
+    die "Found existing '$TMP_CLONE_DIRPATH', assuming in-use/relevant; If not, manual cleanup is required."
+fi
+
+msg "Creating temporary clone dir and transfering any uncommited files."
+
+git clone --no-local --no-hardlinks --depth 1 --single-branch --no-tags --quiet "file://$REPO_DIRPATH" "$TMP_CLONE_DIRPATH"
+declare -a uncommited_filepaths
+readarray -t uncommited_filepaths <<<$(
+    pushd "$REPO_DIRPATH" &> /dev/null
+    # Obtaining uncommited relative staged filepaths
+    git diff --name-only HEAD
+    # Obtaining uncommited relative unstaged filepaths
+    git ls-files . --exclude-standard --others
+    popd &> /dev/null
+)
+
+dbg "Copying \$uncommited_filepaths[*]=${uncommited_filepaths[*]}"
+
+for uncommited_file in "${uncommited_filepaths[@]}"; do
+    uncommited_file_src="$REPO_DIRPATH/$uncommited_file"
+    uncommited_file_dest="$TMP_CLONE_DIRPATH/$uncommited_file"
+    uncommited_file_dest_parent=$(dirname "$uncommited_file_dest")
+    #dbg "Working on uncommited file '$uncommited_file_src'"
+    if [[ -r "$uncommited_file_src" ]]; then
+        mkdir -p "$uncommited_file_dest_parent"
+        #dbg "$uncommited_file_src -> $uncommited_file_dest"
+        cp -a "$uncommited_file_src" "$uncommited_file_dest"
+    fi
+done
+
+declare -a modargs
+# Format: <pw_lib.sh var name> <new value> <old value>
+modargs=(
+    # Necessary to prevent in-production macs from trying to use testing instance
+    "DH_REQ_VAL $INST_NAME $DH_REQ_VAL"
+    # Necessary to make test dedicated host stand out when auditing the set in the console
+    "DH_PFX $INST_NAME $DH_PFX"
+    # The default launch template name includes $DH_PFX, ensure the production template name is used.
+    # N/B: The old/unmodified pw_lib.sh is still loaded for the running script
+    "TEMPLATE_NAME $TEMPLATE_NAME Cirrus${DH_PFX}PWinstance"
+    # Permit developer to use instance for up to 3 days max (orphan vm cleaning process will nail it after that).
+    "PW_MAX_HOURS 72 $PW_MAX_HOURS"
+    # Permit developer to execute as many Cirrus-CI tasks as they want w/o automatic shutdown.
+    "PW_MAX_TASKS 9999 $PW_MAX_TASKS"
+)
+
+for modarg in "${modargs[@]}"; do
+    set -- $modarg  # Convert the "tuple" into the param args $1 $2...
+    dbg "Modifying pw_lib.sh \$$1 definition to '$2' (was '$3')"
+    sed -i -r -e "s/^$1=.*/$1=\"$2\"/" "$TMP_CLONE_DIRPATH/$LIB_DIRNAME/pw_lib.sh"
+    # Ensure future script invocations use the new values
+    unset $1
+done
+
+cd "$TMP_CLONE_DIRPATH/$LIB_DIRNAME"
+source ./pw_lib.sh
+
+# Before going any further, make sure there isn't an existing
+# dedicated-host named ${INST_NAME}-0. If there is, it can
+# be re-used instead of failing the script outright.
+existing_dh_json=$(mktemp -p "." dh_allocate_XXXXX.json)
+$AWS ec2 describe-hosts --filter "Name=tag:Name,Values=${INST_NAME}-0" --query 'Hosts[].HostId' > "$existing_dh_json"
+if grep -Fqx '[]' "$existing_dh_json"; then
+
+    msg "Creating the dedicated host '${INST_NAME}-0'"
+    declare dh_allocate_json
+    dh_allocate_json=$(mktemp -p "." dh_allocate_XXXXX.json)
+
+    declare -a awsargs
+    # Word-splitting of $AWS is desireable
+    # shellcheck disable=SC2206
+    awsargs=(
+        $AWS
+        ec2 allocate-hosts
+        --availability-zone us-east-1a
+        --instance-type mac2.metal
+        --auto-placement off
+        --host-recovery off
+        --host-maintenance off
+        --quantity 1
+        --tag-specifications
+        "ResourceType=dedicated-host,Tags=[{Key=Name,Value=${INST_NAME}-0},{Key=$DH_REQ_TAG,Value=$DH_REQ_VAL},{Key=PWPoolReady,Value=true},{Key=automation,Value=false}]"
+    )
+
+    # N/B: Apple/Amazon require min allocation time of 24hours!
+    dbg "Executing: ${awsargs[*]}"
+    "${awsargs[@]}" > "$dh_allocate_json" || \
+        die "Provisioning new dedicated host $INST_NAME failed. Manual debugging & cleanup required."
+
+    dbg $(jq . "$dh_allocate_json")
+    dhid=$(jq -r -e '.HostIds[0]' "$dh_allocate_json")
+    [[ -n "$dhid" ]] || \
+        die "Obtaining DH ID of new host. Manual debugging & cleanup required."
+
+    # There's a small delay between allocating the dedicated host and LaunchInstances.sh
+    # being able to interact with it. There's no sensible way to monitor for this state :(
+    sleep 3s
+else  # A dedicated host already exists
+    dhid=$(jq -r -e '.[0]' "$existing_dh_json")
+fi
+
+# Normally allocation is fairly instant, but not always. Confirm we're able to actually
+# launch a mac instance onto the dedicated host.
+for ((attempt=1 ; attempt < 11 ; attempt++)); do
+    msg "Attempt #$attempt launching a new instance on dedicated host"
+    ./LaunchInstances.sh --force
+    if grep -E "^${INST_NAME}-0 i-" dh_status.txt; then
+        attempt=-1  # signal success
+        break
+    fi
+    sleep 1s
+done
+
+[[ "$attempt" -eq -1 ]] || \
+    die "Failed to use LaunchInstances.sh. Manual debugging & cleanup required."
+
+# At this point the script could call SetupInstances.sh in another loop
+# but it takes about 20-minutes to complete. Also, the developer may
+# not need it, they may simply want to ssh into the instance to poke
+# around. i.e. they don't need to run any Cirrus-CI jobs on the test
+# instance.
+warn "---"
+warn "NOT copying/running setup.sh to new instance (in case manual activities are desired)."
+warn "---"
+
+w="PLEASE REMEMBER TO terminate instance, wait two hours, then
+remove the dedicated-host in the web console, or run
+'aws ec2 release-hosts --host-ids=$dhid'."
+
+msg "---"
+msg "Dropping you into a shell inside a temp. repo clone:
+($TMP_CLONE_DIRPATH/$LIB_DIRNAME)"
+msg "---"
+msg "Once it finishes booting (5m), you may use './InstanceSSH.sh ${INST_NAME}-0'
+to access it. Otherwise to fully setup the instance for Cirrus-CI, you need
+to execute './SetupInstances.sh' repeatedly until the ${INST_NAME}-0 line in
+'pw_status.txt' includes the text 'complete alive'. That process can take 20+
+minutes. Once alive, you may then use Cirrus-CI to test against this specific
+instance with any 'persistent_worker' task having a label of
+'$DH_REQ_TAG=$DH_REQ_VAL' set."
+msg "---"
+warn "$w"
+
+export POOLTOKEN  # ensure availability in sub-shell
+bash -l
+
+warn "$w"
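A rough usage sketch for the new allocation script above, assuming the prerequisites from its header comment are met (aws CLI configured for us-east-1, $POOLTOKEN exported, ssh-agent loaded); the host ID shown is a placeholder:

    # Allocate a test dedicated host plus instance; the script drops you into a
    # shell inside a temporary clone of the repo.
    export POOLTOKEN=...              # value from the pool page referenced above
    ./AllocateTestDH.sh

    # From that shell (only needed if you intend to run Cirrus-CI tasks):
    ./SetupInstances.sh               # repeat until pw_status.txt shows 'complete alive'
    ./InstanceSSH.sh "${USER}Testing-0"

    # Manual cleanup, per the warnings the script prints: terminate the instance,
    # wait the required period, then release the dedicated host, e.g.
    aws ec2 release-hosts --host-ids=h-0123456789abcdef0   # placeholder host ID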
@@ -53,8 +53,8 @@ timestamp=$(date -u -Iseconds -d \
                 awk '{print $4}'))
 pw_state=$(grep -E -v '^($|#+| +)' "$PWSTATE")
 n_workers=$(grep 'complete alive' <<<"$pw_state" | wc -l)
-n_tasks=$(awk 'BEGIN{B=0} /MacM1-[0-9]+ complete alive/{B+=$4} END{print B}' <<<"$pw_state")
-n_taskf=$(awk 'BEGIN{E=0} /MacM1-[0-9]+ complete alive/{E+=$5} END{print E}' <<<"$pw_state")
+n_tasks=$(awk "BEGIN{B=0} /${DH_PFX}-[0-9]+ complete alive/{B+=\$4} END{print B}" <<<"$pw_state")
+n_taskf=$(awk "BEGIN{E=0} /${DH_PFX}-[0-9]+ complete alive/{E+=\$5} END{print E}" <<<"$pw_state")
 printf "%s,%i,%i,%i\n" "$timestamp" "$n_workers" "$n_tasks" "$n_taskf" | tee -a "$uzn_file"

 # Prevent uncontrolled growth of utilization.csv. Assume this script
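The quoting change above is what lets `${DH_PFX}` reach the awk pattern: double quotes let the shell interpolate the prefix while the escaped `\$4`/`\$5` are left for awk to expand. A small sketch with made-up status lines (the column layout here is only illustrative):

    #!/bin/bash
    DH_PFX="MacM1"
    pw_state='MacM1-0 complete alive 7 6
    MacM1-1 complete alive 3 3
    MacM1-2 setup started 0 0'

    n_workers=$(grep -c 'complete alive' <<<"$pw_state")
    n_tasks=$(awk "BEGIN{B=0} /${DH_PFX}-[0-9]+ complete alive/{B+=\$4} END{print B}" <<<"$pw_state")
    n_taskf=$(awk "BEGIN{E=0} /${DH_PFX}-[0-9]+ complete alive/{E+=\$5} END{print E}" <<<"$pw_state")
    echo "$n_workers workers, counters: $n_tasks / $n_taskf"   # 2 workers, counters: 10 / 9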
@@ -59,7 +59,7 @@ inst_failure() {
 }

 # Find dedicated hosts to operate on.
-dh_name_flt="Name=tag:Name,Values=MacM1-*"
+dh_name_flt="Name=tag:Name,Values=${DH_PFX}-*"
 dh_tag_flt="Name=tag:$DH_REQ_TAG,Values=$DH_REQ_VAL"
 dh_qry='Hosts[].{HostID:HostId, Name:[Tags[?Key==`Name`].Value][] | [0]}'
 dh_searchout="$TEMPDIR/hosts.output"  # JSON or error message

@@ -93,14 +93,14 @@ dcmpfmt="+%Y%m%d%H%M"  # date comparison format compatible with numeric 'test'
 # their launch timestamps.
 declare -a pw_filt
 pw_filts=(
-    'Name=tag:Name,Values=MacM1-*'
+    "Name=tag:Name,Values=${DH_PFX}-*"
     'Name=tag:PWPoolReady,Values=true'
     "Name=tag:$DH_REQ_TAG,Values=$DH_REQ_VAL"
     'Name=instance-state-name,Values=running'
 )
 pw_query='Reservations[].Instances[].LaunchTime'
 inst_lt_f=$TEMPDIR/inst_launch_times
-dbg "Obtaining launch times for all running MacM1-* instances"
+dbg "Obtaining launch times for all running ${DH_PFX}-* instances"
 dbg "$AWS ec2 describe-instances --filters '${pw_filts[*]}' --query '$pw_query' &> '$inst_lt_f'"
 if ! $AWS ec2 describe-instances --filters "${pw_filts[@]}" --query "$pw_query" &> "$inst_lt_f"; then
     die "Can not query instances:

@@ -231,7 +231,7 @@ for name_hostid in "${NAME2HOSTID[@]}"; do
     if ((launch_new)); then
         msg "Creating new $name instance on $name host."
         if ! $AWS ec2 run-instances \
-            --launch-template LaunchTemplateName=CirrusMacM1PWinstance \
+            --launch-template LaunchTemplateName=${TEMPLATE_NAME} \
             --tag-specifications \
             "ResourceType=instance,Tags=[{Key=Name,Value=$name},{Key=$DH_REQ_TAG,Value=$DH_REQ_VAL},{Key=PWPoolReady,Value=true},{Key=automation,Value=true}]" \
             --placement "HostId=$hostid" &> "$instoutput"; then

@@ -1,8 +1,10 @@
 # Cirrus-CI persistent worker maintenance

-These docs and scripts were implemented in a hurry. They both likely
-contain cringe-worthy content and incomplete information.
-This might be improved in the future. Sorry.
+These scripts are intended to be used from a repository clone,
+by cron, on an always-on cloud machine. They make a lot of
+other assumptions, some of which may not be well documented.
+Please see the comments at the top of each scripts for more
+detailed/specific information.

 ## Prerequisites

@@ -12,21 +14,26 @@ This might be improved in the future. Sorry.
 * A copy of the ssh-key referenced by `CirrusMacM1PWinstance` launch template
   under "Assumptions" below.
 * The ssh-key has been added to a running ssh-agent.
+* The running ssh-agent sh-compatible env. vars. are stored in
+  `/run/user/$UID/ssh-agent.env`
 * The env. var. `POOLTOKEN` is set to the Cirrus-CI persistent worker pool
   token value.

 ## Assumptions

-* You've read all scripts in this directory and meet any requirements
-  stated within.
+* You've read all scripts in this directory, generally follow
+  their purpose, and meet any requirements stated within the
+  header comment.
+* You've read the [private documentation](https://docs.google.com/document/d/1PX6UyqDDq8S72Ko9qe_K3zoV2XZNRQjGxPiWEkFmQQ4/edit)
+  and understand the safety/security section.
 * You have permissions to access all referenced AWS resources.
 * There are one or more dedicated hosts allocated and have set:
-  * A name tag like `MacM1-<some number>`
+  * A name tag like `MacM1-<some number>` (NO SPACES!)
   * The `mac2` instance family
   * The `mac2.metal` instance type
   * Disabled "Instance auto-placement", "Host recovery", and "Host maintenance"
   * Quantity: 1
-  * Tags: `automation=false` and `PWPoolReady=true`
+  * Tags: `automation=false`, `purpose=prod`, and `PWPoolReady=true`
 * The EC2 `CirrusMacM1PWinstance` instance-template exists and sets:
   * Shutdown-behavior: terminate
   * Same "key pair" referenced under `Prerequisites`

@@ -39,7 +46,7 @@ The goal is to maintain sufficient alive/running/working instances
 to service most Cirrus-CI tasks pointing at the pool. This is
 best achieved with slower maintenance of hosts compared to setup
 of ready instances. This is because hosts can be inaccessible for
-1-1/2 hours, but instances come up in ~10-20m, ready to run tasks.
+up to 2 hours, but instances come up in ~10-20m, ready to run tasks.

 Either hosts and/or instances may be removed from management by
 setting "false" or removing their `PWPoolReady=true` tag. Otherwise,

@@ -47,18 +54,22 @@ the pool should be maintained by installing the crontab lines
 indicated in the `Cron.sh` script.

 Cirrus-CI will assign tasks (specially) targeted at the pool, to an
-instance with a running listener. If there are none, the task will
-queue forever (there might be a 24-hour timeout, I can't remember).
-From a PR perspective, there is zero control over which instance you
-get. It could easily be one somebody's previous task barfed all over
-and ruined.
+instance with a running listener (`cirrus worker run` process). If
+there are none, the task will queue forever (there might be a 24-hour
+timeout, I can't remember). From a PR perspective, there is little
+control over which instance you get. It could easily be one where
+a previous task barfed all over and rendered unusable.

 ## Initialization

-When no dedicated hosts have instances running, complete creation and
-setup will take many hours. This may be bypassed by *manually* running
-`LaunchInstances.sh --force`. This should be done prior to installing
-the `Cron.sh` cron-job.
+It is assumed that neither the `Cron.sh` nor any related maintenance
+scripts are installed (in crontab) or currently running.
+
+Once several dedicated hosts have been manually created, they
+should initially have no instances on them. If left alone, the
+maintenance scripts will eventually bring them all up, however
+complete creation and setup will take many hours. This may be
+bypassed by *manually* running `LaunchInstances.sh --force`.

 In order to prevent all the instances from being recycled at the same
 (future) time, the shutdown time installed by `SetupInstances.sh` also

@@ -71,39 +82,57 @@ Now the `Cron.sh` cron-job may be installed, enabled and started.
 ## Manual Testing

 Verifying changes to these scripts / cron-job must be done manually.
-To support this, every dedicated host has a `purpose` tag set, which
-must correspond to the value indicated in `pw_lib.sh`. To test script
-changes, first create one or more dedicated hosts with a unique `purpose`
-tag (like "cevich-testing"). Then temporarily update `pw_lib.sh` to use
-that value.
-
-***Importantly***, if running test tasks against the test workers,
-ensure you also customize the `purpose` label in the `cirrus.yml` task(s).
-Without this, production tasks will get scheduled on your testing instances.
-Just be sure to revert all the `purpose` values back to `prod`
-(and destroy related dedicated hosts) before any PRs get merged.
-
-## Security
-
-To thwart attempts to hijack or use instances for nefarious purposes,
-each employs three separate self-termination mechanisms. Two of them
-depend on the instance's shutdown behavior being set to `terminate`
-(see above). These mechanisms also ensure the workers remain relatively
-"clean" an "fresh" from a "CI-Working" perspective.
-
-Note: Should there be an in-flight CI task on a worker at
-shutdown, Cirrus-CI will perform a single automatic re-run on an
-available worker.
-
-1. Daily, a Cirrus-cron job runs and kills any instance running longer
-   than 3 days.
-2. Each instance's startup script runs a background 2-day sleep and
-   shutdown command (via MacOS-init consuming instance user-data).
-3. A setup script run on each instance starts a pool-listener
-   process.
-   1. If the worker process dies the instance shuts down.
-   2. After 24 +/-4 hours the instance shuts down if there are no
-      cirrus-agent processes (presumably servicing a CI task).
-   3. After 2 more hours, the instance shuts down regardless of any
-      running agents - probably hung/stuck agent process or somebody's
-      started a fake agent doing "bad things".
+To support this, every dedicated host and instance has a `purpose`
+tag, which must correspond to the value indicated in `pw_lib.sh`
+and in the target repo `.cirrus.yml`. To test script and/or
+CI changes:
+
+1. Make sure you have locally met all requirements spelled out in the
+   header-comment of `AllocateTestDH.sh`.
+1. Execute `AllocateTestDH.sh`. It will operate out of a temporary
+   clone of the repository to prevent pushing required test-modifications
+   upstream.
+1. Repeatedly execute `SetupInstances.sh`. It will update `pw_status.txt`
+   with any warnings/errors. When successful, lines will include
+   the host name, "complete", and "alive" status strings.
+1. If instance debugging is needed, the `InstanceSSH.sh` script may be
+   used. Simply pass the name of the host you want to access. Every
+   instance should have a `setup.log` file in the `ec2-user` homedir. There
+   should also be `/private/tmp/<name>-worker.log` with entries from the
+   pool listener process.
+1. To test CI changes against the test instance(s), push a PR that includes
+   `.cirrus.yml` changes to the task's `persistent_worker` dictionary's
+   `purpose` attribute. Set the value the same as the tag in step 1.
+1. When you're done with all testing, terminate the instance. Then wait
+   a full 24-hours before "releasing" the dedicated host. Both operations
+   can be performed using the AWS EC2 WebUI. Please remember to do the
+   release step, as the $-clock continues to run while it's allocated.
+
+Note: Instances are set to auto-terminate on shutdown. They should
+self shutdown after 24-hours automatically. After termination for
+any cause, there's about a 2-hour waiting period before a new instance
+can be allocated. The `LaunchInstances.sh` script is able deal with this
+properly.
+
+## Script Debugging Hints
+
+* On each MacOS instance:
+  * The pool listener process (running as the worker user) keeps a log under `/private/tmp`. The
+    file includes the registered name of the worker. For example, on MacM1-7 you would find `/private/tmp/MacM1-7-worker.log`.
+    This log shows tasks taken on, completed, and any errors reported back from Cirrus-CI internals.
+  * In the ec2-user's home directory is a `setup.log` file. This stores the output from executing
+    `setup.sh`. It also contains any warnings/errors from the (very important) `service_pool.sh` script - which should
+    _always_ be running in the background.
+  * There are several drop-files in the `ec2-user` home directory which are checked by `SetupInstances.sh`
+    to record state. If removed, along with `setup.log`, the script will re-execute (a possibly newer version of) `setup.sh`.
+* On the management host:
+  * Automated operations are setup and run by `Cron.sh`, and logged to `Cron.log`. When running scripts manually, `Cron.sh`
+    can serve as a template for the intended order of operations.
+  * Critical operations are protected by a mandatory, exclusive file lock on `mac_pw_pool/Cron.sh`. Should
+    there be a deadlock, management of the pool (by `Cron.sh`) will stop. However the effects of this will not be observed
+    until workers begin hitting their lifetime and/or task limits.
+  * Without intervention, the `nightly_maintenance.sh` script will update the containers/automation repo clone on the
+    management VM. This happens if the repo becomes out of sync by more than 7 days (or as defined in the script).
+    When the repo is updated, the `pw_pool_web` container will be restarted. The container will also be restarted if its
+    found to not be running.
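A condensed sketch of the manual-testing flow described above, assuming the `AllocateTestDH.sh` prerequisites are met; the worker name follows the `${USER}Testing-0` convention that script uses:

    # 1. Allocate a throw-away dedicated host + instance (drops into a temp clone).
    ./AllocateTestDH.sh

    # 2. Poll until the test worker's line in pw_status.txt reads 'complete alive'.
    until grep -q "${USER}Testing-0 .*complete alive" pw_status.txt 2>/dev/null; do
        ./SetupInstances.sh
        sleep 60
    done

    # 3. Debug interactively if needed; on the instance, check ~ec2-user/setup.log
    #    and /private/tmp/<name>-worker.log as described above.
    ./InstanceSSH.sh "${USER}Testing-0"

    # 4. Point a PR's persistent_worker 'purpose' label at the test value, run CI,
    #    then terminate the instance and later release the dedicated host.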
@@ -104,6 +104,10 @@ fi
 [[ -r "$DHSTATE" ]] || \
     die "Can't read from state file: $DHSTATE"

+if [[ -z "$SSH_AUTH_SOCK" ]] || [[ -z "$SSH_AGENT_PID" ]]; then
+    die "Cannot access an ssh-agent. Please run 'ssh-agent -s > /run/user/$UID/ssh-agent.env' and 'ssh-add /path/to/required/key'."
+fi
+
 declare -a _dhstate
 readarray -t _dhstate <<<$(grep -E -v '^($|#+| +)' "$DHSTATE" | sort)
 n_inst=0

@@ -134,14 +138,15 @@ if [[ -r "$PWSTATE" ]]; then
 fi

 # Assuming the `--force` option was used to initialize a new pool of
-# workers, then instances need to be configured with a self-termination
-# shutdown delay. This ensures future replacement instances creation
-# is staggered, soas to maximize overall worker utilization.
-term_addtl=0
+# workers, then instances need to be configured with a staggered
+# self-termination shutdown delay. This prevents all the instances
+# from being terminated at the same time, potentially impacting
+# CI usage.
+runtime_hours_reduction=0
 # shellcheck disable=SC2199
 if [[ "$@" =~ --force ]]; then
-    warn "Forcing instance creation: Ignoring staggered creation limits."
-    term_addtl=1  # Multiples of $CREATE_STAGGER_HOURS to add to shutdown delay
+    warn "Forcing instance creation w/ staggered existence limits."
+    runtime_hours_reduction=$CREATE_STAGGER_HOURS
 fi

 for _dhentry in "${_dhstate[@]}"; do

@@ -201,6 +206,27 @@ for _dhentry in "${_dhstate[@]}"; do
         continue
     fi

+    # It's really important that instances have a defined and risk-relative
+    # short lifespan. Multiple mechanisms are in place to assist, but none
+    # are perfect. Ensure instances running for an excessive time are forcefully
+    # terminated as soon as possible from this script.
+    launch_epoch=$(date -u -d "$launch_time" +%s)
+    now_epoch=$(date -u +%s)
+    age_sec=$((now_epoch-launch_epoch))
+    hard_max_sec=$((PW_MAX_HOURS*60*60*2))  # double PW_MAX_HOURS
+    dbg "launch_epoch=$launch_epoch"
+    dbg "   now_epoch=$now_epoch"
+    dbg "     age_sec=$age_sec"
+    dbg "hard_max_sec=$hard_max_sec"
+    # Soft time limit is enforced via 'sleep $PW_MAX_HOURS && shutdown' started during instance setup (below).
+    msg "Instance alive for $((age_sec/60/60)) hours (soft max: $PW_MAX_HOURS hard: $((hard_max_sec/60/60)))"
+    if [[ $age_sec -gt $hard_max_sec ]]; then
+        force_term "Excess instance lifetime; $(((age_sec - hard_max_sec)/60))m past hard max limit."
+        continue
+    elif [[ $age_sec -gt $((PW_MAX_HOURS*60*60)) ]]; then
+        pwst_warn "Instance alive longer than soft max. Investigation recommended."
+    fi
+
     dbg "Attempting to contact '$name' at $pub_dns"
     if ! nc -z -w 13 $pub_dns 22 &> "$ncoutput"; then
         pwst_warn "Could not connect to port 22 on '$pub_dns' $(ctx 0)."

@@ -244,24 +270,6 @@ for _dhentry in "${_dhstate[@]}"; do
         continue
     fi

-    # It's really important that instances have a defined and risk-relative
-    # short lifespan. Multiple mechanisms are in place to assist, but none
-    # are perfect. Ensure instances running for an excessive time are forcefully
-    # terminated as soon as possible from this script.
-    launch_epoch=$(date -u -d "$launch_time" +%s)
-    now_epoch=$(date -u +%s)
-    age_sec=$((now_epoch-launch_epoch))
-    hard_max_sec=$((PW_MAX_HOURS*60*60*2))  # double PW_MAX_HOURS
-    dbg "launch_epoch=$launch_epoch"
-    dbg "   now_epoch=$now_epoch"
-    dbg "     age_sec=$age_sec"
-    dbg "hard_max_sec=$hard_max_sec"
-    msg "Instance alive for $((age_sec/60/60)) hours (max $PW_MAX_HOURS)"
-    if [[ $age_sec -gt $hard_max_sec ]]; then
-        force_term "Excess instance lifetime (+$((age_sec-hard_max_sec))s)"
-        continue
-    fi
-
     if ! $SSH ec2-user@$pub_dns test -r .setup.done; then

         if ! $SSH ec2-user@$pub_dns test -r .setup.started; then

@@ -291,8 +299,15 @@ for _dhentry in "${_dhstate[@]}"; do
             continue  # try again next loop
         fi

-        shutdown_seconds=$((60*60*term_addtl*CREATE_STAGGER_HOURS + 60*60*PW_MAX_HOURS))
-        pwst_msg "Starting automatic instance recycling in $((term_addtl*CREATE_STAGGER_HOURS + PW_MAX_HOURS)) hours"
+        # Keep runtime_hours_reduction w/in sensible, positive bounds.
+        if [[ $runtime_hours_reduction -ge $((PW_MAX_HOURS - CREATE_STAGGER_HOURS)) ]]; then
+            runtime_hours_reduction=$CREATE_STAGGER_HOURS
+        fi
+
+        shutdown_seconds=$((60*60*PW_MAX_HOURS - 60*60*runtime_hours_reduction))
+        [[ $shutdown_seconds -gt $((60*60*CREATE_STAGGER_HOURS)) ]] || \
+            die "Detected unacceptably short \$shutdown_seconds ($shutdown_seconds) value."
+        pwst_msg "Starting automatic instance recycling in $((shutdown_seconds/60/60)) hours"
         # Darwin is really weird WRT active terminals and the shutdown
         # command. Instead of installing a future shutdown, stick an
         # immediate shutdown at the end of a long sleep. This is the

@@ -318,8 +333,11 @@ for _dhentry in "${_dhstate[@]}"; do
         msg "Setup script started."
         set_pw_status setup started

-        # If starting multiple instance for any reason, stagger shutdowns.
-        term_addtl=$((term_addtl+1))
+        # No sense in incrementing if there was a failure running setup
+        # shellcheck disable=SC2199
+        if [[ "$@" =~ --force ]]; then
+            runtime_hours_reduction=$((runtime_hours_reduction + CREATE_STAGGER_HOURS))
+        fi

         # Let setup run in the background
         continue

@@ -409,8 +427,15 @@ for _dhentry in "${_dhstate[@]}"; do
     msg "Apparent tasks started/finished/running: $n_started_tasks $n_finished_tasks $((n_started_tasks-n_finished_tasks)) (max $PW_MAX_TASKS)"

     dbg "Checking apparent task limit"
-    if [[ "$n_finished_tasks" -gt $PW_MAX_TASKS ]]; then
+    # N/B: This is only enforced based on the _previous_ run of this script worker-count.
+    # Doing this on the _current_ alive worker count would add a lot of complexity.
+    if [[ "$n_finished_tasks" -gt $PW_MAX_TASKS ]] && [[ $n_pw_total -gt $PW_MIN_ALIVE ]]; then
+        # N/B: Termination based on _finished_ tasks, so if a task happens to be currently running
+        # it will very likely have _just_ started in the last few seconds. Cirrus will retry
+        # automatically on another worker.
         force_term "Instance exceeded $PW_MAX_TASKS apparent tasks."
+    elif [[ $n_pw_total -le $PW_MIN_ALIVE ]]; then
+        pwst_warn "Not enforcing max-tasks limit, only $n_pw_total workers online last run."
     fi
 done
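The recycling math above is easier to see with the defaults from `pw_lib.sh` shown further down (PW_MAX_HOURS=24, CREATE_STAGGER_HOURS=2): each newly set-up instance gets a shutdown delay of PW_MAX_HOURS minus a growing multiple of CREATE_STAGGER_HOURS, wrapped back around before it could become too small. A standalone sketch of that arithmetic plus the hard age limit (the launch time is hypothetical):

    #!/bin/bash
    PW_MAX_HOURS=24
    CREATE_STAGGER_HOURS=2

    runtime_hours_reduction=$CREATE_STAGGER_HOURS   # as set by --force
    for instance in 0 1 2; do
        # Same positive-bounds guard as the script uses.
        if [[ $runtime_hours_reduction -ge $((PW_MAX_HOURS - CREATE_STAGGER_HOURS)) ]]; then
            runtime_hours_reduction=$CREATE_STAGGER_HOURS
        fi
        shutdown_seconds=$((60*60*PW_MAX_HOURS - 60*60*runtime_hours_reduction))
        echo "instance $instance recycles in $((shutdown_seconds/60/60))h"
        runtime_hours_reduction=$((runtime_hours_reduction + CREATE_STAGGER_HOURS))
    done
    # -> 22h, 20h, 18h: the instances never all expire at the same moment.

    # Hard lifetime enforcement is simply double the soft limit:
    launch_time="2024-01-01T00:00:00+00:00"   # hypothetical LaunchTime value
    age_sec=$(( $(date -u +%s) - $(date -u -d "$launch_time" +%s) ))
    hard_max_sec=$((PW_MAX_HOURS*60*60*2))
    (( age_sec > hard_max_sec )) && echo "would force-terminate this instance"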
@@ -18,7 +18,8 @@ set xrange [(system("date -u -Iseconds -d '26 hours ago'")):(system("date -u -Is

 set ylabel "Workers Online"
 set ytics border nomirror numeric
-set yrange [0:(system("grep 'MacM1' dh_status.txt | wc -l") * 1.5)]
+# Not practical to lookup $DH_PFX from pw_lib.sh
+set yrange [0:(system("grep -E '^[a-zA-Z0-9]+-[0-9]' dh_status.txt | wc -l") * 1.5)]

 set y2label "Worker Utilization"
 set y2tics border nomirror numeric

@@ -9,7 +9,7 @@ WEB_IMG="docker.io/library/nginx:latest"
 CRONLOG="Cron.log"
 CRONSCRIPT="Cron.sh"
 KEEP_LINES=10000
-MAX_REPO_AGE_DAYS=21
+REFRESH_REPO_EVERY=7  # days

 # Do not use, these are needed to control script execution.
 _CNTNAME=pw_pool_web

@@ -22,10 +22,21 @@ if [[ ! -r "$CRONLOG" ]] || [[ ! -r "$CRONSCRIPT" ]] || [[ ! -d "../.git" ]]; th
 fi

 relaunch_web_container() {
-    # Assume code has changed, restart container w/ latest image
+    # Assume code change or image update, restart container.
     (
+        # Prevent podman and/or sub-processes from inheriting the lock FD.
+        # This would deadlock all future runs of this script or Cron.sh
+        # Can't use `flock --close ...` here because it "hangs" in this context.
+        for fd_nr in $(/bin/ls /proc/self/fd/); do
+            [[ $fd_nr -ge 3 ]] || \
+                continue
+            # Bash doesn't allow direct substitution of the FD number
+            eval "exec $fd_nr>&-"
+        done
+
         set -x
-        podman run --replace --name "$_CNTNAME" -it --rm --pull=newer -p 8080:80 \
+        podman run --replace --name "$_CNTNAME" -d --rm --pull=newer -p 8080:80 \
             -v $HOME/devel/automation/mac_pw_pool/html:/usr/share/nginx/html:ro,Z \
             $WEB_IMG
     )

@@ -37,20 +48,13 @@ relaunch_web_container() {
 echo "$SCRIPTNAME running at $(date -u -Iseconds)"

 if ! ((_RESTARTED_SCRIPT)); then
-    # Make sure the recent code is being used.
-    last_commit_date=$(git log -1 --format="%cI" --no-show-signature HEAD)
-    last_s=$(date -d "$last_commit_date" +%s)
-    now_s=$(date -u +%s)
-    diff_s=$((now_s-last_s))
-
-    if [[ "$diff_s" -gt $(($MAX_REPO_AGE_DAYS*24*60*60)) ]]; then
+    today=$(date -u +%d)
+    if ((today%REFRESH_REPO_EVERY)); then
         git remote update && git reset --hard origin/main
         # maintain the same flock
-        echo "$SCRIPTNAME updatedd code older than $MAX_REPO_AGE_DAYS days, restarting script..."
+        echo "$SCRIPTNAME updatedd code after $REFRESH_REPO_EVERY days, restarting script..."
         env _RESTARTED_SCRIPT=1 _FLOCKER=$_FLOCKER "$0" "$@"
         exit $?  # all done
-    else
-        echo "$SCRIPTNAME code appears recent ($last_commit_date), yay!"
     fi
 fi
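The descriptor-closing loop added above matters because `Cron.sh` serializes everything with an exclusive `flock` that child processes inherit; a detached, long-lived container holding that descriptor would block every later cron run. A minimal sketch of the same close-inherited-FDs idiom, independent of podman (lock path is hypothetical):

    #!/bin/bash
    lockfile=/tmp/demo.lock            # hypothetical stand-in for mac_pw_pool/Cron.sh
    exec 3>"$lockfile"
    flock -n 3 || { echo "another run holds the lock"; exit 1; }

    (
        # Close inherited FDs >= 3 so the long-lived child cannot keep the lock alive.
        for fd_nr in $(/bin/ls /proc/self/fd/); do
            [[ $fd_nr -ge 3 ]] || continue
            eval "exec $fd_nr>&-"      # eval needed: bash won't take a variable FD here
        done
        sleep 300 &                    # stand-in for 'podman run -d ...'
    )
    # When this script exits, FD 3 closes and the lock is released immediately,
    # even though the background child is still running.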
@@ -7,9 +7,15 @@
 SCRIPT_FILENAME=$(basename "$0")  # N/B: Caller's arg0, not this library file path.
 SCRIPT_DIRPATH=$(dirname "$0")
 LIB_DIRPATH=$(dirname "${BASH_SOURCE[0]}")
+REPO_DIRPATH=$(realpath "$LIB_DIRPATH/../")
 TEMPDIR=$(mktemp -d -p '' "${SCRIPT_FILENAME}_XXXXX.tmp")
 trap "rm -rf '$TEMPDIR'" EXIT

+# Dedicated host name prefix; Actual name will have a "-<X>" (number) appended.
+# N/B: ${DH_PFX}-<X> _MUST_ match dedicated host names as listed in dh_status.txt
+# using the regex ^[a-zA-Z0-9]+-[0-9] (see Utilization.gnuplot)
+DH_PFX="MacM1"
+
 # Only manage dedicated hosts with the following tag & value
 DH_REQ_TAG="purpose"
 DH_REQ_VAL="prod"

@@ -42,15 +48,16 @@ CREATE_STAGGER_HOURS=2

 # Instance shutdown controls (assumes terminate-on-shutdown behavior)
 PW_MAX_HOURS=24  # Since successful configuration
-PW_MAX_TASKS=30  # Logged by listener (N/B: Can be manipulated by tasks!)
+PW_MAX_TASKS=24  # Logged by listener (N/B: Log can be manipulated by tasks!)
+PW_MIN_ALIVE=3  # Bypass enforcement of $PW_MAX_TASKS if <= alive/operating workers

 # How long to wait for setup.sh to finish running (drop a .setup.done file)
 # before forcibly terminating.
-SETUP_MAX_SECONDS=1200  # Typical time ~600seconds
+SETUP_MAX_SECONDS=2400  # Typical time ~10 minutes, use 2x safety-factor.

 # Name of launch template. Current/default version will be used.
 # https://us-east-1.console.aws.amazon.com/ec2/home?region=us-east-1#LaunchTemplates:
-TEMPLATE_NAME="${TEMPLATE_NAME:-CirrusMacM1PWinstance}"
+TEMPLATE_NAME="${TEMPLATE_NAME:-Cirrus${DH_PFX}PWinstance}"

 # Path to scripts to copy/execute on Darwin instances
 SETUP_SCRIPT="$LIB_DIRPATH/setup.sh"
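Note the expansion above: `TEMPLATE_NAME` honors a value already present in the environment and only falls back to deriving one from `DH_PFX`, which is what lets the test-allocation script rename the host prefix while keeping the production launch template. A tiny sketch of that behavior (template names are illustrative):

    #!/bin/bash
    DH_PFX="MacM1"

    # No value pre-set: the name is derived from the prefix.
    unset TEMPLATE_NAME
    TEMPLATE_NAME="${TEMPLATE_NAME:-Cirrus${DH_PFX}PWinstance}"
    echo "$TEMPLATE_NAME"    # -> CirrusMacM1PWinstance

    # A pre-set value wins over the derivation (what a test clone relies on).
    TEMPLATE_NAME="SomeExistingTemplate" \
        bash -c 'DH_PFX="aliceTesting"
                 TEMPLATE_NAME="${TEMPLATE_NAME:-Cirrus${DH_PFX}PWinstance}"
                 echo "$TEMPLATE_NAME"'   # -> SomeExistingTemplate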
@ -9,6 +9,10 @@
|
||||||
#
|
#
|
||||||
# This script should be called with a single argument string,
|
# This script should be called with a single argument string,
|
||||||
# of the label YAML to configure. For example "purpose: prod"
|
# of the label YAML to configure. For example "purpose: prod"
|
||||||
|
#
|
||||||
|
# N/B: Under special circumstances, this script (possibly with modifications)
|
||||||
|
# can be executed more than once. All operations which modify state/config.
|
||||||
|
# must be wrapped in conditional checks.
|
||||||
|
|
||||||
set -eo pipefail
|
set -eo pipefail
|
||||||
|
|
||||||
|
@ -74,10 +78,46 @@ grep -q homebrew /etc/paths || \
|
||||||
# environment isn't loaded automatically.
|
# environment isn't loaded automatically.
|
||||||
. /etc/profile
|
. /etc/profile
|
||||||
|
|
||||||
msg "Installing podman-machine, testing, and CI deps. (~2m install time)"
|
msg "Installing podman-machine, testing, and CI deps. (~5-10m install time)"
|
||||||
if [[ ! -x /usr/local/bin/gvproxy ]]; then
|
if [[ ! -x /usr/local/bin/gvproxy ]]; then
|
||||||
brew tap cfergeau/crc
|
declare -a brew_taps
|
||||||
brew install go go-md2man coreutils pstree vfkit cirruslabs/cli/cirrus
|
declare -a brew_formulas
|
||||||
|
|
||||||
|
brew_taps=(
|
||||||
|
# Required to use upstream vfkit
|
||||||
|
cfergeau/crc
|
||||||
|
|
||||||
|
# Required to use upstream krunkit
|
||||||
|
slp/krunkit
|
||||||
|
)
|
||||||
|
|
||||||
|
brew_formulas=(
|
||||||
|
# Necessary for worker-pool participation + task execution
|
||||||
|
cirruslabs/cli/cirrus
|
||||||
|
|
||||||
|
# Necessary for building podman|buildah|skopeo
|
||||||
|
go go-md2man coreutils pkg-config pstree gpgme
|
||||||
|
|
||||||
|
# Necessary to compress the podman repo tar
|
||||||
|
zstd
|
||||||
|
|
||||||
|
# Necessary for testing podman-machine
|
||||||
|
vfkit
|
||||||
|
|
||||||
|
# Necessary for podman-machine libkrun CI testing
|
||||||
|
krunkit
|
||||||
|
)
|
||||||
|
|
||||||
|
# msg() includes a ##### prefix, ensure this text is simply
|
||||||
|
# associated with the prior msg() output.
|
||||||
|
echo " Adding taps[] ${brew_taps[*]}"
|
||||||
|
echo " before installing formulas[] ${brew_formulas[*]}"
|
||||||
|
|
||||||
|
for brew_tap in "${brew_taps[@]}"; do
|
||||||
|
brew tap $brew_tap
|
||||||
|
done
|
||||||
|
|
||||||
|
brew install "${brew_formulas[@]}"
|
||||||
|
|
||||||
# Normally gvproxy is installed along with "podman" brew. CI Tasks
|
# Normally gvproxy is installed along with "podman" brew. CI Tasks
|
||||||
# on this instance will be running from source builds, so gvproxy must
|
# on this instance will be running from source builds, so gvproxy must
|
||||||
|
@ -137,11 +177,42 @@ if ! mount | grep -q "$PWUSER"; then
|
||||||
df -h
|
df -h
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Disk indexing is useless on a CI system, and creates un-deletable
|
||||||
|
# files whereever $TEMPDIR happens to be pointing. Ignore any
|
||||||
|
# individual volume failures that have an unknown state.
|
||||||
|
sudo mdutil -a -i off || true
|
||||||
|
|
||||||
# User likely has pre-existing system processes trying to use
|
# User likely has pre-existing system processes trying to use
|
||||||
# the (now) over-mounted home directory.
|
# the (now) over-mounted home directory.
|
||||||
sudo pkill -u $PWUSER || true
|
sudo pkill -u $PWUSER || true
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
msg "Setting up Rosetta"
|
||||||
|
# Rosetta 2 enables arm64 Mac to use Intel Apps. Only install if not present.
|
||||||
|
if ! arch -arch x86_64 /usr/bin/uname -m; then
|
||||||
|
sudo softwareupdate --install-rosetta --agree-to-license
|
||||||
|
echo -n "Confirming rosetta is functional"
|
||||||
|
if ! arch -arch x86_64 /usr/bin/uname -m; then
|
||||||
|
die "Rosetta installed but non-functional, see setup log for details."
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
msg "Restricting appstore/software install to admin-only"
|
||||||
|
# Abuse the symlink existance as a condition for running `sudo defaults write ...`
|
||||||
|
# since checking the state of those values is complex.
|
||||||
|
if [[ ! -L /usr/local/bin/softwareupdate ]]; then
|
||||||
|
# Ref: https://developer.apple.com/documentation/devicemanagement/softwareupdate
|
||||||
|
sudo defaults write com.apple.SoftwareUpdate restrict-software-update-require-admin-to-install -bool true
|
||||||
|
sudo defaults write com.apple.appstore restrict-store-require-admin-to-install -bool true
|
||||||
|
|
||||||
|
# Unf. interacting with the rosetta installer seems to bypass both of the
|
||||||
|
# above settings, even when run as a regular non-admin user. However, it's
|
||||||
|
# also desireable to limit use of the utility in a CI environment generally.
|
||||||
|
# Since /usr/sbin is read-only, but /usr/local is read-write and appears first
|
||||||
|
# in $PATH, deploy a really fragile hack as an imperfect workaround.
|
||||||
|
sudo ln -sf /usr/bin/false /usr/local/bin/softwareupdate
|
||||||
|
fi
|
||||||
|
|
||||||
# FIXME: Semi-secret POOLTOKEN value should not be in this file.
|
# FIXME: Semi-secret POOLTOKEN value should not be in this file.
|
||||||
# ref: https://github.com/cirruslabs/cirrus-cli/discussions/662
|
# ref: https://github.com/cirruslabs/cirrus-cli/discussions/662
|
||||||
cat << EOF | sudo tee $PWCFG > /dev/null
|
cat << EOF | sudo tee $PWCFG > /dev/null
|
||||||
|
|
|
@@ -18,6 +18,10 @@ if id -u "$PWUSER" &> /dev/null; then
 timeout_at=$((now+60*60*2))
 echo "Waiting up to 2 hours for any pre-existing cirrus agent (i.e. running task)"
 while pgrep -u $PWUSER -q -f "cirrus-ci-agent"; do
+    if [[ $(date -u +%s) -gt $timeout_at ]]; then
+        echo "Timeout waiting for cirrus-ci-agent to terminate"
+        break
+    fi
     echo "Found cirrus-ci-agent still running, waiting..."
     sleep 60
 done

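
Pieced together, the agent-wait logic this hunk touches comes out roughly as below (re-assembled only for readability; the initial now=$(date -u +%s) assignment is assumed from surrounding context the hunk does not show):

    now=$(date -u +%s)            # assumed to be set just above the hunk
    timeout_at=$((now+60*60*2))   # i.e. 2 hours from now, in epoch seconds
    echo "Waiting up to 2 hours for any pre-existing cirrus agent (i.e. running task)"
    while pgrep -u $PWUSER -q -f "cirrus-ci-agent"; do
        if [[ $(date -u +%s) -gt $timeout_at ]]; then
            echo "Timeout waiting for cirrus-ci-agent to terminate"
            break
        fi
        echo "Found cirrus-ci-agent still running, waiting..."
        sleep 60
    done
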
@@ -4,21 +4,20 @@ Validate this file before commiting with (from repository root):
 
     podman run -it \
         -v ./renovate/defaults.json5:/usr/src/app/renovate.json5:z \
-        docker.io/renovate/renovate:latest \
+        ghcr.io/renovatebot/renovate:latest \
             renovate-config-validator
 
+and/or use the pre-commit hook: https://github.com/renovatebot/pre-commit-hooks
 */
 
 {
   "$schema": "https://docs.renovatebot.com/renovate-schema.json",
 
   "description": "This is a basic preset intended\
   for reuse to reduce the amount of boiler-plate\
   configuration that otherwise would need to be\
   duplicated. It should be referenced from other\
   repositories renovate config under the 'extends'\
-  section as:\
-  github>containers/automation//renovate/defaults.json5\
+  section as: github>containers/automation//renovate/defaults.json5\
   (optionally with a '#X.Y.Z' version-tag suffix).",
 
   /*************************************************

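
As the description above spells out, downstream repositories consume this preset through the 'extends' list of their own renovate config. A minimal consumer-side sketch, shown as comments plus the same validation command the file header documents (the downstream file path and everything other than the 'extends' value are illustrative assumptions):

    # Hypothetical downstream repo config, e.g. .github/renovate.json5:
    #
    #   {
    #     "$schema": "https://docs.renovatebot.com/renovate-schema.json",
    #     "extends": [
    #       // optionally pin the preset with a '#X.Y.Z' version-tag suffix
    #       "github>containers/automation//renovate/defaults.json5"
    #     ]
    #   }
    #
    # ...then validate it the same way this preset is validated:
    podman run -it \
        -v ./.github/renovate.json5:/usr/src/app/renovate.json5:z \
        ghcr.io/renovatebot/renovate:latest \
            renovate-config-validator
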
@@ -34,7 +33,7 @@ Validate this file before commiting with (from repository root):
     ":gitSignOff",
 
     // Always rebase dep. update PRs from `main` when PR is stale
-    ":rebaseStalePrs",
+    ":rebaseStalePrs"
   ],
 
   // The default setting is ambiguous, explicitly base schedules on UTC

@@ -49,6 +48,7 @@ Validate this file before commiting with (from repository root):
   // Default setting is an "empty" schedule. Explicitly set this
   // such that security-alert PRs may be opened immediately.
   "vulnerabilityAlerts": {
+    // Distinguish PRs from regular dependency updates
     "labels": ["dependencies", "security"],
 
     // Force-enable renovate management of deps. which are otherwise

@@ -57,16 +57,13 @@ Validate this file before commiting with (from repository root):
     // (last-match wins rule).
     "enabled": true,
 
-    // Indirect dependencies are disabled by default for the `gomod` manager.
-    // However, for vulnerability updates we may want them even if they break
-    // during renovate's automatic top-level `go mod tidy`.
-    "packageRules": [
-      {
-        "matchManagers": ["gomod"],
-        "matchDepTypes": ["indirect"],
-        "enabled": true,
-      }
-    ]
+    // Note: As of 2024-06-25 indirect golang dependency handling is
+    // broken in Renovate, and disabled by default. This affects
+    // vulnerabilityAlerts in that if the dep is 'indirect' no PR
+    // will ever open, it must be handled manually. Attempting
+    // to enable indirect deps (for golang) in this section will
+    // not work, it will always be overriden by the global golang
+    // indirect dep. setting.
   },
 
   // On a busy repo, automatic-rebasing will swamp the CI system.

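
In practice the note above means a vulnerable indirect golang dependency never produces a Renovate PR and has to be bumped by hand in the affected repository. A rough sketch of that manual step (module path and version are placeholders, not taken from any real alert):

    go get golang.org/x/net@v0.23.0   # pull the fixed version into go.mod/go.sum
    go mod tidy                       # keep the module graph consistent
    git commit -a -s -m "Bump indirect golang.org/x/net to v0.23.0"
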
@@ -78,8 +75,12 @@ Validate this file before commiting with (from repository root):
   ***** Manager-specific configuration options *****
   **************************************************/
 
-  "regexManagers": [
+  "customManagers": [
+    // Track the latest CI VM images by tag on the containers/automation_images
+    // repo. Propose updates when newer tag available compared to what is
+    // referenced in a repo's .cirrus.yml file.
     {
+      "customType": "regex",
       "fileMatch": "^.cirrus.yml$",
       // Expected veresion format: c<automation_images IMG_SFX value>
       // For example `c20230120t152650z-f37f36u2204`

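
To make the intent of this manager concrete: it watches a repo's .cirrus.yml for an image-suffix value in the c<IMG_SFX> form shown above. A hypothetical fragment (the variable name is an assumption; only the tag format and example value come from the comments above):

    # e.g. somewhere in a consuming repo's .cirrus.yml:
    #
    #   env:
    #       IMAGE_SUFFIX: "c20230120t152650z-f37f36u2204"
    #
    # quick way to eyeball the currently-pinned tag(s) in a checkout:
    grep -E 'c[0-9]{8}t[0-9]{6}z' .cirrus.yml
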
@@ -87,28 +88,49 @@ Validate this file before commiting with (from repository root):
       "depNameTemplate": "containers/automation_images",
       "datasourceTemplate": "github-tags",
       "versioningTemplate": "loose",
-      "autoReplaceStringTemplate": "c{{{newVersion}}}",
+      "autoReplaceStringTemplate": "c{{{newVersion}}}"
     },
 
+    // For skopeo and podman, manage the golangci-lint version as
+    // referenced in their Makefile.
     {
+      "customType": "regex",
       "fileMatch": "^Makefile$",
       // make ignores whitespace around the value, make renovate do the same.
-      "matchStrings": ["GOLANGCI_LINT_VERSION\\s+:=\\s+(?<currentValue>.+)\\s*"],
+      "matchStrings": [
+        "GOLANGCI_LINT_VERSION\\s+:=\\s+(?<currentValue>.+)\\s*"
+      ],
       "depNameTemplate": "golangci/golangci-lint",
       "datasourceTemplate": "github-releases",
       "versioningTemplate": "semver-coerced",
       // Podman's installer script will puke if there's a 'v' prefix, as represented
       // in upstream golangci/golangci-lint releases.
-      "extractVersionTemplate": "v(?<version>.+)",
+      "extractVersionTemplate": "v(?<version>.+)"
     }
   ],
 
+  /*************************************************
+  ***** Language-specific configuration options ****
+  **************************************************/
+
+  // ***** ATTENTION WARNING CAUTION DANGER ***** //
+  // Go versions 1.21 and later will AUTO-UPDATE based on _module_
+  // _requirements_. ref: https://go.dev/doc/toolchain Because
+  // many different projects covered by this config, build under
+  // different distros and distro-versions, golang version consistency
+  // is desireable across build outputs. In golang 1.21 and later,
+  // it's possible to pin the version in each project using the
+  // toolchain go.mod directive. This should be done to prevent
+  // unwanted auto-updates.
+  // Ref: Upstream discussion https://github.com/golang/go/issues/65847
+  "constraints": {"go": "1.23"},
+
   // N/B: LAST MATCHING RULE WINS, match statems are ANDed together.
   // https://docs.renovatebot.com/configuration-options/#packagerules
   "packageRules": [
 
     /*************************************************
-    ***** Rust-specific configuration options *****
-    *************************************************/
+    ****** Rust-specific configuration options *******
+    **************************************************/
     {
       "matchCategories": ["rust"],
       // Update both Cargo.toml and Cargo.lock when possible

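
Since the warning above asks each covered project to pin its own toolchain in go.mod, here is a minimal project-side sketch of doing so (the version number is a placeholder; go mod edit -toolchain requires Go 1.21 or newer):

    go mod edit -toolchain=go1.23.6   # writes a "toolchain go1.23.6" directive into go.mod
    go mod tidy
    # go.mod then carries, for example:
    #   go 1.23
    #   toolchain go1.23.6
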
@@ -124,12 +146,12 @@ Validate this file before commiting with (from repository root):
       "rangeStrategy": "bump",
       // These packages roll updates far too often, slow them down.
       // Ref: https://github.com/containers/netavark/issues/772
-      "schedule": ["after 1am and before 11am on the first day of the month"],
+      "schedule": ["after 1am and before 11am on the first day of the month"]
     },
 
     /*************************************************
-    ***** Python-specific configuration options *****
-    *************************************************/
+    ****** Python-specific configuration options *****
+    **************************************************/
     {
       "matchCategories": ["python"],
       // Preserve (but continue to upgrade) any existing SemVer ranges.

@@ -137,23 +159,17 @@ Validate this file before commiting with (from repository root):
     },
 
     /*************************************************
-    ***** Golang-specific configuration options *****
-    *************************************************/
+    ****** Golang-specific configuration options *****
+    **************************************************/
     {
       "matchCategories": ["golang"],
 
       // disabled by default, safe to enable since "tidy" enforced by CI.
       "postUpdateOptions": ["gomodTidy"],
 
       // In case a version in use is retracted, allow going backwards.
       // N/B: This is NOT compatible with pseudo versions, see below.
       "rollbackPrs": false,
 
       // Preserve (but continue to upgrade) any existing SemVer ranges.
-      "rangeStrategy": "replace",
-
-      // N/B: LAST MATCHING RULE WINS
-      // https://docs.renovatebot.com/configuration-options/#packagerules
+      "rangeStrategy": "replace"
     },
 
     // Golang pseudo-version packages will spam with every Commit ID change.

@@ -161,7 +177,7 @@ Validate this file before commiting with (from repository root):
     {
       "matchCategories": ["golang"],
       "matchUpdateTypes": ["digest"],
-      "schedule": ["after 1am and before 11am on the first day of the month"],
+      "schedule": ["after 1am and before 11am on the first day of the month"]
     },
 
     // Package version retraction (https://go.dev/ref/mod#go-mod-file-retract)

@@ -174,6 +190,17 @@ Validate this file before commiting with (from repository root):
       "allowedVersions": "!/v((1.0.0)|(1.0.1))$/"
     },
 
+    // Skip updating the go.mod toolchain directive, humans will manage this.
+    {
+      "matchCategories": ["golang"],
+      "matchDepTypes": ["toolchain"],
+      "enabled": false
+    },
+
+    /*************************************************
+    ************ CI configuration options ************
+    **************************************************/
+
     // Github-action updates cannot consistently be tested in a PR.
     // This is caused by an unfixable architecture-flaw: Execution
     // context always depends on trigger, and we (obvious) can't know

@@ -190,19 +217,13 @@ Validate this file before commiting with (from repository root):
     // example, flagging an important TODO or FIXME item. Or, where CI VM
     // images are split across multiple IMG_SFX values that all need to be updated.
     {
-      "matchManagers": ["regex"],
-      "matchFileNames": [".cirrus.yml"], // full-path exact-match
+      "matchManagers": ["custom.regex"],
+      "matchFileNames": [".cirrus.yml"],
       "groupName": "CI VM Image",
       // Somebody(s) need to check image update PRs as soon as they open.
-      "reviewers": ["cevich"],
+      "reviewers": ["Luap99"],
       // Don't wait, roll out CI VM Updates immediately
-      "schedule": ["at any time"],
+      "schedule": ["at any time"]
     },
-
-    // Add CI:DOCS prefix to skip unnecessary tests for golangci updates in podman CI.
-    {
-      "matchPackageNames": ["golangci/golangci-lint"],
-      "commitMessagePrefix": "[CI:DOCS]",
-    },
-  ],
+  ]
 }