Compare commits


52 Commits

Author SHA1 Message Date
Darko Janjic d86867cd97
fix: Sidecar terminates itself after the main container is finished. Closes #10612 (#14633)
Signed-off-by: Darko Janjic <darko@pipekit.io>
2025-07-18 11:03:10 +10:00
github-actions[bot] 0e1d07645d
docs: update CHANGELOG.md for v3.7.0-rc4 (#14658)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Alan Clucas <alan@clucas.org>
Co-authored-by: Joibel <1827156+Joibel@users.noreply.github.com>
Co-authored-by: Alan Clucas <alan@clucas.org>
2025-07-16 08:13:15 +01:00
jswxstw 85b4c649d0
fix: process aggregate outputs for steps node with retries. Fixes #14647 (#14651)
Signed-off-by: oninowang <oninowang@tencent.com>
2025-07-15 14:06:32 +10:00
Armin Friedl 7fd6b10f87
feat: Respect NameFilter in Workflow Archive (fixes #14069) (#14473)
Signed-off-by: Armin Friedl <dev@friedl.net>
2025-07-14 07:31:58 +05:30
Jemin Seo 799cbc5d93
docs(cli): add examples for `argo template get` with option explanations (#14648)
Signed-off-by: Jemin <jemin9812@gmail.com>
2025-07-14 07:24:09 +05:30
Alan Clucas 78ba6eae26
chore: remove more log levels and implement speedups (#14644)
Signed-off-by: Alan Clucas <alan@clucas.org>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-11 08:08:58 +01:00
Alan Clucas 175322c9c9
fix: Make codegen easier to understand when it fails (#14619)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-07-10 15:43:07 +00:00
Jose M. Abuin aa1ea6a646
docs: Added doc for GC directory (#14589)
Signed-off-by: Jose M. Abuin <chemaster@gmail.com>
Co-authored-by: antoinetran <antoinetran@users.noreply.github.com>
2025-07-10 16:42:23 +01:00
Alan Clucas b05068ba3e
chore: remove tracing logging level (#14642)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-07-10 10:53:47 +01:00
Isitha Subasinghe bad972df6d
docs: release docs for 3.7 (#14574)
Signed-off-by: isubasinghe <isitha@pipekit.io>
2025-07-10 10:09:02 +01:00
Alan Clucas 48b247f12a
fix: merge collision fix (#14643)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-07-10 07:55:34 +00:00
chenrui f73e8b0e72
fix: Make the phase of the node unchange when the pod is completed and outputs are not set in the status.node (#14625)
Signed-off-by: chenrui7 <chenrui7@kingsoft.com>
Co-authored-by: chenrui7 <chenrui7@kingsoft.com>
2025-07-09 23:05:50 +05:30
shuangkun tian 05ec2d61ec
feat: support open custom links in new tab.Part of #13114 (#14314)
Signed-off-by: shuangkun <tsk2013uestc@163.com>
2025-07-09 14:29:23 +01:00
Isitha Subasinghe e419637113
feat: logging refactor to `slog`. Fixes #11120 (#14527)
Signed-off-by: isubasinghe <isitha@pipekit.io>
2025-07-09 10:32:05 +00:00
Alan Clucas 3a95f4ef2a
fix: fix for feature note changed files (#14640)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-07-09 07:11:51 +00:00
shuangkun tian d5bbf1fb91
fix: retry when the server is temporarily unavailable. (#14637)
Signed-off-by: shuangkun <tsk2013uestc@163.com>
2025-07-08 23:45:53 +05:30
Alan Clucas 27ece58423
fix: do PR check with some depth for merge-base (#14636)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-07-08 18:02:23 +10:00
Isitha Subasinghe 1b963d336c
fix: ensure task results sync when calling fullfilled. Fixes #14568 (#14536)
Signed-off-by: isubasinghe <isitha@pipekit.io>
2025-07-08 08:36:44 +01:00
Alan Clucas 9b276dfc0d
feat: new-features automated documentation (#14491)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-07-08 10:22:47 +10:00
shuangkun tian a5d43eb202
fix: avoid healthz check restart controller. Fixes: #14526 (#14613)
Signed-off-by: shuangkun <tsk2013uestc@163.com>
2025-07-04 09:25:48 +01:00
jswxstw 85e96a1f70
fix: correct finding the closest ancestor retry node. Fixes #14517 (#14576)
Signed-off-by: oninowang <oninowang@tencent.com>
Co-authored-by: oninowang <oninowang@tencent.com>
2025-07-03 10:39:26 +10:00
shuangkun tian 162e6454d8
fix: create task results only once. Fixes: #14617 (#14618)
Signed-off-by: shuangkun <tsk2013uestc@163.com>
2025-07-02 08:52:51 +01:00
shuangkun tian 5807fadbd1
fix: add etcd too many requests transient. (#14621)
Signed-off-by: shuangkun <tsk2013uestc@163.com>
2025-07-02 11:21:15 +05:30
Alan Clucas 9c47755980
chore(deps): bump golang version (#14596)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-07-01 09:46:05 +01:00
Alan Clucas 62302816ff
chore(deps): remove open-golang dependency (#14591)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-06-30 16:10:39 +01:00
Alan Clucas a58f34fb66
chore(deps): replace golang/mock with maintained port by uber (#14592)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-06-30 16:09:58 +01:00
garireo2549 48e7984b78
feat: Display when Conditions in CronWorkflow UI : Fixes: #14334 (#14585)
Signed-off-by: garireo2549 <freedomgreo@gmail.com>
2025-06-29 10:51:19 +10:00
shuangkun tian e634b1d4f3
fix: set creator when use X509 client certificates. Fixes: #14578 (#14579)
Signed-off-by: shuangkun <tsk2013uestc@163.com>
Signed-off-by: shuangkun tian <72060326+shuangkun@users.noreply.github.com>
Co-authored-by: Tianchu Zhao <evantczhao@gmail.com>
2025-06-29 10:28:37 +10:00
dependabot[bot] ec99485c06
chore(deps): bump github.com/go-viper/mapstructure/v2 from 2.2.1 to 2.3.0 in the go_modules group (#14611)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-27 17:34:29 +00:00
Tianchu Zhao 883b502dac
fix: prevent running workflow throttle by parallelism (#14606)
Signed-off-by: Tianchu Zhao <evantczhao@gmail.com>
2025-06-26 09:15:41 +01:00
github-actions[bot] 751b4d1c59
docs: update CHANGELOG.md for v3.7.0-rc3 (#14604)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: isubasinghe <28187128+isubasinghe@users.noreply.github.com>
2025-06-25 19:55:06 +10:00
akash khamkar ca4bfb874a
fix: executor workflowtaskresult retry should use the default retry and configurable (#14598)
Signed-off-by: akashjkhamkar <akash.khamkar40@gmail.com>
2025-06-25 07:03:19 +00:00
Alan Clucas e0a0f2856b
fix: restore the CRDs to install.yaml (#14599)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-06-25 10:37:21 +10:00
Isitha Subasinghe b81e8c9fc0
fix: retry forevever on task result creation related transient errors. Fixes #14560 (#14555)
Signed-off-by: isubasinghe <isitha@pipekit.io>
2025-06-24 10:24:05 +10:00
github-actions[bot] f4bc9b7418
docs: update CHANGELOG.md for v3.7.0-rc2 (#14594)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: isubasinghe <28187128+isubasinghe@users.noreply.github.com>
2025-06-23 19:19:41 +10:00
Alan Clucas 9e2c0fc0dc
chore(revert): feat!: Introduce curated Sprig function allowlist (#14446) (#14586)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-06-23 14:22:42 +10:00
Alan Clucas edd20871d0
fix: try to stop .spelling from getting deleted (#14593)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-06-23 14:21:18 +10:00
Alan Clucas ce1b4471a6
fix: ignore ALPN by using the env variable hack (#14588)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-06-23 14:20:54 +10:00
Malthe Poulsen 00c8e809ff
fix(server/auth/webhook): update GitHub webhook events list and library version (#14389)
Signed-off-by: Malthe Poulsen <malthe@grundtvigsvej.dk>
2025-06-21 12:51:37 +01:00
shuangkun tian 65834245a9
chore(plugin): add ray job plugin. (#14583)
Signed-off-by: shuangkun <tsk2013uestc@163.com>
2025-06-19 08:16:16 +01:00
jswxstw f7d2c099cb
fix: merge template defaults before processing. Fixes #13691 (#14298)
Signed-off-by: oninowang <oninowang@tencent.com>
2025-06-17 13:47:19 +01:00
github-actions[bot] 1e2a87f2af
docs: update CHANGELOG.md for v3.5.15 (#14566)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Alan Clucas <alan@clucas.org>
Co-authored-by: Joibel <1827156+Joibel@users.noreply.github.com>
Co-authored-by: Alan Clucas <alan@clucas.org>
2025-06-13 08:25:08 +00:00
Alan Clucas 2104eda035
docs: document feature releases (#14549)
Signed-off-by: Alan Clucas <alan@clucas.org>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-06-10 20:37:19 -04:00
Nicola Dardanis 75ff10dd67
fix!: Adjust parameter value overriding. Fixes #14426 (#14462)
Signed-off-by: Nicola Dardanis <nicdard@gmail.com>
2025-06-10 09:56:03 +10:00
Alan Clucas d3cfe9ecc3
docs: tidy up autogenerated wf-configmap (#14543)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-06-06 07:59:43 +01:00
Alan Clucas 9ab8d69fc2
docs: improve synchronization docs (#14545)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-06-06 12:56:38 +10:00
Alan Clucas ff9f0130ac
docs: update CONTRIBUTORS.md including guidance on PR change request timeliness and PR takeover (#13762)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-06-06 12:49:15 +10:00
William Van Hevelingen 029ce12020
chore: update issue templates to use types instead of labels (#14548)
Signed-off-by: William Van Hevelingen <william.vanhevelingen@acquia.com>
2025-06-05 08:40:32 +01:00
Alan Clucas cbc2750123
chore: fix makefile (#14540)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-06-04 14:44:38 +01:00
github-actions[bot] 7d7366f829
docs: update CHANGELOG.md for v3.6.10 (#14542)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Alan Clucas <alan@clucas.org>
Co-authored-by: Joibel <1827156+Joibel@users.noreply.github.com>
Co-authored-by: Alan Clucas <alan@clucas.org>
2025-06-04 13:07:51 +00:00
github-actions[bot] 8479eabe6e
docs: update CHANGELOG.md for v3.7.0-rc1 (#14539)
Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Signed-off-by: Alan Clucas <alan@clucas.org>
Co-authored-by: isubasinghe <28187128+isubasinghe@users.noreply.github.com>
Co-authored-by: Alan Clucas <alan@clucas.org>
2025-06-04 13:42:59 +01:00
Alan Clucas 843f3d4da5
docs: auto-generate wf-controller-configmap docs (#14515)
Signed-off-by: Alan Clucas <alan@clucas.org>
2025-06-04 12:01:48 +00:00
340 changed files with 10946 additions and 5105 deletions

.features/TEMPLATE.md
View File

@@ -0,0 +1,16 @@
<!-- Required: All of these fields are required, including at least one issue -->
Description: <!-- A brief one line description of the feature -->
Author: <!-- Author name and GitHub link in markdown format e.g. [Alan Clucas](https://github.com/Joibel) -->
Component: <!-- component name here, see hack/featuregen/components.go for the list -->
Issues: <!-- Space separated list of issues 1234 5678 -->
<!--
Optional
Additional details about the feature written in markdown, aimed at users who want to learn about it
* Explain when you would want to use the feature
* Include code examples if applicable
* Provide working examples
* Format code using back-ticks
* Use Kubernetes style
* One sentence per line of markdown
-->

View File

@@ -0,0 +1,12 @@
Component: General
Issues: 14069
Description: Name filter parameter for prefix/contains/exact search in `/archived-workflows`
Author: [Armin Friedl](https://github.com/arminfriedl)
A new `nameFilter` parameter was added to the `GET /archived-workflows` endpoint.
The filter works analogously to the one in `GET /workflows`.
It specifies how a search for `?listOptions.fieldSelector=metadata.name=<search-string>` in these endpoints should be interpreted.
Possible values are `Prefix`, `Contains`, and `Exact`.
The `metadata.name` field is matched accordingly against the value of `<search-string>`.
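For illustration, a prefix search might look like the following sketch (the `ARGO_SERVER` host, `ARGO_TOKEN`, and the `/api/v1/` base path are placeholders/assumptions, not part of this change):

```bash
# List archived workflows whose names start with "nightly-".
curl -s -H "Authorization: Bearer $ARGO_TOKEN" \
  "https://$ARGO_SERVER/api/v1/archived-workflows?listOptions.fieldSelector=metadata.name=nightly-&nameFilter=Prefix"
```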

View File

@@ -0,0 +1,9 @@
Component: General
Issues: 11120
Description: This migrates most of the logging off logrus and onto a custom logger.
Author: [Isitha Subasinghe](https://github.com/isubasinghe)
Currently it is quite hard to associate log lines with their corresponding workflows.
This change propagates a context object, carrying an annotated logging object, down the call hierarchy.
This allows context-aware logging from deep within the codebase.
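A minimal sketch of the propagation pattern, using only the calls visible in the diffs further down (`logging.WithLogger`, `logging.NewSlogLogger`); how deep call sites read the logger back out of the context is internal to the `logging` package and not shown here:

```go
package main

import (
	"context"

	"github.com/argoproj/argo-workflows/v3/util/logging"
)

// run stands in for the deep call hierarchy that receives the context.
func run(ctx context.Context) {
	_ = ctx
}

func main() {
	// Annotate the context with a logger once, near the entry point.
	ctx := logging.WithLogger(context.Background(),
		logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))

	// Pass ctx down the call hierarchy; anything below can then log with
	// the fields carried by the context's logger.
	run(ctx)
}
```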

View File

@@ -0,0 +1,6 @@
Component: Build and Development
Issues: 14155
Description: Document features as they are created
Author: [Alan Clucas](https://github.com/Joibel)
To assist with creating release documentation and blog postings, all features now require a document in .features/pending explaining what they do for users.
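For illustration, a hypothetical `.features/pending/my-feature.md` following the TEMPLATE.md shown above might look like this (the author, component, and issue number are all invented):

```md
Description: One-line summary of the feature
Author: [Jane Doe](https://github.com/janedoe)
Component: General
Issues: 12345

Optional user-facing details, written in markdown.
One sentence per line.
```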

View File

@@ -0,0 +1,14 @@
Component: UI
Issues: 13114
Description: Support opening custom links in a new tab automatically.
Author: [Shuangkun Tian](https://github.com/shuangkun)
Support configuring a custom link to open in a new tab by default.
If `target` is `_blank`, the link opens in a new tab; if `target` is unset or `_self`, it opens in the current tab. For example:
```yaml
- name: Pod Link
scope: pod
target: _blank
url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
```
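For context, such entries live under the `links` section of the workflow-controller-configmap; a sketch (the logging-facility URL is illustrative):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: workflow-controller-configmap
data:
  links: |
    - name: Pod Link
      scope: pod
      target: _blank  # _self or unset keeps the link in the current tab
      url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}
```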

View File

@@ -1,6 +1,6 @@
name: Reproducible bug report
description: Create a reproducible bug report. Not for support requests.
labels: [ type/bug ]
type: Bug
body:
- type: checkboxes
id: terms

View File

@@ -1,7 +1,7 @@
---
name: Feature
about: Propose a feature for this project
labels: 'type/feature'
type: Feature
---
# Summary

View File

@@ -1,6 +1,7 @@
name: Regression report
description: Create a regression report. Not for support requests.
labels: [ type/bug, type/regression ]
type: Bug
labels: [ type/regression ]
body:
- type: checkboxes

View File

@@ -84,6 +84,7 @@ jobs:
- .clang-format
lint:
- *tests
- .features/**
# plus lint config
- .golangci.yml
# all GH workflows / actions
@@ -99,6 +100,7 @@ jobs:
# docs scripts & tools from `make docs`
- hack/docs/copy-readme.sh
- hack/docs/check-env-doc.sh
- hack/featuregen/**
- .markdownlint.yaml
- .mlc_config.json
- .spelling

View File

@@ -14,8 +14,60 @@ permissions:
jobs:
title-check:
runs-on: ubuntu-24.04
outputs:
type: ${{ steps.semantic-pr-check.outputs.type }}
steps:
- name: Check PR Title's semantic conformance
id: semantic-pr-check
uses: amannn/action-semantic-pull-request@0723387faaf9b38adef4775cd42cfd5155ed6017 # v5.5.3
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
feature-pr-handling:
needs: title-check
runs-on: ubuntu-24.04
if: needs.title-check.outputs.type == 'feat'
env:
PR_HEAD: ${{ github.event.pull_request.head.sha }}
steps:
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
fetch-depth: 50
- name: Ensure ./.features/pending/*.md addition(s)
id: changed-files
uses: tj-actions/changed-files@cbda684547adc8c052d50711417fa61b428a9f88 # v41.1.2
with:
files: |
.features/pending/*.md
- name: No ./.features/*.md addition
if: steps.changed-files.outputs.added_files_count == 0
run: |
echo "No feature description was added to the ./.features/ directory for this feature PR."
echo "Please add a .md file to the ./.features/ directory."
echo "See docs/running-locally.md for more details."
false
- name: Validate ./.features/*.md changes
if: steps.changed-files.outputs.added_files_count > 0
run: |
echo "A feature description was added to the ./.features/ directory."
make features-validate \
|| { echo "New ./.features/*.md file failed validation."; exit 1; }
# In order to validate any links in the yaml file, render the config to markdown
- name: Render .features/*.md feature descriptions
run: make features-preview > features_preview.md
- name: Link Checker
id: lychee
uses: lycheeverse/lychee-action@f613c4a64e50d792e0b31ec34bbcbba12263c6a6 # f613c4a64e50d792e0b31ec34bbcbba12263c6a6
with:
args: "--verbose --no-progress ./features_preview.md"
failIfEmpty: false

.gitignore
View File

@@ -8,6 +8,7 @@ dist/
# delve debug binaries
cmd/**/debug
hack/**/debug
hack/featuregen/featuregen
/argo
/argoexec
release-notes
@@ -41,6 +42,7 @@ sdks/python/client/dist/*
/v3/
/cmd/argoexec/commands/test.txt
/db-dumps/
.spelling.tmp
# Do not commit rendered installation manifests since they are misleading to users.
manifests/install.yaml

View File

@@ -79,6 +79,7 @@ Kaniko
Katacoda
Katib
Kerberos
KeyValueEditor
Killercoda
KubectlExec
Kubeflow
@@ -109,6 +110,7 @@ PersistentVolumeClaims
Ploomber
PostgreSQL
Postgres
Pre-fill
PriorityClass
RCs
Risc-V
@@ -138,11 +140,13 @@ anded
apis
architecting
argo
argoexec
argoproj
args
async
auth
backend
backoff
backported
boolean
booleans
@@ -152,7 +156,6 @@ codebase
config
cpu
cron
crypto
daemoned
dependabot
dev
@@ -173,6 +176,7 @@ gitops
goroutine
goroutines
govaluate
grpc
gzipped
i.e.
idempotence
@@ -213,14 +217,15 @@ pprof
pre-commit
pytorch
qps
ray
rc2
repo
retryStrategy
roadmap
runtime
runtimes
s3
sandboxed
semver
shortcodes
stateful
stderr
@@ -263,7 +268,11 @@ v3.4.
v3.4.4
v3.5
v3.6
v3.6.0
v3.6.1
v3.6.5
v3.7
v3.7.0
validator
vendored
versioned

View File

@@ -1,5 +1,288 @@
# Changelog
## v3.7.0-rc4 (2025-07-15)
Full Changelog: [v3.7.0-rc3...v3.7.0-rc4](https://github.com/argoproj/argo-workflows/compare/v3.7.0-rc3...v3.7.0-rc4)
### Selected Changes
* [d1d689b36](https://github.com/argoproj/argo-workflows/commit/d1d689b367c4c8a5a8a095ae60b5d7043f99eda9) fix: process aggregate outputs for steps node with retries. Fixes #14647 (#14651)
<details><summary><h3>Contributors</h3></summary>
* jswxstw
</details>
## v3.7.0-rc3 (2025-06-25)
Full Changelog: [v3.7.0-rc2...v3.7.0-rc3](https://github.com/argoproj/argo-workflows/compare/v3.7.0-rc2...v3.7.0-rc3)
### Selected Changes
* [e0a0f2856](https://github.com/argoproj/argo-workflows/commit/e0a0f2856ba6e8a49b1ea28b15d50d39f3106125) fix: restore the CRDs to install.yaml (#14599)
* [b81e8c9fc](https://github.com/argoproj/argo-workflows/commit/b81e8c9fc04667eb973e05a8f4aace7ee81e0f1a) fix: retry forevever on task result creation related transient errors. Fixes #14560 (#14555)
<details><summary><h3>Contributors</h3></summary>
* Alan Clucas
* Isitha Subasinghe
* isubasinghe
</details>
## v3.7.0-rc2 (2025-06-23)
Full Changelog: [v3.7.0-rc1...v3.7.0-rc2](https://github.com/argoproj/argo-workflows/compare/v3.7.0-rc1...v3.7.0-rc2)
### Selected Changes
* [edd20871d](https://github.com/argoproj/argo-workflows/commit/edd20871d0e96e2fedcc45a5b556130f1767586e) fix: try to stop .spelling from getting deleted (#14593)
* [ce1b4471a](https://github.com/argoproj/argo-workflows/commit/ce1b4471a64b4f20305d44fbbe2f3a9f242c8dcb) fix: ignore ALPN by using the env variable hack (#14588)
* [00c8e809f](https://github.com/argoproj/argo-workflows/commit/00c8e809ff47ff0b8c870ae30f9ab0970a922d1e) fix(server/auth/webhook): update GitHub webhook events list and library version (#14389)
* [f7d2c099c](https://github.com/argoproj/argo-workflows/commit/f7d2c099cbb4672e1c80762619ed548a758d1612) fix: merge template defaults before processing. Fixes #13691 (#14298)
* [75ff10dd6](https://github.com/argoproj/argo-workflows/commit/75ff10dd67102c8a1adfe73491fe6ce845258daf) fix!: Adjust parameter value overriding. Fixes #14426 (#14462)
<details><summary><h3>Contributors</h3></summary>
* Alan Clucas
* Copilot
* isubasinghe
* Joibel
* jswxstw
* Malthe Poulsen
* Nicola Dardanis
* shuangkun tian
* William Van Hevelingen
</details>
## v3.7.0-rc1 (2025-06-04)
Full Changelog: [v3.6.10...v3.7.0-rc1](https://github.com/argoproj/argo-workflows/compare/v3.6.10...v3.7.0-rc1)
### Selected Changes
* [52037195a](https://github.com/argoproj/argo-workflows/commit/52037195a7fb584db0eb582b34dd02bafb6be6c5) chore(deps): update dependencies (#14535)
* [dc2b8f6d8](https://github.com/argoproj/argo-workflows/commit/dc2b8f6d8ff9f94f7d1829030abc4b6cc522bb9c) chore(deps): bump go-sqlite3 for vulnerability (#14524)
* [e6ced255f](https://github.com/argoproj/argo-workflows/commit/e6ced255f7a8d83aa8bcf9f833a82d3225cd3c08) fix: change non-root image to use tag (#14530)
* [4ad7dd44c](https://github.com/argoproj/argo-workflows/commit/4ad7dd44cbf2df216a036c978cc7cb31e1a0b401) fix: logs from workflow template init containers (#14476)
* [a4f457eac](https://github.com/argoproj/argo-workflows/commit/a4f457eace1193b07f81999f31243f99ff620966) fix: Fix ST1003 linting issue. Fixes #14405 (#14520)
* [19ae37f32](https://github.com/argoproj/argo-workflows/commit/19ae37f32ab5e07e3f78e1091cda7322a8fdd935) chore(deps)!: bump k8s dependencies to 1.33 (#14519)
* [7f0f9decf](https://github.com/argoproj/argo-workflows/commit/7f0f9decf5c133ed1726e09397f4357b752c43b4) chore(deps): update nix to nixos-25.05 (#14474)
* [87c925517](https://github.com/argoproj/argo-workflows/commit/87c925517f55bc7c8931b3d53994b48b3bb6e338) feat!: Introduce curated Sprig function allowlist (#14446)
* [ea2cc791b](https://github.com/argoproj/argo-workflows/commit/ea2cc791be47ead4d56b13b31febfd4a6d454156) fix: ignore failing resolveArt when Art is optional. Fixes #14267 (#14503)
* [755a04d95](https://github.com/argoproj/argo-workflows/commit/755a04d954ed7e3f18307a9ab07280b1201c373a) fix: Fix ST1016 linting issue. Fixes #14405 (#14514)
* [cf4daada9](https://github.com/argoproj/argo-workflows/commit/cf4daada93a08c437a7b17f9d0e86c0b736372c4) fix: remove unnecessary init container status checks. Fixes #14495 (#14510)
* [de6f160ae](https://github.com/argoproj/argo-workflows/commit/de6f160aefac23e243d18f1cfbd7f693287297c5) fix: update nix. fixes #14504 (#14505)
* [cb39b3ec9](https://github.com/argoproj/argo-workflows/commit/cb39b3ec953e6473ccd6257a94853a578a6208a6) fix: ui render with empty artifact (#14227)
* [a6a0c272d](https://github.com/argoproj/argo-workflows/commit/a6a0c272d00716cdda828779f26e9b35e26a595c) fix: update TLS config to support ALPN. (#14435) (cherry-pick main) (#14490)
* [1745aec0e](https://github.com/argoproj/argo-workflows/commit/1745aec0e1fd1e7d3ead333def9606d919b2ce14) feat: mark memoized node as cached (#13883)
* [9156b4c93](https://github.com/argoproj/argo-workflows/commit/9156b4c93353e543b892b2c409bd02a934da20d0) feat: more granular caching options for argoKubeClient informer (#14304)
* [e2ef918d3](https://github.com/argoproj/argo-workflows/commit/e2ef918d37e0a2fa9bf020d79d1eb5cb2c4c6c8f) fix: correctly release all mutexes when multiple mutexes are held. FIxes #14461 (#14467)
* [f57736935](https://github.com/argoproj/argo-workflows/commit/f57736935d3c29f6a382202b5a14f22c3ca5e812) feat: non-root argoexec (#14477)
* [45d82d3e5](https://github.com/argoproj/argo-workflows/commit/45d82d3e5540abc0d391f4d8ec4b01142444cced) fix: added pagination to the workflow and pod listings. Fixes #14374 (#14373)
* [9ae8e5363](https://github.com/argoproj/argo-workflows/commit/9ae8e5363bb6c2316291c27a65729e1fda761ac7) fix: namespace permissions for per-namespace parallelism. Fixes #14460 (#14459)
* [81130fbd5](https://github.com/argoproj/argo-workflows/commit/81130fbd58a542af7eb7a6c0192faed4de4385e8) feat: allow last retry variables in expressions. Fixes #10364 (#14450)
* [13c9b37bf](https://github.com/argoproj/argo-workflows/commit/13c9b37bf7a311a8ffbb8a608f00b56c05c5fcbf) fix: ensure variable sub and processing is performed. Fixes #12941 (#14444)
* [7dfdb63a3](https://github.com/argoproj/argo-workflows/commit/7dfdb63a347b8db99f84182e76eff332e2069f91) feat!: don't push to docker hub (#14457)
* [e55cc7f4b](https://github.com/argoproj/argo-workflows/commit/e55cc7f4b442d3f240810f9b1a74ce4c0450b821) feat: Add React Testing Library and initial component coverage (#14412)
* [fe561b78a](https://github.com/argoproj/argo-workflows/commit/fe561b78a1c688210a84fe0ba772948c1188aa5d) feat: multi-controller locks (semaphores and mutexes) (#14309)
* [986e0a883](https://github.com/argoproj/argo-workflows/commit/986e0a8832e115fff29e2b4dabe78a0f7400e4bd) fix: Remove nil ConfigMap reference in cache gc (#14442)
* [d5d0a6401](https://github.com/argoproj/argo-workflows/commit/d5d0a64014f414d995e02ea2f401a0110ad5adcf) fix: Reference existing attributes in example (#14448)
* [ce2fa2335](https://github.com/argoproj/argo-workflows/commit/ce2fa2335696c6fe92984bf498a1b59161534c59) fix(sso): use relative redirect URLs. Fixes #13031 (#13747)
* [bd13b1c92](https://github.com/argoproj/argo-workflows/commit/bd13b1c92bdb6cbc60c71f7664c4472e6fdc1d3f) fix: correct manual retry logic. Fixes #14124 (#14328)
* [e75f70df5](https://github.com/argoproj/argo-workflows/commit/e75f70df5e72d76e652bdcb103398a4fb03f3585) fix: refactor argoKubeClient to start argo server when creating workflowServiceClient (#14401)
* [9b7312601](https://github.com/argoproj/argo-workflows/commit/9b73126010eef059ba881d30da1b56153e3dd65e) feat(controller): retry strategy support on daemon containers, fixes #13705 (#13738)
* [3cbc98ef7](https://github.com/argoproj/argo-workflows/commit/3cbc98ef73df18c7a3036e471c1a737cc6fd980f) fix: process aggregate outputs for nodes with retries. Fixes #14228 (#14299)
* [5a64c5013](https://github.com/argoproj/argo-workflows/commit/5a64c5013704704fba193807314ed8221a2c78f0) fix: semaphore configmap retry on transient error. Fixes #14335 (#14336)
* [fa51c6d38](https://github.com/argoproj/argo-workflows/commit/fa51c6d382bcf751ef2bd135a85e028171620797) chore(deps): bump github.com/argoproj/argo-events from 1.9.1 to 1.9.6 in the go_modules group (#14382)
* [364bde267](https://github.com/argoproj/argo-workflows/commit/364bde267fb4d36e41926d58f283da1e29689d6e) fix: prevent dfs sorter infinite recursion on cycle. Fixes #13395 (#14391)
* [566f8e87e](https://github.com/argoproj/argo-workflows/commit/566f8e87ee5452cf64300268eedadbd6aa54b66c) chore(deps): bump http-proxy-middleware from 2.0.7 to 2.0.9 in /ui in the deps group (#14404)
* [3dbd1757a](https://github.com/argoproj/argo-workflows/commit/3dbd1757a1948a4a411bccf7e4d51c26bd5cfa71) fix: silence noisy "Ignoring duplicate key error" messages. Fixes #14344 (#14357)
* [614b0d244](https://github.com/argoproj/argo-workflows/commit/614b0d2440dab5c37b3783e47328370854213ace) fix: check metric creation before use. Fixes #14367 (#14383)
* [e99702e9c](https://github.com/argoproj/argo-workflows/commit/e99702e9c423246050346a2ef590d48410998877) fix(test): change `createdAfter` to UTC time. Fixes #14346 (#14347)
* [1097d4508](https://github.com/argoproj/argo-workflows/commit/1097d4508448e2a225d69a311a126a6f02518f0e) fix: add creator label when submit workflow by the resubmit button in argo UI (#14349)
* [260863d33](https://github.com/argoproj/argo-workflows/commit/260863d33ae36429b1a0f363b6f05b75b99c1e7b) chore(deps): bump the go_modules group with 2 updates (#14353)
* [9b7c0c4c4](https://github.com/argoproj/argo-workflows/commit/9b7c0c4c4ce1ecb8e1d27df95d14ccef09cffe0f) chore(deps): bump github.com/expr-lang/expr from 1.16.9 to 1.17.0 in the go_modules group (#14307)
* [893c4b072](https://github.com/argoproj/argo-workflows/commit/893c4b072a2252426e45be6ae14d5f7adffaa48f) refactor: tidy tools in makefile (#14318)
* [ee45eac88](https://github.com/argoproj/argo-workflows/commit/ee45eac887c16d8272727d2a2d35fe47c1fbf65c) feat: Cache semaphore limit lookup (#14205)
* [347c2d173](https://github.com/argoproj/argo-workflows/commit/347c2d173e6abfce0faae58f7e6913bd78d25c3b) fix(build): downgrade dev container image due to Python issues (#14323)
* [8098a14a2](https://github.com/argoproj/argo-workflows/commit/8098a14a2f5dc134a2e21e5a6e6b23b4b76e0e21) refactor: separate sqldb from persist (#14308)
* [73a8e7094](https://github.com/argoproj/argo-workflows/commit/73a8e7094873d9b3e751523c135dd1f1f58e12fb) fix(workflow/sync): use RWMutex to prevent concurrent map access (#14321)
* [9c073573d](https://github.com/argoproj/argo-workflows/commit/9c073573d06697bba5385c1a9773301c318f7dda) refactor: move s3 pkg from argoproj to workflow repo. Part of #14312 (#14315)
* [f0fdb69f7](https://github.com/argoproj/argo-workflows/commit/f0fdb69f79c51e473769bebbd1c65344a1b87b76) feat(CLI-alpha): support backfill for cron workflow. Part of #2706 (#13999)
* [50219cee2](https://github.com/argoproj/argo-workflows/commit/50219cee29ed4e9fac54703192338d2f8277a2f3) fix: incomplete/unsorted contributors list in CHANGELOG.md. Fixes #14293 (#14301)
* [8691b9b18](https://github.com/argoproj/argo-workflows/commit/8691b9b186ef0d8157933d35d1d658b0e1729b74) fix: Fix typo in common.go (#14286)
* [b09352efb](https://github.com/argoproj/argo-workflows/commit/b09352efbc62155517b91def91dd06078322cf0a) chore(deps): bump @babel/runtime from 7.23.8 to 7.26.10 in /ui in the deps group (#14296)
* [3162ab3d8](https://github.com/argoproj/argo-workflows/commit/3162ab3d8339fcefcb79bf55353d5d9959629c90) chore(deps): bump golang.org/x/net from 0.34.0 to 0.36.0 in the go_modules group (#14295)
* [fb217b0a7](https://github.com/argoproj/argo-workflows/commit/fb217b0a771f42173bf3fa8502a6acf25e769006) fix: Prevent dropdown from disappearing when shrinking the viewport (#14291)
* [22e91327f](https://github.com/argoproj/argo-workflows/commit/22e91327fb37a97ce0061a6c63f9a195820b2036) fix(server): consistent actor email label. Fixes #14121 (#14122)
* [96a6f7f4e](https://github.com/argoproj/argo-workflows/commit/96a6f7f4e575a238f012c8f1f15ee18f644b61c1) fix: don't print help for non-validation errors. Fixes argoproj#14234 (#14249)
* [f687a8243](https://github.com/argoproj/argo-workflows/commit/f687a824348ab37d2622d9f0ebfa544c28e0df42) fix(cli): remove red from log colour selection. Fixes #6740 (#14215)
* [06c730118](https://github.com/argoproj/argo-workflows/commit/06c7301183555189f78e248ca2fafafa059d49f4) chore(deps): fix snyk (#14264)
* [68fde4ffc](https://github.com/argoproj/argo-workflows/commit/68fde4ffcbe9b84dfad969a45133cd4cc659d346) fix: wait for workflow informer to sync before pod informer (#14248)
* [d6a961b35](https://github.com/argoproj/argo-workflows/commit/d6a961b35633cbb3ac14c026240a9c5acbdaed17) fix(api/jsonschema): use working `$id` (#14257)
* [69ecfffc5](https://github.com/argoproj/argo-workflows/commit/69ecfffc5d5aa4a775cc0ca196b3639a1f90855f) fix(api/jsonschema): use unchanging JSON Schema version (#14092)
* [4f4d38fdb](https://github.com/argoproj/argo-workflows/commit/4f4d38fdbce060e037ccd88f94c1ac1200e25578) fix: use quay.io for third-party images to avoid rate limiting. Fixes #10807 (#14239)
* [1a212da9f](https://github.com/argoproj/argo-workflows/commit/1a212da9f1e250ae8f4bb944132c503024116a51) fix: incorrect description of value field in Workflow.Parameter (#14233)
* [f61838c81](https://github.com/argoproj/argo-workflows/commit/f61838c818b43899d5537f828c2ccace8669d91a) feat: add support for databases enforcing strict data integrity through PKs. Fixes #13611 (#14103)
* [f2ac4cbec](https://github.com/argoproj/argo-workflows/commit/f2ac4cbecf0af27a0587fad66c2ac40ef1a6d2d2) chore(deps)!: bump k8s dependencies to 1.32 (#14209)
* [3a5809705](https://github.com/argoproj/argo-workflows/commit/3a58097053efc7009863d4e471744b26b15cb817) chore(deps): bump github.com/go-jose/go-jose/v3 from 3.0.3 to 3.0.4 in the go_modules group (#14231)
* [815b24960](https://github.com/argoproj/argo-workflows/commit/815b24960e80e28540ac566b29e1860958a0ff39) feat: Filter workflows by "Finished before" and "Created Since" via API. Fixes #13151 (#13962)
* [dbfedbd60](https://github.com/argoproj/argo-workflows/commit/dbfedbd608b36e85a629ba3ea8b672041924e9d9) feat: dynamic namespace parallelism. Fixes #14194 (#14188)
* [e9e7c4398](https://github.com/argoproj/argo-workflows/commit/e9e7c4398c00621f697acf0002b7992028958daa) fix: gracefully handle invalid CronWorkflows and simplify logic. Fixes #14047 (#14197)
* [01c90bd7a](https://github.com/argoproj/argo-workflows/commit/01c90bd7a62b40aadbc0113f98a043def59f99ef) fix: gcs fix list a file. Fixes #2841 (#14214)
* [f1722947c](https://github.com/argoproj/argo-workflows/commit/f1722947cb177b711c62eaa6cd292c812818480e) fix(ui): scrollable override parameter div for retry node panel. Fixes #14208 (#14210)
* [7ecb17f95](https://github.com/argoproj/argo-workflows/commit/7ecb17f95255771e1b3408f0614285e51fc384fd) fix: manual retries exit handler cleanup. Fixes #14180 (#14181)
* [64f509324](https://github.com/argoproj/argo-workflows/commit/64f50932466be614128e3fa8fcb8322db496933f) chore(deps): bump minio-go to newer version (#14185)
* [124fb76c1](https://github.com/argoproj/argo-workflows/commit/124fb76c115e327ce9954fc67195623a0479db37) fix: add content-md5 header for artifacts (#14191)
* [581950f7a](https://github.com/argoproj/argo-workflows/commit/581950f7ae9f039a10d392d40a111f8f33e06eb3) fix: correct semaphore configmap keys for multiple semaphores (#14184)
* [6c5608ec2](https://github.com/argoproj/argo-workflows/commit/6c5608ec2eae3d4b06daf7f76c7891240cb12f50) fix: split pod controller from workflow controller (#14129)
* [1e5d0fe19](https://github.com/argoproj/argo-workflows/commit/1e5d0fe1931dd9b67c2831b8aab42c360057baf5) fix: correct type of pods_total_count metric (#14130)
* [6c34316fe](https://github.com/argoproj/argo-workflows/commit/6c34316fefe3cc1c0f2bc1bdd65e1bbaacddc822) fix: remove `/test` command comment (#14164)
* [f80005b2f](https://github.com/argoproj/argo-workflows/commit/f80005b2f6955c27b443ad66ee0359b3fb6a68d5) fix: template validation failures (revert #14053) (#14168)
* [e4f51c23a](https://github.com/argoproj/argo-workflows/commit/e4f51c23af08ce2e0cd449b1e454b2d9103e3980) feat: enable cherry-pick bot (#14151)
* [abeffc7fa](https://github.com/argoproj/argo-workflows/commit/abeffc7fa306c7dc08738afd7e1db78f201ffb03) fix(controller): task progress with mutexes not updated. Fixes #14148 (#14149)
* [e79d1c5c5](https://github.com/argoproj/argo-workflows/commit/e79d1c5c527491b05d7fc421e8b903208b8a5f1c) fix: locking in metrics (#14144)
* [63b9e906f](https://github.com/argoproj/argo-workflows/commit/63b9e906f87686f9d7affa69f4de56851978f73b) fix: bump deps for k8schain to fix ecr-login (#14008)
* [573d985cf](https://github.com/argoproj/argo-workflows/commit/573d985cf150b426aa43245da340cbb7663196bb) feat: add documentation section to PR template (#14126)
* [5a2e08c0e](https://github.com/argoproj/argo-workflows/commit/5a2e08c0e694002f03b5781b0ea0b43e8c42aa21) feat: set template display name in yaml (#14077)
* [c92ac3630](https://github.com/argoproj/argo-workflows/commit/c92ac3630b07bb83b94d7d690fb694c9067bc4a8) fix(examples): map-reduce parts artifact path. Fixes #14091 (#14096)
* [7305c1800](https://github.com/argoproj/argo-workflows/commit/7305c180066b3ab4d3140e5ac966e5a473b18966) feat: label actor action when making change to workflow/template. Fixes #14102 (#14104)
* [b2a2c8f47](https://github.com/argoproj/argo-workflows/commit/b2a2c8f473483d92813e9752eb699e9a308f3e7f) feat(controller): support cap on retryStrategy backoff. Fixes #13772 (#13782)
* [8304dc704](https://github.com/argoproj/argo-workflows/commit/8304dc704c138a105c6a0e66bac5292d74a60879) fix(manifests): fix full CRDs and update tests to use them. Fixes #8532 (#14044)
* [b1a65e79f](https://github.com/argoproj/argo-workflows/commit/b1a65e79fe583134117c33814b54dcaba40c4238) fix: get logs from artifact when workflow deleted instead of archived. Fixes: #14083 (#14087)
* [a7a72e7ca](https://github.com/argoproj/argo-workflows/commit/a7a72e7ca3c2bbf127b65fdcf1b5b85fbd2aad19) fix: update upload-artifact and download-artifact (#14070)
* [09d5ee75e](https://github.com/argoproj/argo-workflows/commit/09d5ee75ed1cf480e4a8c2663256a4b12db720da) fix: ensure namespace parallelism and parallelism work together. Fixes #10985 (#14039)
* [e088cfc66](https://github.com/argoproj/argo-workflows/commit/e088cfc66e6bf263d273b20d15a7e6722ff4e3ea) feat: Fixes #8646 visualize wf before submitting (#14034)
* [2ece85c23](https://github.com/argoproj/argo-workflows/commit/2ece85c23646574434518a3ffb2236f2162f58b1) fix: validate template of the same name. Fixes #13763 (#14043)
* [82537a773](https://github.com/argoproj/argo-workflows/commit/82537a7737028a0a2ee579f7d8d1e54dabcf6481) fix(controller): validation failed when dynamic templateRef is used in nested template (#14053)
* [2719e064d](https://github.com/argoproj/argo-workflows/commit/2719e064de5e5b7635c59d155b1b4c1f81c05066) fix: add workflow template level pod annotations and labels to template. Fixes: #12945 (#12987)
* [a91bd843b](https://github.com/argoproj/argo-workflows/commit/a91bd843b54c7511121da3561776e41c46f28809) chore(deps): bump github.com/go-git/go-git/v5 from 5.11.0 to 5.13.1 in the go_modules group (#14055)
* [b93ffd871](https://github.com/argoproj/argo-workflows/commit/b93ffd871383972de1ed2904a8b36f61c251b480) fix: search and replace null bytes for postgres only.Fixes #13711 (#14030)
* [ef41f83f8](https://github.com/argoproj/argo-workflows/commit/ef41f83f801a6ff48c87f882a6b75d0e37529134) feat: Allow markdown title and description in KeyValueEditor (#13935)
* [14dedc80f](https://github.com/argoproj/argo-workflows/commit/14dedc80f8edda5a7980c663928667c278a3a08a) refactor(ui): artifacts table Fixes #8306 (#14037)
* [2e0f2f776](https://github.com/argoproj/argo-workflows/commit/2e0f2f7767fe5ef767bc3e20ec6952a51027b9f9) feat: support archive logs in resource template. Fixes:#9900 (#13933)
* [1add49ed5](https://github.com/argoproj/argo-workflows/commit/1add49ed5adea43b34f35a74d2ae23743ec2b2d9) fix: respect `workflowDefaults` when validating workflows. Fixes #10946. Fixes #11465 (#13642)
* [1d3695640](https://github.com/argoproj/argo-workflows/commit/1d36956408aa5652959a4c58a7743f89a8f9e0a4) fix(controller): step group stuck on running when exit hook has illegal expression (#14032)
* [6699ab396](https://github.com/argoproj/argo-workflows/commit/6699ab396f830210f6dcac4f00a9328a629c142f) chore(deps): bump golang.org/x/net from 0.28.0 to 0.33.0 (#14025)
* [1d85e685d](https://github.com/argoproj/argo-workflows/commit/1d85e685d41d25524fcc99a9fbccbe1bfe3fe2a7) fix: ensure that nodes complete when workflow fails with `parallelism` and `failFast`. Fixes #13806 (#13827)
* [53eaef83f](https://github.com/argoproj/argo-workflows/commit/53eaef83f4518e7ac8fc1c8ca500f31ed727e1d3) chore(deps): bump golang.org/x/crypto from 0.26.0 to 0.31.0 in the go_modules group (#13992)
* [abd14b780](https://github.com/argoproj/argo-workflows/commit/abd14b7801647984859fd33e738a060199b40d9a) fix: further optimize archive workflow listing. Fixes #13601 (#13819)
* [f796449df](https://github.com/argoproj/argo-workflows/commit/f796449df96888538af873bd6871374c2156db7b) fix(ui): skip history.push on initial page load. Fixes #13940 (#13995)
* [43c6abd1f](https://github.com/argoproj/argo-workflows/commit/43c6abd1fe009f36c290545655080eb9c9c197fd) fix: mark all its children(container) as deleted if pod deleted. Fixes #13951 (#13978)
* [95182526f](https://github.com/argoproj/argo-workflows/commit/95182526f57158e6b2a9877ff9d993fad5333e60) refactor: autogenerate metrics docs (#13943)
* [3d5ba1b87](https://github.com/argoproj/argo-workflows/commit/3d5ba1b8747265db206a5bd4cd4564547163ece6) feat(ui): prefill parameters for workflow submit form. Fixes #12124 (#13922)
* [231d548ee](https://github.com/argoproj/argo-workflows/commit/231d548ee276e9fbd0acf0991702ebec88d65a65) fix(ui): improve markdown styles for workflow-row, workflow-templates, cluster-workflow-templates, and cron-workflows (#13930)
* [b6124464e](https://github.com/argoproj/argo-workflows/commit/b6124464ec19e6014459641c4fb69b6e44929c31) fix(ci): snyk scan (#13976)
* [10aaf3eb1](https://github.com/argoproj/argo-workflows/commit/10aaf3eb1873d88d3f9ab3027d9452b40c91d9d9) fix: replace deprecated `bouk/staticfiles` with native Go `embed`. fixes #11654 (#11707)
* [9d1d2cd51](https://github.com/argoproj/argo-workflows/commit/9d1d2cd518628b2d4298b435386e82c7e7960f0d) fix: cronOperator/serverResubmitWf retry create workflow on transient error. Fixes #13970 (#13971)
* [de10e066d](https://github.com/argoproj/argo-workflows/commit/de10e066da8e824fb28e2f79421af1cdb2665eb8) chore(deps)!: bump k8s dependencies to 1.31 (#13944)
* [97b94f0f6](https://github.com/argoproj/argo-workflows/commit/97b94f0f67714beacd7b6f2ef086d13f09cd158f) fix(api): properly authorize GET workflow fallback to archive (#13957)
* [82f69a56c](https://github.com/argoproj/argo-workflows/commit/82f69a56c0a2f9cf8120d6a34ae39315737e03c4) chore(deps): update nixpkgs to nixos-24.11 (#13914)
* [1f304ba67](https://github.com/argoproj/argo-workflows/commit/1f304ba6780ee043ea5ecd74e06787bcba298f8f) feat: Move contextless log messages to debug logging - fixes #13918 (#13920)
* [7d6d8f310](https://github.com/argoproj/argo-workflows/commit/7d6d8f310ce57ae515b1580aa40cd8399eb37746) fix: Skip execution control for agent pod during pod reconciliation. Fixes #12726 (#12732)
* [fe8df345d](https://github.com/argoproj/argo-workflows/commit/fe8df345d316a869e99e7105cbc6df87bb539101) fix(ui): handle parsing errors properly in object editor (#13931)
* [f22ae3b87](https://github.com/argoproj/argo-workflows/commit/f22ae3b87a6cf5c0b7131bb49da6dea097edee0e) perf: Add workflow template informer to server (#13672)
* [db6206a22](https://github.com/argoproj/argo-workflows/commit/db6206a22271259c66344f7bcf77de551bf777de) feat: Allow markdown title and description in CronWorkflows, WorkflowTemplates, & ClusterWorkflowTemplates. Fixes #12644 (#12697)
* [6b221f460](https://github.com/argoproj/argo-workflows/commit/6b221f460a5823cd0ada098f21ca5eb37797f2df) fix: don't log non-errors as "Non-transient error: <nil>". Fixes #13881 (#13917)
* [f2159dcd8](https://github.com/argoproj/argo-workflows/commit/f2159dcd841c8168df6acaf7afa0c54d24293c84) fix: consistent variable substitution for `configMapKeyRef`. Fixes #13890 (#13921)
* [1392ef516](https://github.com/argoproj/argo-workflows/commit/1392ef516fd69a2cae875d746c1155f940e4948c) fix(ui): improve editor performance and fix Submit button. Fixes #13892 (#13915)
* [eb4f2456e](https://github.com/argoproj/argo-workflows/commit/eb4f2456e077d89324ea71ef5f5e92cdccd8157a) fix(ui): Clickable URLs are messing up formatting in the UI (#13923)
* [497f33876](https://github.com/argoproj/argo-workflows/commit/497f338764b68d6f3607113bf7a451d57d1427c4) fix: correct retry logic (#13734)
* [40e95a0f0](https://github.com/argoproj/argo-workflows/commit/40e95a0f0af9d5e38f218be2ad4c6aca1b933a7c) chore(deps): bump github.com/golang-jwt/jwt/v4 from 4.5.0 to 4.5.1 in the go_modules group (#13865)
* [2fd548848](https://github.com/argoproj/argo-workflows/commit/2fd54884844bb76d760466027afa023c5bfd6b64) fix(ui): fix broken workflowtemplate submit button. Fixes #13892 (#13913)
* [217b59868](https://github.com/argoproj/argo-workflows/commit/217b598684c6d0cb9384e8c649f8e73659c5f9e5) fix: consistently set executor log options (#12979)
* [f470fdab2](https://github.com/argoproj/argo-workflows/commit/f470fdab279a4e0f28c6e324f1a337dafb73ec13) feat: include container name in error message. Fixes #10007 (#13790)
* [2f3d6a677](https://github.com/argoproj/argo-workflows/commit/2f3d6a6771bdeb7c144ba9f925c8f76c0f9dcd17) refactor(deps): remove `moment` dep and usage (#12611)
* [5d893b161](https://github.com/argoproj/argo-workflows/commit/5d893b161bbe4833d578be9f6c0322849215c23f) fix: bump minio-go to version that supports eks pod identity #13800 (#13854)
* [3df05eba8](https://github.com/argoproj/argo-workflows/commit/3df05eba8cd114c3b956c8dbe1d533f52216f136) fix: Set default value to output parameters if suspend node timeout. Fixes #12230 (#12960)
<details><summary><h3>Contributors</h3></summary>
* Adrien Delannoy
* Alan Clucas
* Alec Kloss
* Alex Collins
* Anton Gilgur
* atgane
* Bing Hongtao
* Blair Drummond
* Carlos R.F.
* chengjoey
* chenrui
* chenrui7
* Chris Reilly
* Copilot
* Darko Janjic
* dmarquez-splunk
* Dmitri Rabinowitz
* eduardodbr
* Elliot Gunton
* Emmanuel Ferdman
* Eric S
* garireo2549
* htquanq
* instauro
* Isitha Subasinghe
* isubasinghe
* Jakub Buczak
* Joibel
* jswxstw
* Kaan C. Fidan
* Kat
* koichi
* Luis Pflamminger
* Mason Malone
* Matteo Baiguini
* MenD32
* Michael Crenshaw
* Mikael Johansson
* Oliver
* panicboat
* Paul Watts
* Prabakaran Kumaresshan
* Radu Sora
* Ralph Bean
* Rauñ
* Roger Peppe
* Rohan K
* Ryan Currah
* Sairam Arunachalam
* Saravanan Balasubramanian
* shuangkun tian
* Son Bui
* Steven Johnson
* Tianchu Zhao
* Tim Collins
* tooptoop4
* Tzu-Ting
* Unperceivable
* Vaibhav Kaushik
* Ville Vesilehto
* williamburgson
* William Van Hevelingen
* Yuan Tang
* Yulin Li
* Yusuke Abe
</details>
## v3.6.10 (2025-06-04)
Full Changelog: [v3.6.9...v3.6.10](https://github.com/argoproj/argo-workflows/compare/v3.6.9...v3.6.10)
### Selected Changes
* [1285c11c8](https://github.com/argoproj/argo-workflows/commit/1285c11c80efa606ba87b138300309a57dd36368) Revert "fix: update TLS config to support ALPN. Fixes #14422 (#14435)"
<details><summary><h3>Contributors</h3></summary>
* Alan Clucas
</details>
## v3.6.9 (2025-06-03)
Full Changelog: [v3.6.8...v3.6.9](https://github.com/argoproj/argo-workflows/compare/v3.6.8...v3.6.9)
@@ -344,7 +627,7 @@ Full Changelog: [v3.6.0-rc1...v3.6.0-rc2](https://github.com/argoproj/argo-workf
## v3.6.0-rc1 (2024-09-18)
Full Changelog: [v3.5.14...v3.6.0-rc1](https://github.com/argoproj/argo-workflows/compare/v3.5.14...v3.6.0-rc1)
Full Changelog: [v3.5.15...v3.6.0-rc1](https://github.com/argoproj/argo-workflows/compare/v3.5.15...v3.6.0-rc1)
### Selected Changes
@@ -888,6 +1171,25 @@ Full Changelog: [v3.5.14...v3.6.0-rc1](https://github.com/argoproj/argo-workflow
</details>
## v3.5.15 (2025-06-13)
Full Changelog: [v3.5.14...v3.5.15](https://github.com/argoproj/argo-workflows/compare/v3.5.14...v3.5.15)
### Selected Changes
* [b44c9b6a2](https://github.com/argoproj/argo-workflows/commit/b44c9b6a21cc60c00ef0cf3ca6a6a49e197a59b7) chore(deps): update for CVEs in deps
* [dd6368466](https://github.com/argoproj/argo-workflows/commit/dd6368466244ed692dec7c10c1bbe1ad87ae4414) fix: ensure variable sub and processing is performed. Fixes #12941 (cherry-pick #14444) (#14485)
* [e9e5ed760](https://github.com/argoproj/argo-workflows/commit/e9e5ed76032eafd60f5bc2c9031c3bde3dd80f22) feat: enable cherry-pick bot (#14151)
<details><summary><h3>Contributors</h3></summary>
* Alan Clucas
* Alex Collins
* Isitha Subasinghe
* Rohan K
</details>
## v3.5.14 (2025-01-31)
Full Changelog: [v3.5.13...v3.5.14](https://github.com/argoproj/argo-workflows/compare/v3.5.13...v3.5.14)

View File

@@ -3,7 +3,7 @@ ARG GIT_COMMIT=unknown
ARG GIT_TAG=unknown
ARG GIT_TREE_STATE=unknown
FROM golang:1.24-alpine3.21 as builder
FROM golang:1.24.4-alpine3.22 as builder
# libc-dev to build openapi-gen
RUN apk update && apk add --no-cache \
@@ -123,6 +123,8 @@ USER 8737
WORKDIR /home/argo
# Temporary workaround for https://github.com/grpc/grpc-go/issues/434
ENV GRPC_ENFORCE_ALPN_ENABLED=false
COPY hack/ssh_known_hosts /etc/ssh/
COPY hack/nsswitch.conf /etc/
COPY --from=argocli-build /go/src/github.com/argoproj/argo-workflows/dist/argo /bin/

View File

@@ -184,6 +184,7 @@ SWAGGER_FILES := pkg/apiclient/_.primary.swagger.json \
pkg/apiclient/workflowarchive/workflow-archive.swagger.json \
pkg/apiclient/workflowtemplate/workflow-template.swagger.json
PROTO_BINARIES := $(TOOL_PROTOC_GEN_GOGO) $(TOOL_PROTOC_GEN_GOGOFAST) $(TOOL_GOIMPORTS) $(TOOL_PROTOC_GEN_GRPC_GATEWAY) $(TOOL_PROTOC_GEN_SWAGGER) $(TOOL_CLANG_FORMAT)
GENERATED_DOCS := docs/fields.md docs/cli/argo.md docs/workflow-controller-configmap.md
# protoc,my.proto
define protoc
@@ -297,18 +298,18 @@ argoexec-nonroot-image:
-t $$image_name \
--target $* \
--load \
.
[ ! -e $* ] || mv $* dist/
docker run --rm -t $$image_name version
.; \
[ ! -e $* ] || mv $* dist/; \
docker run --rm -t $$image_name version; \
if [ $(K3D) = true ]; then \
k3d image import -c $(K3D_CLUSTER_NAME) $$image_name; \
fi
fi; \
if [ $(DOCKER_PUSH) = true ] && [ $(IMAGE_NAMESPACE) != argoproj ] ; then \
docker push $$image_name; \
fi
.PHONY: codegen
codegen: types swagger manifests $(TOOL_MOCKERY) docs/fields.md docs/cli/argo.md
codegen: types swagger manifests $(TOOL_MOCKERY) $(GENERATED_DOCS)
go generate ./...
# The generated markdown contains links to nowhere for interfaces, so remove them
sed -i.bak 's/\[interface{}\](#interface)/`interface{}`/g' docs/executor_swagger.md && rm -f docs/executor_swagger.md.bak
@@ -410,7 +411,8 @@ else
endif
endif
pkg/apis/workflow/v1alpha1/generated.proto: $(TOOL_GO_TO_PROTOBUF) $(PROTO_BINARIES) $(TYPES) $(GOPATH)/src/github.com/gogo/protobuf
# go-to-protobuf fails with mysterious errors on code that doesn't compile, hence lint-go as a dependency here
pkg/apis/workflow/v1alpha1/generated.proto: $(TOOL_GO_TO_PROTOBUF) $(PROTO_BINARIES) $(TYPES) $(GOPATH)/src/github.com/gogo/protobuf lint-go
# These files are generated on a v3/ folder by the tool. Link them to the root folder
[ -e ./v3 ] || ln -s . v3
# Format proto files. Formatting changes generated code, so we do it here, rather that at lint time.
@@ -504,8 +506,9 @@ manifests-validate:
$(TOOL_GOLANGCI_LINT): Makefile
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v2.1.6
.PHONY: lint
lint: ui/dist/app/index.html $(TOOL_GOLANGCI_LINT)
.PHONY: lint lint-go lint-ui
lint: lint-go lint-ui features-validate
lint-go: $(TOOL_GOLANGCI_LINT) ui/dist/app/index.html
rm -Rf v3 vendor
# If you're using `woc.wf.Spec` or `woc.execWf.Status` your code probably won't work with WorkflowTemplate.
# * Change `woc.wf.Spec` to `woc.execWf.Spec`.
@@ -515,6 +518,8 @@ lint: ui/dist/app/index.html $(TOOL_GOLANGCI_LINT)
go mod tidy
# Lint Go files
$(TOOL_GOLANGCI_LINT) run --fix --verbose
lint-ui: ui/dist/app/index.html
# Lint the UI
if [ -e ui/node_modules ]; then yarn --cwd ui lint ; fi
# Deduplicate Node modules
@@ -753,12 +758,13 @@ docs/assets/diagram.png: go-diagrams/diagram.dot
docs/fields.md: api/openapi-spec/swagger.json $(shell find examples -type f) ui/dist/app/index.html hack/docs/fields.go
env ARGO_SECURE=false ARGO_INSECURE_SKIP_VERIFY=false ARGO_SERVER= ARGO_INSTANCEID= go run ./hack/docs fields
docs/workflow-controller-configmap.md: config/*.go hack/docs/workflow-controller-configmap.md hack/docs/configdoc.go
go run ./hack/docs configdoc
# generates several other files
docs/cli/argo.md: $(CLI_PKG_FILES) go.sum ui/dist/app/index.html hack/docs/cli.go
go run ./hack/docs cli
# docs
$(TOOL_MDSPELL): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
@@ -768,9 +774,9 @@ endif
.PHONY: docs-spellcheck
docs-spellcheck: $(TOOL_MDSPELL) docs/metrics.md
# check docs for spelling mistakes
$(TOOL_MDSPELL) --ignore-numbers --ignore-acronyms --en-us --no-suggestions --report $(shell find docs -name '*.md' -not -name upgrading.md -not -name README.md -not -name fields.md -not -name upgrading.md -not -name executor_swagger.md -not -path '*/cli/*' -not -name tested-kubernetes-versions.md)
$(TOOL_MDSPELL) --ignore-numbers --ignore-acronyms --en-us --no-suggestions --report $(shell find docs -name '*.md' -not -name upgrading.md -not -name README.md -not -name fields.md -not -name workflow-controller-configmap.md -not -name upgrading.md -not -name executor_swagger.md -not -path '*/cli/*' -not -name tested-kubernetes-versions.md)
# alphabetize spelling file -- ignore first line (comment), then sort the rest case-sensitive and remove duplicates
$(shell cat .spelling | awk 'NR<2{ print $0; next } { print $0 | "LC_COLLATE=C sort" }' | uniq | tee .spelling > /dev/null)
$(shell cat .spelling | awk 'NR<2{ print $0; next } { print $0 | "LC_COLLATE=C sort" }' | uniq > .spelling.tmp && mv .spelling.tmp .spelling)
$(TOOL_MARKDOWN_LINK_CHECK): Makefile
# update this in Nix when upgrading it here
@@ -789,7 +795,6 @@ ifneq ($(USE_NIX), true)
npm list -g markdownlint-cli@0.33.0 > /dev/null || npm i -g markdownlint-cli@0.33.0
endif
.PHONY: docs-lint
docs-lint: $(TOOL_MARKDOWNLINT) docs/metrics.md
# lint docs
@@ -847,6 +852,41 @@ release-notes: /dev/null
checksums:
sha256sum ./dist/argo-*.gz | awk -F './dist/' '{print $$1 $$2}' > ./dist/argo-workflows-cli-checksums.txt
# feature notes
FEATURE_FILENAME?=$(shell git branch --show-current)
.PHONY: feature-new
feature-new: hack/featuregen/featuregen
# Create a new feature documentation file in .features/pending/ ready for editing
# Uses the current branch name as the filename by default, or specify with FEATURE_FILENAME=name
$< new --filename $(FEATURE_FILENAME)
.PHONY: features-validate
features-validate: hack/featuregen/featuregen
# Validate all pending feature documentation files
$< validate
.PHONY: features-preview
features-preview: hack/featuregen/featuregen
# Preview how the features will appear in the documentation (dry run)
# Output to stdout
$< update --dry
.PHONY: features-update
features-update: hack/featuregen/featuregen
# Update the features documentation, but keep the feature files in the pending directory
# Updates docs/new-features.md for release-candidates
$< update --version $(VERSION)
.PHONY: features-release
features-release: hack/featuregen/featuregen
# Update the features documentation AND move the feature files to the released directory
# Use this for the final update when releasing a version
$< update --version $(VERSION) --final
hack/featuregen/featuregen: hack/featuregen/main.go hack/featuregen/contents.go hack/featuregen/contents_test.go hack/featuregen/main_test.go
go test ./hack/featuregen
go build -o $@ ./hack/featuregen
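A plausible local flow with these targets (the feature name is illustrative):
```bash
make feature-new FEATURE_FILENAME=my-feature   # scaffold .features/pending/my-feature.md
make features-validate                         # validate all pending feature notes
make features-preview                          # dry-run render of the feature docs to stdout
```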
# dev container
$(TOOL_DEVCONTAINER): Makefile

View File

@@ -105,6 +105,7 @@ Currently, the following organizations are **officially** using Argo Workflows:
1. [Hemisphere Digital](https://hemisphere.digital)
1. [HOVER](https://hover.to)
1. [HSBC](https://hsbc.com)
1. [Hydrogrid](https://hydrogrid.ai)
1. [IBM](https://ibm.com)
1. [Iflytek](https://www.iflytek.com/)
1. [Inceptio Technology](https://www.inceptio.ai/)

View File

@@ -5782,6 +5782,10 @@
"description": "\"workflow\", \"pod\", \"pod-logs\", \"event-source-logs\", \"sensor-logs\", \"workflow-list\" or \"chat\"",
"type": "string"
},
"target": {
"description": "Target attribute specifies where a linked document will be opened when a user clicks on a link. E.g. \"_blank\", \"_self\". If the target is _blank, it will open in a new tab.",
"type": "string"
},
"url": {
"description": "The URL. Can contain \"${metadata.namespace}\", \"${metadata.name}\", \"${status.startedAt}\", \"${status.finishedAt}\" or any other element in workflow yaml, e.g. \"${io.argoproj.workflow.v1alpha1.metadata.annotations.userDefinedKey}\"",
"type": "string"
@@ -5790,7 +5794,8 @@
"required": [
"name",
"scope",
"url"
"url",
"target"
],
"type": "object",
"x-kubernetes-patch-merge-key": "name",
@@ -6090,6 +6095,10 @@
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.NodeSynchronizationStatus",
"description": "SynchronizationStatus is the synchronization status of the node"
},
"taskResultSynced": {
"description": "TaskResultSynced is used to determine if the node's output has been received",
"type": "boolean"
},
"templateName": {
"description": "TemplateName is the template name which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)",
"type": "string"

View File

@@ -95,6 +95,12 @@
"type": "string",
"name": "namespace",
"in": "query"
},
{
"type": "string",
"description": "Filter type used for name filtering. Exact | Contains | Prefix. Default to Exact.",
"name": "nameFilter",
"in": "query"
}
],
"responses": {
@@ -9871,7 +9877,8 @@
"required": [
"name",
"scope",
"url"
"url",
"target"
],
"properties": {
"name": {
@@ -9882,6 +9889,10 @@
"description": "\"workflow\", \"pod\", \"pod-logs\", \"event-source-logs\", \"sensor-logs\", \"workflow-list\" or \"chat\"",
"type": "string"
},
"target": {
"description": "Target attribute specifies where a linked document will be opened when a user clicks on a link. E.g. \"_blank\", \"_self\". If the target is _blank, it will open in a new tab.",
"type": "string"
},
"url": {
"description": "The URL. Can contain \"${metadata.namespace}\", \"${metadata.name}\", \"${status.startedAt}\", \"${status.finishedAt}\" or any other element in workflow yaml, e.g. \"${io.argoproj.workflow.v1alpha1.metadata.annotations.userDefinedKey}\"",
"type": "string"
@@ -10190,6 +10201,10 @@
"description": "SynchronizationStatus is the synchronization status of the node",
"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.NodeSynchronizationStatus"
},
"taskResultSynced": {
"description": "TaskResultSynced is used to determine if the node's output has been received",
"type": "boolean"
},
"templateName": {
"description": "TemplateName is the template name which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)",
"type": "string"

View File

@@ -1,8 +1,11 @@
package archive
import (
"context"
"fmt"
"github.com/argoproj/argo-workflows/v3/util/logging"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -39,11 +42,17 @@ func NewListLabelValueCommand() *cobra.Command {
for _, str := range labels.Items {
fmt.Printf("%s\n", str)
}
return nil
},
}
ctx := command.Context()
if ctx != nil {
ctx = logging.WithLogger(context.Background(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
command.SetContext(ctx)
}
command.Flags().StringVarP(&selector, "selector", "l", "", "Selector (label query) to query on, allows 1 value (e.g. -l key1)")
err := command.MarkFlagRequired("selector")
errors.CheckError(err)
errors.CheckError(ctx, err)
return command
}
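
The ensure-a-logger dance above recurs throughout the CLI and test changes that follow. A sketch of the shared idiom factored into a helper; ensureLoggerContext is hypothetical and only composes logging functions that appear in these diffs:

package main

import (
	"context"

	"github.com/spf13/cobra"

	"github.com/argoproj/argo-workflows/v3/util/logging"
)

// ensureLoggerContext (hypothetical helper) guarantees that a command's
// context exists and carries a slog-backed logger before any RunE body runs.
func ensureLoggerContext(cmd *cobra.Command) context.Context {
	ctx := cmd.Context()
	if ctx == nil {
		ctx = context.Background()
	}
	if logging.GetLoggerFromContext(ctx) == nil {
		log := logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat())
		ctx = logging.WithLogger(ctx, log)
	}
	cmd.SetContext(ctx)
	return ctx
}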

View File

@ -6,6 +6,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
func TestGetAuthString(t *testing.T) {
@ -24,7 +26,8 @@ func TestCreateOfflineClient(t *testing.T) {
t.Run("creating an offline client with no files should not fail", func(t *testing.T) {
Offline = true
OfflineFiles = []string{}
_, _, err := NewAPIClient(context.TODO())
ctx := logging.WithLogger(context.TODO(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
_, _, err := NewAPIClient(ctx)
assert.NoError(t, err)
})
@ -32,7 +35,8 @@ func TestCreateOfflineClient(t *testing.T) {
t.Run("creating an offline client with a non-existing file should fail", func(t *testing.T) {
Offline = true
OfflineFiles = []string{"non-existing-file"}
_, _, err := NewAPIClient(context.TODO())
ctx := logging.WithLogger(context.TODO(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
_, _, err := NewAPIClient(ctx)
assert.Error(t, err)
})

View File

@ -36,10 +36,10 @@ func WatchWorkflow(ctx context.Context, serviceClient workflowpkg.WorkflowServic
if err == io.EOF {
log.Debug("Re-establishing workflow watch")
stream, err = serviceClient.WatchWorkflows(ctx, req)
errors.CheckError(err)
errors.CheckError(ctx, err)
continue
}
errors.CheckError(err)
errors.CheckError(ctx, err)
if event == nil {
continue
}
@ -64,7 +64,7 @@ func WatchWorkflow(ctx context.Context, serviceClient workflowpkg.WorkflowServic
return nil
}
err := printWorkflowStatus(wf, getArgs)
err := printWorkflowStatus(ctx, wf, getArgs)
if err != nil {
return err
}
@ -74,11 +74,11 @@ func WatchWorkflow(ctx context.Context, serviceClient workflowpkg.WorkflowServic
}
}
func printWorkflowStatus(wf *wfv1.Workflow, getArgs GetFlags) error {
func printWorkflowStatus(ctx context.Context, wf *wfv1.Workflow, getArgs GetFlags) error {
if wf == nil {
return nil
}
if err := packer.DecompressWorkflow(wf); err != nil {
if err := packer.DecompressWorkflow(ctx, wf); err != nil {
return err
}
print("\033[H\033[2J")

View File

@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
var invalidCwf = `
@ -58,13 +59,17 @@ Conditions:
func TestPrintCronWorkflow(t *testing.T) {
var cronWf = v1alpha1.MustUnmarshalCronWorkflow(invalidCwf)
out := getCronWorkflowGet(context.Background(), cronWf)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
out := getCronWorkflowGet(ctx, cronWf)
assert.Contains(t, out, expectedOut)
}
func TestNextRuntime(t *testing.T) {
var cronWf = v1alpha1.MustUnmarshalCronWorkflow(invalidCwf)
next, err := GetNextRuntime(context.Background(), cronWf)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
next, err := GetNextRuntime(ctx, cronWf)
require.NoError(t, err)
assert.LessOrEqual(t, next.Unix(), time.Now().Add(1*time.Minute).Unix())
assert.Greater(t, next.Unix(), time.Now().Unix())
@ -96,7 +101,9 @@ spec:
func TestNextRuntimeWithMultipleSchedules(t *testing.T) {
var cronWf = v1alpha1.MustUnmarshalCronWorkflow(cronMultipleSchedules)
next, err := GetNextRuntime(context.Background(), cronWf)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
next, err := GetNextRuntime(ctx, cronWf)
require.NoError(t, err)
assert.LessOrEqual(t, next.Unix(), time.Now().Add(1*time.Minute).Unix())
assert.Greater(t, next.Unix(), time.Now().Unix())

View File

@ -9,6 +9,8 @@ import (
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
func Test_OfflineLint(t *testing.T) {
@ -90,7 +92,9 @@ spec:
var fatal bool
logrus.StandardLogger().ExitFunc = func(int) { fatal = true }
err = runLint(context.Background(), []string{workflowPath}, true, nil, "pretty", true)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err = runLint(ctx, []string{workflowPath}, true, nil, "pretty", true)
require.NoError(t, err)
assert.True(t, fatal, "should have exited")
@ -101,7 +105,9 @@ spec:
var fatal bool
logrus.StandardLogger().ExitFunc = func(int) { fatal = true }
err = runLint(context.Background(), []string{workflowPath, clusterWftmplPath}, true, nil, "pretty", true)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err = runLint(ctx, []string{workflowPath, clusterWftmplPath}, true, nil, "pretty", true)
require.NoError(t, err)
assert.True(t, fatal, "should have exited")
@ -112,7 +118,9 @@ spec:
var fatal bool
logrus.StandardLogger().ExitFunc = func(int) { fatal = true }
err = runLint(context.Background(), []string{workflowPath, wftmplPath}, true, nil, "pretty", true)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err = runLint(ctx, []string{workflowPath, wftmplPath}, true, nil, "pretty", true)
require.NoError(t, err)
assert.True(t, fatal, "should have exited")
@ -123,7 +131,9 @@ spec:
var fatal bool
logrus.StandardLogger().ExitFunc = func(int) { fatal = true }
err = runLint(context.Background(), []string{wftmplPath}, true, nil, "pretty", true)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err = runLint(ctx, []string{wftmplPath}, true, nil, "pretty", true)
require.NoError(t, err)
assert.False(t, fatal, "should not have exited")
@ -134,7 +144,9 @@ spec:
var fatal bool
logrus.StandardLogger().ExitFunc = func(int) { fatal = true }
err = runLint(context.Background(), []string{clusterWftmplPath}, true, nil, "pretty", true)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err = runLint(ctx, []string{clusterWftmplPath}, true, nil, "pretty", true)
require.NoError(t, err)
assert.False(t, fatal, "should not have exited")
@ -145,7 +157,9 @@ spec:
var fatal bool
logrus.StandardLogger().ExitFunc = func(int) { fatal = true }
err = runLint(context.Background(), []string{workflowPath, wftmplPath, clusterWftmplPath}, true, nil, "pretty", true)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err = runLint(ctx, []string{workflowPath, wftmplPath, clusterWftmplPath}, true, nil, "pretty", true)
require.NoError(t, err)
assert.False(t, fatal, "should not have exited")
@ -156,7 +170,9 @@ spec:
var fatal bool
logrus.StandardLogger().ExitFunc = func(int) { fatal = true }
err = runLint(context.Background(), []string{dir}, true, nil, "pretty", true)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err = runLint(ctx, []string{dir}, true, nil, "pretty", true)
require.NoError(t, err)
assert.False(t, fatal, "should not have exited")
@ -173,7 +189,9 @@ spec:
require.NoError(t, err)
defer func() { _ = os.Stdin.Close() }() // close previously opened path to avoid errors trying to remove the file.
err = runLint(context.Background(), []string{workflowPath, wftmplPath, "-"}, true, nil, "pretty", true)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err = runLint(ctx, []string{workflowPath, wftmplPath, "-"}, true, nil, "pretty", true)
require.NoError(t, err)
assert.False(t, fatal, "should not have exited")
@ -205,7 +223,9 @@ spec:
var fatal bool
logrus.StandardLogger().ExitFunc = func(int) { fatal = true }
err = runLint(context.Background(), []string{workflowCaseSensitivePath}, true, nil, "pretty", true)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err = runLint(ctx, []string{workflowCaseSensitivePath}, true, nil, "pretty", true)
require.NoError(t, err)
assert.True(t, fatal, "should have exited")
@ -216,7 +236,9 @@ spec:
var fatal bool
logrus.StandardLogger().ExitFunc = func(int) { fatal = true }
err = runLint(context.Background(), []string{workflowCaseSensitivePath}, true, nil, "pretty", false)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err = runLint(ctx, []string{workflowCaseSensitivePath}, true, nil, "pretty", false)
require.NoError(t, err)
assert.False(t, fatal, "should not have exited")
@ -276,7 +298,9 @@ spec:
defer func() { logrus.StandardLogger().ExitFunc = nil }()
var fatal bool
logrus.StandardLogger().ExitFunc = func(int) { fatal = true }
err = runLint(context.Background(), []string{workflowMultiDocsPath}, true, nil, "pretty", false)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err = runLint(ctx, []string{workflowMultiDocsPath}, true, nil, "pretty", false)
require.NoError(t, err)
assert.False(t, fatal, "should not have exited")

View File

@ -13,6 +13,7 @@ import (
"github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
workflowmocks "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow/mocks"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/logging"
"github.com/argoproj/argo-workflows/v3/workflow/common"
)
@ -81,21 +82,25 @@ func Test_listWorkflows(t *testing.T) {
func list(listOptions *metav1.ListOptions, flags listFlags) (wfv1.Workflows, error) {
c := &workflowmocks.WorkflowServiceClient{}
c.On("ListWorkflows", mock.Anything, &workflow.WorkflowListRequest{ListOptions: listOptions, Fields: flags.displayFields()}).Return(&wfv1.WorkflowList{Items: wfv1.Workflows{
{ObjectMeta: metav1.ObjectMeta{Name: "foo-", CreationTimestamp: metav1.Time{Time: time.Now().Add(-2 * time.Hour)}}, Status: wfv1.WorkflowStatus{FinishedAt: metav1.Time{Time: time.Now().Add(-2 * time.Hour)}}},
{ObjectMeta: metav1.ObjectMeta{Name: "bar-", CreationTimestamp: metav1.Time{Time: time.Now()}}},
{ObjectMeta: metav1.ObjectMeta{
wfv1.Workflow{ObjectMeta: metav1.ObjectMeta{Name: "foo-", CreationTimestamp: metav1.Time{Time: time.Now().Add(-2 * time.Hour)}}, Status: wfv1.WorkflowStatus{FinishedAt: metav1.Time{Time: time.Now().Add(-2 * time.Hour)}}},
wfv1.Workflow{ObjectMeta: metav1.ObjectMeta{Name: "bar-", CreationTimestamp: metav1.Time{Time: time.Now()}}},
wfv1.Workflow{ObjectMeta: metav1.ObjectMeta{
Name: "baz-",
CreationTimestamp: metav1.Time{Time: time.Now().Add(-2 * time.Hour)},
Labels: map[string]string{common.LabelKeyPreviousWorkflowName: "foo-"},
}},
}}, nil)
workflows, err := listWorkflows(context.Background(), c, flags)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
workflows, err := listWorkflows(ctx, c, flags)
return workflows, err
}
func listEmpty(listOptions *metav1.ListOptions, flags listFlags) (wfv1.Workflows, error) {
c := &workflowmocks.WorkflowServiceClient{}
c.On("ListWorkflows", mock.Anything, &workflow.WorkflowListRequest{ListOptions: listOptions, Fields: defaultFields}).Return(&wfv1.WorkflowList{Items: wfv1.Workflows{}}, nil)
workflows, err := listWorkflows(context.Background(), c, flags)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
workflows, err := listWorkflows(ctx, c, flags)
return workflows, err
}

View File

@ -13,6 +13,7 @@ import (
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
workflowmocks "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow/mocks"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
func Test_resubmitWorkflows(t *testing.T) {
@ -25,7 +26,9 @@ func Test_resubmitWorkflows(t *testing.T) {
c.On("ResubmitWorkflow", mock.Anything, mock.Anything).Return(&wfv1.Workflow{}, nil)
err := resubmitWorkflows(context.Background(), c, resubmitOpts, cliSubmitOpts, []string{"foo", "bar"})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := resubmitWorkflows(ctx, c, resubmitOpts, cliSubmitOpts, []string{"foo", "bar"})
c.AssertNumberOfCalls(t, "ResubmitWorkflow", 2)
require.NoError(t, err)
@ -41,7 +44,9 @@ func Test_resubmitWorkflows(t *testing.T) {
c.On("ResubmitWorkflow", mock.Anything, mock.Anything).Return(&wfv1.Workflow{}, nil)
err := resubmitWorkflows(context.Background(), c, resubmitOpts, cliSubmitOpts, []string{"foo"})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := resubmitWorkflows(ctx, c, resubmitOpts, cliSubmitOpts, []string{"foo"})
c.AssertNumberOfCalls(t, "ResubmitWorkflow", 1)
c.AssertCalled(t, "ResubmitWorkflow", mock.Anything, &workflowpkg.WorkflowResubmitRequest{
Name: "foo",
@ -77,7 +82,9 @@ func Test_resubmitWorkflows(t *testing.T) {
c.On("ListWorkflows", mock.Anything, wfListReq).Return(wfList, nil)
c.On("ResubmitWorkflow", mock.Anything, mock.Anything).Return(&wfv1.Workflow{}, nil)
err := resubmitWorkflows(context.Background(), c, resubmitOpts, cliSubmitOpts, []string{})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := resubmitWorkflows(ctx, c, resubmitOpts, cliSubmitOpts, []string{})
c.AssertNumberOfCalls(t, "ResubmitWorkflow", 3)
for _, wf := range wfList.Items {
@ -118,7 +125,9 @@ func Test_resubmitWorkflows(t *testing.T) {
c.On("ResubmitWorkflow", mock.Anything, mock.Anything).Return(&wfv1.Workflow{}, nil)
err := resubmitWorkflows(context.Background(), c, resubmitOpts, cliSubmitOpts, []string{"foo", "qux"})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := resubmitWorkflows(ctx, c, resubmitOpts, cliSubmitOpts, []string{"foo", "qux"})
// after de-duplication, there will be 4 workflows to resubmit
c.AssertNumberOfCalls(t, "ResubmitWorkflow", 4)
@ -150,7 +159,9 @@ func Test_resubmitWorkflows(t *testing.T) {
}
cliSubmitOpts := common.CliSubmitOpts{}
c.On("ListWorkflows", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock error"))
err := resubmitWorkflows(context.Background(), c, resubmitOpts, cliSubmitOpts, []string{})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := resubmitWorkflows(ctx, c, resubmitOpts, cliSubmitOpts, []string{})
require.Errorf(t, err, "mock error")
})
@ -161,7 +172,9 @@ func Test_resubmitWorkflows(t *testing.T) {
}
cliSubmitOpts := common.CliSubmitOpts{}
c.On("ResubmitWorkflow", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock error"))
err := resubmitWorkflows(context.Background(), c, resubmitOpts, cliSubmitOpts, []string{"foo"})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := resubmitWorkflows(ctx, c, resubmitOpts, cliSubmitOpts, []string{"foo"})
require.Errorf(t, err, "mock error")
})
}

View File

@ -13,6 +13,7 @@ import (
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
type retryOps struct {
@ -86,6 +87,10 @@ func NewRetryCommand() *cobra.Command {
if err != nil {
return err
}
if log := logging.GetLoggerFromContext(ctx); log == nil {
log = logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat())
ctx = logging.WithLogger(ctx, log)
}
serviceClient := apiClient.NewWorkflowServiceClient()
retryOpts.namespace = client.Namespace()

View File

@ -13,6 +13,7 @@ import (
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
workflowmocks "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow/mocks"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
func Test_retryWorkflows(t *testing.T) {
@ -24,8 +25,9 @@ func Test_retryWorkflows(t *testing.T) {
cliSubmitOpts := common.CliSubmitOpts{}
c.On("RetryWorkflow", mock.Anything, mock.Anything).Return(&wfv1.Workflow{}, nil)
err := retryWorkflows(context.Background(), c, retryOpts, cliSubmitOpts, []string{"foo", "bar"})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := retryWorkflows(ctx, c, retryOpts, cliSubmitOpts, []string{"foo", "bar"})
c.AssertNumberOfCalls(t, "RetryWorkflow", 2)
require.NoError(t, err)
@ -56,7 +58,9 @@ func Test_retryWorkflows(t *testing.T) {
c.On("ListWorkflows", mock.Anything, wfListReq).Return(wfList, nil)
c.On("RetryWorkflow", mock.Anything, mock.Anything).Return(&wfv1.Workflow{}, nil)
err := retryWorkflows(context.Background(), c, retryOpts, cliSubmitOpts, []string{})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := retryWorkflows(ctx, c, retryOpts, cliSubmitOpts, []string{})
c.AssertNumberOfCalls(t, "RetryWorkflow", 3)
for _, wf := range wfList.Items {
@ -98,7 +102,9 @@ func Test_retryWorkflows(t *testing.T) {
c.On("RetryWorkflow", mock.Anything, mock.Anything).Return(&wfv1.Workflow{}, nil)
err := retryWorkflows(context.Background(), c, retryOpts, cliSubmitOpts, []string{"foo", "qux"})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := retryWorkflows(ctx, c, retryOpts, cliSubmitOpts, []string{"foo", "qux"})
// after de-duplication, there will be 4 workflows to retry
c.AssertNumberOfCalls(t, "RetryWorkflow", 4)
@ -132,7 +138,9 @@ func Test_retryWorkflows(t *testing.T) {
}
cliSubmitOpts := common.CliSubmitOpts{}
c.On("ListWorkflows", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock error"))
err := retryWorkflows(context.Background(), c, retryOpts, cliSubmitOpts, []string{})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := retryWorkflows(ctx, c, retryOpts, cliSubmitOpts, []string{})
require.Errorf(t, err, "mock error")
})
@ -143,7 +151,9 @@ func Test_retryWorkflows(t *testing.T) {
}
cliSubmitOpts := common.CliSubmitOpts{}
c.On("RetryWorkflow", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock error"))
err := retryWorkflows(context.Background(), c, retryOpts, cliSubmitOpts, []string{"foo"})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := retryWorkflows(ctx, c, retryOpts, cliSubmitOpts, []string{"foo"})
require.Errorf(t, err, "mock error")
})
}

View File

@ -1,10 +1,10 @@
package commands
import (
"context"
"fmt"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
@ -19,6 +19,7 @@ import (
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/template"
cmdutil "github.com/argoproj/argo-workflows/v3/util/cmd"
grpcutil "github.com/argoproj/argo-workflows/v3/util/grpc"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
const (
@ -92,7 +93,6 @@ If your server is behind an ingress with a path (running "argo server --base-hre
return cmd.Help()
},
}
command.AddCommand(NewCompletionCommand())
command.AddCommand(NewDeleteCommand())
command.AddCommand(NewGetCommand())
@ -126,16 +126,30 @@ If your server is behind an ingress with a path (running "argo server --base-hre
var glogLevel int
var verbose bool
command.PersistentPostRun = func(cmd *cobra.Command, args []string) {
cmdutil.PrintVersionMismatchWarning(argo.GetVersion(), grpcutil.LastSeenServerVersion)
cmdutil.PrintVersionMismatchWarning(cmd.Context(), argo.GetVersion(), grpcutil.LastSeenServerVersion)
}
command.PersistentPreRun = func(cmd *cobra.Command, args []string) {
ctx := cmd.Context()
if ctx == nil {
ctx = logging.WithLogger(context.Background(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
}
if verbose {
logLevel = "debug"
glogLevel = 6
}
cmdutil.SetLogLevel(logLevel)
cmdutil.SetGLogLevel(glogLevel)
log.WithField("version", argo.GetVersion()).Debug("CLI version")
parsedLogLevel, err := logging.ParseLevel(logLevel)
if err != nil {
panic("")
}
log := logging.NewSlogLogger(parsedLogLevel, logging.GetGlobalFormat())
ctx = logging.WithLogger(ctx, log)
cmd.SetContext(ctx)
command.SetContext(ctx)
log.WithField("version", argo.GetVersion()).Debug(ctx, "CLI version")
// Disable printing of usage string on errors, except for argument validation errors
// (i.e. when the "Args" function returns an error).
@ -148,6 +162,12 @@ If your server is behind an ingress with a path (running "argo server --base-hre
command.PersistentFlags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
command.PersistentFlags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
command.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Enable verbose logging, i.e. --loglevel debug")
cctx := command.Context()
if cctx == nil {
cctx = logging.WithLogger(context.Background(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
command.SetContext(cctx)
}
log := logging.GetLoggerFromContext(cctx)
// set-up env vars for the CLI such that ARGO_* env vars can be used instead of flags
viper.AutomaticEnv()
@ -155,14 +175,14 @@ If your server is behind an ingress with a path (running "argo server --base-hre
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_"))
// bind flags to env vars (https://github.com/spf13/viper/tree/v1.17.0#working-with-flags)
if err := viper.BindPFlags(command.PersistentFlags()); err != nil {
log.Fatal(err)
log.WithError(err).WithFatal().Error(cctx, "Failed to bind flags to env vars")
}
// workaround for handling required flags (https://github.com/spf13/viper/issues/397#issuecomment-544272457)
command.PersistentFlags().VisitAll(func(f *pflag.Flag) {
if !f.Changed && viper.IsSet(f.Name) {
val := viper.Get(f.Name)
if err := command.PersistentFlags().Set(f.Name, fmt.Sprintf("%v", val)); err != nil {
log.Fatal(err)
log.WithError(err).WithFatal().Error(cctx, "Failed to set flag")
}
}
})

View File

@ -10,8 +10,8 @@ import (
events "github.com/argoproj/argo-events/pkg/client/clientset/versioned"
"github.com/argoproj/pkg/stats"
"github.com/pkg/browser"
log "github.com/sirupsen/logrus"
"github.com/skratchdot/open-golang/open"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
@ -29,6 +29,7 @@ import (
"github.com/argoproj/argo-workflows/v3/server/types"
"github.com/argoproj/argo-workflows/v3/util/cmd"
"github.com/argoproj/argo-workflows/v3/util/help"
"github.com/argoproj/argo-workflows/v3/util/logging"
pprofutil "github.com/argoproj/argo-workflows/v3/util/pprof"
tlsutils "github.com/argoproj/argo-workflows/v3/util/tls"
"github.com/argoproj/argo-workflows/v3/workflow/common"
@ -65,9 +66,14 @@ func NewServerCommand() *cobra.Command {
See %s`, help.ArgoServer()),
RunE: func(c *cobra.Command, args []string) error {
cmd.SetLogFormatter(logFormat)
ctx := c.Context()
if ctx == nil {
ctx = logging.WithLogger(context.Background(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
c.SetContext(ctx)
}
stats.RegisterStackDumper()
stats.StartStatsTicker(5 * time.Minute)
pprofutil.Init()
pprofutil.Init(ctx)
config, err := client.GetConfig().ClientConfig()
if err != nil {
@ -85,7 +91,7 @@ See %s`, help.ArgoServer()),
Kubernetes: kubernetes.NewForConfigOrDie(config),
Workflow: wfclientset.NewForConfigOrDie(config),
}
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if !namespaced && managedNamespace != "" {
@ -171,7 +177,7 @@ See %s`, help.ArgoServer()),
if enableOpenBrowser {
browserOpenFunc = func(url string) {
log.Infof("Argo UI is available at %s", url)
err := open.Run(url)
err := browser.OpenURL(url)
if err != nil {
log.Warnf("Unable to open the browser. %v", err)
}

View File

@ -12,6 +12,7 @@ import (
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
workflowmocks "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow/mocks"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
func Test_stopWorkflows(t *testing.T) {
@ -21,7 +22,9 @@ func Test_stopWorkflows(t *testing.T) {
dryRun: true,
}
err := stopWorkflows(context.Background(), c, stopArgs, []string{"foo", "bar"})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := stopWorkflows(ctx, c, stopArgs, []string{"foo", "bar"})
c.AssertNotCalled(t, "StopWorkflow")
require.NoError(t, err)
@ -35,7 +38,9 @@ func Test_stopWorkflows(t *testing.T) {
c.On("StopWorkflow", mock.Anything, mock.Anything).Return(&wfv1.Workflow{}, nil)
err := stopWorkflows(context.Background(), c, stopArgs, []string{"foo", "bar"})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := stopWorkflows(ctx, c, stopArgs, []string{"foo", "bar"})
c.AssertNumberOfCalls(t, "StopWorkflow", 2)
require.NoError(t, err)
@ -64,7 +69,9 @@ func Test_stopWorkflows(t *testing.T) {
c.On("StopWorkflow", mock.Anything, mock.Anything).Return(&wfv1.Workflow{}, nil)
err := stopWorkflows(context.Background(), c, stopArgs, []string{})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := stopWorkflows(ctx, c, stopArgs, []string{})
c.AssertNumberOfCalls(t, "StopWorkflow", 3)
require.NoError(t, err)
@ -93,7 +100,9 @@ func Test_stopWorkflows(t *testing.T) {
c.On("StopWorkflow", mock.Anything, mock.Anything).Return(&wfv1.Workflow{}, nil)
err := stopWorkflows(context.Background(), c, stopArgs, []string{"foo", "qux"})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := stopWorkflows(ctx, c, stopArgs, []string{"foo", "qux"})
// after de-duplication, there will be 4 workflows to stop
c.AssertNumberOfCalls(t, "StopWorkflow", 4)
@ -107,7 +116,9 @@ func Test_stopWorkflows(t *testing.T) {
labelSelector: "custom-label=true",
}
c.On("ListWorkflows", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock error"))
err := stopWorkflows(context.Background(), c, stopArgs, []string{})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := stopWorkflows(ctx, c, stopArgs, []string{})
require.Errorf(t, err, "mock error")
})
@ -117,7 +128,9 @@ func Test_stopWorkflows(t *testing.T) {
namespace: "argo",
}
c.On("StopWorkflow", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("mock error"))
err := stopWorkflows(context.Background(), c, stopArgs, []string{"foo"})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := stopWorkflows(ctx, c, stopArgs, []string{"foo"})
require.Errorf(t, err, "mock error")
})
}

View File

@ -13,17 +13,20 @@ import (
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
workflowmocks "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow/mocks"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
func Test_submitWorkflows(t *testing.T) {
t.Run("Submit workflow with invalid options", func(t *testing.T) {
c := &workflowmocks.WorkflowServiceClient{}
err := submitWorkflows(context.TODO(), c, "argo", []wfv1.Workflow{}, &wfv1.SubmitOpts{}, &common.CliSubmitOpts{Watch: true, Wait: true})
ctx := logging.WithLogger(context.TODO(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := submitWorkflows(ctx, c, "argo", []wfv1.Workflow{}, &wfv1.SubmitOpts{}, &common.CliSubmitOpts{Watch: true, Wait: true})
require.Error(t, err, "--wait cannot be combined with --watch")
})
t.Run("Submit without providing workflow", func(t *testing.T) {
c := &workflowmocks.WorkflowServiceClient{}
err := submitWorkflows(context.TODO(), c, "argo", []wfv1.Workflow{}, &wfv1.SubmitOpts{}, &common.CliSubmitOpts{})
ctx := logging.WithLogger(context.TODO(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := submitWorkflows(ctx, c, "argo", []wfv1.Workflow{}, &wfv1.SubmitOpts{}, &common.CliSubmitOpts{})
require.Error(t, err, "No Workflow found in given files")
})
t.Run("Submit workflow with priority set in spec", func(t *testing.T) {
@ -32,7 +35,8 @@ func Test_submitWorkflows(t *testing.T) {
workflow := wfv1.Workflow{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "argo"}, Spec: wfv1.WorkflowSpec{Priority: &priority}}
c.On("CreateWorkflow", mock.Anything, mock.Anything).Return(&wfv1.Workflow{}, nil)
err := submitWorkflows(context.TODO(), c, "argo", []wfv1.Workflow{workflow}, &wfv1.SubmitOpts{}, &common.CliSubmitOpts{})
ctx := logging.WithLogger(context.TODO(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := submitWorkflows(ctx, c, "argo", []wfv1.Workflow{workflow}, &wfv1.SubmitOpts{}, &common.CliSubmitOpts{})
require.NoError(t, err)
arg := c.Mock.Calls[0].Arguments[1]
@ -53,7 +57,8 @@ func Test_submitWorkflows(t *testing.T) {
cliSubmitOpts := common.CliSubmitOpts{Priority: &priorityCLI}
c.On("CreateWorkflow", mock.Anything, mock.Anything).Return(&wfv1.Workflow{}, nil)
err := submitWorkflows(context.TODO(), c, "argo", []wfv1.Workflow{workflow}, &wfv1.SubmitOpts{}, &cliSubmitOpts)
ctx := logging.WithLogger(context.TODO(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
err := submitWorkflows(ctx, c, "argo", []wfv1.Workflow{workflow}, &wfv1.SubmitOpts{}, &cliSubmitOpts)
require.NoError(t, err)
arg := c.Mock.Calls[0].Arguments[1]

View File

@ -14,6 +14,13 @@ func NewGetCommand() *cobra.Command {
command := &cobra.Command{
Use: "get WORKFLOW_TEMPLATE...",
Short: "display details about a workflow template",
Example: `
# Get information about a workflow template by its name:
argo template get my-template
# Get information about a workflow template in YAML format:
argo template get my-template -o yaml
`,
RunE: func(cmd *cobra.Command, args []string) error {
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {

View File

@ -119,7 +119,7 @@ func Lint(ctx context.Context, opts *LintOptions) (*LintResults, error) {
}
for _, file := range opts.Files {
err := fileutil.WalkManifests(file, func(path string, data []byte) error {
err := fileutil.WalkManifests(ctx, file, func(path string, data []byte) error {
res := lintData(ctx, path, data, opts)
results.Results = append(results.Results, res)

View File

@ -18,6 +18,7 @@ import (
wftemplatemocks "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowtemplate/mocks"
wf "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow"
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
var lintFileData = []byte(`
@ -80,7 +81,9 @@ func TestLintFile(t *testing.T) {
wftServiceSclientMock := &wftemplatemocks.WorkflowTemplateServiceClient{}
wfServiceClientMock.On("LintWorkflow", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("lint error"))
res, err := Lint(context.Background(), &LintOptions{
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
res, err := Lint(ctx, &LintOptions{
Files: []string{file.Name()},
ServiceClients: ServiceClients{
WorkflowsClient: wfServiceClientMock,
@ -110,7 +113,9 @@ func TestLintMultipleKinds(t *testing.T) {
wfServiceClientMock.On("LintWorkflow", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("lint error"))
wftServiceSclientMock.On("LintWorkflowTemplate", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("lint error"))
res, err := Lint(context.Background(), &LintOptions{
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
res, err := Lint(ctx, &LintOptions{
Files: []string{file.Name()},
ServiceClients: ServiceClients{
WorkflowsClient: wfServiceClientMock,
@ -154,7 +159,9 @@ func TestLintWithOutput(t *testing.T) {
mw := &mocks.MockWriter{}
mw.On("Write", mock.Anything).Return(0, nil)
res, err := Lint(context.Background(), &LintOptions{
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
res, err := Lint(ctx, &LintOptions{
Files: []string{file.Name(), "-"},
ServiceClients: ServiceClients{
WorkflowsClient: wfServiceClientMock,
@ -197,7 +204,9 @@ func TestLintStdin(t *testing.T) {
wfServiceClientMock.On("LintWorkflow", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("lint error"))
wftServiceSclientMock.On("LintWorkflowTemplate", mock.Anything, mock.Anything).Return(nil, fmt.Errorf("lint error"))
res, err := Lint(context.Background(), &LintOptions{
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
res, err := Lint(ctx, &LintOptions{
Files: []string{"-"},
ServiceClients: ServiceClients{
WorkflowsClient: wfServiceClientMock,
@ -235,7 +244,9 @@ func TestLintDeviceFile(t *testing.T) {
deviceFileName := fmt.Sprintf("/dev/fd/%d", fd)
res, err := Lint(context.Background(), &LintOptions{
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
res, err := Lint(ctx, &LintOptions{
Files: []string{deviceFileName},
ServiceClients: ServiceClients{
WorkflowsClient: wfServiceClientMock,
@ -295,7 +306,9 @@ func TestGetFormatter(t *testing.T) {
}
}
r, err := Lint(context.Background(), &LintOptions{Formatter: fmtr})
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
r, err := Lint(ctx, &LintOptions{Formatter: fmtr})
require.NoError(t, err)
assert.Equal(t, test.expectedOutput, r.Msg())
})

View File

@ -7,6 +7,8 @@ import (
"os"
"path/filepath"
"github.com/argoproj/argo-workflows/v3/util/logging"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/rand"
@ -77,7 +79,17 @@ func NewAgentMainCommand() *cobra.Command {
return &cobra.Command{
Use: "main",
RunE: func(cmd *cobra.Command, args []string) error {
return initAgentExecutor().Agent(context.Background())
if cmd.Context() == nil {
ctx := context.Background()
cmd.SetContext(ctx)
}
log := logging.GetLoggerFromContext(cmd.Context())
if log == nil {
log = logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat())
ctx := logging.WithLogger(cmd.Context(), log)
cmd.SetContext(ctx)
}
return initAgentExecutor().Agent(cmd.Context())
},
}
}
@ -90,7 +102,7 @@ func initAgentExecutor() *executor.AgentExecutor {
config = restclient.AddUserAgent(config, fmt.Sprintf("argo-workflows/%s argo-executor/%s", version.Version, "agent Executor"))
logs.AddK8SLogTransportWrapper(config) // log all requests; we typically do < 5 per pod, so this will surface problems
logs.AddK8SLogTransportWrapper(logging.WithLogger(context.TODO(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat())), config) // log all requests; we typically do < 5 per pod, so this will surface problems
namespace, _, err := clientConfig.Namespace()
checkErr(err)

View File

@ -54,7 +54,7 @@ func NewArtifactDeleteCommand() *cobra.Command {
func deleteArtifacts(labelSelector string, ctx context.Context, artifactGCTaskInterface wfv1alpha1.WorkflowArtifactGCTaskInterface) error {
taskList, err := artifactGCTaskInterface.List(context.Background(), metav1.ListOptions{LabelSelector: labelSelector})
taskList, err := artifactGCTaskInterface.List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
if err != nil {
return err
}
@ -84,8 +84,8 @@ func deleteArtifacts(labelSelector string, ctx context.Context, artifactGCTaskIn
return err
}
err = waitutil.Backoff(retry.DefaultRetry, func() (bool, error) {
err = drv.Delete(&artifact)
err = waitutil.Backoff(retry.DefaultRetry(ctx), func() (bool, error) {
err = drv.Delete(ctx, &artifact)
if err != nil {
errString := err.Error()
artResultNodeStatus.ArtifactResults[artifact.Name] = v1alpha1.ArtifactResult{Name: artifact.Name, Success: false, Error: &errString}
@ -102,7 +102,7 @@ func deleteArtifacts(labelSelector string, ctx context.Context, artifactGCTaskIn
if err != nil {
return err
}
_, err = artifactGCTaskInterface.Patch(context.Background(), task.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "status")
_, err = artifactGCTaskInterface.Patch(ctx, task.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "status")
if err != nil {
return err
}

View File

@ -5,6 +5,8 @@ import (
"fmt"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
func NewDataCommand() *cobra.Command {
@ -28,6 +30,7 @@ func execData(ctx context.Context) error {
// Don't allow cancellation to impact capture of results, parameters, artifacts, or defers.
bgCtx := context.Background()
bgCtx = logging.WithLogger(bgCtx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
// Create a new empty (placeholder) task result with LabelKeyReportOutputsCompleted set to false.
wfExecutor.InitializeOutput(bgCtx)
defer wfExecutor.HandleError(bgCtx)

View File

@ -19,11 +19,16 @@ import (
"github.com/spf13/cobra"
"k8s.io/client-go/util/retry"
"github.com/argoproj/argo-workflows/v3/workflow/executor"
"github.com/argoproj/argo-workflows/v3/workflow/executor/emissary"
"github.com/argoproj/argo-workflows/v3/util/archive"
"github.com/argoproj/argo-workflows/v3/util/errors"
"github.com/argoproj/argo-workflows/v3/util/logging"
"github.com/argoproj/argo-workflows/v3/workflow/executor/osspecific"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/archive"
"github.com/argoproj/argo-workflows/v3/workflow/common"
)
@ -154,7 +159,20 @@ func NewEmissaryCommand() *cobra.Command {
}
}()
pid := command.Process.Pid
ctx, cancel := context.WithCancel(context.Background())
cmdCtx := cmd.Context()
if cmdCtx == nil {
cmdCtx = context.Background()
cmd.SetContext(cmdCtx)
}
log := logging.GetLoggerFromContext(cmdCtx)
if log == nil {
log = logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat())
cmdCtx = logging.WithLogger(cmdCtx, log)
cmd.SetContext(cmdCtx)
}
ctx, cancel := context.WithCancel(cmdCtx)
defer cancel()
go func() {
for {
@ -172,6 +190,32 @@ func NewEmissaryCommand() *cobra.Command {
}
}
}()
for _, sidecarName := range template.GetSidecarNames() {
if sidecarName == containerName {
em, err := emissary.New()
if err != nil {
return fmt.Errorf("failed to create emissary: %w", err)
}
go func() {
mainContainerNames := template.GetMainContainerNames()
err = em.Wait(ctx, mainContainerNames)
if err != nil {
logger.WithError(err).Errorf("failed to wait for main container(s) %v", mainContainerNames)
}
logger.Infof("main container(s) %v exited, terminating container %s", mainContainerNames, containerName)
err = em.Kill(ctx, []string{containerName}, executor.GetTerminationGracePeriodDuration())
if err != nil {
logger.WithError(err).Errorf("failed to terminate/kill container %s", containerName)
}
}()
break
}
}
return osspecific.Wait(command.Process)
})

View File

@ -6,6 +6,8 @@ import (
"github.com/argoproj/pkg/stats"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
func NewInitCommand() *cobra.Command {
@ -13,7 +15,17 @@ func NewInitCommand() *cobra.Command {
Use: "init",
Short: "Load artifacts",
RunE: func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
ctx := cmd.Context()
if ctx == nil {
ctx = context.Background()
cmd.SetContext(ctx)
}
log := logging.GetLoggerFromContext(ctx)
if log == nil {
log = logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat())
ctx = logging.WithLogger(ctx, log)
cmd.SetContext(ctx)
}
err := loadArtifacts(ctx)
if err != nil {
return fmt.Errorf("%+v", err)

View File

@ -6,6 +6,7 @@ import (
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/util/logging"
"github.com/argoproj/argo-workflows/v3/workflow/common"
)
@ -31,6 +32,7 @@ func execResource(ctx context.Context, action string) error {
// Don't allow cancellation to impact capture of results, parameters, artifacts, or defers.
bgCtx := context.Background()
bgCtx = logging.WithLogger(bgCtx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
wfExecutor.InitializeOutput(bgCtx)
defer wfExecutor.HandleError(bgCtx)
@ -59,7 +61,7 @@ func execResource(ctx context.Context, action string) error {
}
}
}
resourceNamespace, resourceName, selfLink, err := wfExecutor.ExecResource(
resourceNamespace, resourceName, selfLink, err := wfExecutor.ExecResource(ctx,
action, manifestPath, wfExecutor.Template.Resource.Flags,
)
if err != nil {

View File

@ -1,6 +1,7 @@
package commands
import (
"context"
"encoding/json"
"fmt"
"os"
@ -21,6 +22,7 @@ import (
"github.com/argoproj/argo-workflows/v3/util"
"github.com/argoproj/argo-workflows/v3/util/cmd"
kubecli "github.com/argoproj/argo-workflows/v3/util/kube/cli"
"github.com/argoproj/argo-workflows/v3/util/logging"
"github.com/argoproj/argo-workflows/v3/util/logs"
"github.com/argoproj/argo-workflows/v3/workflow/common"
"github.com/argoproj/argo-workflows/v3/workflow/executor"
@ -90,7 +92,9 @@ func initExecutor() *executor.WorkflowExecutor {
checkErr(err)
config = restclient.AddUserAgent(config, fmt.Sprintf("argo-workflows/%s argo-executor", version.Version))
logs.AddK8SLogTransportWrapper(config) // log all requests; we typically do < 5 per pod, so this will surface problems
bgCtx := context.Background()
bgCtx = logging.WithLogger(bgCtx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
logs.AddK8SLogTransportWrapper(bgCtx, config) // log all requests; we typically do < 5 per pod, so this will surface problems
namespace, _, err := clientConfig.Namespace()
checkErr(err)

View File

@ -7,6 +7,8 @@ import (
"github.com/argoproj/pkg/stats"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
func NewWaitCommand() *cobra.Command {
@ -30,6 +32,7 @@ func waitContainer(ctx context.Context) error {
// Don't allow cancellation to impact capture of results, parameters, artifacts, or defers.
bgCtx := context.Background()
bgCtx = logging.WithLogger(bgCtx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
defer wfExecutor.HandleError(bgCtx) // Must be placed at the bottom of defers stack.
defer wfExecutor.FinalizeOutput(bgCtx) // Ensures the LabelKeyReportOutputsCompleted is set to true.

View File

@ -13,10 +13,11 @@ import (
"github.com/argoproj/argo-workflows/v3/cmd/argoexec/commands"
"github.com/argoproj/argo-workflows/v3/util"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
func main() {
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM)
ctx, stop := signal.NotifyContext(logging.WithLogger(context.Background(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat())), syscall.SIGTERM)
defer stop()
err := commands.NewRootCommand().ExecuteContext(ctx)
if err != nil {

View File

@ -3,6 +3,7 @@ package main
import (
"context"
"fmt"
"log"
"net/http"
"os"
"strings"
@ -10,7 +11,6 @@ import (
"time"
"github.com/argoproj/pkg/stats"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
@ -30,6 +30,7 @@ import (
cmdutil "github.com/argoproj/argo-workflows/v3/util/cmd"
"github.com/argoproj/argo-workflows/v3/util/env"
kubecli "github.com/argoproj/argo-workflows/v3/util/kube/cli"
"github.com/argoproj/argo-workflows/v3/util/logging"
"github.com/argoproj/argo-workflows/v3/util/logs"
pprofutil "github.com/argoproj/argo-workflows/v3/util/pprof"
"github.com/argoproj/argo-workflows/v3/workflow/common"
@ -69,29 +70,37 @@ func NewRootCommand() *cobra.Command {
Use: CLIName,
Short: "workflow-controller is the controller to operate on workflows",
RunE: func(c *cobra.Command, args []string) error {
defer runtimeutil.HandleCrashWithContext(context.Background(), runtimeutil.PanicHandlers...)
defer runtimeutil.HandleCrashWithContext(c.Context(), runtimeutil.PanicHandlers...)
log := logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat())
if c.Context() == nil {
ctx := context.Background()
ctx = logging.WithLogger(ctx, log)
c.SetContext(ctx)
}
cmdutil.SetLogLevel(logLevel)
cmdutil.SetGLogLevel(glogLevel)
cmdutil.SetLogFormatter(logFormat)
stats.RegisterStackDumper()
stats.StartStatsTicker(5 * time.Minute)
pprofutil.Init()
pprofutil.Init(c.Context())
config, err := clientConfig.ClientConfig()
if err != nil {
return err
}
// start a controller on instances of our custom resource
ctx, cancel := context.WithCancel(context.Background())
ctx, cancel := context.WithCancel(c.Context())
defer cancel()
ctx = logging.WithLogger(ctx, log)
version := argo.GetVersion()
config = restclient.AddUserAgent(config, fmt.Sprintf("argo-workflows/%s argo-controller", version.Version))
config.Burst = burst
config.QPS = qps
logs.AddK8SLogTransportWrapper(config)
logs.AddK8SLogTransportWrapper(c.Context(), config)
metrics.AddMetricsTransportWrapper(ctx, config)
namespace, _, err := clientConfig.Namespace()
@ -103,7 +112,7 @@ func NewRootCommand() *cobra.Command {
wfclientset := wfclientset.NewForConfigOrDie(config)
if !namespaced && managedNamespace != "" {
log.Warn("ignoring --managed-namespace because --namespaced is false")
log.Warn(ctx, "ignoring --managed-namespace because --namespaced is false")
managedNamespace = ""
}
if namespaced && managedNamespace == "" {
@ -117,15 +126,15 @@ func NewRootCommand() *cobra.Command {
leaderElectionOff := os.Getenv("LEADER_ELECTION_DISABLE")
if leaderElectionOff == "true" {
log.Info("Leader election is turned off. Running in single-instance mode")
log.WithField("id", "single-instance").Info("starting leading")
log.Info(ctx, "Leader election is turned off. Running in single-instance mode")
log.WithField("id", "single-instance").Info(ctx, "starting leading")
go wfController.Run(ctx, workflowWorkers, workflowTTLWorkers, podCleanupWorkers, cronWorkflowWorkers, workflowArchiveWorkers)
go wfController.RunPrometheusServer(ctx, false)
} else {
nodeID, ok := os.LookupEnv("LEADER_ELECTION_IDENTITY")
if !ok {
log.Fatal("LEADER_ELECTION_IDENTITY must be set so that the workflow controllers can elect a leader")
log.WithFatal().Error(ctx, "LEADER_ELECTION_IDENTITY must be set so that the workflow controllers can elect a leader")
}
leaderName := "workflow-controller"
@ -135,7 +144,7 @@ func NewRootCommand() *cobra.Command {
// for controlling the dummy metrics server
var wg sync.WaitGroup
dummyCtx, dummyCancel := context.WithCancel(context.Background())
dummyCtx, dummyCancel := context.WithCancel(c.Context())
defer dummyCancel()
wg.Add(1)
@ -150,9 +159,9 @@ func NewRootCommand() *cobra.Command {
LockConfig: resourcelock.ResourceLockConfig{Identity: nodeID, EventRecorder: events.NewEventRecorderManager(kubeclientset).Get(namespace)},
},
ReleaseOnCancel: false,
LeaseDuration: env.LookupEnvDurationOr("LEADER_ELECTION_LEASE_DURATION", 15*time.Second),
RenewDeadline: env.LookupEnvDurationOr("LEADER_ELECTION_RENEW_DEADLINE", 10*time.Second),
RetryPeriod: env.LookupEnvDurationOr("LEADER_ELECTION_RETRY_PERIOD", 5*time.Second),
LeaseDuration: env.LookupEnvDurationOr(c.Context(), "LEADER_ELECTION_LEASE_DURATION", 15*time.Second),
RenewDeadline: env.LookupEnvDurationOr(c.Context(), "LEADER_ELECTION_RENEW_DEADLINE", 10*time.Second),
RetryPeriod: env.LookupEnvDurationOr(c.Context(), "LEADER_ELECTION_RETRY_PERIOD", 5*time.Second),
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx context.Context) {
dummyCancel()
@ -165,13 +174,13 @@ func NewRootCommand() *cobra.Command {
}()
},
OnStoppedLeading: func() {
log.WithField("id", nodeID).Info("stopped leading")
log.WithField("id", nodeID).Info(ctx, "stopped leading")
cancel()
wg.Wait()
go wfController.RunPrometheusServer(dummyCtx, true)
},
OnNewLeader: func(identity string) {
log.WithField("leader", identity).Info("new leader")
log.WithField("leader", identity).Info(ctx, "new leader")
},
},
})
@ -180,7 +189,7 @@ func NewRootCommand() *cobra.Command {
http.HandleFunc("/healthz", wfController.Healthz)
go func() {
log.Println(http.ListenAndServe(":6060", nil))
log.Error(ctx, http.ListenAndServe(":6060", nil).Error())
}()
<-ctx.Done()
@ -206,6 +215,13 @@ func NewRootCommand() *cobra.Command {
command.Flags().BoolVar(&namespaced, "namespaced", false, "run workflow-controller as namespaced mode")
command.Flags().StringVar(&managedNamespace, "managed-namespace", "", "namespace that workflow-controller watches, default to the installation namespace")
command.Flags().BoolVar(&executorPlugins, "executor-plugins", false, "enable executor plugins")
ctx := command.Context()
if ctx == nil {
ctx = context.Background()
log := logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat())
ctx = logging.WithLogger(ctx, log)
command.SetContext(ctx)
}
// set-up env vars for the CLI such that ARGO_* env vars can be used instead of flags
viper.AutomaticEnv()
@ -228,17 +244,6 @@ func NewRootCommand() *cobra.Command {
return &command
}
func init() {
cobra.OnInitialize(initConfig)
}
func initConfig() {
log.SetFormatter(&log.TextFormatter{
TimestampFormat: "2006-01-02T15:04:05.000Z",
FullTimestamp: true,
})
}
func main() {
if err := NewRootCommand().Execute(); err != nil {
fmt.Println(err)

View File

@ -16,13 +16,15 @@ import (
)
type ResourceRateLimit struct {
// Limit is the maximum rate at which pods can be created
Limit float64 `json:"limit"`
// Burst allows temporary spikes above the limit
Burst int `json:"burst"`
}
// Config contains the configuration settings for the workflow controller
// Config contains the root of the configuration settings for the workflow controller
// as read from the ConfigMap called workflow-controller-configmap
type Config struct {
// NodeEvents configures how node events are emitted
NodeEvents NodeEvents `json:"nodeEvents,omitempty"`
@ -191,22 +193,31 @@ type KubeConfig struct {
MountPath string `json:"mountPath,omitempty"`
}
// DBConfig contains database configuration settings
type DBConfig struct {
// PostgreSQL configuration for PostgreSQL database, don't use MySQL at the same time
PostgreSQL *PostgreSQLConfig `json:"postgresql,omitempty"`
// MySQL configuration for MySQL database, don't use PostgreSQL at the same time
MySQL *MySQLConfig `json:"mysql,omitempty"`
// Pooled connection settings for all types of database connections
ConnectionPool *ConnectionPool `json:"connectionPool,omitempty"`
}
// PersistConfig contains workflow persistence configuration
type PersistConfig struct {
DBConfig
// NodeStatusOffload saves node status only to the persistence DB to avoid the 1MB limit in etcd
NodeStatusOffload bool `json:"nodeStatusOffLoad,omitempty"`
// Archive workflows to persistence.
// Archive completed Workflows to persistence so you can access them after they're
// removed from Kubernetes
Archive bool `json:"archive,omitempty"`
// ArchivelabelSelector holds LabelSelector to determine workflow persistence.
// ArchiveLabelSelector holds LabelSelector to determine which Workflows to archive
ArchiveLabelSelector *metav1.LabelSelector `json:"archiveLabelSelector,omitempty"`
// in days
// ArchiveTTL is the time to live for archived Workflows
ArchiveTTL TTL `json:"archiveTTL,omitempty"`
// ClusterName is the name of the cluster (or technically controller) for the persistence database
ClusterName string `json:"clusterName,omitempty"`
// SkipMigration skips database migration even if needed
SkipMigration bool `json:"skipMigration,omitempty"`
}
@ -224,34 +235,55 @@ func (c PersistConfig) GetClusterName() string {
return "default"
}
// SyncConfig contains synchronization configuration for database locks (semaphores and mutexes)
type SyncConfig struct {
DBConfig
// ControllerName sets a unique name for this controller instance
ControllerName string `json:"controllerName"`
// SkipMigration skips database migration even if needed
SkipMigration bool `json:"skipMigration,omitempty"`
// LimitTableName customizes the table name for semaphore limits, if not set, the default value is "sync_limit"
LimitTableName string `json:"limitTableName,omitempty"`
// StateTableName customizes the table name for current lock state, if not set, the default value is "sync_state"
StateTableName string `json:"stateTableName,omitempty"`
// ControllerTableName customizes the table name for controller heartbeats, if not set, the default value is "sync_controller"
ControllerTableName string `json:"controllerTableName,omitempty"`
// LockTableName customizes the table name for lock coordination data, if not set, the default value is "sync_lock"
LockTableName string `json:"lockTableName,omitempty"`
// PollSeconds specifies how often to check for lock changes, if not set, the default value is 5 seconds
PollSeconds *int `json:"pollSeconds,omitempty"`
// HeartbeatSeconds specifies how often to update controller heartbeat, if not set, the default value is 60 seconds
HeartbeatSeconds *int `json:"heartbeatSeconds,omitempty"`
// InactiveControllerSeconds specifies when to consider a controller dead, if not set, the default value is 300 seconds
InactiveControllerSeconds *int `json:"inactiveControllerSeconds,omitempty"`
// SemaphoreLimitCacheSeconds specifies the duration in seconds before the workflow controller will re-fetch the limit
// for a semaphore from its associated data source. Defaults to 0 seconds (re-fetch every time the semaphore is checked).
SemaphoreLimitCacheSeconds *int64 `json:"semaphoreLimitCacheSeconds,omitempty"`
}
// ConnectionPool contains database connection pool settings
type ConnectionPool struct {
// MaxIdleConns sets the maximum number of idle connections in the pool
MaxIdleConns int `json:"maxIdleConns,omitempty"`
// MaxOpenConns sets the maximum number of open connections to the database
MaxOpenConns int `json:"maxOpenConns,omitempty"`
// ConnMaxLifetime sets the maximum amount of time a connection may be reused
ConnMaxLifetime TTL `json:"connMaxLifetime,omitempty"`
}
// DatabaseConfig contains common database connection settings
type DatabaseConfig struct {
// Host is the database server hostname
Host string `json:"host"`
// Port is the database server port
Port int `json:"port,omitempty"`
// Database is the name of the database to connect to
Database string `json:"database"`
// TableName is the name of the table to use, must be set
TableName string `json:"tableName,omitempty"`
// UsernameSecret references a secret containing the database username
UsernameSecret apiv1.SecretKeySelector `json:"userNameSecret,omitempty"`
// PasswordSecret references a secret containing the database password
PasswordSecret apiv1.SecretKeySelector `json:"passwordSecret,omitempty"`
}
@ -262,14 +294,19 @@ func (c DatabaseConfig) GetHostname() string {
return fmt.Sprintf("%s:%v", c.Host, c.Port)
}
// PostgreSQLConfig contains PostgreSQL-specific database configuration
type PostgreSQLConfig struct {
DatabaseConfig
// SSL enables SSL connection to the database
SSL bool `json:"ssl,omitempty"`
// SSLMode specifies the SSL mode (disable, require, verify-ca, verify-full)
SSLMode string `json:"sslMode,omitempty"`
}
// MySQLConfig contains MySQL-specific database configuration
type MySQLConfig struct {
DatabaseConfig
// Options contains additional MySQL connection options
Options map[string]string `json:"options,omitempty"`
}
@ -284,10 +321,13 @@ type MetricModifier struct {
HistogramBuckets []float64 `json:"histogramBuckets,omitempty"`
}
// MetricsTemporality defines the temporality of OpenTelemetry metrics
type MetricsTemporality string
const (
// MetricsTemporalityCumulative indicates cumulative temporality
MetricsTemporalityCumulative MetricsTemporality = "Cumulative"
// MetricsTemporalityDelta indicates delta temporality
MetricsTemporalityDelta MetricsTemporality = "Delta"
)
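For example, assuming the `MetricsConfig` struct exposes this as a `temporality` field per the JSON-tag conventions above (an assumption, not a verified schema), selecting delta temporality would look like:

```yaml
metricsConfig: |
  temporality: Delta   # or Cumulative, the usual default
```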
@ -338,14 +378,19 @@ func (mc *MetricsConfig) GetTemporality() metricsdk.TemporalitySelector {
}
}
// WorkflowRestrictions contains restrictions for workflow execution
type WorkflowRestrictions struct {
// TemplateReferencing controls how templates can be referenced
TemplateReferencing TemplateReferencing `json:"templateReferencing,omitempty"`
}
// TemplateReferencing defines how templates can be referenced in workflows
type TemplateReferencing string
const (
// TemplateReferencingStrict requires templates to be referenced, not embedded
TemplateReferencingStrict TemplateReferencing = "Strict"
// TemplateReferencingSecure requires templates to be referenced and prevents spec changes
TemplateReferencingSecure TemplateReferencing = "Secure"
)
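For example, in the `workflow-controller-configmap`:

```yaml
workflowRestrictions: |
  templateReferencing: Strict   # Secure additionally prevents spec changes
```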

View File

@ -1,6 +1,9 @@
package config
// Image contains command and entrypoint configuration for container images
type Image struct {
// Entrypoint overrides the container entrypoint
Entrypoint []string `json:"entrypoint,omitempty"`
// Cmd overrides the container command
Cmd []string `json:"cmd,omitempty"`
}
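For example, the `images` entry in the `workflow-controller-configmap` maps an image name to these overrides (the same shape as the v1-manifest example later in this document):

```yaml
images: |
  docker/whalesay:latest:
    cmd: [/bin/bash]
```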

View File

@ -1,7 +1,10 @@
package config
// NodeEvents configures how node events are emitted
type NodeEvents struct {
// Enabled controls whether node events are emitted
Enabled *bool `json:"enabled,omitempty"`
// SendAsPod emits events as if from the Pod instead of the Workflow, with annotations linking the event to the Workflow
SendAsPod bool `json:"sendAsPod,omitempty"`
}
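For example, in the `workflow-controller-configmap`:

```yaml
nodeEvents: |
  enabled: true
  sendAsPod: false   # set true to emit events as if from the Pod
```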

View File

@ -1,6 +1,8 @@
package config
// RBACConfig contains role-based access control configuration
type RBACConfig struct {
// Enabled controls whether RBAC is enabled
Enabled bool `json:"enabled,omitempty"`
}

View File

@ -2,7 +2,10 @@ package config
// Workflow retention by number of workflows
type RetentionPolicy struct {
// Completed is the number of completed Workflows to retain
Completed int `json:"completed,omitempty"`
// Failed is the number of failed Workflows to retain
Failed int `json:"failed,omitempty"`
// Errored is the number of errored Workflows to retain
Errored int `json:"errored,omitempty"`
}
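For example, in the `workflow-controller-configmap`:

```yaml
retentionPolicy: |
  completed: 10
  failed: 3
  errored: 3
```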

View File

@ -7,20 +7,31 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// SSOConfig contains single sign-on configuration settings
type SSOConfig struct {
// Issuer is the OIDC issuer URL
Issuer string `json:"issuer"`
// IssuerAlias is an optional alias for the issuer
IssuerAlias string `json:"issuerAlias,omitempty"`
// ClientID references a secret containing the OIDC client ID
ClientID apiv1.SecretKeySelector `json:"clientId"`
// ClientSecret references a secret containing the OIDC client secret
ClientSecret apiv1.SecretKeySelector `json:"clientSecret"`
// RedirectURL is the OIDC redirect URL
RedirectURL string `json:"redirectUrl"`
// RBAC contains role-based access control settings
RBAC *RBACConfig `json:"rbac,omitempty"`
// Scopes contains additional scopes to request (on top of "openid")
Scopes []string `json:"scopes,omitempty"`
// SessionExpiry specifies how long user sessions last
SessionExpiry metav1.Duration `json:"sessionExpiry,omitempty"`
// CustomGroupClaimName will override the groups claim name
CustomGroupClaimName string `json:"customGroupClaimName,omitempty"`
// UserInfoPath specifies the path to user info endpoint
UserInfoPath string `json:"userInfoPath,omitempty"`
// InsecureSkipVerify skips TLS certificate verification
InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"`
// FilterGroupsRegex filters groups using regular expressions
FilterGroupsRegex []string `json:"filterGroupsRegex,omitempty"`
}
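A hedged sketch of an `sso` block in the `workflow-controller-configmap`; the issuer, secret names, and URLs are illustrative:

```yaml
sso: |
  issuer: https://accounts.google.com
  clientId:
    name: argo-server-sso     # placeholder secret name
    key: client-id
  clientSecret:
    name: argo-server-sso
    key: client-secret
  redirectUrl: https://argo.example.com/oauth2/callback
  sessionExpiry: 240h
  scopes:
    - groups
  rbac:
    enabled: false
```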

View File

@ -1,6 +1,8 @@
package config
// WorkflowEvents configures how workflow events are emitted
type WorkflowEvents struct {
// Enabled controls whether workflow events are emitted
Enabled *bool `json:"enabled,omitempty"`
}
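For example, to suppress workflow events entirely:

```yaml
workflowEvents: |
  enabled: false
```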

View File

@ -10,13 +10,14 @@ See [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-o
## Community Meetings (monthly)
A monthly opportunity for users and maintainers of Workflows and Events to share their current work and hear about what's coming on the roadmap.
Please join us!
For Community Meeting information, minutes and recordings please [see here](http://bit.ly/argo-wf-cmty-mtng).
## Contributor Meetings (twice monthly)
An opportunity for contributors and maintainers of Workflows and Events to discuss their current work and talk about what's next.
Feel free to join us!
See the [Contributor Meeting doc](https://bit.ly/argo-data-weekly) for minutes, recordings, and more information.
## Slack
@ -78,8 +79,7 @@ The dependency must pass these tests:
* It is actively maintained.
* It has no security issues.
Example, should we add `fasttemplate`, [view the Snyk report](https://snyk.io/advisor/golang/github.com/valyala/fasttemplate):
| Test | Outcome |
|-----------------------------------------|-------------------------------------|
@ -108,6 +108,19 @@ Anybody can review a PR.
If you are in a [designated role](#roles), add yourself as an "Assignee" to a PR if you plan to lead the review.
If you are a Reviewer or below, then once you have approved a PR, request a review from one or more Approvers and above.
#### Timeliness
We encourage PR authors and reviewers to respond to change requests in a reasonable time frame.
If you're on vacation or will be unavailable, please let others know on the PR.
##### PR Author Timeliness
If a PR hasn't seen activity from the author for 10 business days, someone else may ask to take it over.
We suggest commenting on the original PR and tagging the author to check on their plans.
Maintainers can reassign PRs to new contributors if the original author doesn't respond with a plan.
For PRs that have been inactive for 3 months, the takeover process can happen immediately.
**IMPORTANT:** If a PR is taken over and uses any code from the previous PR, the original author *must* be credited using `Co-authored-by` on the commits.
#### Triaging Bugs
New bugs need to be triaged to identify the highest priority ones.
@ -123,8 +136,6 @@ Bugs can be [sorted by "👍"](https://github.com/argoproj/argo-workflows/issues
If the issue is determined to be a user error and not a bug, remove the `type/bug` label (and the `type/regression` label, if applicable) and replace it with the `type/support` label.
If more information is needed from the author to diagnose the issue, then apply the `problem/more information needed` label.
Please only assign issues to members. New contributors are encouraged to work on a PR directly without being assigned.
##### Staleness
Only issues and PRs that have the [`problem/more information needed` label](https://github.com/argoproj/argo-workflows/labels/problem%2Fmore%20information%20needed) will be considered for staleness.

View File

@ -181,3 +181,7 @@ You can access additional information through the following headers.
* `X-Rate-Limit-Remaining` - the number of requests left for the current rate-limit window.
* `X-Rate-Limit-Reset` - the time at which the rate limit resets, specified in UTC time.
* `Retry-After` - indicates when a client should retry requests (when the rate limit expires), in UTC time.
### GRPC ALPN
The gRPC library wants to enforce ALPN, but we are not yet prepared for this, so as a short-term workaround the `argo-server` binary is built with `GRPC_ENFORCE_ALPN_ENABLED` set to `false` in the Docker image, as documented in https://github.com/grpc/grpc-go/issues/434

View File

@ -6,6 +6,18 @@ display details about a workflow template
argo template get WORKFLOW_TEMPLATE... [flags]
```
### Examples
```
# Get information about a workflow template by its name:
argo template get my-template
# Get information about a workflow template in YAML format:
argo template get my-template -o yaml
```
### Options
```

View File

@ -46,6 +46,7 @@ This document outlines environment variables that can be used to customize behav
| `POD_NAMES` | `string` | `v2` | Whether to have pod names contain the template name (v2) or be the node id (v1) - should be set the same for Argo Server. |
| `RECENTLY_STARTED_POD_DURATION` | `time.Duration` | `10s` | The duration of a pod before the pod is considered to be recently started. |
| `RECENTLY_DELETED_POD_DURATION` | `time.Duration` | `2m` | The duration of a pod before the pod is considered to be recently deleted. |
| `TASK_RESULT_TIMEOUT_DURATION` | `time.Duration` | `10m` | The duration of time before a node is marked completed when its `taskresult` has not yet arrived. This is a more general and more conservative version of `RECENTLY_DELETED_POD_DURATION`, used when a `taskresult` hasn't arrived but the pod is still around. |
| `RETRY_BACKOFF_DURATION` | `time.Duration` | `10ms` | The retry back-off duration when retrying API calls. |
| `RETRY_BACKOFF_FACTOR` | `float` | `2.0` | The retry back-off factor when retrying API calls. |
| `RETRY_BACKOFF_STEPS` | `int` | `5` | The retry back-off steps when retrying API calls. |
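These are ordinary environment variables on the controller, so overriding one is a Deployment edit. A sketch (the Deployment and container names depend on your installation):

```yaml
# Sketch: partial Deployment spec for the workflow-controller.
spec:
  template:
    spec:
      containers:
        - name: workflow-controller
          env:
            - name: TASK_RESULT_TIMEOUT_DURATION
              value: "20m"
            - name: RETRY_BACKOFF_STEPS
              value: "10"
```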

View File

@ -470,6 +470,19 @@ WorkflowEventBinding is the definition of an event resource
|`metadata`|[`ObjectMeta`](#objectmeta)|_No description available_|
|`spec`|[`WorkflowEventBindingSpec`](#workfloweventbindingspec)|_No description available_|
## InfoResponse
_No description available_
### Fields
| Field Name | Field Type | Description |
|:----------:|:----------:|---------------|
|`columns`|`Array<`[`Column`](#column)`>`|_No description available_|
|`links`|`Array<`[`Link`](#link)`>`|_No description available_|
|`managedNamespace`|`string`|_No description available_|
|`modals`|`Map< boolean , string >`|which modals to show|
|`navColor`|`string`|_No description available_|
## WorkflowSpec
WorkflowSpec is the specification of a Workflow.
@ -1374,6 +1387,29 @@ _No description available_
|`event`|[`Event`](#event)|Event is the event to bind to|
|`submit`|[`Submit`](#submit)|Submit is the workflow template to submit|
## Column
Column is a custom column that will be exposed in the Workflow List View.
### Fields
| Field Name | Field Type | Description |
|:----------:|:----------:|---------------|
|`key`|`string`|The key of the label or annotation, e.g., "workflows.argoproj.io/completed".|
|`name`|`string`|The name of this column, e.g., "Workflow Completed".|
|`type`|`string`|The type of this column, "label" or "annotation".|
## Link
A link to another app.
### Fields
| Field Name | Field Type | Description |
|:----------:|:----------:|---------------|
|`name`|`string`|The name of the link, E.g. "Workflow Logs" or "Pod Logs"|
|`scope`|`string`|"workflow", "pod", "pod-logs", "event-source-logs", "sensor-logs", "workflow-list" or "chat"|
|`target`|`string`|Target attribute specifies where a linked document will be opened when a user clicks on a link. E.g. "_blank", "_self". If the target is _blank, it will open in a new tab.|
|`url`|`string`|The URL. Can contain "${metadata.namespace}", "${metadata.name}", "${status.startedAt}", "${status.finishedAt}" or any other element in workflow yaml, e.g. "${io.argoproj.workflow.v1alpha1.metadata.annotations.userDefinedKey}"|
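Both `Column` and `Link` are configured in the `workflow-controller-configmap`; a sketch with an illustrative logging URL:

```yaml
columns: |
  - name: Workflow Completed
    type: label
    key: workflows.argoproj.io/completed
links: |
  - name: Pod Logs
    scope: pod
    target: _blank   # open in a new tab
    url: https://logging.example.com/?pod=${metadata.name}&namespace=${metadata.namespace}
```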
## Arguments
Arguments to a template
@ -1984,6 +2020,7 @@ NodeStatus contains status information about an individual node in the workflow
|`resourcesDuration`|`Map< integer , int64 >`|ResourcesDuration is indicative, but not accurate, resource duration. This is populated when the nodes completes.|
|`startedAt`|[`Time`](#time)|Time at which this node started|
|`synchronizationStatus`|[`NodeSynchronizationStatus`](#nodesynchronizationstatus)|SynchronizationStatus is the synchronization status of the node|
|`taskResultSynced`|`boolean`|TaskResultSynced is used to determine if the node's output has been received|
|`templateName`|`string`|TemplateName is the template name which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)|
|`templateRef`|[`TemplateRef`](#templateref)|TemplateRef is the reference to the template resource which this node corresponds to. Not applicable to virtual nodes (e.g. Retry, StepGroup)|
|`templateScope`|`string`|TemplateScope is the template scope in which the template of this node was retrieved.|

View File

@ -1,10 +1,5 @@
# Metrics
## Introduction
Argo emits a number of controller metrics that report the state of the controller at any given time.

View File

@ -1,80 +1,50 @@
# New Features
This is a concise list of new features.
## Retry Improvements
* [#13738](https://github.com/argoproj/argo-workflows/pull/13738) : Support retry strategy on daemon containers
* [#13782](https://github.com/argoproj/argo-workflows/pull/13782) : Support cap on retryStrategy backoff
* [#14450](https://github.com/argoproj/argo-workflows/pull/14450) : Allow last retry variables in expressions
## Parallelism Improvements
* [#14309](https://github.com/argoproj/argo-workflows/pull/14309) : Multi-controller locks (semaphores and mutexes)
* [#14188](https://github.com/argoproj/argo-workflows/pull/14188) : Dynamic namespace parallelism
## Enhanced Caching
* [#14304](https://github.com/argoproj/argo-workflows/pull/14304) : More granular caching options for the argo kubernetes informer
* [#14205](https://github.com/argoproj/argo-workflows/pull/14205) : Cache semaphore limit lookup
## UI Enhancements
* [#14034](https://github.com/argoproj/argo-workflows/pull/14034) : Visualize workflows before submitting
* [#13962](https://github.com/argoproj/argo-workflows/pull/13962) : Filter workflows by "Finished before" and "Created since" via API
* [#13935](https://github.com/argoproj/argo-workflows/pull/13935) : Allow markdown titles and descriptions in KeyValueEditor
* [#12644](https://github.com/argoproj/argo-workflows/pull/12644) : Allow markdown titles and descriptions in WorkflowTemplates & ClusterWorkflowTemplates
* [#13883](https://github.com/argoproj/argo-workflows/pull/13883) : Mark memoized nodes as cached
* [#13922](https://github.com/argoproj/argo-workflows/pull/13922) : Pre-fill parameters for workflow submit form
* [#14077](https://github.com/argoproj/argo-workflows/pull/14077) : Set template display name in YAML
## User Experience
* [#14104](https://github.com/argoproj/argo-workflows/pull/14104) : Label actor action when making changes to workflows/templates
* [#13933](https://github.com/argoproj/argo-workflows/pull/13933) : Support archive logs in resource templates
* [#13790](https://github.com/argoproj/argo-workflows/pull/13790) : Include container name in error messages
## Cron Workflow Enhancement
* [#13999](https://github.com/argoproj/argo-workflows/pull/13999) : Support backfill for cron workflows
## Security Improvements
* [#14477](https://github.com/argoproj/argo-workflows/pull/14477) : Non-root argoexec image
## Developer Experience
* [#14412](https://github.com/argoproj/argo-workflows/pull/14412) : Add React Testing Library and initial component coverage
* [#13920](https://github.com/argoproj/argo-workflows/pull/13920) : Move contextless log messages to debug level
* [#14151](https://github.com/argoproj/argo-workflows/pull/14151) : Enable cherry-pick bot
* [#14103](https://github.com/argoproj/argo-workflows/pull/14103) : Add support for databases enforcing strict data integrity through primary keys

View File

@ -16,3 +16,4 @@
| [Atomic Workflow Plugin](https://github.com/LinuxSuRen/argo-workflow-atomic-plugin) | Stop the workflows which come from the same `WorkflowTemplate` and have the same parameters |
| [AWS Plugin](https://github.com/greenpau/argo-workflows-aws-plugin) | Argo Workflows Executor Plugin for AWS Services, e.g. SageMaker Pipelines, Glue, etc. |
| [Pytorch Job Plugin](https://github.com/shuangkun/argo-workflows-pytorch-plugin) | Argo Workflows Executor Plugin for Pytorch Job |
| [Ray Job Plugin](https://github.com/argoproj-labs/argo-workflows-ray-plugin) | Argo Workflows Executor Plugin for Ray Job |

View File

@ -1,10 +1,22 @@
# Release Instructions
This page covers instructions for releasing Argo Workflows.
It is intended for Argo Workflows release managers, who will be responsible for coordinating the release.
Release managers must be Approvers on the Argo Workflows sub-project.
## Patch Releases
Patch releases are for bug fixes and are released from an existing release branch.
Using the `cherry-pick` comment you can cherry-pick PRs from `main` to the release branch in advance of the release.
This is recommended to ensure that each PR is tested before it is published, and makes the process of releasing a new patch version much easier.
All members of the Argo project can cherry-pick fixes to release branches using this mechanism.
Manually raising cherry-pick PRs against a release branch is also acceptable, and can be done by anyone.
### Manually Cherry-Picking Fixes for patch releases
✋ Before you start, make sure you have created a release branch (e.g. `release-3.3`) and it's passing CI.
All patch releases (e.g. `v3.3.5`) must be released from their associated minor release branch (e.g. `release-3.3`) to work well with our versioned website.
Then get a list of commits you may want to cherry-pick:
@ -21,8 +33,7 @@ To automatically cherry-pick, run the following:
./hack/cherry-pick.sh release-3.3 "fix" false
```
Then look for "failed to cherry-pick" in the log to find commits that fail to be cherry-picked and decide if a manual patch is necessary.
Ignore:
@ -30,33 +41,105 @@ Ignore:
* Dependency upgrades, unless they fix known security issues.
* Build or CI improvements, unless the release pipeline is blocked without them.
Cherry-pick the first commit.
Run `make test` locally before pushing.
If the build times out, the build caches may have expired; try re-running.
Don't cherry-pick another commit until the CI passes.
It is harder to find the cause of a new failed build if the last build failed too.
Cherry-picking commits one-by-one and then waiting for the CI will take a long time.
Instead, cherry-pick each commit then run `make test` locally before pushing.
## Feature releases
If you're releasing a version of Argo where the minor or major version is changing, you're releasing a feature release and there is more work to do.
You must start with at least one release candidate.
See [Release Cycle](releases.md#release-cycle) for information about release candidates.
### Release candidates
For release candidates you should tag `main` for the release.
These take the form of `3.6.0-rc1` and the final digit increases for each RC.
### Documentation
Before or after the first release candidate you should ensure that [`new-features.md`](new-features.md) and [`upgrading.md`](upgrading.md) are updated.
A post should be made on a blog site (we usually use Medium) announcing the release and the new features.
This post should celebrate the new features and thank the contributors, including statistics from the release.
Post this blog post to the [Argo Workflows Contributors](https://cloud-native.slack.com/archives/C0510EUH90V) Slack channel and [Argo Maintainers](https://cloud-native.slack.com/archives/C022F03E6BD) Slack channel for comments.
Update these three items ([`new-features.md`](new-features.md), [`upgrading.md`](upgrading.md), blog post) for each release candidate and the final release.
### Final release
There should be no changes between the final release candidate and the actual release.
For the final release you should create a tag at the same place as the final release candidate.
You must also create a `release/<version>` branch from that same point.
Now you can add the branch to ["Read the Docs"](https://app.readthedocs.org/projects/argo-workflows/) and then the new branch should be built and published.
Close the release candidate GitHub issue and unpin it, and create a new issue for patches to this branch.
### Expire old branches
Release n-2 is now out of support.
You should not delete anything to do with it.
Consider whether to do one final release for it.
Once that is done the old branch should be kept, but the pinned issue tracker issue should be unpinned and closed.
The "Read the Docs" documentation build should be kept.
## Publish Release (all releases)
✋ Before you start, make sure the branch is passing CI.
Push a new tag to the release branch.
E.g.:
```bash
git tag v3.3.4
git push upstream v3.3.4 # or origin if you do not use upstream
```
### Feature Releases
For feature releases (e.g., v3.6.0, v3.7.0) and not patch releases (e.g., v3.6.1, v3.6.5), you need to update the feature descriptions with the new version.
For release candidates, use:
```bash
make features-update VERSION=v3.6.0
git add docs/new-features.md
git commit -m "chore: Update feature descriptions for v3.6.0"
git push
```
This will update all pending feature descriptions with the current version and include them in the upcoming release notes.
The features will remain in the pending directory, allowing for further updates if needed.
For the final release, use:
```bash
make features-release VERSION=v3.6.0
git add .features
git add docs/new-features.md
git commit -m "chore: Release feature descriptions for v3.6.0"
git push
```
This will update the feature descriptions and move them from the pending directory to the released directory for the specific version.
This is the final step that should be done when releasing a new version.
### Release Build
GitHub Actions will automatically build and publish your release.
This takes about 1h.
Set yourself a reminder to check this was successful.
## Update Changelog (all releases)
Once the tag is published, GitHub Actions will automatically open a PR to update the changelog.
Once the PR is ready, you can approve it, enable auto-merge, and then run the following to force trigger the CI build:
```bash
git branch -D create-pull-request/changelog
@ -66,7 +149,5 @@ git commit -s --allow-empty -m "chore: Force trigger CI"
git push upstream create-pull-request/changelog
```
## Announce on Slack
Once the changelog updates have been merged, you should announce on our Slack channels, [`#argo-workflows`](https://cloud-native.slack.com/archives/C01QW9QSSSK) and [`#argo-announcements`](https://cloud-native.slack.com/archives/C02165G1L48).
See [previous](https://cloud-native.slack.com/archives/C02165G1L48/p1701112932434469) [announcements](https://cloud-native.slack.com/archives/C01QW9QSSSK/p1701112957127489) as examples of what to write in the patch announcement.

View File

@ -278,6 +278,44 @@ git commit --signoff -m 'fix: Fixed broken thing. Fixes #1234'
git commit --signoff -m 'feat: Added a new feature. Fixes #1234'
```
### Creating Feature Descriptions
When adding a new feature, you must create a feature description file that will be used to generate new feature information when we do a feature release:
```bash
make feature-new
```
This will create a new feature description file in the `.features` directory which you must then edit to describe your feature.
By default, it uses your current branch name as the file name.
The name of the file isn't used by the tooling; it just needs to be unique to your feature so as not to collide on merge.
You can also specify a custom file name:
```bash
make feature-new FEATURE_FILENAME=my-awesome-feature
```
You must have an issue number to associate with your PR for features, and that must be placed in this file.
It seems reasonable that all new features are discussed in an issue before being developed.
There is a `Component` field which must match one of the fields in `hack/featuregen/components.go`.
The feature file should be included in your PR to document your changes.
Before submitting, you can validate your feature file:
```bash
make features-validate
```
The `pre-commit` target will also do that.
You can also preview how your feature will appear in the release notes:
```bash
make features-preview
```
This command runs a dry-run of the release notes generation process, showing you how your feature will appear in the markdown file that will be used to generate the release notes.
## Troubleshooting
* When running `make pre-commit -B`, if you encounter errors like

View File

@ -72,6 +72,7 @@ This would normally be used to share locks across multiple clusters, but can als
To configure multiple controller locks, you need to set up a database (either PostgreSQL or MySQL) and [configure it](#database-configuration) in the workflow-controller-configmap ConfigMap.
All controllers which want to share locks must share all of these tables.
If you do not configure the database, you will get an error when you try to use database locks.
A Workflow that uses a Workflow-level database mutex would look like this:
@ -112,7 +113,7 @@ The above example Workflow would need something like
INSERT INTO sync_limit (name, sizelimit) VALUES ('foo/bar', 3);
```
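As a hedged sketch (the exact `synchronization` field layout should be verified against your version's schema), a Workflow in namespace `foo` taking the `bar` database semaphore might look like:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: db-semaphore-
  namespace: foo              # the sync_limit row is keyed "<namespace>/<key>", i.e. foo/bar
spec:
  entrypoint: main
  synchronization:
    semaphores:
      - database:             # assumption: database-backed semaphore reference
          key: bar
  templates:
    - name: main
      container:
        image: argoproj/argosay:v2
```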
#### Cluster Time
The time on the clusters must be synchronized.
The timestamps put into the database are used to determine if a controller is responsive; if the times on the clusters differ, this will not work correctly.
@ -294,6 +295,11 @@ You can also [restrict parallelism at the Controller-level](parallelism.md).
## Database configuration
In order to use multiple controller locks, you need to configure the database in the workflow-controller-configmap ConfigMap.
This is done by setting up the [`SyncConfig` section](workflow-controller-configmap.md#syncconfig).
If you try to use multiple controller locks without configuring the database, you will get an error.
### Limit Table
This table stores the maximum number of concurrent Workflows/Templates allowed for each semaphore.

View File

@ -5,13 +5,9 @@ the [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/#summar
## Upgrading to v3.7
See also the list of [new features in 3.7](new-features.md).

For upgrades to older versions of Argo Workflows, please change to the documentation for the version of interest.

### Deprecations

Several Sprig functions have been deprecated in favor of Expr standard library alternatives. While these functions continue to work, they will be removed in a future version. See [available Sprig functions](variables.md#sprig-functions) for the complete list and [Expression language](variables.md#expression) for alternatives.
@ -22,469 +18,20 @@ The following features are deprecated and will be removed in a future version of Argo Workflows
For more information on how to migrate these, see [deprecations](deprecations.md).
### Removed Docker Hub Image Publishing

Pull Request [#14457](https://github.com/argoproj/argo-workflows/pull/14457) removed pushing to docker hub.
Argo Workflows exclusively uses quay.io now.

### Made Parameter Value Overriding Consistent

Pull Request [#14462](https://github.com/argoproj/argo-workflows/pull/14462) made parameter value overriding consistent.
This fix changes the priority in which the values are processed, meaning that a Workflow argument will now take priority.
For more details, see the example provided [here](https://github.com/argoproj/argo-workflows/issues/14426).

## Upgrading to 4.0

### Legacy insecure pod patch fallback removed. ([#13100](https://github.com/argoproj/argo-workflows/pull/13100))

For the Emissary executor to work properly, you must set up RBAC. See [workflow RBAC](workflow-rbac.md).

### Logging levels

The logging levels available have been reduced to `debug`, `info`, `warn` and `error`.
Other levels will be mapped to their equivalent if you use them, although they were previously undocumented.

## Upgrading to v3.6

See also the list of [new features in 3.6](new-features.md).

### Fixed Server `--basehref` inconsistency

For consistency, the Server now uses `--base-href` and `ARGO_BASE_HREF`.
Previously it was `--basehref` (no dash in between) and `ARGO_BASEHREF` (no underscore in between).

### Removed redundant Server environment variables

`ALLOWED_LINK_PROTOCOL` and `BASE_HREF` have been removed as redundant.
Use `ARGO_ALLOWED_LINK_PROTOCOL` and `ARGO_BASE_HREF` instead.
### Archived Workflows on PostgreSQL
To improve performance, this upgrade will automatically transform the column used to store archived workflows from type `json` to type `jsonb` on controller start-up.
This requires PostgreSQL version 9.4 or higher.
The migration involves obtaining an [ACCESS EXCLUSIVE](https://www.postgresql.org/docs/current/explicit-locking.html) lock on the `argo_archived_workflows` table, which blocks all reads and writes until it has finished.
For the vast majority of users, we anticipate this will take less than a minute, but it could take much longer if you have a large number of workflows (100,000+), or the average workflow size is high (100KB+).
**If you don't fall into one of those two categories, or if minimizing downtime isn't important to you, then you don't need to read any further.**
Otherwise, you have a few options to keep downtime to a minimum:
1. If you don't actually need the archived workflows anymore, simply delete them with `delete from argo_archived_workflows` and the migration will complete almost instantly.
2. Using a variation of [Altering a Postgres Column with Minimal Downtime](https://making.lyst.com/2020/05/26/altering-a-postgres-column-with-minimal-downtime/), it's possible to manually perform this migration with nearly no downtime. This is a two-step process:
1. Before the upgrade, run the following queries to create a temporary `workflowjsonb` column and populate it with the existing data. This is safe to do whilst running version 3.5 because the column types are compatible.
```sql
-- Add temporary workflowjsonb column
ALTER TABLE argo_archived_workflows ADD COLUMN workflowjsonb JSONB NULL;
-- Add trigger to update workflowjsonb for each insert
CREATE OR REPLACE FUNCTION update_workflow_jsonb() RETURNS TRIGGER AS $BODY$
BEGIN
NEW.workflowjsonb=NEW.workflow;
RETURN NEW;
END
$BODY$ LANGUAGE PLPGSQL;
CREATE TRIGGER argo_archived_workflows_update_workflow_jsonb
BEFORE INSERT ON argo_archived_workflows
FOR EACH ROW EXECUTE PROCEDURE update_workflow_jsonb();
-- Backfill existing rows
UPDATE argo_archived_workflows SET workflowjsonb = workflow WHERE workflowjsonb IS NULL;
```
2. Once the above has completed and you're ready to proceed with the upgrade, run the following queries before starting the controller:
```sql
BEGIN;
LOCK TABLE argo_archived_workflows IN SHARE ROW EXCLUSIVE MODE;
DROP TRIGGER argo_archived_workflows_update_workflow_jsonb ON argo_archived_workflows;
ALTER TABLE argo_archived_workflows DROP COLUMN workflow;
ALTER TABLE argo_archived_workflows RENAME COLUMN workflowjsonb TO workflow;
ALTER TABLE argo_archived_workflows ADD CONSTRAINT workflow CHECK (workflow IS NOT NULL) NOT VALID;
COMMIT;
```
3. Version 3.6 retains compatibility with workflows stored as type `json`.
Therefore, it's currently safe to [skip the migration](workflow-archive.md#automatic-database-migration) by setting `skipMigration: true`.
This should only be used as an emergency stop-gap, as future versions may drop support for `json` without notice.
### Metrics changes
You can now retrieve metrics using the OpenTelemetry Protocol using the [OpenTelemetry collector](https://opentelemetry.io/docs/collector/), and this is the recommended mechanism.
These notes explain the differences in using the Prometheus `/metrics` endpoint to scrape metrics for a minimal-effort upgrade. It is not recommended that you follow this guide blindly; the new metrics have been introduced because they add value, and so they should be worth collecting and using.
#### New metrics
The following are new metrics:
* `cronworkflows_concurrencypolicy_triggered`
* `cronworkflows_triggered_total`
* `deprecated_feature`
* `is_leader`
* `k8s_request_duration`
* `pod_pending_count`
* `pods_total_count`
* `queue_duration`
* `queue_longest_running`
* `queue_retries`
* `queue_unfinished_work`
* `total_count`
* `version`
* `workflowtemplate_runtime`
* `workflowtemplate_triggered_total`
and can be disabled with
```yaml
metricsConfig: |
modifiers:
build_info:
disable: true
...
```
#### Renamed metrics
If you are using these metrics in your recording rules, dashboards, or alerts, you will need to update their names after the upgrade:
| Old name | New name |
|------------------------------------|------------------------------------|
| `argo_workflows_count` | `argo_workflows_gauge` |
| `argo_workflows_pods_count` | `argo_workflows_pods_gauge` |
| `argo_workflows_queue_depth_count` | `argo_workflows_queue_depth_gauge` |
| `log_messages` | `argo_workflows_log_messages` |
#### Custom metrics
Custom metric names and labels must now be valid Prometheus and OpenTelemetry names. This prevents the use of `:`, which was usable in earlier versions of Argo Workflows.
Custom metrics, as defined by a workflow, could previously be defined as one type (say counter) in one workflow, and then as a histogram of the same name in a different workflow. This would work in 3.5 if the first usage of the metric had reached TTL and been deleted. This no longer works in 3.6, and custom metrics may not be redefined. It doesn't really make sense to change a metric in this way, and the OpenTelemetry SDK prevents you from doing so.
`metricsTTL` for histogram metrics is not functional, as OpenTelemetry doesn't allow deletion of metrics. For the other metric types, deletion is emulated via asynchronous meters.
#### TLS
The Prometheus `/metrics` endpoint now has TLS enabled by default.
To disable this set `metricsConfig.secure` to `false`.
### Removed Swagger UI
The Swagger UI has been removed from the `/apidocs` page.
It has been replaced with a link to the [Swagger UI in the versioned documentation](swagger.md) and download links for the OpenAPI spec and JSON schema.
### JSON templating fix
When returning a map or array in an expression, you would get a Golang representation.
This now returns plain JSON.
### Added container name to workflow node error messages
Workflow node error messages are now prefixed with the container name.
If you are using [Conditional Retries](retries.md#conditional-retries), you may need to adjust your usage of `lastRetry.message` expressions or the `TRANSIENT_ERROR_PATTERN` environment variable.
### `ARGO_TEMPLATE` removed from main container
The environment variable `ARGO_TEMPLATE` which is an internal implementation detail is no longer available inside the `main` container of your workflow pods.
This is documented here as we are aware that some users of Argo Workflows use this.
## Upgrading to v3.5
There are no known breaking changes in this release.
Please file an issue if you encounter any unexpected problems after upgrading.
### Unified Workflows List API and UI
The Workflows List in the UI now shows Archived Workflows in the same page.
As such, the previously separate Archived Workflows page in the UI has been removed.
The List API `/api/v1/workflows` also returns both types of Workflows now.
This is not breaking as the Archived API still exists and was not removed, so this is an addition.
## Upgrading to v3.4
### Non-Emissary executors are removed. ([#7829](https://github.com/argoproj/argo-workflows/issues/7829))
Emissary executor is now the only supported executor. If you are using other executors, e.g. docker, k8sapi, pns, and kubelet, you need to
remove your `containerRuntimeExecutors` and `containerRuntimeExecutor` from your controller's configmap. If you have workflows that use different
executors with the label `workflows.argoproj.io/container-runtime-executor`, this is no longer supported and will not be effective.
### chore!: Remove dataflow pipelines from codebase. (#9071)
You are affected if you are using [dataflow pipelines](https://github.com/argoproj-labs/argo-dataflow) in the UI or via the `/pipelines` endpoint.
We no longer support dataflow pipelines and all relevant code has been removed.
### feat!: Add entrypoint lookup. Fixes #8344
Affected if:
* Using the Emissary executor.
* Used the `args` field for any entry in `images`.
This PR automatically looks up the command and entrypoint. The implementation for config look-up was incorrect (it
allowed you to specify `args` but not `entrypoint`). `args` has been removed to correct the behaviour.
If you are incorrectly configured, the workflow controller will error on start-up.
#### Actions
You don't need to configure images that use v2 manifests anymore, such as `argoproj/argosay:v2`.
You can remove them:
```bash
% docker manifest inspect argoproj/argosay:v2
# ...
"schemaVersion": 2,
# ...
```
For v1 manifests, such as `docker/whalesay:latest`:
```bash
% docker image inspect -f '{{.Config.Entrypoint}} {{.Config.Cmd}}' docker/whalesay:latest
[] [/bin/bash]
```
```yaml
images:
docker/whalesay:latest:
cmd: [/bin/bash]
```
### feat: Fail on invalid config. (#8295)
The workflow controller will error on start-up if incorrectly configured, rather than silently ignoring
mis-configuration.
```text
Failed to register watch for controller config map: error unmarshaling JSON: while decoding JSON: json: unknown field \"args\"
```
### feat: add indexes for improve archived workflow performance. (#8860)
This PR adds indexes to archived workflow tables. This change may cause the upgrade to take a long time if you have a large table.
### feat: enhance artifact visualization (#8655)
For AWS users using S3: visualizing artifacts in the UI and downloading them now requires an additional "Action" to be configured in your S3 bucket policy: "ListBucket".
## Upgrading to v3.3
### [662a7295b](https://github.com/argoproj/argo-workflows/commit/662a7295b) feat: Replace `patch pod` with `create workflowtaskresult`. Fixes #3961 (#8000)
The PR changes the permissions that can be used by a workflow to remove the `pod patch` permission.
See [workflow RBAC](workflow-rbac.md) and [#8013](https://github.com/argoproj/argo-workflows/issues/3961).
### [06d4bf76f](https://github.com/argoproj/argo-workflows/commit/06d4bf76f) fix: Reduce agent permissions. Fixes #7986 (#7987)
The PR changes the permissions used by the agent to report back the outcome of HTTP template requests. The permission `patch workflowtasksets/status` replaces `patch workflowtasksets`, for example:
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: agent
rules:
- apiGroups:
- argoproj.io
resources:
- workflowtasksets/status
verbs:
- patch
```
Workflows running during any upgrade should be given both permissions.
See [#8013](https://github.com/argoproj/argo-workflows/issues/8013).
### feat!: Remove deprecated config flags
This PR removes the following configmap items:
* executorImage (use executor.image in configmap instead)
e.g.
A workflow controller configmap like the one below won't be valid anymore:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: workflow-controller-configmap
data:
...
executorImage: argoproj/argocli:latest
...
```
From now on, only provide the executor image in the workflow controller configmap as shown below:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: workflow-controller-configmap
data:
...
executor: |
image: argoproj/argocli:latest
...
```
* executorImagePullPolicy (use executor.imagePullPolicy in configmap instead)
e.g.
A workflow controller configmap like the one below won't be valid anymore:
```yaml
data:
...
executorImagePullPolicy: IfNotPresent
...
```
Change it as shown below:
```yaml
data:
...
executor: |
imagePullPolicy: IfNotPresent
...
```
* executorResources (use executor.resources in configmap instead)
e.g.
A workflow controller configmap like the one below won't be valid anymore:
```yaml
data:
...
executorResources:
requests:
cpu: 0.1
memory: 64Mi
limits:
cpu: 0.5
memory: 512Mi
...
```
Change it as shown below:
```yaml
data:
...
executor: |
resources:
requests:
cpu: 0.1
memory: 64Mi
limits:
cpu: 0.5
memory: 512Mi
...
```
### [fce82d572](https://github.com/argoproj/argo-workflows/commit/fce82d5727b89cfe49e8e3568fff40725bd43734) feat: Remove pod workers (#7837)
This PR removes pod workers from the code; the pod informer now writes directly into the workflow queue. As a result the `--pod-workers` flag has been removed.
### [93c11a24ff](https://github.com/argoproj/argo-workflows/commit/93c11a24ff06049c2197149acd787f702e5c1f9b) feat: Add TLS to Metrics and Telemetry servers (#7041)
This PR adds the ability to send metrics over TLS with a self-signed certificate. In v3.5 this will be enabled by default, so it is recommended that users enable this functionality now.
### [0758eab11](https://github.com/argoproj/argo-workflows/commit/0758eab11decb8a1e741abef3e0ec08c48a69ab8) feat(server)!: Sync dispatch of webhook events by default
This is not expected to impact users.
Events dispatch in the Argo Server has been changed from async to sync by default. This is so that errors are surfaced to the client, rather than only appearing as logs or Kubernetes events. It is possible that response times under load are too long for your client and you may prefer to revert this behaviour.
To revert this behaviour, restart Argo Server with `ARGO_EVENT_ASYNC_DISPATCH=true`. Make sure that `asyncDispatch=true`
is logged.
### [bd49c6303](https://github.com/argoproj/argo-workflows/commit/bd49c630328d30206a5c5b78cbc9a00700a28e7d) fix(artifact)!: default https to any URL missing a scheme. Fixes #6973
An HTTPArtifact without a scheme now defaults to https instead of http.
Users need to explicitly include an http prefix if they want to retrieve an HTTPArtifact over http.
### chore!: Remove the hidden flag `--verify` from `argo submit`
The hidden flag `--verify` has been removed from `argo submit`. This is an internal testing flag we don't need anymore.
## Upgrading to v3.2
### [e5b131a33](https://github.com/argoproj/argo-workflows/commit/e5b131a33) feat: Add template node to pod name. Fixes #1319 (#6712)
This adds the template name to the pod name, to make it easier to understand which pod ran which step. This behaviour can be reverted by setting `POD_NAMES=v1` on the workflow controller.
### [be63efe89](https://github.com/argoproj/argo-workflows/commit/be63efe89) feat(executor)!: Change `argoexec` base image to alpine. Closes #5720 (#6006)
Changing from Debian to Alpine reduces the size of the `argoexec` image, resulting in faster-starting workflow pods, and it also reduces the risk of security issues. There is no such thing as a free lunch, though, so there may be other behaviour changes we don't know of yet.
Some users found this change prevented workflows with very large parameters from running. See [#7586](https://github.com/argoproj/argo-workflows/issues/7586)
### [48d7ad3](https://github.com/argoproj/argo-workflows/commit/48d7ad36c14e4a50c50332d6decd543a1b732b69) chore: Remove onExit naming transition scaffolding code (#6297)
When upgrading from `<v2.12` to `>v3.2` workflows that are running at the time of the upgrade and have `onExit` steps _may_ experience the `onExit` step running twice. This is only applicable for workflows that began running before a `workflow-controller` upgrade and are still running after the upgrade is complete. This is only applicable for upgrading from `v2.12` or earlier directly to `v3.2` or later. Even under these conditions, duplicate work may not be experienced.
## Upgrading to v3.1
### [3fff791e4](https://github.com/argoproj/argo-workflows/commit/3fff791e4ef5b7e1de82ccb36cae327e8eb726f6) build!: Automatically add manifests to `v*` tags (#5880)
The manifests in the repository on the tag will no longer contain the image tag, instead they will contain `:latest`.
* You must not get your manifests from the Git repository, you must get them from the release notes.
* You must not use the `stable` tag. This is defunct, and will be removed in v3.1.
### [ab361667a](https://github.com/argoproj/argo-workflows/commit/ab361667a) feat(controller) Emissary executor. (#4925)
The Emissary executor is not a breaking change per-se, but it is brand new so we would not recommend you use it by default yet. Instead, we recommend you test it out on some workflows using [a `workflow-controller-configmap` configuration](https://github.com/argoproj/argo-workflows/blob/v3.1.0/docs/workflow-controller-configmap.yaml#L125).
```yaml
# Specifies the executor to use.
#
# You can use this to:
# * Tailor your executor based on your preference for security or performance.
# * Test out an executor without committing yourself to use it for every workflow.
#
# To find out which executor was actually use, see the `wait` container logs.
#
# The list is in order of precedence; the first matching executor is used.
# This has precedence over `containerRuntimeExecutor`.
containerRuntimeExecutors: |
- name: emissary
selector:
matchLabels:
workflows.argoproj.io/container-runtime-executor: emissary
```
### [be63efe89](https://github.com/argoproj/argo-workflows/commit/e6fa41a) feat(controller): Expression template tags. Resolves #4548 & #1293 (#5115)
This PR introduced a new expression syntax known as "expression tag template". A user has reported that this does not always play nicely with the `when` condition syntax (govaluate).
This can be resolved using a single quote in your when expression:
```yaml
when: "'{{inputs.parameters.should-print}}' != '2021-01-01'"
```
[Learn more](https://github.com/argoproj/argo-workflows/issues/6314)
## Upgrading to v3.0
### [defbd600e](https://github.com/argoproj/argo-workflows/commit/defbd600e37258c8cdf30f64d4da9f4563eb7901) fix: Default ARGO_SECURE=true. Fixes #5607 (#5626)
The server now starts with TLS enabled by default if a key is available. The original behaviour can be configured with `--secure=false`.
If you have an ingress, you may need to add the appropriate annotations (varies by ingress):
```yaml
alb.ingress.kubernetes.io/backend-protocol: HTTPS
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
```
### [01d310235](https://github.com/argoproj/argo-workflows/commit/01d310235a9349e6d552c758964cc2250a9e9616) chore(server)!: Required authentication by default. Resolves #5206 (#5211)
To login to the user interface, you must provide a login token. The original behaviour can be configured with `--auth-mode=server`.
### [f31e0c6f9](https://github.com/argoproj/argo-workflows/commit/f31e0c6f92ec5e383d2f32f57a822a518cbbef86) chore!: Remove deprecated fields (#5035)
Some fields that were deprecated in early 2020 have been removed.
| Field | Action |
|---|---|
| template.template and template.templateRef | The workflow spec must be changed to use steps or DAG, otherwise the workflow will error. |
| spec.ttlSecondsAfterFinished | change to `spec.ttlStrategy.secondsAfterCompletion`, otherwise the workflow will not be garbage collected as expected. |
To find impacted workflows:
```bash
kubectl get wf --all-namespaces -o yaml | grep templateRef
kubectl get wf --all-namespaces -o yaml | grep ttlSecondsAfterFinished
```
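A sketch of the TTL migration itself (the value is illustrative):
```yaml
spec:
  # before (removed): ttlSecondsAfterFinished: 86400
  ttlStrategy:
    secondsAfterCompletion: 86400   # garbage collect 24h after completion
```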
### [c8215f972](https://github.com/argoproj/argo-workflows/commit/c8215f972502435e6bc5b232823ecb6df919f952) feat(controller)!: Key-only artifacts. Fixes #3184 (#4618)
This change is not breaking per se, but many users do not appear to be aware of [artifact repository ref](artifact-repository-ref.md), so check your usage of that feature if you have problems.
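For reference, a minimal sketch of a workflow spec using an artifact repository ref instead of inline repository details (the ConfigMap and key names are illustrative):
```yaml
spec:
  artifactRepositoryRef:
    configMap: artifact-repositories   # ConfigMap holding repository definitions
    key: my-s3-repository              # which entry in that ConfigMap to use
```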
The logging levels available have been reduced to `debug`, `info`, `warn` and `error`.
Other levels, although previously undocumented, will be mapped to their nearest equivalent if you use them.


@ -113,9 +113,9 @@ Extract data from JSON:
jsonpath(inputs.parameters.json, '$.some.path')
```
#### Sprig Functions
You can also use [Sprig functions](http://masterminds.github.io/sprig/):
You can also use a curated set of [Sprig functions](http://masterminds.github.io/sprig/):
Trim a string:
```text
sprig.trim(inputs.parameters['my-string-param'])
@ -126,26 +126,6 @@ sprig.trim(inputs.parameters['my-string-param'])
For example, if `int` is used on an invalid value, it returns `0`.
Please review the Sprig documentation to understand which functions raise errors and which do not.
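For instance, a sketch of that zero-value behaviour (assumes a parameter whose value is not numeric):
```text
sprig.int(inputs.parameters['not-a-number'])
```
This evaluates to `0` rather than raising an error.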
Available Sprig functions include:
* Random/crypto: `randAlpha`, `randAlphaNum`, `randAscii`, `randNumeric`, `randBytes`, `randInt`, `uuidv4`
* Regex helpers: `regexFindAll`, `regexSplit`, `regexReplaceAll`, `regexReplaceAllLiteral`, `regexQuoteMeta`
* Text formatting: `wrap`, `wrapWith`, `nospace`, `title`, `untitle`, `plural`, `initials`, `snakecase`, `camelcase`, `kebabcase`, `swapcase`, `shuffle`, `trunc`
* Dictionary and reflection helpers: `dict`, `set`, `deepCopy`, `merge`, `mergeOverwrite`, `mergeRecursive`, `dig`, `pluck`, `typeIsLike`, `kindIs`, `typeOf`
* Path/URL helpers: `base`, `dir`, `ext`, `clean`, `urlParse`, `urlJoin`
* SemVer helpers: `semver`, `semverCompare`
* Flow control: `fail`, `required`
* Encoding/YAML: `b32enc`, `b32dec`, `toYaml`, `fromYaml`
For complete documentation on these functions, refer to the [Sprig documentation](http://masterminds.github.io/sprig/).
## Reference
### All Templates


@ -155,6 +155,79 @@ spec:
Consider parameterizing your S3 keys by `{{workflow.uid}}`, etc. (as shown in the example above) if there is a possibility of concurrent Workflows of the same spec. This avoids a scenario in which the artifact from one Workflow is deleted while the same S3 key is being generated for a different Workflow.
In the case of having a whole directory as the S3 key, pay attention to the key value. Here are two examples:
- (A) When changing the default archive option to `none`, it is important that the key ends with a "/". Otherwise, the directory will be created in S3 but the GC pod won't be able to remove it.
- (B) When keeping the default archive option (`.tgz`), it is important that the key does NOT end with "/". Otherwise, Argo will fail to create the archive file.
Example (A) without packaging as `.tgz`
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: artifact-gc-dir-key-
spec:
entrypoint: main
artifactGC:
strategy: OnWorkflowDeletion # default Strategy set here applies to all Artifacts by default
templates:
- name: main
container:
image: argoproj/argosay:v2
command:
- sh
- -c
args:
- |
mkdir /tmp/tmp-directory
echo "can throw this away" > /tmp/tmp-directory/delete-this.txt
echo "and this too" > /tmp/tmp-directory/delete-this-too.txt
outputs:
artifacts:
- name: temporary-artifact
path: /tmp/tmp-directory
archive:
# Avoid having a tgz file.
none: {}
s3:
key: "{{workflow.name}}/directory/" # IMPORTANT! ends with "/"
```
Example (B) with packaging as `.tgz`
```yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: artifact-gc-dir-key-
spec:
entrypoint: main
artifactGC:
strategy: OnWorkflowDeletion # default Strategy set here applies to all Artifacts by default
templates:
- name: main
container:
image: argoproj/argosay:v2
command:
- sh
- -c
args:
- |
mkdir /tmp/tmp-directory
echo "can throw this away" > /tmp/tmp-directory/delete-this.txt
echo "and this too" > /tmp/tmp-directory/delete-this-too.txt
outputs:
artifacts:
- name: temporary-artifact
path: /tmp/tmp-directory
archive:
tar:
compressionLevel: 1
s3:
key: "{{workflow.name}}/archive.tgz" # IMPORTANT! must not end with "/"
```
### Service Accounts and Annotations
Does your S3 bucket require you to run with a special Service Account or IAM Role annotation? You can either use the same ones you use for creating artifacts or create new ones that are specific to the deletion permission. Generally, users will have a single Service Account or IAM Role applied to all artifacts of the Workflow, but you can also customize at the artifact level if you need that:
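A hedged sketch of what this might look like (the Service Account names and IAM annotation value are illustrative; the fields follow the `artifactGC` spec used in the examples above):
```yaml
spec:
  artifactGC:
    strategy: OnWorkflowDeletion
    serviceAccountName: my-gc-sa          # applies to all artifacts by default
    podMetadata:
      annotations:
        eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/my-iam-role
  templates:
    - name: main
      outputs:
        artifacts:
          - name: temporary-artifact
            path: /tmp/tmp-directory
            artifactGC:
              serviceAccountName: artifact-specific-sa   # artifact-level override
```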


@ -1,8 +1,8 @@
# Workflow Controller Config Map
# Workflow Controller ConfigMap
## Introduction
The Workflow Controller Config Map is used to set controller-wide settings.
The Workflow Controller ConfigMap is used to set controller-wide settings.
For a detailed example, please see [`workflow-controller-configmap.yaml`](./workflow-controller-configmap.yaml).
@ -60,3 +60,272 @@ data: # "config: |" key is optional in 2.7+!
name: my-s3-credentials
key: secretKey
```
## Config
Config contains the root of the configuration settings for the workflow controller, as read from the ConfigMap called `workflow-controller-configmap`.
### Fields
| Field Name | Field Type | Description |
|----------------------------|-------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `NodeEvents` | [`NodeEvents`](#nodeevents) | NodeEvents configures how node events are emitted |
| `WorkflowEvents` | [`WorkflowEvents`](#workflowevents) | WorkflowEvents configures how workflow events are emitted |
| `Executor` | [`apiv1.Container`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#container-v1-core) | Executor holds container customizations for the executor to use when running pods |
| `MainContainer` | [`apiv1.Container`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#container-v1-core) | MainContainer holds container customization for the main container |
| `KubeConfig` | [`KubeConfig`](#kubeconfig) | KubeConfig specifies a kube config file for the wait & init containers |
| `ArtifactRepository` | [`wfv1.ArtifactRepository`](fields.md#artifactrepository) | ArtifactRepository contains the default location of an artifact repository for container artifacts |
| `Namespace` | `string` | Namespace is a label selector filter to limit the controller's watch to a specific namespace |
| `InstanceID` | `string` | InstanceID is a label selector to limit the controller's watch to a specific instance. It contains an arbitrary value that is carried forward into its pod labels, under the key workflows.argoproj.io/controller-instanceid, for the purposes of workflow segregation. This enables a controller to only receive workflow and pod events that it is interested about, in order to support multiple controllers in a single cluster, and ultimately allows the controller itself to be bundled as part of a higher level application. If omitted, the controller watches workflows and pods that *are not* labeled with an instance id. |
| `MetricsConfig` | [`MetricsConfig`](#metricsconfig) | MetricsConfig specifies configuration for metrics emission. Metrics are enabled and emitted on localhost:9090/metrics by default. |
| `TelemetryConfig` | [`MetricsConfig`](#metricsconfig) | TelemetryConfig specifies configuration for telemetry emission. Telemetry is enabled and emitted in the same endpoint as metrics by default, but can be overridden using this config. |
| `Parallelism` | `int` | Parallelism limits the max total parallel workflows that can execute at the same time |
| `NamespaceParallelism` | `int` | NamespaceParallelism limits the max workflows that can execute at the same time in a namespace |
| `ResourceRateLimit` | [`ResourceRateLimit`](#resourceratelimit) | ResourceRateLimit limits the rate at which pods are created |
| `Persistence` | [`PersistConfig`](#persistconfig) | Persistence contains the workflow persistence DB configuration |
| `Links` | `Array<`[`Link`](fields.md#link)`>` | Links to related apps. |
| `Columns` | `Array<`[`Column`](fields.md#column)`>` | Columns are custom columns that will be exposed in the Workflow List View. |
| `WorkflowDefaults` | [`wfv1.Workflow`](fields.md#workflow) | WorkflowDefaults are values that will apply to all Workflows from this controller, unless overridden on the Workflow-level |
| `PodSpecLogStrategy` | [`PodSpecLogStrategy`](#podspeclogstrategy) | PodSpecLogStrategy enables logging of the pod spec in the controller log |
| `PodGCGracePeriodSeconds` | `int64` | PodGCGracePeriodSeconds specifies the duration in seconds before a terminating pod is forcefully killed. Value must be non-negative integer. A zero value indicates that the pod will be forcefully terminated immediately. Defaults to the Kubernetes default of 30 seconds. |
| `PodGCDeleteDelayDuration` | [`metav1.Duration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#duration-v1-meta) | PodGCDeleteDelayDuration specifies the duration before pods in the GC queue get deleted. Value must be non-negative. A zero value indicates that the pods will be deleted immediately. Defaults to 5 seconds. |
| `WorkflowRestrictions` | [`WorkflowRestrictions`](#workflowrestrictions) | WorkflowRestrictions restricts the controller to executing Workflows that meet certain restrictions |
| `InitialDelay` | [`metav1.Duration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#duration-v1-meta) | Adds configurable initial delay (for K8S clusters with mutating webhooks) to prevent workflow getting modified by MWC. |
| `Images` | `Map<string,`[`Image`](#image)`>` | The command/args for each image, needed when the command is not specified and the emissary executor is used. https://argo-workflows.readthedocs.io/en/latest/workflow-executors/#emissary-emissary |
| `RetentionPolicy` | [`RetentionPolicy`](#retentionpolicy) | Workflow retention by number of workflows |
| `NavColor` | `string` | NavColor is a UI navigation bar background color |
| `SSO` | [`SSOConfig`](#ssoconfig) | SSO holds settings for single sign-on |
| `Synchronization` | [`SyncConfig`](#syncconfig) | Synchronization via databases config |
## NodeEvents
NodeEvents configures how node events are emitted
### Fields
| Field Name | Field Type | Description |
|-------------|------------|----------------------------------------------------------------------------------------------------------------------|
| `Enabled` | `bool` | Enabled controls whether node events are emitted |
| `SendAsPod` | `bool` | SendAsPod emits events as if from the Pod instead of the Workflow with annotations linking the event to the Workflow |
## WorkflowEvents
WorkflowEvents configures how workflow events are emitted
### Fields
| Field Name | Field Type | Description |
|------------|------------|------------------------------------------------------|
| `Enabled` | `bool` | Enabled controls whether workflow events are emitted |
## KubeConfig
KubeConfig is used by the wait & init sidecar containers to communicate with the Kubernetes API server via an out-of-cluster method; it is used when the workflow controller runs in a different cluster from the workflow workloads
### Fields
| Field Name | Field Type | Description |
|--------------|------------|------------------------------------------------------------------------------------|
| `SecretName` | `string` | SecretName of the kubeconfig secret; may not be empty if kubeConfig is specified |
| `SecretKey` | `string` | SecretKey of the kubeconfig in the secret; may not be empty if kubeConfig is specified |
| `VolumeName` | `string` | VolumeName of the kubeconfig, defaults to 'kubeconfig' |
| `MountPath` | `string` | MountPath of the kubeconfig secret, defaults to '/kube/config' |
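In the ConfigMap, this might look like the following sketch (the secret name and key are illustrative):
```yaml
kubeConfig: |
  secretName: kubeconfig-secret   # secret containing the kubeconfig
  secretKey: kubeconfig           # key within that secret
```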
## MetricsConfig
MetricsConfig defines a config for a metrics server
### Fields
| Field Name | Field Type | Description |
|-----------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `Enabled` | `bool` | Enabled controls metric emission. Default is true, set "enabled: false" to turn off |
| `DisableLegacy` | `bool` | DisableLegacy turns off legacy metrics. DEPRECATED: legacy metrics have been removed and this field is ignored |
| `MetricsTTL` | `TTL` (time.Duration forces you to specify in millis, and does not support days see https://stackoverflow.com/questions/48050945/how-to-unmarshal-json-into-durations (underlying type: time.Duration)) | MetricsTTL sets how often custom metrics are cleared from memory |
| `Path` | `string` | Path is the path where metrics are emitted. Must start with a "/". Default is "/metrics" |
| `Port` | `int` | Port is the port where metrics are emitted. Default is "9090" |
| `IgnoreErrors` | `bool` | IgnoreErrors is a flag that instructs prometheus to ignore metric emission errors |
| `Secure` | `bool` | Secure is a flag that starts the metrics server using TLS, defaults to true |
| `Modifiers` | `Map<string,`[`MetricModifier`](#metricmodifier)`>` | Modifiers configure metrics by name |
| `Temporality` | `MetricsTemporality` (MetricsTemporality defines the temporality of OpenTelemetry metrics (underlying type: string)) | Temporality of the OpenTelemetry metrics. Enum of Cumulative or Delta, defaulting to Cumulative. No effect on Prometheus metrics, which are always Cumulative. |
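As a sketch, these settings are written into the ConfigMap using their camel-case YAML keys (the values shown are the documented defaults):
```yaml
metricsConfig: |
  enabled: true
  path: /metrics
  port: 9090
```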
## MetricModifier
MetricModifier is a modifier for an individual named metric to change its behaviour
### Fields
| Field Name | Field Type | Description |
|----------------------|------------------|--------------------------------------------------------------------------------------------------------------|
| `Disabled` | `bool` | Disabled disables the emission of this metric completely |
| `DisabledAttributes` | `Array<string>` | DisabledAttributes lists the attributes (labels) to remove from this metric to save on cardinality |
| `HistogramBuckets` | `Array<float64>` | HistogramBuckets allows configuring the buckets used in a histogram. Has no effect on non-histogram metrics |
## ResourceRateLimit
### Fields
| Field Name | Field Type | Description |
|------------|------------|--------------------------------------------------------|
| `Limit` | `float64` | Limit is the maximum rate at which pods can be created |
| `Burst` | `int` | Burst allows temporary spikes above the limit |
## PersistConfig
PersistConfig contains workflow persistence configuration
### Fields
| Field Name | Field Type | Description |
|------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------|
| `PostgreSQL` | [`PostgreSQLConfig`](#postgresqlconfig) | PostgreSQL configuration for PostgreSQL database, don't use MySQL at the same time |
| `MySQL` | [`MySQLConfig`](#mysqlconfig) | MySQL configuration for MySQL database, don't use PostgreSQL at the same time |
| `ConnectionPool` | [`ConnectionPool`](#connectionpool) | Pooled connection settings for all types of database connections |
| `NodeStatusOffload` | `bool` | NodeStatusOffload saves node status only to the persistence DB to avoid the 1MB limit in etcd |
| `Archive` | `bool` | Archive completed Workflows to persistence so you can access them after they're removed from Kubernetes |
| `ArchiveLabelSelector` | [`metav1.LabelSelector`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#labelselector-v1-meta) | ArchiveLabelSelector holds LabelSelector to determine which Workflows to archive |
| `ArchiveTTL` | `TTL` (time.Duration forces you to specify in millis, and does not support days see https://stackoverflow.com/questions/48050945/how-to-unmarshal-json-into-durations (underlying type: time.Duration)) | ArchiveTTL is the time to live for archived Workflows |
| `ClusterName` | `string` | ClusterName is the name of the cluster (or technically controller) for the persistence database |
| `SkipMigration` | `bool` | SkipMigration skips database migration even if needed |
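For illustration, a hedged sketch of a PostgreSQL persistence block in the ConfigMap (the host, table, and secret names are placeholders):
```yaml
persistence: |
  archive: true
  archiveTTL: 7d
  postgresql:
    host: postgres
    port: 5432
    database: postgres
    tableName: argo_workflows
    userNameSecret:
      name: argo-postgres-config
      key: username
    passwordSecret:
      name: argo-postgres-config
      key: password
```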
## PostgreSQLConfig
PostgreSQLConfig contains PostgreSQL-specific database configuration
### Fields
| Field Name | Field Type | Description |
|------------------|-----------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------|
| `Host` | `string` | Host is the database server hostname |
| `Port` | `int` | Port is the database server port |
| `Database` | `string` | Database is the name of the database to connect to |
| `TableName` | `string` | TableName is the name of the table to use, must be set |
| `UsernameSecret` | [`apiv1.SecretKeySelector`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core) | UsernameSecret references a secret containing the database username |
| `PasswordSecret` | [`apiv1.SecretKeySelector`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core) | PasswordSecret references a secret containing the database password |
| `SSL` | `bool` | SSL enables SSL connection to the database |
| `SSLMode` | `string` | SSLMode specifies the SSL mode (disable, require, verify-ca, verify-full) |
## MySQLConfig
MySQLConfig contains MySQL-specific database configuration
### Fields
| Field Name | Field Type | Description |
|------------------|-----------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------|
| `Host` | `string` | Host is the database server hostname |
| `Port` | `int` | Port is the database server port |
| `Database` | `string` | Database is the name of the database to connect to |
| `TableName` | `string` | TableName is the name of the table to use, must be set |
| `UsernameSecret` | [`apiv1.SecretKeySelector`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core) | UsernameSecret references a secret containing the database username |
| `PasswordSecret` | [`apiv1.SecretKeySelector`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core) | PasswordSecret references a secret containing the database password |
| `Options` | `Map<string,string>` | Options contains additional MySQL connection options |
## ConnectionPool
ConnectionPool contains database connection pool settings
### Fields
| Field Name | Field Type | Description |
|-------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------|
| `MaxIdleConns` | `int` | MaxIdleConns sets the maximum number of idle connections in the pool |
| `MaxOpenConns` | `int` | MaxOpenConns sets the maximum number of open connections to the database |
| `ConnMaxLifetime` | `TTL` (time.Duration forces you to specify in millis, and does not support days see https://stackoverflow.com/questions/48050945/how-to-unmarshal-json-into-durations (underlying type: time.Duration)) | ConnMaxLifetime sets the maximum amount of time a connection may be reused |
## PodSpecLogStrategy
PodSpecLogStrategy contains the configuration for logging the pod spec in the controller log for debugging purposes
### Fields
| Field Name | Field Type | Description |
|-------------|------------|-------------|
| `FailedPod` | `bool` | - |
| `AllPods` | `bool` | - |
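A sketch of enabling pod spec logging for failed pods only (assuming the YAML key follows the usual camel-case convention):
```yaml
podSpecLogStrategy: |
  failedPod: true
  allPods: false
```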
## WorkflowRestrictions
WorkflowRestrictions contains restrictions for workflow execution
### Fields
| Field Name | Field Type | Description |
|-----------------------|----------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------|
| `TemplateReferencing` | `TemplateReferencing` (TemplateReferencing defines how templates can be referenced in workflows (underlying type: string)) | TemplateReferencing controls how templates can be referenced |
## Image
Image contains command and entrypoint configuration for container images
### Fields
| Field Name | Field Type | Description |
|--------------|-----------------|-----------------------------------------------|
| `Entrypoint` | `Array<string>` | Entrypoint overrides the container entrypoint |
| `Cmd` | `Array<string>` | Cmd overrides the container command |
## RetentionPolicy
Workflow retention by number of workflows
### Fields
| Field Name | Field Type | Description |
|-------------|------------|----------------------------------------------------------|
| `Completed` | `int` | Completed is the number of completed Workflows to retain |
| `Failed` | `int` | Failed is the number of failed Workflows to retain |
| `Errored` | `int` | Errored is the number of errored Workflows to retain |
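For example (the retention counts are illustrative):
```yaml
retentionPolicy: |
  completed: 10
  failed: 3
  errored: 3
```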
## SSOConfig
SSOConfig contains single sign-on configuration settings
### Fields
| Field Name | Field Type | Description |
|------------------------|-----------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------|
| `Issuer` | `string` | Issuer is the OIDC issuer URL |
| `IssuerAlias` | `string` | IssuerAlias is an optional alias for the issuer |
| `ClientID` | [`apiv1.SecretKeySelector`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core) | ClientID references a secret containing the OIDC client ID |
| `ClientSecret` | [`apiv1.SecretKeySelector`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core) | ClientSecret references a secret containing the OIDC client secret |
| `RedirectURL` | `string` | RedirectURL is the OIDC redirect URL |
| `RBAC` | [`RBACConfig`](#rbacconfig) | RBAC contains role-based access control settings |
| `Scopes` | `Array<string>` | Additional scopes requested (on top of "openid") |
| `SessionExpiry` | [`metav1.Duration`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#duration-v1-meta) | SessionExpiry specifies how long user sessions last |
| `CustomGroupClaimName` | `string` | CustomGroupClaimName will override the groups claim name |
| `UserInfoPath` | `string` | UserInfoPath specifies the path to user info endpoint |
| `InsecureSkipVerify` | `bool` | InsecureSkipVerify skips TLS certificate verification |
| `FilterGroupsRegex` | `Array<string>` | FilterGroupsRegex filters groups using regular expressions |
## RBACConfig
RBACConfig contains role-based access control configuration
### Fields
| Field Name | Field Type | Description |
|------------|------------|------------------------------------------|
| `Enabled` | `bool` | Enabled controls whether RBAC is enabled |
## SyncConfig
SyncConfig contains synchronization configuration for database locks (semaphores and mutexes)
### Fields
| Field Name | Field Type | Description |
|------------------------------|-----------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `PostgreSQL` | [`PostgreSQLConfig`](#postgresqlconfig) | PostgreSQL configuration for PostgreSQL database, don't use MySQL at the same time |
| `MySQL` | [`MySQLConfig`](#mysqlconfig) | MySQL configuration for MySQL database, don't use PostgreSQL at the same time |
| `ConnectionPool` | [`ConnectionPool`](#connectionpool) | Pooled connection settings for all types of database connections |
| `ControllerName` | `string` | ControllerName sets a unique name for this controller instance |
| `SkipMigration` | `bool` | SkipMigration skips database migration even if needed |
| `LimitTableName` | `string` | LimitTableName customizes the table name for semaphore limits, if not set, the default value is "sync_limit" |
| `StateTableName` | `string` | StateTableName customizes the table name for current lock state, if not set, the default value is "sync_state" |
| `ControllerTableName` | `string` | ControllerTableName customizes the table name for controller heartbeats, if not set, the default value is "sync_controller" |
| `LockTableName` | `string` | LockTableName customizes the table name for lock coordination data, if not set, the default value is "sync_lock" |
| `PollSeconds` | `int` | PollSeconds specifies how often to check for lock changes, if not set, the default value is 5 seconds |
| `HeartbeatSeconds` | `int` | HeartbeatSeconds specifies how often to update controller heartbeat, if not set, the default value is 60 seconds |
| `InactiveControllerSeconds` | `int` | InactiveControllerSeconds specifies when to consider a controller dead, if not set, the default value is 300 seconds |
| `SemaphoreLimitCacheSeconds` | `int64` | SemaphoreLimitCacheSeconds specifies the duration in seconds before the workflow controller will re-fetch the limit for a semaphore from its associated data source. Defaults to 0 seconds (re-fetch every time the semaphore is checked). |


@ -7,6 +7,8 @@ import (
"os/user"
"path/filepath"
"github.com/argoproj/argo-workflows/v3/util/logging"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@ -55,6 +57,7 @@ func main() {
// submit the hello world workflow
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
createdWf, err := wfClient.Create(ctx, &helloWorldWorkflow, metav1.CreateOptions{})
checkErr(err)
fmt.Printf("Workflow %s submitted\n", createdWf.Name)
@ -62,7 +65,7 @@ func main() {
// wait for the workflow to complete
fieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", createdWf.Name))
watchIf, err := wfClient.Watch(ctx, metav1.ListOptions{FieldSelector: fieldSelector.String(), TimeoutSeconds: ptr.To(int64(180))})
errors.CheckError(err)
errors.CheckError(ctx, err)
defer watchIf.Stop()
for next := range watchIf.ResultChan() {
wf, ok := next.Object.(*wfv1.Workflow)

go.mod

@ -1,6 +1,6 @@
module github.com/argoproj/argo-workflows/v3
go 1.24.2
go 1.24.4
require (
cloud.google.com/go/storage v1.55.0
@ -26,6 +26,7 @@ require (
github.com/go-git/go-git/v5 v5.16.0
github.com/go-jose/go-jose/v3 v3.0.4
github.com/go-openapi/jsonreference v0.21.0
github.com/go-playground/webhooks/v6 v6.4.0
github.com/go-sql-driver/mysql v1.9.2
github.com/gogo/protobuf v1.3.2
github.com/golang/protobuf v1.5.4
@ -46,7 +47,6 @@ require (
github.com/robfig/cron/v3 v3.0.1
github.com/sethvargo/go-limiter v1.0.0
github.com/sirupsen/logrus v1.9.3
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966
github.com/soheilhy/cmux v0.1.5
github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6
@ -66,6 +66,7 @@ require (
go.opentelemetry.io/otel/metric v1.36.0
go.opentelemetry.io/otel/sdk v1.36.0
go.opentelemetry.io/otel/sdk/metric v1.36.0
go.uber.org/mock v0.5.2
golang.org/x/crypto v0.38.0
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b
golang.org/x/oauth2 v0.30.0
@ -88,6 +89,8 @@ require (
zombiezen.com/go/sqlite v1.4.2
)
require github.com/pkg/errors v0.9.1 // indirect
require (
cel.dev/expr v0.24.0 // indirect
cloud.google.com/go/auth v0.16.1 // indirect
@ -125,7 +128,7 @@ require (
github.com/go-jose/go-jose/v4 v4.1.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
github.com/gobwas/glob v0.2.4-0.20181002190808-e7a84e9525fe // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
@ -161,7 +164,7 @@ require (
github.com/olekukonko/tablewriter v1.0.7 // indirect
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
github.com/pjbgf/sha1cd v0.3.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_model v0.6.2 // indirect
@ -263,7 +266,6 @@ require (
github.com/go-openapi/swag v0.23.1 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/mock v1.6.0
github.com/google/btree v1.1.3 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20250521000321-4eb8c4d84ef0 // indirect
@ -308,7 +310,6 @@ require (
github.com/opencontainers/image-spec v1.1.1 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/rs/xid v1.6.0 // indirect

go.sum

@ -326,6 +326,8 @@ github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
github.com/go-playground/webhooks/v6 v6.4.0 h1:KLa6y7bD19N48rxJDHM0DpE3T4grV7GxMy1b/aHMWPY=
github.com/go-playground/webhooks/v6 v6.4.0/go.mod h1:5lBxopx+cAJiBI4+kyRbuHrEi+hYRDdRHuRR4Ya5Ums=
github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU=
github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
@ -333,8 +335,8 @@ github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8Wd
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gobwas/glob v0.2.4-0.20181002190808-e7a84e9525fe h1:zn8tqiUbec4wR94o7Qj3LZCAT6uGobhEgnDRg6isG5U=
github.com/gobwas/glob v0.2.4-0.20181002190808-e7a84e9525fe/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
@ -353,8 +355,6 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
@ -737,8 +737,6 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=
github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
@ -833,7 +831,6 @@ github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcm
github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
@ -880,6 +877,8 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
@ -933,7 +932,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
@ -958,7 +956,6 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
@ -982,7 +979,6 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
@ -1015,9 +1011,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -1088,7 +1082,6 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=


@ -11,13 +11,14 @@ import (
"github.com/upper/db/v4"
mysqladp "github.com/upper/db/v4/adapter/mysql"
postgresqladp "github.com/upper/db/v4/adapter/postgresql"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/uuid"
"github.com/argoproj/argo-workflows/v3/persist/sqldb"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/instanceid"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
var session db.Session
@ -47,7 +48,9 @@ func NewMigrateCommand() *cobra.Command {
Use: "migrate",
Short: "Force DB migration for given cluster/table",
RunE: func(cmd *cobra.Command, args []string) error {
return sqldb.Migrate(context.Background(), session, cluster, table)
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
return sqldb.Migrate(ctx, session, cluster, table)
},
}
migrationCmd.Flags().StringVar(&cluster, "cluster", "default", "Cluster name")
@ -73,7 +76,9 @@ func NewFakeDataCommand() *cobra.Command {
wf := randomizeWorkflow(wfTmpl, namespaces)
cluster := clusters[rand.Intn(len(clusters))]
wfArchive := sqldb.NewWorkflowArchive(session, cluster, "", instanceIDService)
if err := wfArchive.ArchiveWorkflow(wf); err != nil {
ctx := context.Background()
ctx = logging.WithLogger(ctx, logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
if err := wfArchive.ArchiveWorkflow(ctx, wf); err != nil {
return err
}
}

hack/docs/configdoc.go (new file)

@ -0,0 +1,469 @@
package main
import (
_ "embed"
"fmt"
"go/ast"
"go/parser"
"go/token"
"log"
"os"
"path/filepath"
"regexp"
"strings"
md "github.com/nao1215/markdown"
)
// Set this to the root of the repo
const configDir = "config"
//go:embed workflow-controller-configmap.md
var header string
const outputFile = "docs/workflow-controller-configmap.md"
// visited tracks which types we've already documented
var visited = map[string]bool{}
// typeSpecs maps type name to its *ast.TypeSpec
var typeSpecs = map[string]*ast.TypeSpec{}
// typeComments maps type name to its documentation comment
var typeComments = map[string]*ast.CommentGroup{}
// documentedTypes tracks which types we will document in this file
var documentedTypes = map[string]bool{}
func generateConfigDocs() {
fset := token.NewFileSet()
// Parse all .go files in the config directory
err := filepath.Walk(configDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Skip test files and non-go files
if !strings.HasSuffix(path, ".go") || strings.HasSuffix(path, "_test.go") {
return nil
}
f, err := parser.ParseFile(fset, path, nil, parser.ParseComments)
if err != nil {
log.Printf("failed to parse %s: %v", path, err)
return nil // Continue with other files
}
// Collect all type specs in this file
for _, decl := range f.Decls {
gd, ok := decl.(*ast.GenDecl)
if !ok {
continue
}
for _, spec := range gd.Specs {
ts, ok := spec.(*ast.TypeSpec)
if ok {
typeSpecs[ts.Name.Name] = ts
// Associate the GenDecl's comment with this type
if gd.Doc != nil {
typeComments[ts.Name.Name] = gd.Doc
}
documentedTypes[ts.Name.Name] = true
}
}
}
return nil
})
if err != nil {
log.Fatalf("failed to walk config directory: %v", err)
}
f, err := os.Create(outputFile)
if err != nil {
log.Fatalf("failed to create output file: %v", err)
}
defer f.Close()
// Create markdown builder starting with the header
builder := md.NewMarkdown(f).PlainText(header)
if ts, ok := typeSpecs["Config"]; ok {
writeStructDoc(builder, ts, "Config")
} else {
log.Fatalf("Config struct not found in %s directory", configDir)
}
err = builder.Build()
if err != nil {
log.Fatalf("failed to build markdown: %v", err)
}
fmt.Printf("Wrote %s\n", outputFile)
}
func writeStructDoc(builder *md.Markdown, ts *ast.TypeSpec, name string) {
if visited[name] {
return
}
visited[name] = true
// Check if this is actually a struct type first
st, ok := ts.Type.(*ast.StructType)
if !ok {
// This is not a struct (e.g., type alias like TTL or MetricsTemporality)
// Don't create a section for it
return
}
builder.H2(name)
builder.PlainText("")
// Check for comment from GenDecl first, then TypeSpec
if comment, ok := typeComments[name]; ok && comment != nil {
builder.PlainText(normalizeComment(comment.Text()))
builder.PlainText("")
} else if ts.Doc != nil {
builder.PlainText(normalizeComment(ts.Doc.Text()))
builder.PlainText("")
}
builder.H3("Fields")
builder.PlainText("")
// Prepare table data
headers := []string{"Field Name", "Field Type", "Description"}
var rows [][]string
// Collect types to recurse into after processing all fields
var typesToRecurse []string
for _, field := range st.Fields.List {
// Handle embedded fields by inlining their fields
if len(field.Names) == 0 {
// Embedded field - inline its fields if it's a struct
typeStr := exprString(field.Type)
baseType := baseTypeName(typeStr)
if embeddedTS, ok := typeSpecs[baseType]; ok {
if embeddedST, ok := embeddedTS.Type.(*ast.StructType); ok {
// Recursively process embedded struct fields
for _, embeddedField := range embeddedST.Fields.List {
processField(embeddedField, &rows, &typesToRecurse)
}
} else {
// Not a struct, treat as regular field with type name
processFieldAsRegular(field, typeStr, &rows, &typesToRecurse)
}
} else {
// Type not found in our specs, treat as regular field
processFieldAsRegular(field, typeStr, &rows, &typesToRecurse)
}
} else {
// Named field - process normally
processField(field, &rows, &typesToRecurse)
}
}
// Add the table to the builder
builder.CustomTable(md.TableSet{
Header: headers,
Rows: rows,
}, md.TableOptions{AutoWrapText: false})
// Now recurse into all the collected types
for _, baseType := range typesToRecurse {
if tts, ok := typeSpecs[baseType]; ok && !visited[baseType] {
writeStructDoc(builder, tts, baseType)
}
}
}
// processField handles processing a single field (named or embedded struct field)
func processField(field *ast.Field, rows *[][]string, typesToRecurse *[]string) {
// Get field name(s)
var names []string
if len(field.Names) == 0 {
// This shouldn't happen in processField, but handle gracefully
names = []string{exprString(field.Type)}
} else {
for _, n := range field.Names {
names = append(names, n.Name)
}
}
typeStr := exprString(field.Type)
processFieldWithNames(field, typeStr, names, rows, typesToRecurse)
}
// processFieldAsRegular processes an embedded field as if it were a regular named field
func processFieldAsRegular(field *ast.Field, typeStr string, rows *[][]string, typesToRecurse *[]string) {
// Use the type name as the field name for embedded non-structs
baseType := baseTypeName(typeStr)
names := []string{baseType}
processFieldWithNames(field, typeStr, names, rows, typesToRecurse)
}
// processFieldWithNames handles the common logic for processing fields with given names
func processFieldWithNames(field *ast.Field, typeStr string, names []string, rows *[][]string, typesToRecurse *[]string) {
linkedTypeStr := createTypeLink(typeStr)
// Get documentation with fallback
doc := "-"
if field.Doc != nil {
doc = normalizeComment(field.Doc.Text())
} else if field.Comment != nil {
doc = normalizeComment(field.Comment.Text())
}
if doc == "" {
doc = "-"
}
// Add table rows for all field names
for _, fname := range names {
*rows = append(*rows, []string{
fmt.Sprintf("`%s`", fname),
linkedTypeStr,
doc,
})
}
// Collect types to recurse into later
if baseType := baseTypeName(typeStr); typeSpecs[baseType] != nil && !visited[baseType] {
addToRecursionList(typesToRecurse, baseType)
}
}
// addToRecursionList adds a type to the recursion list only if it's not already present
func addToRecursionList(typesToRecurse *[]string, baseType string) {
for _, existing := range *typesToRecurse {
if existing == baseType {
return // Already in the list, skip
}
}
*typesToRecurse = append(*typesToRecurse, baseType)
}
// createTypeLink creates markdown links for type references
func createTypeLink(typeStr string) string {
// Remove leading asterisks for display purposes
displayType := strings.TrimPrefix(typeStr, "*")
baseType := baseTypeName(typeStr)
// Check if this is a type alias that we should document inline
if inlineDoc := getInlineTypeDoc(baseType); inlineDoc != "" {
return fmt.Sprintf("`%s` (%s)", displayType, inlineDoc)
}
// For complex types (maps, slices), we need to handle them specially
if strings.Contains(displayType, "[") || strings.Contains(displayType, "map") {
return createComplexTypeLink(displayType, baseType)
}
// Simple types - create appropriate links
return createSimpleTypeLink(displayType, baseType)
}
// createTypeLinkWithSpacing creates a type link and returns both the link and whether it has a link
func createTypeLinkWithSpacing(baseType string) (string, bool) {
cleanBaseType := strings.TrimPrefix(baseType, "*")
if documentedTypes[baseType] {
return fmt.Sprintf("[`%s`](#%s)", cleanBaseType, strings.ToLower(baseType)), true
}
if strings.HasPrefix(baseType, "wfv1.") {
wfType := strings.TrimPrefix(baseType, "wfv1.")
return fmt.Sprintf("[`%s`](fields.md#%s)", wfType, strings.ToLower(wfType)), true
}
if strings.HasPrefix(baseType, "apiv1.") {
typeName := strings.TrimPrefix(baseType, "apiv1.")
anchor := strings.ToLower(typeName)
return fmt.Sprintf("[`%s`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#%s-v1-core)", typeName, anchor), true
}
if strings.HasPrefix(baseType, "metav1.") {
typeName := strings.TrimPrefix(baseType, "metav1.")
anchor := strings.ToLower(typeName)
return fmt.Sprintf("[`%s`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#%s-v1-meta)", typeName, anchor), true
}
// For simple types like string, int, etc., just use the type name
return cleanBaseType, false
}
// createComplexTypeLink handles complex types like maps, slices, and pointers
func createComplexTypeLink(displayType, baseType string) string {
// Handle map types - convert to Map< key , value > format like fields.go
if mapPattern := regexp.MustCompile(`^map\[([^\]]+)\](.+)$`); strings.HasPrefix(displayType, "map[") {
if matches := mapPattern.FindStringSubmatch(displayType); len(matches) == 3 {
keyType, valueType := matches[1], matches[2]
valueBaseType := baseTypeName(valueType)
valueLink, hasLink := createTypeLinkWithSpacing(valueBaseType)
// Format with or without spaces based on whether we have links
if hasLink {
return fmt.Sprintf("`Map<%s,`%s`>`", keyType, valueLink)
} else {
return fmt.Sprintf("`Map<%s,%s>`", keyType, valueLink)
}
}
}
// Handle slice types - convert to Array<> format like fields.go
for _, prefix := range []string{"*[]", "[]"} {
if strings.HasPrefix(displayType, prefix) {
elementType := displayType[len(prefix):]
elementBaseType := baseTypeName(elementType)
elementLink, hasLink := createTypeLinkWithSpacing(elementBaseType)
// Format with or without spaces based on whether we have links
if hasLink {
return fmt.Sprintf("`Array<`%s`>`", elementLink)
} else {
return fmt.Sprintf("`Array<%s>`", elementLink)
}
}
}
return fmt.Sprintf("`%s`", displayType)
}
// createSimpleTypeLink creates links for simple (non-complex) types
func createSimpleTypeLink(displayType, baseType string) string {
cleanBaseType := strings.TrimPrefix(baseType, "*")
// Check if this is a type we document in this file
if documentedTypes[baseType] {
return fmt.Sprintf("[`%s`](#%s)", cleanBaseType, strings.ToLower(baseType))
}
// Define external type mappings
externalTypes := map[string]string{
"wfv1.": "fields.md#%s",
"apiv1.": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#%s-v1-core",
"metav1.": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#%s-v1-meta",
}
// Check for external type prefixes
for prefix, urlTemplate := range externalTypes {
if strings.HasPrefix(displayType, prefix) || strings.HasPrefix(baseType, prefix) {
typeName := strings.TrimPrefix(baseType, prefix)
anchor := strings.ToLower(typeName)
return fmt.Sprintf("[`%s`]("+urlTemplate+")", cleanBaseType, anchor)
}
}
// For other types, just add backticks
return fmt.Sprintf("`%s`", displayType)
}
// exprString returns the string representation of an ast.Expr
func exprString(expr ast.Expr) string {
switch e := expr.(type) {
case *ast.Ident:
return e.Name
case *ast.StarExpr:
return "*" + exprString(e.X)
case *ast.SelectorExpr:
return exprString(e.X) + "." + e.Sel.Name
case *ast.ArrayType:
return "[]" + exprString(e.Elt)
case *ast.MapType:
return fmt.Sprintf("map[%s]%s", exprString(e.Key), exprString(e.Value))
case *ast.InterfaceType:
return "interface{}"
case *ast.StructType:
return "struct" // anonymous struct
default:
return fmt.Sprintf("%T", expr)
}
}
// baseTypeName strips pointer, slice, and map to get the base type name
func baseTypeName(typeStr string) string {
t := typeStr
for {
switch {
case strings.HasPrefix(t, "*"):
t = t[1:]
case strings.HasPrefix(t, "[]"):
t = t[2:]
case strings.HasPrefix(t, "map["):
if closeIdx := strings.Index(t, "]"); closeIdx != -1 {
t = t[closeIdx+1:]
} else {
return t
}
default:
return t
}
}
}
// normalizeComment converts multi-line comments into single-line descriptions
func normalizeComment(comment string) string {
if comment == "" {
return ""
}
// Replace newlines with spaces
result := strings.ReplaceAll(comment, "\n", " ")
// Remove // comment markers, but be careful with URLs
// Split on spaces, process each word, then rejoin
words := strings.Fields(result)
var cleanWords []string
for _, word := range words {
// Skip removing // if it's part of a URL
if strings.Contains(word, "://") {
cleanWords = append(cleanWords, word)
} else {
// Remove // from the beginning of words (comment markers)
cleanWord := strings.TrimPrefix(word, "//")
if cleanWord != "" {
cleanWords = append(cleanWords, cleanWord)
}
}
}
return strings.Join(cleanWords, " ")
}
// getInlineTypeDoc returns inline documentation for type aliases from AST
func getInlineTypeDoc(typeName string) string {
ts, exists := typeSpecs[typeName]
if !exists {
return ""
}
// Only handle type aliases, not structs
if _, isStruct := ts.Type.(*ast.StructType); isStruct {
return ""
}
// Get comment from GenDecl or TypeSpec
var comment string
if commentGroup, ok := typeComments[typeName]; ok && commentGroup != nil {
comment = normalizeComment(commentGroup.Text())
} else if ts.Doc != nil {
comment = normalizeComment(ts.Doc.Text())
}
// Get underlying type
underlyingType := exprString(ts.Type)
// Format result based on available information
if comment != "" && underlyingType != "" {
return fmt.Sprintf("%s (underlying type: %s)", comment, underlyingType)
}
if underlyingType != "" {
return fmt.Sprintf("(underlying type: %s)", underlyingType)
}
return comment
}


@ -203,6 +203,7 @@ func NewDocGeneratorContext() *DocGeneratorContext {
queue: []string{
"io.argoproj.workflow.v1alpha1.Workflow", "io.argoproj.workflow.v1alpha1.CronWorkflow",
"io.argoproj.workflow.v1alpha1.WorkflowTemplate", "io.argoproj.workflow.v1alpha1.WorkflowEventBinding",
"io.argoproj.workflow.v1alpha1.InfoResponse",
},
external: []string{},
index: make(map[string]Set),
@ -239,7 +240,7 @@ FILES:
for _, m := range matches {
kind := m[1]
switch kind {
case "ClusterWorkflowTemplate", "CronWorkflow", "Workflow", "WorkflowTemplate", "WorkflowEventBinding":
case "ClusterWorkflowTemplate", "CronWorkflow", "Workflow", "WorkflowTemplate", "WorkflowEventBinding", "InfoResponse":
default:
continue FILES
}


@ -8,6 +8,8 @@ func main() {
switch os.Args[1] {
case "cli":
generateCLIDocs()
case "configdoc":
generateConfigDocs()
case "diagram":
generateDiagram()
case "fields":


@ -0,0 +1,62 @@
# Workflow Controller ConfigMap
## Introduction
The Workflow Controller ConfigMap is used to set controller-wide settings.
For a detailed example, please see [`workflow-controller-configmap.yaml`](./workflow-controller-configmap.yaml).
## Alternate Structure
In all versions, the configuration may be under a `config: |` key:
```yaml
# This file describes the config settings available in the workflow controller configmap
apiVersion: v1
kind: ConfigMap
metadata:
name: workflow-controller-configmap
data:
config: |
instanceID: my-ci-controller
artifactRepository:
archiveLogs: true
s3:
endpoint: s3.amazonaws.com
bucket: my-bucket
region: us-west-2
insecure: false
accessKeySecret:
name: my-s3-credentials
key: accessKey
secretKeySecret:
name: my-s3-credentials
key: secretKey
```
In version 2.7+, the `config: |` key is optional. However, if the `config: |` key is not used, all nested maps under top level
keys should be strings. This makes it easier to generate the map with some configuration management tools like Kustomize.
```yaml
# This file describes the config settings available in the workflow controller configmap
apiVersion: v1
kind: ConfigMap
metadata:
name: workflow-controller-configmap
data: # "config: |" key is optional in 2.7+!
instanceID: my-ci-controller
artifactRepository: | # However, all nested maps must be strings
archiveLogs: true
s3:
endpoint: s3.amazonaws.com
bucket: my-bucket
region: us-west-2
insecure: false
accessKeySecret:
name: my-s3-credentials
key: accessKey
secretKeySecret:
name: my-s3-credentials
key: secretKey
```
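For illustration, a minimal Kustomize sketch (not part of this change; file names are hypothetical) that generates the map in the string-valued form above, keeping the nested `artifactRepository` block in a separate file:
```yaml
# kustomization.yaml - a sketch; assumes artifact-repository.yaml holds the
# nested YAML block, so every generated value is a plain string as required.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: argo
configMapGenerator:
  - name: workflow-controller-configmap
    options:
      disableNameSuffixHash: true # keep the name the controller expects
    literals:
      - instanceID=my-ci-controller
    files:
      - artifactRepository=artifact-repository.yaml
```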

View File

@@ -0,0 +1,30 @@
package main
import (
"strings"
)
// Valid component names.
// This is the order they will appear in the new feature file.
// Try not to add too many components; this list exists only to categorize entries in the feature docs.
var validComponents = []string{
"General",
"UI",
"CLI",
"CronWorkflows",
"Telemetry",
"Build and Development",
}
func isValidComponent(component string) bool {
for _, c := range validComponents {
if c == component {
return true
}
}
return false
}
func listValidComponents() string {
return strings.Join(validComponents, ", ")
}

201
hack/featuregen/contents.go Normal file
View File

@@ -0,0 +1,201 @@
package main
import (
"fmt"
"regexp"
"strings"
"time"
)
// timeNow is a variable that can be replaced in tests to mock time.Now()
var timeNow = time.Now
type feature struct {
Component string
Description string
Author string
Issues []string
Details string
}
// Metadata field definitions
var (
metadataFields = []string{"Component", "Issues", "Description", "Author"}
)
func getMetadataPattern(field string) *regexp.Regexp {
return regexp.MustCompile(field + `:\s*(.*?)(?:\n|$)`)
}
func validateHeaders(content string) (bool, string) {
re := regexp.MustCompile(`(?m)^(#+)\s+(.+)$`)
for _, match := range re.FindAllStringSubmatch(content, -1) {
if len(match) != 3 {
continue
}
level := len(match[1])
// Require level 3 or higher for all headers
if level < 3 {
return false, fmt.Sprintf("Header '%s' must be at least level 3 (###)", match[0])
}
}
return true, ""
}
func validateMetadataOrder(content string) (bool, string) {
lines := strings.Split(content, "\n")
// Find the first line that's not a metadata field
metadataEnd := 0
for i := 0; i < len(lines); i++ {
line := lines[i]
isMetadata := false
for _, field := range metadataFields {
if strings.HasPrefix(line, field+":") {
isMetadata = true
break
}
}
if !isMetadata && line != "" {
metadataEnd = i
break
}
}
// Check if any metadata fields appear after this point
for i := metadataEnd; i < len(lines); i++ {
line := lines[i]
for _, field := range metadataFields {
if strings.HasPrefix(line, field+":") {
return false, fmt.Sprintf("Metadata field '%s' must appear before any other content", line)
}
}
}
return true, ""
}
func parseContent(source string, content string) (bool, feature, error) {
// Check required sections
isValid := true
for _, field := range metadataFields {
if !strings.Contains(content, field+":") {
fmt.Printf("Error: Missing required section '%s:' in %s\n", field, source)
isValid = false
}
}
if headerValid, errMsg := validateHeaders(content); !headerValid {
fmt.Printf("Error: %s in %s\n", errMsg, source)
isValid = false
}
if orderValid, errMsg := validateMetadataOrder(content); !orderValid {
fmt.Printf("Error: %s in %s\n", errMsg, source)
isValid = false
}
// Extract metadata fields
component := ""
if matches := getMetadataPattern("Component").FindStringSubmatch(content); len(matches) > 1 {
component = strings.TrimSpace(matches[1])
if !isValidComponent(component) {
fmt.Printf("Error: Invalid component '%s' in %s. Valid components are: %s. Add more in hack/featuregen/components.go\n", component, source, listValidComponents())
isValid = false
}
}
issuesSection := ""
if matches := getMetadataPattern("Issues").FindStringSubmatch(content); len(matches) > 1 {
issuesSection = matches[1]
}
issues := regexp.MustCompile(`(\d+)`).FindAllStringSubmatch(issuesSection, -1)
issueNumbers := make([]string, len(issues))
for i, issue := range issues {
issueNumbers[i] = issue[1]
}
if len(issueNumbers) == 0 {
fmt.Printf("Error: At least one issue number must be present in %s\n", source)
isValid = false
}
description := ""
if matches := getMetadataPattern("Description").FindStringSubmatch(content); len(matches) > 1 {
description = strings.TrimSpace(matches[1])
}
author := ""
if matches := getMetadataPattern("Author").FindStringSubmatch(content); len(matches) > 1 {
author = strings.TrimSpace(matches[1])
}
// Extract details (everything after metadata)
details := ""
pattern := `(?s)(?:` + strings.Join(metadataFields, ":|") + `:).*?\n\n(.*)`
if detailsMatch := regexp.MustCompile(pattern).FindStringSubmatch(content); len(detailsMatch) > 1 {
details = strings.TrimSpace(detailsMatch[1])
}
return isValid, feature{
Component: component,
Description: description,
Author: author,
Issues: issueNumbers,
Details: details,
}, nil
}
func format(version string, features []feature) string {
var output strings.Builder
// Format new content
versionHeader := "Unreleased"
if version != "" {
versionHeader = version
}
currentDate := timeNow().Format("2006-01-02")
output.WriteString(fmt.Sprintf("# New features in %s (%s)\n\nThis is a concise list of new features.\n\n", versionHeader, currentDate))
// Group features by component
featuresByComponent := make(map[string][]feature)
for _, f := range features {
featuresByComponent[f.Component] = append(featuresByComponent[f.Component], f)
}
// Output features in order of validComponents
for _, component := range validComponents {
componentFeatures := featuresByComponent[component]
if len(componentFeatures) == 0 {
continue
}
output.WriteString(fmt.Sprintf("## %s\n\n", component))
for _, feature := range componentFeatures {
issuesStr := ""
if len(feature.Issues) > 0 {
issues := make([]string, len(feature.Issues))
for i, issue := range feature.Issues {
issues[i] = fmt.Sprintf("[#%s](https://github.com/argoproj/argo-workflows/issues/%s)", issue, issue)
}
issuesStr = fmt.Sprintf("(%s)", strings.Join(issues, ", "))
}
output.WriteString(fmt.Sprintf("- %s by %s %s\n", feature.Description, feature.Author, issuesStr))
if feature.Details != "" {
for _, line := range strings.Split(feature.Details, "\n") {
if line != "" {
output.WriteString(fmt.Sprintf(" %s\n", line))
}
}
}
output.WriteString("\n")
}
}
return output.String()
}
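Taken together, a minimal sketch of the parse-then-format flow (hypothetical document; same package as the helpers above):
```go
// exampleRender feeds a hypothetical pending-feature document through
// parseContent and renders the grouped markdown with format.
func exampleRender() {
	doc := "Component: CLI\n" +
		"Issues: 1234\n" +
		"Description: Add a --verbose flag\n" +
		"Author: [Jane Doe](https://github.com/janedoe)\n" +
		"\n" +
		"Prints extra progress information."
	valid, f, err := parseContent("example.md", doc)
	if err == nil && valid {
		// Prints "# New features in v9.9.9 (<date>)" with a "## CLI" section.
		fmt.Print(format("v9.9.9", []feature{f}))
	}
}
```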

View File

@@ -0,0 +1,353 @@
package main
import (
"testing"
"time"
)
func TestParseContent(t *testing.T) {
tests := []struct {
name string
source string
content string
wantValid bool
want feature
}{
{
name: "Valid content with issues",
source: "test.md",
content: `Component: UI
Issues: 1234 5678
Description: Test Description
Author: [Alan Clucas](https://github.com/Joibel)
Test Details
- Point 1
- Point 2`,
wantValid: true,
want: feature{
Component: "UI",
Description: "Test Description",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{"1234", "5678"},
Details: "Test Details\n- Point 1\n- Point 2",
},
},
{
name: "Invalid metadata order",
source: "invalid.md",
content: `Component: UI
Issues: 1234
Description: Test Description
Author: [Alan Clucas](https://github.com/Joibel)
Some content here
Component: Invalid second component
Issues: 5678
Description: Invalid second description
Author: [Another Author](https://github.com/another)`,
wantValid: false,
want: feature{
Component: "UI",
Description: "Test Description",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{"1234"},
Details: "Some content here\n\nComponent: Invalid second component\nIssues: 5678\nDescription: Invalid second description\nAuthor: [Another Author](https://github.com/another)",
},
},
{
name: "Valid content with deep headers",
source: "test.md",
content: `Component: UI
Issues: 1234
Description: Test Description
Author: [Alan Clucas](https://github.com/Joibel)
Test Details
### Level 3 Header
#### Level 4 Header
##### Level 5 Header`,
wantValid: true,
want: feature{
Component: "UI",
Description: "Test Description",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{"1234"},
Details: "Test Details\n\n### Level 3 Header\n#### Level 4 Header\n##### Level 5 Header",
},
},
{
name: "Valid content with issue in description",
source: "test.md",
content: `Component: CronWorkflows
Issues: 1234
Description: Test Description with issue 4567
Author: [Alan Clucas](https://github.com/Joibel)
Test Details
- Point 1
- Point 2`,
wantValid: true,
want: feature{
Component: "CronWorkflows",
Description: "Test Description with issue 4567",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{"1234"},
Details: "Test Details\n- Point 1\n- Point 2",
},
},
{
name: "Missing Issues section",
source: "invalid.md",
content: `Component: UI
Description: Test Description
Author: [Alan Clucas](https://github.com/Joibel)
Test Details`,
wantValid: false,
want: feature{
Component: "UI",
Description: "Test Description",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{},
Details: "Test Details",
},
},
{
name: "Empty content",
source: "empty.md",
content: ``,
wantValid: false,
want: feature{
Component: "",
Description: "",
Author: "",
Issues: []string{},
Details: "",
},
},
{
name: "Invalid component",
source: "invalid-component.md",
content: `Component: InvalidComponent
Issues: 1234
Description: Test Description
Author: [Alan Clucas](https://github.com/Joibel)
Test Details`,
wantValid: false,
want: feature{
Component: "InvalidComponent",
Description: "Test Description",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{"1234"},
Details: "Test Details",
},
},
{
name: "No issues present",
source: "no-issues.md",
content: `Component: UI
Issues:
Description: Test Description
Author: [Alan Clucas](https://github.com/Joibel)
Test Details`,
wantValid: false,
want: feature{
Component: "UI",
Description: "Test Description",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{},
Details: "Test Details",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
valid, got, err := parseContent(tt.source, tt.content)
if err != nil {
t.Errorf("parseContent() error = %v", err)
return
}
if valid != tt.wantValid {
t.Errorf("parseContent() valid = %v, want %v", valid, tt.wantValid)
}
if got.Component != tt.want.Component {
t.Errorf("parseContent() Component = %v, want %v", got.Component, tt.want.Component)
}
if got.Description != tt.want.Description {
t.Errorf("parseContent() Description = %v, want %v", got.Description, tt.want.Description)
}
if got.Author != tt.want.Author {
t.Errorf("parseContent() Author = %v, want %v", got.Author, tt.want.Author)
}
if len(got.Issues) != len(tt.want.Issues) {
t.Errorf("parseContent() Issues length = %v, want %v", len(got.Issues), len(tt.want.Issues))
} else {
for i, issue := range got.Issues {
if issue != tt.want.Issues[i] {
t.Errorf("parseContent() Issues[%d] = %v, want %v", i, issue, tt.want.Issues[i])
}
}
}
if got.Details != tt.want.Details {
t.Errorf("parseContent() Details = %v, want %v", got.Details, tt.want.Details)
}
})
}
}
func firstDiff(a, b string) int {
for i := 0; i < len(a) && i < len(b); i++ {
if a[i] != b[i] {
return i
}
}
return len(a)
}
func TestFormat(t *testing.T) {
// Mock time.Now() for consistent testing
now := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
timeNow = func() time.Time { return now }
defer func() { timeNow = time.Now }()
tests := []struct {
name string
version string
features []feature
want string
}{
{
name: "Unreleased features",
version: "",
features: []feature{
{
Component: "UI",
Description: "Test Description",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{"1234"},
Details: "Test Details",
},
},
want: `# New features in Unreleased (2024-01-01)
This is a concise list of new features.
## UI
- Test Description by [Alan Clucas](https://github.com/Joibel) ([#1234](https://github.com/argoproj/argo-workflows/issues/1234))
Test Details
`,
},
{
name: "Released features",
version: "v1.0.0",
features: []feature{
{
Component: "CLI",
Description: "Test Description",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{"1234", "5678"},
Details: "Test Details\n- Point 1\n- Point 2",
},
},
want: `# New features in v1.0.0 (2024-01-01)
This is a concise list of new features.
## CLI
- Test Description by [Alan Clucas](https://github.com/Joibel) ([#1234](https://github.com/argoproj/argo-workflows/issues/1234), [#5678](https://github.com/argoproj/argo-workflows/issues/5678))
Test Details
- Point 1
- Point 2
`,
},
{
name: "Multiple features in different components",
version: "v1.0.0",
features: []feature{
{
Component: "General",
Description: "Description 1",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{"1234"},
Details: "",
},
{
Component: "UI",
Description: "Description 2",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{"5678"},
Details: "Details 2",
},
},
want: `# New features in v1.0.0 (2024-01-01)
This is a concise list of new features.
## General
- Description 1 by [Alan Clucas](https://github.com/Joibel) ([#1234](https://github.com/argoproj/argo-workflows/issues/1234))
## UI
- Description 2 by [Alan Clucas](https://github.com/Joibel) ([#5678](https://github.com/argoproj/argo-workflows/issues/5678))
Details 2
`,
},
{
name: "Features in same component",
version: "v1.2.0",
features: []feature{
{
Component: "CLI",
Description: "First CLI feature",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{"1234"},
Details: "",
},
{
Component: "CLI",
Description: "Second CLI feature",
Author: "[Alan Clucas](https://github.com/Joibel)",
Issues: []string{"5678"},
Details: "",
},
},
want: `# New features in v1.2.0 (2024-01-01)
This is a concise list of new features.
## CLI
- First CLI feature by [Alan Clucas](https://github.com/Joibel) ([#1234](https://github.com/argoproj/argo-workflows/issues/1234))
- Second CLI feature by [Alan Clucas](https://github.com/Joibel) ([#5678](https://github.com/argoproj/argo-workflows/issues/5678))
`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := format(tt.version, tt.features)
if got != tt.want {
t.Errorf("format() = %v, want %v", got, tt.want)
t.Logf("Diff:\nGot:\n%s\nWant:\n%s", got, tt.want)
t.Logf("Got length: %d, Want length: %d", len(got), len(tt.want))
t.Logf("First difference at position %d: got '%c' (%d), want '%c' (%d)",
firstDiff(got, tt.want), got[firstDiff(got, tt.want)], got[firstDiff(got, tt.want)],
tt.want[firstDiff(got, tt.want)], tt.want[firstDiff(got, tt.want)])
}
})
}
}

271
hack/featuregen/main.go Normal file
View File

@@ -0,0 +1,271 @@
package main
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/spf13/cobra"
)
const (
featuresDir = ".features"
docsOutput = "docs/new-features.md"
templateFile = ".features/TEMPLATE.md"
pendingDir = ".features/pending"
)
var (
rootCmd = &cobra.Command{
Use: "featuregen",
Short: "Feature documentation management tool for Argo Workflows",
Long: "A tool for managing feature documentation in Argo Workflows.\nProvides functionality to create, validate, preview, and update feature documentation.",
}
newCmd = &cobra.Command{
Use: "new",
Short: "Create a new feature document",
RunE: func(cmd *cobra.Command, args []string) error {
filename, _ := cmd.Flags().GetString("filename")
return newFeature(filename)
},
}
validateCmd = &cobra.Command{
Use: "validate",
Short: "Validate all feature documents",
RunE: func(cmd *cobra.Command, args []string) error {
return validateFeatures()
},
}
updateCmd = &cobra.Command{
Use: "update",
Short: "Update the feature documentation",
RunE: func(cmd *cobra.Command, args []string) error {
dry, _ := cmd.Flags().GetBool("dry")
version, _ := cmd.Flags().GetString("version")
final, _ := cmd.Flags().GetBool("final")
return updateFeatures(dry, version, final)
},
}
)
func init() {
rootCmd.AddCommand(newCmd, validateCmd, updateCmd)
newCmd.Flags().String("filename", "", "Specify the filename for the new feature")
updateCmd.Flags().Bool("dry", false, "Preview changes without applying them")
updateCmd.Flags().String("version", "", "Specify the version for the update")
updateCmd.Flags().Bool("final", false, "Move features from pending to released")
}
func ensureDirs() error {
for _, dir := range []string{featuresDir, pendingDir} {
if err := os.MkdirAll(dir, 0755); err != nil {
return fmt.Errorf("failed to create directory %s: %v", dir, err)
}
}
return nil
}
func getGitBranch() string {
cmd := exec.Command("git", "branch", "--show-current")
if output, err := cmd.Output(); err == nil {
if branch := strings.TrimSpace(string(output)); branch != "" {
return branch
}
}
return "new-feature"
}
func newFeature(filename string) error {
if err := ensureDirs(); err != nil {
return err
}
if filename == "" {
filename = getGitBranch()
}
if !strings.HasSuffix(filename, ".md") {
filename += ".md"
}
targetPath := filepath.Join(pendingDir, filename)
if _, err := os.Stat(targetPath); err == nil {
return fmt.Errorf("file %s already exists", targetPath)
}
if err := copyFile(templateFile, targetPath); err != nil {
return fmt.Errorf("failed to create feature document: %v", err)
}
fmt.Printf("Created new feature document at %s\n", targetPath)
fmt.Println("Please edit this file to describe your feature")
return nil
}
func copyFile(src, dst string) error {
input, err := os.ReadFile(src)
if err != nil {
return err
}
return os.WriteFile(dst, input, 0644)
}
func loadFeatureFile(filePath string) (bool, feature, error) {
content, err := os.ReadFile(filePath)
if err != nil {
return false, feature{}, err
}
return parseContent(filePath, string(content))
}
func getFeatureFiles(dir string) ([]string, error) {
if _, err := os.Stat(dir); os.IsNotExist(err) {
return nil, nil
}
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var featureFiles []string
for _, file := range files {
if !file.IsDir() && strings.HasSuffix(file.Name(), ".md") {
featureFiles = append(featureFiles, file.Name())
}
}
return featureFiles, nil
}
func loadPendingFeatures() (bool, []feature, error) {
featureFiles, err := getFeatureFiles(pendingDir)
if err != nil {
return false, nil, err
}
if len(featureFiles) == 0 {
fmt.Println("No pending features to load")
return true, nil, nil
}
allValid := true
var featuresData []feature
for _, file := range featureFiles {
filePath := filepath.Join(pendingDir, file)
isValid, featureData, err := loadFeatureFile(filePath)
if err != nil {
return false, nil, err
}
if !isValid {
allValid = false
fmt.Printf("Invalid feature document: %s\n", filePath)
} else {
featuresData = append(featuresData, featureData)
}
}
if allValid {
fmt.Printf("All %d feature documents are valid\n", len(featureFiles))
}
return allValid, featuresData, nil
}
func validateFeatures() error {
allValid, _, err := loadPendingFeatures()
if err != nil {
return err
}
if !allValid {
return fmt.Errorf("validation failed")
}
return nil
}
func moveFeaturesToReleasedDir(version string, featureFiles []string) error {
releasedDir := filepath.Join(featuresDir, "released", version)
if err := os.MkdirAll(releasedDir, 0755); err != nil {
return err
}
for _, file := range featureFiles {
source := filepath.Join(pendingDir, file)
target := filepath.Join(releasedDir, file)
if err := copyFile(source, target); err != nil {
return err
}
if err := os.Remove(source); err != nil {
return err
}
}
fmt.Printf("Updated features documentation with version %s\n", version)
fmt.Printf("Moved %d feature files to %s\n", len(featureFiles), releasedDir)
return nil
}
func updateFeatures(dryRun bool, version string, final bool) error {
allValid, features, err := loadPendingFeatures()
if err != nil {
return err
}
if !allValid {
return fmt.Errorf("validation failed, not updating features")
}
if len(features) == 0 {
return nil
}
outputContent := format(version, features)
fmt.Printf("Preview of changes with %d features:\n", len(features))
fmt.Println("===================")
fmt.Println(outputContent)
if !dryRun {
if err := os.MkdirAll(filepath.Dir(docsOutput), 0755); err != nil {
return err
}
if err := os.WriteFile(docsOutput, []byte(outputContent), 0644); err != nil {
return err
}
if final && version != "" {
featureFiles, err := getFeatureFiles(pendingDir)
if err != nil {
return err
}
if err := moveFeaturesToReleasedDir(version, featureFiles); err != nil {
return err
}
} else {
versionStr := ""
if version != "" {
versionStr = fmt.Sprintf(" with version %s", version)
}
fmt.Printf("Updated features documentation%s\n", versionStr)
if !final {
fmt.Println("Features remain in pending directory (--final not specified)")
}
}
}
return nil
}
func main() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
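As a sketch of the flow these commands drive (same package; assumes it is run from the repository root so the `.features` paths resolve):
```go
// exampleFlow sketches the command sequence: scaffold a pending doc,
// validate everything under .features/pending, then preview the output.
func exampleFlow() error {
	if err := newFeature("my-feature"); err != nil { // creates .features/pending/my-feature.md
		return err
	}
	if err := validateFeatures(); err != nil {
		return err
	}
	// dry=true only prints a preview; with dry=false the tool writes
	// docs/new-features.md, and final=true plus a version also moves the
	// pending docs into .features/released/<version>/.
	return updateFeatures(true, "v3.7.0", false)
}
```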

View File

@@ -0,0 +1,274 @@
package main
import (
"os"
"path/filepath"
"testing"
)
var testContents = map[string]string{
// Missing required Issues
"invalid.md": `Component: UI
Description: Test Description
Author: [Alan Clucas](https://github.com/Joibel)
Test Details`,
// Valid feature file
"valid.md": `Component: UI
Issues: #5678
Description: Valid Description
Author: [Alan Clucas](https://github.com/Joibel)
Valid Details`,
}
var templateMarkdown = `Component: <!-- component name here, see hack/featuregen/components.go for the list -->
Issues: <!-- Space separated list of issues 1234 5678 -->
Description: <!-- A brief one line description of the feature -->
Author: <!-- Author name and GitHub link in markdown format e.g. [Alan Clucas](https://github.com/Joibel) -->
<!--
Optional
Additional details about the feature written in markdown, aimed at users who want to learn about it
* Explain when you would want to use the feature
* Include code examples if applicable
* Provide working examples
* Format code using back-ticks
* Use Kubernetes style
* One sentence per line of markdown
-->`
func setupTestEnv(t *testing.T, files map[string]string) (string, func()) {
// Create a temporary directory for testing
tmpDir, err := os.MkdirTemp("", "featuregen-test-*")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
// Change to temp directory
oldDir, err := os.Getwd()
if err != nil {
t.Fatalf("Failed to get current directory: %v", err)
}
if err := os.Chdir(tmpDir); err != nil {
t.Fatalf("Failed to change directory: %v", err)
}
// Create template file
if err := os.MkdirAll(filepath.Dir(templateFile), 0755); err != nil {
t.Fatalf("Failed to create template directory: %v", err)
}
if err := os.WriteFile(templateFile, []byte(templateMarkdown), 0644); err != nil {
t.Fatalf("Failed to create template file: %v", err)
}
// Create pending files
for filename, content := range files {
pendingFile := filepath.Join(pendingDir, filename)
if err := os.MkdirAll(filepath.Dir(pendingFile), 0755); err != nil {
t.Fatalf("Failed to create template directory: %v", err)
}
if err := os.WriteFile(pendingFile, []byte(content), 0644); err != nil {
t.Fatalf("Failed to create template file: %v", err)
}
}
// Return cleanup function
return tmpDir, func() {
os.Chdir(oldDir)
os.RemoveAll(tmpDir)
}
}
func TestNewFeature(t *testing.T) {
_, cleanup := setupTestEnv(t, map[string]string{})
defer cleanup()
tests := []struct {
name string
filename string
wantErr bool
}{
{
name: "Create with custom filename",
filename: "test-feature",
wantErr: false,
},
{
name: "Create with empty filename",
filename: "",
wantErr: false,
},
{
name: "Create with invalid characters",
filename: "test/feature@123",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := newFeature(tt.filename)
if (err != nil) != tt.wantErr {
t.Errorf("newFeature() error = %v, wantErr %v", err, tt.wantErr)
}
if tt.wantErr {
return
}
// Check if file was created
expectedPath := filepath.Join(pendingDir, tt.filename+".md")
if tt.filename == "" {
expectedPath = filepath.Join(pendingDir, "new-feature.md")
}
if _, err := os.Stat(expectedPath); os.IsNotExist(err) {
t.Errorf("Feature file was not created at %s", expectedPath)
}
})
}
}
func TestLoadFeatureFile(t *testing.T) {
_, cleanup := setupTestEnv(t, testContents)
defer cleanup()
tests := []struct {
name string
filePath string
wantValid bool
wantErr bool
}{
{
name: "Valid feature file",
filePath: "valid.md",
wantValid: true,
wantErr: false,
},
{
name: "Non-existent file",
filePath: "nonexistent.md",
wantValid: false,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
valid, data, err := loadFeatureFile(filepath.Join(pendingDir, tt.filePath))
if (err != nil) != tt.wantErr {
t.Errorf("loadFeatureFile() error = %v, wantErr %v", err, tt.wantErr)
}
if valid != tt.wantValid {
t.Errorf("loadFeatureFile() valid = %v, want %v", valid, tt.wantValid)
}
if !tt.wantErr && valid {
if data.Component != "UI" {
t.Errorf("loadFeatureFile() component = %v, want %v", data.Component, "UI")
}
if len(data.Issues) != 1 || data.Issues[0] != "5678" {
t.Errorf("loadFeatureFile() issues = %v, want %v", data.Issues, []string{"5678"})
}
}
})
}
}
func TestUpdateFeatures(t *testing.T) {
// Create a copy of testContents with only valid.md
validContents := map[string]string{
"valid.md": testContents["valid.md"],
}
_, cleanup := setupTestEnv(t, validContents)
defer cleanup()
tests := []struct {
name string
dryRun bool
version string
final bool
wantErr bool
}{
{
name: "Dry run",
dryRun: true,
version: "",
final: false,
wantErr: false,
},
{
name: "Update with version",
dryRun: false,
version: "v1.0.0",
final: false,
wantErr: false,
},
{
name: "Final release",
dryRun: false,
version: "v1.2.0",
final: true,
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := updateFeatures(tt.dryRun, tt.version, tt.final)
if (err != nil) != tt.wantErr {
t.Errorf("updateFeatures() error = %v, wantErr %v", err, tt.wantErr)
}
if !tt.dryRun {
// Check if output file was created
if _, err := os.Stat(docsOutput); os.IsNotExist(err) {
t.Errorf("Output file was not created at %s", docsOutput)
}
if tt.final && tt.version != "" {
// Check if file was moved to released directory
releasedFile := filepath.Join(featuresDir, "released", tt.version, "valid.md")
if _, err := os.Stat(releasedFile); os.IsNotExist(err) {
t.Errorf("Feature file was not moved to %s", releasedFile)
}
}
}
})
}
}
func TestValidateFeatures(t *testing.T) {
tests := []struct {
name string
files []string
wantErr bool
}{
{
name: "Invalid feature file",
files: []string{"invalid.md"},
wantErr: true,
},
{
name: "Invalid feature file and valid file",
files: []string{"invalid.md", "valid.md"},
wantErr: true,
},
{
name: "Valid feature file",
files: []string{"valid.md"},
wantErr: false,
},
}
for _, tt := range tests {
testFiles := map[string]string{}
for _, file := range tt.files {
testFiles[file] = testContents[file]
}
_, cleanup := setupTestEnv(t, testFiles)
defer cleanup()
t.Run(tt.name, func(t *testing.T) {
err := validateFeatures()
if (err != nil) != tt.wantErr {
t.Errorf("validateFeatures() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}

View File

@@ -26316,6 +26316,8 @@ spec:
waiting:
type: string
type: object
taskResultSynced:
type: boolean
templateName:
type: string
templateRef:

View File

@@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../base/workflow-controller
- ../base/argo-server
- ./workflow-controller-rbac
- ./argo-server-rbac
namespace: argo

View File

@@ -2,9 +2,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../base/workflow-controller
- ../base/argo-server
- ./workflow-controller-rbac
- ./argo-server-rbac
namespace: argo
- ../base/crds/minimal
- ../cluster-install-no-crds

View File

@@ -2,7 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../cluster-install
- ../../cluster-install-no-crds
- minio
- httpbin
- webhooks

View File

@@ -1,6 +1,7 @@
package sqldb
import (
"context"
"fmt"
"strconv"
"strings"
@@ -15,7 +16,7 @@ import (
// ListWorkflowsLabelKeys returns distinct name from argo_archived_workflows_labels table
// SELECT DISTINCT name FROM argo_archived_workflows_labels
func (r *workflowArchive) ListWorkflowsLabelKeys() (*wfv1.LabelKeys, error) {
func (r *workflowArchive) ListWorkflowsLabelKeys(ctx context.Context) (*wfv1.LabelKeys, error) {
var archivedWfLabels []archivedWorkflowLabelRecord
err := r.session.SQL().
@@ -35,7 +36,7 @@ func (r *workflowArchive) ListWorkflowsLabelKeys() (*wfv1.LabelKeys, error) {
// ListWorkflowsLabelValues returns distinct value from argo_archived_workflows_labels table
// SELECT DISTINCT value FROM argo_archived_workflows_labels WHERE name=labelkey
func (r *workflowArchive) ListWorkflowsLabelValues(key string) (*wfv1.LabelValues, error) {
func (r *workflowArchive) ListWorkflowsLabelValues(ctx context.Context, key string) (*wfv1.LabelValues, error) {
var archivedWfLabels []archivedWorkflowLabelRecord
err := r.session.SQL().
Select(db.Raw("DISTINCT value")).

View File

@@ -1,13 +1,14 @@
package sqldb
import (
"context"
"encoding/json"
"fmt"
log "github.com/sirupsen/logrus"
"github.com/upper/db/v4"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
type backfillNodes struct {
@@ -18,8 +19,9 @@ func (s backfillNodes) String() string {
return fmt.Sprintf("backfillNodes{%s}", s.tableName)
}
func (s backfillNodes) Apply(session db.Session) (err error) {
log.Info("Backfill node status")
func (s backfillNodes) Apply(ctx context.Context, session db.Session) (err error) {
logger := logging.GetLoggerFromContext(ctx)
logger.Info(ctx, "Backfill node status")
rs, err := session.SQL().SelectFrom(s.tableName).
Columns("workflow").
Where(db.Cond{"version": nil}).
@@ -53,8 +55,8 @@ func (s backfillNodes) Apply(session db.Session) (err error) {
if err != nil {
return err
}
logCtx := log.WithFields(log.Fields{"name": wf.Name, "namespace": wf.Namespace, "version": version})
logCtx.Info("Back-filling node status")
logCtx := logger.WithFields(logging.Fields{"name": wf.Name, "namespace": wf.Namespace, "version": version})
logCtx.Info(ctx, "Back-filling node status")
res, err := session.SQL().Update(archiveTableName).
Set("version", wf.ResourceVersion).
Set("nodes", marshalled).
@@ -69,7 +71,7 @@ func (s backfillNodes) Apply(session db.Session) (err error) {
return err
}
if rowsAffected != 1 {
logCtx.WithField("rowsAffected", rowsAffected).Warn("Expected exactly one row affected")
logCtx.WithField("rowsAffected", rowsAffected).Warn(ctx, "Expected exactly one row affected")
}
}
return nil
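The context-scoped logging pattern this migration adopts, as a small sketch (hypothetical function; the `logging` package and calls are those shown in the diff above):
```go
package example

import (
	"context"

	"github.com/argoproj/argo-workflows/v3/util/logging"
)

// migrateExample sketches the pattern: the logger is recovered from the
// context, and ctx is passed to every log call.
func migrateExample(ctx context.Context) {
	logger := logging.GetLoggerFromContext(ctx)
	logger.WithFields(logging.Fields{"table": "argo_archived_workflows"}).Info(ctx, "Running migration step")
}
```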

View File

@@ -1,6 +1,7 @@
package sqldb
import (
"context"
"fmt"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
@@ -17,22 +18,22 @@ func (n *explosiveOffloadNodeStatusRepo) IsEnabled() bool {
return false
}
func (n *explosiveOffloadNodeStatusRepo) Save(string, string, wfv1.Nodes) (string, error) {
func (n *explosiveOffloadNodeStatusRepo) Save(context.Context, string, string, wfv1.Nodes) (string, error) {
return "", ErrOffloadNotSupported
}
func (n *explosiveOffloadNodeStatusRepo) Get(string, string) (wfv1.Nodes, error) {
func (n *explosiveOffloadNodeStatusRepo) Get(context.Context, string, string) (wfv1.Nodes, error) {
return nil, ErrOffloadNotSupported
}
func (n *explosiveOffloadNodeStatusRepo) List(string) (map[UUIDVersion]wfv1.Nodes, error) {
func (n *explosiveOffloadNodeStatusRepo) List(context.Context, string) (map[UUIDVersion]wfv1.Nodes, error) {
return nil, ErrOffloadNotSupported
}
func (n *explosiveOffloadNodeStatusRepo) Delete(string, string) error {
func (n *explosiveOffloadNodeStatusRepo) Delete(context.Context, string, string) error {
return ErrOffloadNotSupported
}
func (n *explosiveOffloadNodeStatusRepo) ListOldOffloads(string) (map[string][]string, error) {
func (n *explosiveOffloadNodeStatusRepo) ListOldOffloads(context.Context, string) (map[string][]string, error) {
return nil, ErrOffloadNotSupported
}

View File

@@ -3,6 +3,8 @@
package mocks
import (
"context"
mock "github.com/stretchr/testify/mock"
sqldb "github.com/argoproj/argo-workflows/v3/persist/sqldb"
@@ -15,7 +17,7 @@ type OffloadNodeStatusRepo struct {
}
// Delete provides a mock function with given fields: uid, version
func (_m *OffloadNodeStatusRepo) Delete(uid string, version string) error {
func (_m *OffloadNodeStatusRepo) Delete(_ context.Context, uid string, version string) error {
ret := _m.Called(uid, version)
var r0 error
@@ -29,7 +31,7 @@ func (_m *OffloadNodeStatusRepo) Delete(uid string, version string) error {
}
// Get provides a mock function with given fields: uid, version
func (_m *OffloadNodeStatusRepo) Get(uid string, version string) (v1alpha1.Nodes, error) {
func (_m *OffloadNodeStatusRepo) Get(ctx context.Context, uid string, version string) (v1alpha1.Nodes, error) {
ret := _m.Called(uid, version)
var r0 v1alpha1.Nodes
@@ -66,7 +68,7 @@ func (_m *OffloadNodeStatusRepo) IsEnabled() bool {
}
// List provides a mock function with given fields: namespace
func (_m *OffloadNodeStatusRepo) List(namespace string) (map[sqldb.UUIDVersion]v1alpha1.Nodes, error) {
func (_m *OffloadNodeStatusRepo) List(ctx context.Context, namespace string) (map[sqldb.UUIDVersion]v1alpha1.Nodes, error) {
ret := _m.Called(namespace)
var r0 map[sqldb.UUIDVersion]v1alpha1.Nodes
@@ -89,7 +91,7 @@ func (_m *OffloadNodeStatusRepo) List(namespace string) (map[sqldb.UUIDVersion]v
}
// ListOldOffloads provides a mock function with given fields: namespace
func (_m *OffloadNodeStatusRepo) ListOldOffloads(namespace string) (map[string][]string, error) {
func (_m *OffloadNodeStatusRepo) ListOldOffloads(ctx context.Context, namespace string) (map[string][]string, error) {
ret := _m.Called(namespace)
var r0 map[string][]string
@@ -112,7 +114,7 @@ func (_m *OffloadNodeStatusRepo) ListOldOffloads(namespace string) (map[string][
}
// Save provides a mock function with given fields: uid, namespace, nodes
func (_m *OffloadNodeStatusRepo) Save(uid string, namespace string, nodes v1alpha1.Nodes) (string, error) {
func (_m *OffloadNodeStatusRepo) Save(ctx context.Context, uid string, namespace string, nodes v1alpha1.Nodes) (string, error) {
ret := _m.Called(uid, namespace, nodes)
var r0 string

View File

@@ -3,6 +3,8 @@
package mocks
import (
context "context"
mock "github.com/stretchr/testify/mock"
labels "k8s.io/apimachinery/pkg/labels"
@@ -18,17 +20,17 @@ type WorkflowArchive struct {
mock.Mock
}
// ArchiveWorkflow provides a mock function with given fields: wf
func (_m *WorkflowArchive) ArchiveWorkflow(wf *v1alpha1.Workflow) error {
ret := _m.Called(wf)
// ArchiveWorkflow provides a mock function with given fields: ctx, wf
func (_m *WorkflowArchive) ArchiveWorkflow(ctx context.Context, wf *v1alpha1.Workflow) error {
ret := _m.Called(ctx, wf)
if len(ret) == 0 {
panic("no return value specified for ArchiveWorkflow")
}
var r0 error
if rf, ok := ret.Get(0).(func(*v1alpha1.Workflow) error); ok {
r0 = rf(wf)
if rf, ok := ret.Get(0).(func(context.Context, *v1alpha1.Workflow) error); ok {
r0 = rf(ctx, wf)
} else {
r0 = ret.Error(0)
}
@@ -36,9 +38,9 @@ func (_m *WorkflowArchive) ArchiveWorkflow(wf *v1alpha1.Workflow) error {
return r0
}
// CountWorkflows provides a mock function with given fields: options
func (_m *WorkflowArchive) CountWorkflows(options utils.ListOptions) (int64, error) {
ret := _m.Called(options)
// CountWorkflows provides a mock function with given fields: ctx, options
func (_m *WorkflowArchive) CountWorkflows(ctx context.Context, options utils.ListOptions) (int64, error) {
ret := _m.Called(ctx, options)
if len(ret) == 0 {
panic("no return value specified for CountWorkflows")
@@ -46,17 +48,17 @@ func (_m *WorkflowArchive) CountWorkflows(options utils.ListOptions) (int64, err
var r0 int64
var r1 error
if rf, ok := ret.Get(0).(func(utils.ListOptions) (int64, error)); ok {
return rf(options)
if rf, ok := ret.Get(0).(func(context.Context, utils.ListOptions) (int64, error)); ok {
return rf(ctx, options)
}
if rf, ok := ret.Get(0).(func(utils.ListOptions) int64); ok {
r0 = rf(options)
if rf, ok := ret.Get(0).(func(context.Context, utils.ListOptions) int64); ok {
r0 = rf(ctx, options)
} else {
r0 = ret.Get(0).(int64)
}
if rf, ok := ret.Get(1).(func(utils.ListOptions) error); ok {
r1 = rf(options)
if rf, ok := ret.Get(1).(func(context.Context, utils.ListOptions) error); ok {
r1 = rf(ctx, options)
} else {
r1 = ret.Error(1)
}
@@ -64,17 +66,17 @@ func (_m *WorkflowArchive) CountWorkflows(options utils.ListOptions) (int64, err
return r0, r1
}
// DeleteExpiredWorkflows provides a mock function with given fields: ttl
func (_m *WorkflowArchive) DeleteExpiredWorkflows(ttl time.Duration) error {
ret := _m.Called(ttl)
// DeleteExpiredWorkflows provides a mock function with given fields: ctx, ttl
func (_m *WorkflowArchive) DeleteExpiredWorkflows(ctx context.Context, ttl time.Duration) error {
ret := _m.Called(ctx, ttl)
if len(ret) == 0 {
panic("no return value specified for DeleteExpiredWorkflows")
}
var r0 error
if rf, ok := ret.Get(0).(func(time.Duration) error); ok {
r0 = rf(ttl)
if rf, ok := ret.Get(0).(func(context.Context, time.Duration) error); ok {
r0 = rf(ctx, ttl)
} else {
r0 = ret.Error(0)
}
@@ -82,17 +84,17 @@ func (_m *WorkflowArchive) DeleteExpiredWorkflows(ttl time.Duration) error {
return r0
}
// DeleteWorkflow provides a mock function with given fields: uid
func (_m *WorkflowArchive) DeleteWorkflow(uid string) error {
ret := _m.Called(uid)
// DeleteWorkflow provides a mock function with given fields: ctx, uid
func (_m *WorkflowArchive) DeleteWorkflow(ctx context.Context, uid string) error {
ret := _m.Called(ctx, uid)
if len(ret) == 0 {
panic("no return value specified for DeleteWorkflow")
}
var r0 error
if rf, ok := ret.Get(0).(func(string) error); ok {
r0 = rf(uid)
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, uid)
} else {
r0 = ret.Error(0)
}
@@ -100,9 +102,9 @@ func (_m *WorkflowArchive) DeleteWorkflow(uid string) error {
return r0
}
// GetWorkflow provides a mock function with given fields: uid, namespace, name
func (_m *WorkflowArchive) GetWorkflow(uid string, namespace string, name string) (*v1alpha1.Workflow, error) {
ret := _m.Called(uid, namespace, name)
// GetWorkflow provides a mock function with given fields: ctx, uid, namespace, name
func (_m *WorkflowArchive) GetWorkflow(ctx context.Context, uid string, namespace string, name string) (*v1alpha1.Workflow, error) {
ret := _m.Called(ctx, uid, namespace, name)
if len(ret) == 0 {
panic("no return value specified for GetWorkflow")
@@ -110,19 +112,19 @@ func (_m *WorkflowArchive) GetWorkflow(uid string, namespace string, name string
var r0 *v1alpha1.Workflow
var r1 error
if rf, ok := ret.Get(0).(func(string, string, string) (*v1alpha1.Workflow, error)); ok {
return rf(uid, namespace, name)
if rf, ok := ret.Get(0).(func(context.Context, string, string, string) (*v1alpha1.Workflow, error)); ok {
return rf(ctx, uid, namespace, name)
}
if rf, ok := ret.Get(0).(func(string, string, string) *v1alpha1.Workflow); ok {
r0 = rf(uid, namespace, name)
if rf, ok := ret.Get(0).(func(context.Context, string, string, string) *v1alpha1.Workflow); ok {
r0 = rf(ctx, uid, namespace, name)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.Workflow)
}
}
if rf, ok := ret.Get(1).(func(string, string, string) error); ok {
r1 = rf(uid, namespace, name)
if rf, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {
r1 = rf(ctx, uid, namespace, name)
} else {
r1 = ret.Error(1)
}
@@ -130,9 +132,9 @@ func (_m *WorkflowArchive) GetWorkflow(uid string, namespace string, name string
return r0, r1
}
// GetWorkflowForEstimator provides a mock function with given fields: namespace, requirements
func (_m *WorkflowArchive) GetWorkflowForEstimator(namespace string, requirements []labels.Requirement) (*v1alpha1.Workflow, error) {
ret := _m.Called(namespace, requirements)
// GetWorkflowForEstimator provides a mock function with given fields: ctx, namespace, requirements
func (_m *WorkflowArchive) GetWorkflowForEstimator(ctx context.Context, namespace string, requirements []labels.Requirement) (*v1alpha1.Workflow, error) {
ret := _m.Called(ctx, namespace, requirements)
if len(ret) == 0 {
panic("no return value specified for GetWorkflowForEstimator")
@@ -140,19 +142,19 @@ func (_m *WorkflowArchive) GetWorkflowForEstimator(namespace string, requirement
var r0 *v1alpha1.Workflow
var r1 error
if rf, ok := ret.Get(0).(func(string, []labels.Requirement) (*v1alpha1.Workflow, error)); ok {
return rf(namespace, requirements)
if rf, ok := ret.Get(0).(func(context.Context, string, []labels.Requirement) (*v1alpha1.Workflow, error)); ok {
return rf(ctx, namespace, requirements)
}
if rf, ok := ret.Get(0).(func(string, []labels.Requirement) *v1alpha1.Workflow); ok {
r0 = rf(namespace, requirements)
if rf, ok := ret.Get(0).(func(context.Context, string, []labels.Requirement) *v1alpha1.Workflow); ok {
r0 = rf(ctx, namespace, requirements)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.Workflow)
}
}
if rf, ok := ret.Get(1).(func(string, []labels.Requirement) error); ok {
r1 = rf(namespace, requirements)
if rf, ok := ret.Get(1).(func(context.Context, string, []labels.Requirement) error); ok {
r1 = rf(ctx, namespace, requirements)
} else {
r1 = ret.Error(1)
}
@@ -178,9 +180,9 @@ func (_m *WorkflowArchive) IsEnabled() bool {
return r0
}
// ListWorkflows provides a mock function with given fields: options
func (_m *WorkflowArchive) ListWorkflows(options utils.ListOptions) (v1alpha1.Workflows, error) {
ret := _m.Called(options)
// ListWorkflows provides a mock function with given fields: ctx, options
func (_m *WorkflowArchive) ListWorkflows(ctx context.Context, options utils.ListOptions) (v1alpha1.Workflows, error) {
ret := _m.Called(ctx, options)
if len(ret) == 0 {
panic("no return value specified for ListWorkflows")
@@ -188,19 +190,19 @@ func (_m *WorkflowArchive) ListWorkflows(options utils.ListOptions) (v1alpha1.Wo
var r0 v1alpha1.Workflows
var r1 error
if rf, ok := ret.Get(0).(func(utils.ListOptions) (v1alpha1.Workflows, error)); ok {
return rf(options)
if rf, ok := ret.Get(0).(func(context.Context, utils.ListOptions) (v1alpha1.Workflows, error)); ok {
return rf(ctx, options)
}
if rf, ok := ret.Get(0).(func(utils.ListOptions) v1alpha1.Workflows); ok {
r0 = rf(options)
if rf, ok := ret.Get(0).(func(context.Context, utils.ListOptions) v1alpha1.Workflows); ok {
r0 = rf(ctx, options)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(v1alpha1.Workflows)
}
}
if rf, ok := ret.Get(1).(func(utils.ListOptions) error); ok {
r1 = rf(options)
if rf, ok := ret.Get(1).(func(context.Context, utils.ListOptions) error); ok {
r1 = rf(ctx, options)
} else {
r1 = ret.Error(1)
}
@@ -208,9 +210,9 @@ func (_m *WorkflowArchive) ListWorkflows(options utils.ListOptions) (v1alpha1.Wo
return r0, r1
}
// ListWorkflowsLabelKeys provides a mock function with no fields
func (_m *WorkflowArchive) ListWorkflowsLabelKeys() (*v1alpha1.LabelKeys, error) {
ret := _m.Called()
// ListWorkflowsLabelKeys provides a mock function with given fields: ctx
func (_m *WorkflowArchive) ListWorkflowsLabelKeys(ctx context.Context) (*v1alpha1.LabelKeys, error) {
ret := _m.Called(ctx)
if len(ret) == 0 {
panic("no return value specified for ListWorkflowsLabelKeys")
@@ -218,19 +220,19 @@ func (_m *WorkflowArchive) ListWorkflowsLabelKeys() (*v1alpha1.LabelKeys, error)
var r0 *v1alpha1.LabelKeys
var r1 error
if rf, ok := ret.Get(0).(func() (*v1alpha1.LabelKeys, error)); ok {
return rf()
if rf, ok := ret.Get(0).(func(context.Context) (*v1alpha1.LabelKeys, error)); ok {
return rf(ctx)
}
if rf, ok := ret.Get(0).(func() *v1alpha1.LabelKeys); ok {
r0 = rf()
if rf, ok := ret.Get(0).(func(context.Context) *v1alpha1.LabelKeys); ok {
r0 = rf(ctx)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.LabelKeys)
}
}
if rf, ok := ret.Get(1).(func() error); ok {
r1 = rf()
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
r1 = rf(ctx)
} else {
r1 = ret.Error(1)
}
@@ -238,9 +240,9 @@ func (_m *WorkflowArchive) ListWorkflowsLabelKeys() (*v1alpha1.LabelKeys, error)
return r0, r1
}
// ListWorkflowsLabelValues provides a mock function with given fields: key
func (_m *WorkflowArchive) ListWorkflowsLabelValues(key string) (*v1alpha1.LabelValues, error) {
ret := _m.Called(key)
// ListWorkflowsLabelValues provides a mock function with given fields: ctx, key
func (_m *WorkflowArchive) ListWorkflowsLabelValues(ctx context.Context, key string) (*v1alpha1.LabelValues, error) {
ret := _m.Called(ctx, key)
if len(ret) == 0 {
panic("no return value specified for ListWorkflowsLabelValues")
@@ -248,19 +250,19 @@ func (_m *WorkflowArchive) ListWorkflowsLabelValues(key string) (*v1alpha1.Label
var r0 *v1alpha1.LabelValues
var r1 error
if rf, ok := ret.Get(0).(func(string) (*v1alpha1.LabelValues, error)); ok {
return rf(key)
if rf, ok := ret.Get(0).(func(context.Context, string) (*v1alpha1.LabelValues, error)); ok {
return rf(ctx, key)
}
if rf, ok := ret.Get(0).(func(string) *v1alpha1.LabelValues); ok {
r0 = rf(key)
if rf, ok := ret.Get(0).(func(context.Context, string) *v1alpha1.LabelValues); ok {
r0 = rf(ctx, key)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*v1alpha1.LabelValues)
}
}
if rf, ok := ret.Get(1).(func(string) error); ok {
r1 = rf(key)
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
r1 = rf(ctx, key)
} else {
r1 = ret.Error(1)
}
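With the regenerated mocks, test expectations include the context argument. A sketch (hypothetical test; the mocks import path is assumed to be `persist/sqldb/mocks`):
```go
package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"

	"github.com/argoproj/argo-workflows/v3/persist/sqldb/mocks" // assumed path
	wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)

// TestArchiveWorkflowMock sketches the updated expectation shape: ctx is now
// the first matched argument of every WorkflowArchive method.
func TestArchiveWorkflowMock(t *testing.T) {
	archive := &mocks.WorkflowArchive{}
	archive.On("ArchiveWorkflow", mock.Anything, mock.Anything).Return(nil)
	if err := archive.ArchiveWorkflow(context.Background(), &wfv1.Workflow{}); err != nil {
		t.Fatal(err)
	}
	archive.AssertExpectations(t)
}
```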

View File

@@ -1,6 +1,7 @@
package sqldb
import (
"context"
"fmt"
"time"
@@ -18,38 +19,38 @@ func (r *nullWorkflowArchive) IsEnabled() bool {
return false
}
func (r *nullWorkflowArchive) ArchiveWorkflow(*wfv1.Workflow) error {
func (r *nullWorkflowArchive) ArchiveWorkflow(ctx context.Context, wf *wfv1.Workflow) error {
return nil
}
func (r *nullWorkflowArchive) ListWorkflows(options sutils.ListOptions) (wfv1.Workflows, error) {
func (r *nullWorkflowArchive) ListWorkflows(ctx context.Context, options sutils.ListOptions) (wfv1.Workflows, error) {
return wfv1.Workflows{}, nil
}
func (r *nullWorkflowArchive) CountWorkflows(options sutils.ListOptions) (int64, error) {
func (r *nullWorkflowArchive) CountWorkflows(ctx context.Context, options sutils.ListOptions) (int64, error) {
return 0, nil
}
func (r *nullWorkflowArchive) GetWorkflow(string, string, string) (*wfv1.Workflow, error) {
func (r *nullWorkflowArchive) GetWorkflow(ctx context.Context, uid string, namespace string, name string) (*wfv1.Workflow, error) {
return nil, fmt.Errorf("getting archived workflows not supported")
}
func (r *nullWorkflowArchive) GetWorkflowForEstimator(namespace string, requirements []labels.Requirement) (*wfv1.Workflow, error) {
func (r *nullWorkflowArchive) GetWorkflowForEstimator(ctx context.Context, namespace string, requirements []labels.Requirement) (*wfv1.Workflow, error) {
return nil, fmt.Errorf("getting archived workflow for estimator not supported")
}
func (r *nullWorkflowArchive) DeleteWorkflow(string) error {
func (r *nullWorkflowArchive) DeleteWorkflow(ctx context.Context, uid string) error {
return fmt.Errorf("deleting archived workflows not supported")
}
func (r *nullWorkflowArchive) DeleteExpiredWorkflows(time.Duration) error {
func (r *nullWorkflowArchive) DeleteExpiredWorkflows(ctx context.Context, ttl time.Duration) error {
return nil
}
func (r *nullWorkflowArchive) ListWorkflowsLabelKeys() (*wfv1.LabelKeys, error) {
func (r *nullWorkflowArchive) ListWorkflowsLabelKeys(ctx context.Context) (*wfv1.LabelKeys, error) {
return &wfv1.LabelKeys{}, nil
}
func (r *nullWorkflowArchive) ListWorkflowsLabelValues(string) (*wfv1.LabelValues, error) {
func (r *nullWorkflowArchive) ListWorkflowsLabelValues(ctx context.Context, key string) (*wfv1.LabelValues, error) {
return &wfv1.LabelValues{}, nil
}

View File

@@ -1,17 +1,18 @@
package sqldb
import (
"context"
"encoding/json"
"fmt"
"hash/fnv"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/upper/db/v4"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/env"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
const OffloadNodeStatusDisabled = "Workflow has offloaded nodes, but offloading has been disabled"
@@ -22,20 +23,21 @@ type UUIDVersion struct {
}
type OffloadNodeStatusRepo interface {
Save(uid, namespace string, nodes wfv1.Nodes) (string, error)
Get(uid, version string) (wfv1.Nodes, error)
List(namespace string) (map[UUIDVersion]wfv1.Nodes, error)
ListOldOffloads(namespace string) (map[string][]string, error)
Delete(uid, version string) error
Save(ctx context.Context, uid, namespace string, nodes wfv1.Nodes) (string, error)
Get(ctx context.Context, uid, version string) (wfv1.Nodes, error)
List(ctx context.Context, namespace string) (map[UUIDVersion]wfv1.Nodes, error)
ListOldOffloads(ctx context.Context, namespace string) (map[string][]string, error)
Delete(ctx context.Context, uid, version string) error
IsEnabled() bool
}
func NewOffloadNodeStatusRepo(session db.Session, clusterName, tableName string) (OffloadNodeStatusRepo, error) {
func NewOffloadNodeStatusRepo(ctx context.Context, log logging.Logger, session db.Session, clusterName, tableName string) (OffloadNodeStatusRepo, error) {
// this environment variable allows you to make Argo Workflows delete offloaded data more or less aggressively,
// useful for testing
ttl := env.LookupEnvDurationOr("OFFLOAD_NODE_STATUS_TTL", 5*time.Minute)
log.WithField("ttl", ttl).Debug("Node status offloading config")
return &nodeOffloadRepo{session: session, clusterName: clusterName, tableName: tableName, ttl: ttl}, nil
ttl := env.LookupEnvDurationOr(ctx, "OFFLOAD_NODE_STATUS_TTL", 5*time.Minute)
log = log.WithField("ttl", ttl)
log.Debug(ctx, "Node status offloading config")
return &nodeOffloadRepo{session: session, clusterName: clusterName, tableName: tableName, ttl: ttl, log: log}, nil
}
type nodesRecord struct {
@@ -51,6 +53,7 @@ type nodeOffloadRepo struct {
tableName string
// time to live - at what ttl an offload becomes old
ttl time.Duration
log logging.Logger
}
func (wdc *nodeOffloadRepo) IsEnabled() bool {
@@ -68,7 +71,7 @@ func nodeStatusVersion(s wfv1.Nodes) (string, string, error) {
return string(marshalled), fmt.Sprintf("fnv:%v", h.Sum32()), nil
}
func (wdc *nodeOffloadRepo) Save(uid, namespace string, nodes wfv1.Nodes) (string, error) {
func (wdc *nodeOffloadRepo) Save(ctx context.Context, uid, namespace string, nodes wfv1.Nodes) (string, error) {
marshalled, version, err := nodeStatusVersion(nodes)
if err != nil {
return "", err
@@ -84,8 +87,8 @@ func (wdc *nodeOffloadRepo) Save(uid, namespace string, nodes wfv1.Nodes) (strin
Nodes: marshalled,
}
logCtx := log.WithFields(log.Fields{"uid": uid, "version": version})
logCtx.Debug("Offloading nodes")
logCtx := wdc.log.WithFields(logging.Fields{"uid": uid, "version": version})
logCtx.Debug(ctx, "Offloading nodes")
_, err = wdc.session.Collection(wdc.tableName).Insert(record)
if err != nil {
// if we have a duplicate, then it must have the same clustername+uid+version, which MUST mean that we
@@ -93,7 +96,7 @@ func (wdc *nodeOffloadRepo) Save(uid, namespace string, nodes wfv1.Nodes) (strin
if !isDuplicateKeyError(err) {
return "", err
}
logCtx.WithField("err", err).Debug("Ignoring duplicate key error")
logCtx.WithField("err", err).Debug(ctx, "Ignoring duplicate key error")
}
// Don't need to clean up the old records here, we have a scheduled cleanup mechanism.
// If we clean them up here, when we update, if there is an update conflict, we will not be able to go back.
@@ -112,8 +115,8 @@ func isDuplicateKeyError(err error) bool {
return false
}
func (wdc *nodeOffloadRepo) Get(uid, version string) (wfv1.Nodes, error) {
log.WithFields(log.Fields{"uid": uid, "version": version}).Debug("Getting offloaded nodes")
func (wdc *nodeOffloadRepo) Get(ctx context.Context, uid, version string) (wfv1.Nodes, error) {
wdc.log.WithFields(logging.Fields{"uid": uid, "version": version}).Debug(ctx, "Getting offloaded nodes")
r := &nodesRecord{}
err := wdc.session.SQL().
SelectFrom(wdc.tableName).
@@ -132,8 +135,8 @@ func (wdc *nodeOffloadRepo) Get(uid, version string) (wfv1.Nodes, error) {
return *nodes, nil
}
func (wdc *nodeOffloadRepo) List(namespace string) (map[UUIDVersion]wfv1.Nodes, error) {
log.WithFields(log.Fields{"namespace": namespace}).Debug("Listing offloaded nodes")
func (wdc *nodeOffloadRepo) List(ctx context.Context, namespace string) (map[UUIDVersion]wfv1.Nodes, error) {
wdc.log.WithFields(logging.Fields{"namespace": namespace}).Debug(ctx, "Listing offloaded nodes")
var records []nodesRecord
err := wdc.session.SQL().
Select("uid", "version", "nodes").
@@ -158,8 +161,8 @@ func (wdc *nodeOffloadRepo) List(namespace string) (map[UUIDVersion]wfv1.Nodes,
return res, nil
}
func (wdc *nodeOffloadRepo) ListOldOffloads(namespace string) (map[string][]string, error) {
log.WithFields(log.Fields{"namespace": namespace}).Debug("Listing old offloaded nodes")
func (wdc *nodeOffloadRepo) ListOldOffloads(ctx context.Context, namespace string) (map[string][]string, error) {
wdc.log.WithFields(logging.Fields{"namespace": namespace}).Debug(ctx, "Listing old offloaded nodes")
var records []UUIDVersion
err := wdc.session.SQL().
Select("uid", "version").
@@ -178,15 +181,15 @@ func (wdc *nodeOffloadRepo) ListOldOffloads(namespace string) (map[string][]stri
return x, nil
}
func (wdc *nodeOffloadRepo) Delete(uid, version string) error {
func (wdc *nodeOffloadRepo) Delete(ctx context.Context, uid, version string) error {
if uid == "" {
return fmt.Errorf("invalid uid")
}
if version == "" {
return fmt.Errorf("invalid version")
}
logCtx := log.WithFields(log.Fields{"uid": uid, "version": version})
logCtx.Debug("Deleting offloaded nodes")
logCtx := wdc.log.WithFields(logging.Fields{"uid": uid, "version": version})
logCtx.Debug(ctx, "Deleting offloaded nodes")
rs, err := wdc.session.SQL().
DeleteFrom(wdc.tableName).
Where(db.Cond{"clustername": wdc.clusterName}).
@@ -200,7 +203,7 @@ func (wdc *nodeOffloadRepo) Delete(uid, version string) error {
if err != nil {
return err
}
logCtx.WithField("rowsAffected", rowsAffected).Debug("Deleted offloaded nodes")
logCtx.WithField("rowsAffected", rowsAffected).Debug(ctx, "Deleted offloaded nodes")
return nil
}
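A caller-side sketch (hypothetical helper) of the context-threading interface introduced above:
```go
package example

import (
	"context"

	"github.com/argoproj/argo-workflows/v3/persist/sqldb"
)

// pruneOldOffloads sketches a caller: every repo method now takes a
// context.Context, so cancellation and the scoped logger flow into SQL calls.
func pruneOldOffloads(ctx context.Context, repo sqldb.OffloadNodeStatusRepo, namespace string) error {
	old, err := repo.ListOldOffloads(ctx, namespace) // uid -> old versions
	if err != nil {
		return err
	}
	for uid, versions := range old {
		for _, version := range versions {
			if err := repo.Delete(ctx, uid, version); err != nil {
				return err
			}
		}
	}
	return nil
}
```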

View File

@@ -12,11 +12,26 @@ import (
func BuildArchivedWorkflowSelector(selector db.Selector, tableName, labelTableName string, t sqldb.DBType, options utils.ListOptions, count bool) (db.Selector, error) {
selector = selector.
And(namespaceEqual(options.Namespace)).
And(nameEqual(options.Name)).
And(namePrefixClause(options.NamePrefix)).
And(startedAtFromClause(options.MinStartedAt)).
And(startedAtToClause(options.MaxStartedAt))
if options.Name != "" {
nameFilter := options.NameFilter
if nameFilter == "" {
nameFilter = "Exact"
}
if nameFilter == "Exact" {
selector = selector.And(nameEqual(options.Name))
}
if nameFilter == "Contains" {
selector = selector.And(nameContainsClause(options.Name))
}
if nameFilter == "Prefix" {
selector = selector.And(namePrefixClause(options.Name))
}
}
selector, err := labelsClause(selector, t, options.LabelRequirements, tableName, labelTableName, true)
if err != nil {
return nil, err
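The three matching modes, sketched as hypothetical list options (field names as in `utils.ListOptions` above; the `server/utils` import path is assumed, and `"Exact"` applies when `NameFilter` is empty):
```go
package example

import (
	sutils "github.com/argoproj/argo-workflows/v3/server/utils" // assumed path
)

// opts sketches the new name-matching knob: NameFilter chooses how Name is
// compared against archived workflow names.
var opts = sutils.ListOptions{
	Namespace:  "argo",
	Name:       "build",
	NameFilter: "Prefix", // one of "Exact" (default), "Contains", "Prefix"
}
```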

Some files were not shown because too many files have changed in this diff.