Compare commits

...

No commits in common. "v0.3.1" and "main" have entirely different histories.
v0.3.1 ... main

3596 changed files with 874349 additions and 300241 deletions

@@ -1,77 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: argo-cd-ci-
spec:
entrypoint: argo-cd-ci
arguments:
parameters:
- name: revision
value: master
- name: repo
value: https://github.com/argoproj/argo-cd.git
templates:
- name: argo-cd-ci
steps:
- - name: build
template: ci-dind
arguments:
parameters:
- name: cmd
value: "{{item}}"
withItems:
- make controller-image
- make server-image
- make repo-server-image
- name: test
template: ci-builder
arguments:
parameters:
- name: cmd
value: "{{item}}"
withItems:
- dep ensure && make lint
- dep ensure && make test test-e2e
- name: ci-builder
inputs:
parameters:
- name: cmd
artifacts:
- name: code
path: /go/src/github.com/argoproj/argo-cd
git:
repo: "{{workflow.parameters.repo}}"
revision: "{{workflow.parameters.revision}}"
container:
image: argoproj/argo-cd-ci-builder:latest
command: [sh, -c]
args: ["{{inputs.parameters.cmd}}"]
workingDir: /go/src/github.com/argoproj/argo-cd
- name: ci-dind
inputs:
parameters:
- name: cmd
artifacts:
- name: code
path: /go/src/github.com/argoproj/argo-cd
git:
repo: "{{workflow.parameters.repo}}"
revision: "{{workflow.parameters.revision}}"
container:
image: argoproj/argo-cd-ci-builder:latest
command: [sh, -c]
args: ["until docker ps; do sleep 3; done && {{inputs.parameters.cmd}}"]
workingDir: /go/src/github.com/argoproj/argo-cd
env:
- name: DOCKER_HOST
value: 127.0.0.1
sidecars:
- name: dind
image: docker:17.10-dind
securityContext:
privileged: true
mirrorVolumeMounts: true

2 .clang-format Normal file

@@ -0,0 +1,2 @@
# Allow unlimited column length, rather than 80. This prevents word-wrapping comments, which end up in Swagger.
ColumnLimit: 0

17 .codecov.yml Normal file

@@ -0,0 +1,17 @@
ignore:
- "**/*.pb.go"
- "**/*.pb.gw.go"
- "**/*generated.go"
- "**/*generated.deepcopy.go"
- "**/*_test.go"
- "pkg/apis/client/.*"
- "pkg/client/.*"
- "vendor/.*"
coverage:
status:
# we've found this not to be useful
patch: off
project:
default:
# allow test coverage to drop by 2%, assume that it's typically due to CI problems
threshold: 2

@@ -0,0 +1,24 @@
# TODO: Upgrade to Ubuntu 24.04 when https://bugs.launchpad.net/ubuntu/+source/curl/+bug/2073448 is addressed
FROM mcr.microsoft.com/vscode/devcontainers/base:ubuntu-22.04
# k3d version: https://github.com/k3d-io/k3d/releases
ARG K3D_VERSION=v5.8.3
# TARGETARCH is automatically set by BuildKit to the architecture (e.g. "amd64" or "arm64")
# Docs: https://docs.docker.com/reference/dockerfile/#automatic-platform-args-in-the-global-scope
ARG TARGETARCH
# Use bash to allow us to source hack/k8s-versions.sh
SHELL ["/bin/bash", "-c"]
# install protocol buffer tools
RUN apt-get update && apt-get install -y protobuf-compiler clang-format
# install k3d
RUN wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG=${K3D_VERSION} bash
# install kubectl
COPY hack/k8s-versions.sh /tmp/
RUN . /tmp/k8s-versions.sh && \
wget -O /usr/local/bin/kubectl "https://dl.k8s.io/release/${K8S_VERSIONS[min]}/bin/linux/${TARGETARCH}/kubectl" && \
chmod +x /usr/local/bin/kubectl

@@ -0,0 +1,24 @@
{
"features": {
"ghcr.io/devcontainers/features/docker-in-docker:2": {
"version": "2.12.0",
"resolved": "ghcr.io/devcontainers/features/docker-in-docker@sha256:5f3e2005aad161ce3ff7700b2603f11935348c039f9166960efd050d69cd3014",
"integrity": "sha256:5f3e2005aad161ce3ff7700b2603f11935348c039f9166960efd050d69cd3014"
},
"ghcr.io/devcontainers/features/go:1": {
"version": "1.3.1",
"resolved": "ghcr.io/devcontainers/features/go@sha256:a485a757492868d4ee3b9dca0b9bb1cbeaef21763e7812a1a804f84720bc5ab5",
"integrity": "sha256:a485a757492868d4ee3b9dca0b9bb1cbeaef21763e7812a1a804f84720bc5ab5"
},
"ghcr.io/devcontainers/features/node:1": {
"version": "1.6.1",
"resolved": "ghcr.io/devcontainers/features/node@sha256:71590121aaf7b2040f3e1e2dfc4bb9a1389277fd5a88a7199094542b82ce5340",
"integrity": "sha256:71590121aaf7b2040f3e1e2dfc4bb9a1389277fd5a88a7199094542b82ce5340"
},
"ghcr.io/devcontainers/features/python:1": {
"version": "1.7.0",
"resolved": "ghcr.io/devcontainers/features/python@sha256:8452f39db0852420728c9f7503dd94b3fc71aa558b5e7c8f6f9ce6687e494ae3",
"integrity": "sha256:8452f39db0852420728c9f7503dd94b3fc71aa558b5e7c8f6f9ce6687e494ae3"
}
}
}

@@ -0,0 +1,25 @@
{
// Dev container definition for building the image used by .devcontainer/devcontainer.json.
// Docs: https://containers.dev/guide/prebuild#how-to
"name": "Argo Workflows (builder)",
"build": {
"dockerfile": "Dockerfile",
"context": "../..",
// Override this hardcoded param from the devcontainer CLI because it bloats
// the image and we export the cache to a separate image:
// https://github.com/devcontainers/cli/blob/2fafdcc8a8dee5a922616325f3726043f1ea92c3/src/spec-node/singleContainer.ts#L208
"args": { "BUILDKIT_INLINE_CACHE": "0" }
},
"features": {
"ghcr.io/devcontainers/features/go:1": {
"version": "1.24"
},
"ghcr.io/devcontainers/features/node:1": {
"version": "20"
},
"ghcr.io/devcontainers/features/docker-in-docker:2": {},
"ghcr.io/devcontainers/features/python:1": {}
}
}

@@ -0,0 +1,56 @@
{
// Dev container definition. Reference: https://containers.dev/implementors/json_reference/
"name": "Argo Workflows (pre-built)",
// This image is built and pushed by .github/workflows/devcontainer.yaml using .devcontainer/builder/devcontainer.json
"image": "quay.io/argoproj/argo-workflows-devcontainer",
"forwardPorts": [9000, 9001, 9090, 2746, 8080, 5556, 6060, 9091, 3306, 5432, 10000, 8000],
"hostRequirements": {
"cpus": 4
},
"runArgs": [
"--add-host=host.docker.internal:host-gateway",
"--add-host=dex:127.0.0.1",
"--add-host=minio:127.0.0.1",
"--add-host=postgres:127.0.0.1",
"--add-host=mysql:127.0.0.1",
"--add-host=azurite:127.0.0.1"
],
"postCreateCommand": ".devcontainer/pre-build.sh",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/vscode/go/src/github.com/argoproj/argo-workflows,type=bind",
"workspaceFolder": "/home/vscode/go/src/github.com/argoproj/argo-workflows",
"remoteEnv": {
"PATH": "${containerEnv:PATH}:/home/vscode/go/bin",
"GOPATH": "/home/vscode/go"
},
"customizations": {
"codespaces": {
"openFiles": [
"docs/running-locally.md"
]
},
"vscode": {
"settings": {
"launch": {
"configurations": [
{
"name": "Attach to argo server",
"type": "go",
"request": "attach",
"mode": "local",
"processId": "argo"
},
{
"name": "Attach to workflow controller",
"type": "go",
"request": "attach",
"mode": "local",
"processId": "workflow-controller"
}
]
}
}
}
}
}

16 .devcontainer/pre-build.sh Executable file

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
set -eux
# create cluster using the minimum tested Kubernetes version
. hack/k8s-versions.sh
k3d cluster get k3s-default || k3d cluster create --image "rancher/k3s:${K8S_VERSIONS[min]}-k3s1" --wait
k3d kubeconfig merge --kubeconfig-merge-default
kubectl cluster-info
# Make sure go path is owned by vscode
sudo chown vscode:vscode /home/vscode/go || true
sudo chown vscode:vscode /home/vscode/go/src || true
sudo chown vscode:vscode /home/vscode/go/src/github.com || true
# Patch CoreDNS so that host.docker.internal is available inside the cluster
kubectl get cm coredns -n kube-system -o yaml | sed "s/ NodeHosts: |/ NodeHosts: |\n `grep host.docker.internal /etc/hosts`/" | kubectl apply -f -

@@ -1,4 +1,28 @@
# Prevent vendor directory from being copied to ensure we are not pulling unexpected cruft from
# a user's workspace, and are only building off of what is locked by dep.
*.iml
*.md
*.yaml
.devcontainer
# The .git folder gets modified every time git runs preventing caching of builds. Once removed, we must pass GIT_COMMIT as Docker build-args.
.git
.github
.idea
.run
assets
community
coverage.out
dist
docs
examples
logs
manifests
plugins
sdks
site
tasks.yaml
test/e2e
ui/dist
ui/node_modules
v3
vendor

16 .features/TEMPLATE.md Normal file

@@ -0,0 +1,16 @@
<!-- Required: All of these fields are required, including at least one issue -->
Description: <!-- A brief one line description of the feature -->
Author: <!-- Author name and GitHub link in markdown format e.g. [Alan Clucas](https://github.com/Joibel) -->
Component: <!-- component name here, see hack/featuregen/components.go for the list -->
Issues: <!-- Space separated list of issues 1234 5678 -->
<!--
Optional
Additional details about the feature written in markdown, aimed at users who want to learn about it
* Explain when you would want to use the feature
* Include code examples if applicable
* Provide working examples
* Format code using back-ticks
* Use Kubernetes style
* One sentence per line of markdown
-->
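To make the template concrete, here is a hedged sketch of creating a pending feature entry; the file name, author, component, and issue number below are illustrative placeholders, not taken from this changeset:

```bash
# Hypothetical sketch: create a pending feature description following the template above.
# The file name, author, Component value, and issue number are illustrative only.
mkdir -p .features/pending
cat > .features/pending/example-feature.md <<'EOF'
Description: One-line summary of what the feature does
Author: [Jane Doe](https://github.com/janedoe)
Component: General
Issues: 1234
Optional further details for users, written in markdown.
One sentence per line of markdown.
EOF
```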

@@ -0,0 +1,12 @@
Component: General
Issues: 14069
Description: Name filter parameter for prefix/contains/exact search in `/archived-workflows`
Author: [Armin Friedl](https://github.com/arminfriedl)
A new `nameFilter` parameter was added to the `GET /archived-workflows` endpoint.
The filter works analogously to the one in `GET /workflows`.
It specifies how a search for `?listOptions.fieldSelector=metadata.name=<search-string>` in these endpoints should be interpreted.
Possible values are `Prefix`, `Contains` and `Exact`.
The `metadata.name` field is matched accordingly against the value for `<search-string>`.
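A hedged sketch of using the new parameter (the server address and the name prefix are assumptions, not from this changeset):

```bash
# Hypothetical sketch: list archived workflows whose names start with "my-wf-".
# Assumes an argo-server reachable on localhost:2746; add an Authorization
# header if the server requires a token.
curl -s "http://localhost:2746/api/v1/archived-workflows?listOptions.fieldSelector=metadata.name=my-wf-&nameFilter=Prefix"
```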

@@ -0,0 +1,9 @@
Component: General
Issues: 11120
Description: This migrates most of the logging off logrus and onto a custom logger.
Author: [Isitha Subasinghe](https://github.com/isubasinghe)
Currently it is quite hard to match log lines with their corresponding workflows.
This change propagates a context object, carrying an annotated logger, down the call hierarchy.
This allows context-aware logging from deep within the codebase.

@@ -0,0 +1,6 @@
Component: Build and Development
Issues: 14155
Description: Document features as they are created
Author: [Alan Clucas](https://github.com/Joibel)
To assist with creating release documentation and blog postings, all features now require a document in .features/pending explaining what they do for users.

@@ -0,0 +1,14 @@
Component: UI
Issues: 13114
Description: Support opening custom links in a new tab automatically.
Author: [Shuangkun Tian](https://github.com/shuangkun)
Support configuring a custom link to open in a new tab by default.
If `target` is `_blank`, the link opens in a new tab; if `target` is unset or `_self`, it opens in the current tab. For example:
```yaml
- name: Pod Link
scope: pod
target: _blank
url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
```
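For context, custom links such as the one above live under the `links` key of the workflow-controller-configmap (per docs/links.md). The sketch below is a hedged illustration for a throwaway cluster only, since `kubectl apply` replaces the ConfigMap's existing data:

```bash
# Hypothetical sketch: register the custom link on a scratch cluster.
# WARNING: this replaces the existing workflow-controller-configmap data.
cat <<'EOF' | kubectl -n argo apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: workflow-controller-configmap
data:
  links: |
    - name: Pod Link
      scope: pod
      target: _blank
      url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
EOF
```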

7 .gitattributes vendored Normal file

@@ -0,0 +1,7 @@
sdks/python/client/** linguist-generated
sdks/java/client/** linguist-generated
manifests/base/crds/*/argoproj.io*.yaml linguist-generated
manifests/quick-start-*.yaml linguist-generated
api/jsonschema/schema.json linguist-generated
api/openapi-spec/swagger.json linguist-generated
pkg/client/** linguist-generated

61 .github/ISSUE_TEMPLATE/bug_report.yaml vendored Normal file

@@ -0,0 +1,61 @@
name: Reproducible bug report
description: Create a reproducible bug report. Not for support requests.
type: Bug
body:
- type: checkboxes
id: terms
attributes:
label: Pre-requisites
options:
- label: I have double-checked my configuration
required: true
- label: I have tested with the `:latest` image tag (i.e. `quay.io/argoproj/workflow-controller:latest`) and can confirm the issue still exists on `:latest`. If not, I have explained why, **in detail**, in my description below.
required: true
- label: I have searched existing issues and could not find a match for this bug
required: true
- label: I'd like to contribute the fix myself (see [contributing guide](https://github.com/argoproj/argo-workflows/blob/main/docs/CONTRIBUTING.md))
- type: textarea
id: description
attributes:
label: What happened? What did you expect to happen?
validations:
required: true
- type: input
id: version
attributes:
label: Version(s)
description: What versions did you experience this on? Please provide all versions. When testing `:latest`, please provide a SHA
placeholder: v3.5.7, v3.5.8, 3ece3b30f0c445204fec468fd437e77283cab913
validations:
required: true
- type: textarea
id: failing-workflow
attributes:
label: Paste a minimal workflow that reproduces the issue. We must be able to run the workflow; don't enter a workflow that uses private images.
description: A [minimal reproduction](https://stackoverflow.com/help/minimal-reproducible-example) is essential to debugging and prioritizing your issue
render: YAML
validations:
required: true
- type: textarea
id: controller-logs
attributes:
label: Logs from the workflow controller
render: text
value: kubectl logs -n argo deploy/workflow-controller | grep ${workflow}
validations:
required: true
- type: textarea
id: wait-logs
attributes:
label: Logs from your workflow's wait container
render: text
value: kubectl logs -n argo -c wait -l workflows.argoproj.io/workflow=${workflow},workflow.argoproj.io/phase!=Succeeded
validations:
required: true
- type: markdown
attributes:
value: |
Thanks for submitting this issue! Are you a contributor? If not, have you thought about it?
Argo Workflows is seeking more community involvement and ultimately more [Reviewers and Approvers](https://github.com/argoproj/argoproj/blob/main/community/membership.md) to help keep it viable.
See [Sustainability Effort](https://github.com/argoproj/argo-workflows/blob/main/community/sustainability_effort.md) for more information.

12 .github/ISSUE_TEMPLATE/config.yml vendored Normal file

@@ -0,0 +1,12 @@
blank_issues_enabled: false
contact_links:
- name: Have you read the docs?
url: https://argo-workflows.readthedocs.io/en/latest/
about: Much help can be found in the docs
- name: Ask a question
url: https://github.com/argoproj/argo-workflows/discussions/new
about: Ask a question or start a discussion about workflows
- name: Chat on Slack
url: https://argoproj.github.io/community/join-slack
about: Maybe chatting with the community can help

@@ -0,0 +1,27 @@
---
name: Feature
about: Propose a feature for this project
type: Feature
---
# Summary
What change needs making?
## Use Cases
When would you use this?
---
<!-- Issue Author: Don't delete this message to encourage other users to support your issue! -->
**Message from the maintainers**:
Love this feature request? Give it a 👍. We prioritise the proposals with the most 👍.
<!--
**Beyond this issue**:
Are you a contributor? If not, have you thought about it?
Argo Workflows is seeking more community involvement and ultimately more [Reviewers and Approvers](https://github.com/argoproj/argoproj/blob/main/community/membership.md) to help keep it viable.
See [Sustainability Effort](https://github.com/argoproj/argo-workflows/blob/main/community/sustainability_effort.md) for more information.
-->

64 .github/ISSUE_TEMPLATE/regression.yaml vendored Normal file

@@ -0,0 +1,64 @@
name: Regression report
description: Create a regression report. Not for support requests.
type: Bug
labels: [ type/regression ]
body:
- type: checkboxes
id: terms
attributes:
label: Pre-requisites
options:
- label: I have double-checked my configuration
required: true
- label: I have tested with the `:latest` image tag (i.e. `quay.io/argoproj/workflow-controller:latest`) and can confirm the issue still exists on `:latest`. If not, I have explained why, **in detail**, in my description below.
required: true
- label: I have searched existing issues and could not find a match for this bug
required: true
- label: I'd like to contribute the fix myself (see [contributing guide](https://github.com/argoproj/argo-workflows/blob/main/docs/CONTRIBUTING.md))
- type: textarea
id: description
attributes:
label: What happened? What did you expect to happen?
validations:
required: true
- type: input
id: version
attributes:
label: Version(s)
description: What versions did you experience this on? Please provide all versions. When testing `:latest`, please provide a SHA
placeholder: v3.5.7, v3.5.8, 3ece3b30f0c445204fec468fd437e77283cab913
validations:
required: true
- type: textarea
id: failing-workflow
attributes:
label: Paste a minimal workflow that reproduces the issue. We must be able to run the workflow; don't enter a workflow that uses private images.
description: A [minimal reproduction](https://stackoverflow.com/help/minimal-reproducible-example) is essential to debugging and prioritizing your issue
render: YAML
validations:
required: true
- type: textarea
id: controller-logs
attributes:
label: Logs from the workflow controller
render: text
value: kubectl logs -n argo deploy/workflow-controller | grep ${workflow}
validations:
required: true
- type: textarea
id: wait-logs
attributes:
label: Logs from your workflow's wait container
render: text
value: kubectl logs -n argo -c wait -l workflows.argoproj.io/workflow=${workflow},workflow.argoproj.io/phase!=Succeeded
validations:
required: true
- type: markdown
attributes:
value: |
Thanks for submitting this issue! Are you a contributor? If not, have you thought about it?
Argo Workflows is seeking more community involvement and ultimately more [Reviewers and Approvers](https://github.com/argoproj/argoproj/blob/main/community/membership.md) to help keep it viable.
See [Sustainability Effort](https://github.com/argoproj/argo-workflows/blob/main/community/sustainability_effort.md) for more information.

2 .github/cherry-pick-bot.yml vendored Normal file

@@ -0,0 +1,2 @@
enabled: true
preservePullRequestTitle: true

87 .github/dependabot.yml vendored Normal file

@@ -0,0 +1,87 @@
version: 2
updates:
# prod dependencies
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "weekly"
day: "saturday"
ignore:
- dependency-name: k8s.io/*
- dependency-name: github.com/grpc-ecosystem/*
- dependency-name: google.golang.org/grpc
# ignore all non-security updates: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit
open-pull-requests-limit: 0
labels:
- type/dependencies
- go
commit-message:
prefix: chore(deps)
- package-ecosystem: "npm"
directory: "/ui"
schedule:
interval: "weekly"
day: "saturday"
# split devDeps and prod deps as typically only prod deps need security backports
groups:
devDeps:
applies-to: security-updates
dependency-type: "development"
deps:
applies-to: security-updates
dependency-type: "production"
ignore:
- dependency-name: raw-loader
- dependency-name: style-loader
- dependency-name: react-router-dom
- dependency-name: "@types/react-router-dom"
# ignore all non-security updates: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit
open-pull-requests-limit: 0
labels:
- type/dependencies
- javascript
commit-message:
prefix: chore(deps)
prefix-development: chore(deps-dev)
# build / CI dependencies
- package-ecosystem: "pip"
directory: "/docs"
schedule:
interval: "weekly"
day: "saturday"
# ignore all non-security updates: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit
open-pull-requests-limit: 0
labels:
- type/dependencies
- python
commit-message:
prefix: chore(deps)
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
day: "saturday"
# ignore all non-security updates: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit
open-pull-requests-limit: 0
labels:
- type/dependencies
- github_actions
commit-message:
prefix: chore(deps-dev)
# Docs: https://containers.dev/guide/dependabot
- package-ecosystem: "devcontainers"
directory: "/"
schedule:
interval: "weekly"
day: "saturday"
# ignore all non-security updates: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit
open-pull-requests-limit: 0
labels:
- type/dependencies
- devcontainer
commit-message:
prefix: chore(deps-dev)

52 .github/pull_request_template.md vendored Normal file

@@ -0,0 +1,52 @@
<!-- markdownlint-disable MD041 -- this is rendered within existing HTML, so allow starting without an H1 -->
<!--
### Before you open your PR
- Run `make pre-commit -B` to fix codegen and lint problems (build will fail).
- [Signed-off your commits](https://github.com/apps/dco/) (otherwise the DCO check will fail).
- Used [a conventional commit message](https://www.conventionalcommits.org/en/v1.0.0/).
### When you open your PR
- PR title format should also conform to [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/).
- "Fixes #" is in both the PR title (for release notes) and this description (to automatically link and close the issue).
- Create the PR as draft.
- Once builds are green, mark your PR "Ready for review".
When changes are requested, please address them and then dismiss the review to get it reviewed again.
-->
<!-- Does this PR fix an issue -->
Fixes #TODO
### Motivation
<!-- TODO: Say why you made your changes. -->
### Modifications
<!-- TODO: Say what changes you made. -->
<!-- TODO: Attach screenshots if you changed the UI. -->
### Verification
<!-- TODO: Say how you tested your changes. -->
### Documentation
<!-- TODO: Say how you have updated the documentation or explain why this isn't needed here -->
<!-- Required for features: Explain how the user will discover this feature through documentation and examples -->
<!--
### Beyond this PR
Thank you for submitting this! Have you ever thought of becoming a Reviewer or Approver on the project?
Argo Workflows is seeking more community involvement and ultimately more [Reviewers and Approvers](https://github.com/argoproj/argoproj/blob/main/community/membership.md) to help keep it viable.
See [Sustainability Effort](https://github.com/argoproj/argo-workflows/blob/main/community/sustainability_effort.md) for more information.
-->

34 .github/workflows/changelog.yaml vendored Normal file

@@ -0,0 +1,34 @@
name: Changelog
on:
push:
tags:
- v*
- "!v0.0.0"
permissions:
contents: read
jobs:
generate_changelog:
name: Generate changelog
if: github.repository == 'argoproj/argo-workflows'
permissions:
contents: write # for peter-evans/create-pull-request to create branch
pull-requests: write # for peter-evans/create-pull-request to create a PR
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
ref: main
fetch-depth: 0
- run: git fetch --prune --prune-tags
- run: git tag -l 'v*'
# avoid invoking `make` to reduce the risk of a Makefile bug failing this workflow
- run: ./hack/changelog.sh > CHANGELOG.md
- uses: peter-evans/create-pull-request@a4f52f8033a6168103c2538976c07b467e8163bc # v6.0.1
with:
title: 'docs: update CHANGELOG.md for ${{ github.ref_name }}'
commit-message: 'docs: update CHANGELOG.md for ${{ github.ref_name }}'
branch: create-pull-request/changelog
signoff: true

521 .github/workflows/ci-build.yaml vendored Normal file

@@ -0,0 +1,521 @@
name: CI
on:
push:
branches:
- "main"
- "release-*"
- "!release-2.8"
pull_request:
branches:
- "main"
- "release-*"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
changed-files:
name: Get changed files
outputs:
# reference: https://github.com/tj-actions/changed-files#outputs-
tests: ${{ steps.changed-files.outputs.tests_any_modified == 'true' }}
e2e-tests: ${{ steps.changed-files.outputs.e2e-tests_any_modified == 'true' }}
codegen: ${{ steps.changed-files.outputs.codegen_any_modified == 'true' }}
lint: ${{ steps.changed-files.outputs.lint_any_modified == 'true' }}
ui: ${{ steps.changed-files.outputs.ui_any_modified == 'true' }}
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
fetch-depth: 50 # assume PRs are less than 50 commits
- name: Get relevant files changed per group
id: changed-files
uses: tj-actions/changed-files@cbda684547adc8c052d50711417fa61b428a9f88 # v41.1.2
with:
files_yaml: |
common: &common
- .github/workflows/ci-build.yaml
- Makefile
- tasks.yaml
tests: &tests
- *common
- cmd/**
- config/**
- errors/**
- persist/**
- pkg/**
- server/**
- test/**
- util/**
- workflow/**
- go.mod
- go.sum
e2e-tests:
- *tests
# plus manifests and SDKs that are used in E2E tests
- Dockerfile
- manifests/**
- sdks/**
# example test suite
- examples/**
codegen:
- *common
# generated files
- api/**
- docs/fields.md
- docs/executor_swagger.md
- docs/cli/**
- pkg/**
- sdks/java/**
- sdks/python/**
# files that generation is based off
- pkg/**
- cmd/**
- examples/** # examples are used within the fields lists
- manifests/** # a few of these are generated and committed
# generation scripts
- hack/api/**
- hack/docs/**
- hack/manifests/**
- .clang-format
lint:
- *tests
- .features/**
# plus lint config
- .golangci.yml
# all GH workflows / actions
- .github/workflows/**
# docs files below
- docs/**
# generated files are covered by codegen
- '!docs/fields.md'
- '!docs/executor_swagger.md'
- '!docs/cli/**'
# proposals live only on GH as pure markdown
- '!docs/proposals/**'
# docs scripts & tools from `make docs`
- hack/docs/copy-readme.sh
- hack/docs/check-env-doc.sh
- hack/featuregen/**
- .markdownlint.yaml
- .mlc_config.json
- .spelling
- mkdocs.yml
ui:
- *common
- ui/**
tests:
name: Unit Tests
needs: [ changed-files ]
if: ${{ needs.changed-files.outputs.tests == 'true' }}
runs-on: ubuntu-24.04
timeout-minutes: 20
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- run: make test STATIC_FILES=false GOTEST='go test -p 20 -covermode=atomic -coverprofile=coverage.out'
- name: Upload coverage report
# engineers just ignore this in PRs, so let's not even run it
if: github.ref == 'refs/heads/main'
uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed # v4.3.0
with:
fail_ci_if_error: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
tests-windows:
name: Windows Unit Tests
needs: [ changed-files ]
if: ${{ needs.changed-files.outputs.tests == 'true' }}
runs-on: windows-2022
timeout-minutes: 20
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
# windows run does not use makefile target because it does a lot more than just testing and is not cross-platform compatible
- run: if (!(Test-Path "ui/dist/app/index.html")) { New-Item -ItemType Directory -Force -Path "ui/dist/app" | Out-Null; New-Item -ItemType File -Path "ui/dist/app/placeholder" | Out-Null }; go test -p 20 -covermode=atomic -coverprofile='coverage.out' $(go list ./... | select-string -Pattern 'github.com/argoproj/argo-workflows/v3/workflow/controller' , 'github.com/argoproj/argo-workflows/v3/server' -NotMatch)
env:
KUBECONFIG: /dev/null
- name: Upload coverage report
# engineers just ignore this in PRs, so let's not even run it
if: github.ref == 'refs/heads/main'
uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed # v4.3.0
with:
fail_ci_if_error: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
build-binaries:
name: Build Binaries
needs: [ changed-files ]
if: ${{ needs.changed-files.outputs.e2e-tests == 'true' }}
runs-on: ubuntu-24.04
timeout-minutes: 10
strategy:
fail-fast: false
matrix:
include:
- target: controller
- target: cli
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- name: Build
run: make ${{matrix.target}}
- name: Upload
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
with:
name: ${{matrix.target}}
path: dist
if-no-files-found: error
argo-images:
name: argo-images
# needs: [ lint ]
runs-on: ubuntu-24.04
timeout-minutes: 10
strategy:
fail-fast: false
matrix:
include:
- image: argoexec
- image: argocli
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
- name: Build and export
uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0
with:
context: .
tags: quay.io/argoproj/${{matrix.image}}:latest
outputs: type=docker,dest=/tmp/${{matrix.image}}_image.tar
target: ${{matrix.image}}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Upload
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
with:
name: ${{matrix.image}}_image.tar
path: /tmp/${{matrix.image}}_image.tar
if-no-files-found: error
e2e-tests:
name: E2E Tests
needs: [ changed-files, argo-images, build-binaries ]
if: ${{ needs.changed-files.outputs.e2e-tests == 'true' }}
runs-on: ubuntu-24.04
# These tests usually finish in ~25m, but occasionally they take much longer due to resource
# contention on the runner, which we have no control over.
timeout-minutes: 60
env:
KUBECONFIG: /home/runner/.kubeconfig
E2E_ENV_FACTOR: 2
strategy:
fail-fast: false
matrix:
include:
- test: test-executor
profile: minimal
use-api: false
- test: test-corefunctional
profile: minimal
use-api: false
- test: test-functional
profile: minimal
use-api: false
- test: test-api
profile: mysql
use-api: true
- test: test-api
profile: postgres
use-api: true
- test: test-cli
profile: mysql
use-api: true
- test: test-cron
profile: minimal
use-api: false
- test: test-examples
profile: minimal
use-api: false
- test: test-plugins
profile: plugins
use-api: false
- test: test-java-sdk
profile: minimal
use-api: true
- test: test-python-sdk
profile: minimal
use-api: true
- test: test-executor
k8s_version: min
profile: minimal
use-api: false
- test: test-corefunctional
k8s_version: min
profile: minimal
use-api: false
- test: test-functional
k8s_version: min
profile: minimal
use-api: false
- test: test-dbsemaphore
k8s_version: min
profile: mysql
use-api: false
- test: test-dbsemaphore
k8s_version: min
profile: postgres
use-api: false
steps:
- name: Free up unused disk space
run: |
printf "==> Available space before cleanup\n"
df -h
# these directories are not used by E2E tests
sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc /usr/local/.ghcup /opt/hostedtoolcache/CodeQL
printf "==> Available space after cleanup\n"
df -h
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- name: Install Java for the SDK
if: ${{matrix.test == 'test-java-sdk'}}
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0
with:
java-version: '8'
distribution: adopt
cache: maven
- name: Install Python for the SDK
if: ${{matrix.test == 'test-python-sdk'}}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
with:
python-version: '3.x'
cache: pip
- name: Install socat (needed by Kubernetes) and kit
# socat is needed for "kubectl port-forward" to work when using cri-dockerd: https://github.com/k3s-io/cri-dockerd/blob/4995f339edcffdf890406b3f1477d34e38477f18/streaming/streaming_others.go#L46
# Both cri-o and containerd removed it as a dependency a while ago, but that hasn't been ported to cri-dockerd.
# Running "make kit" isn't strictly necessary, since it would be installed automatically by "make start",
# but it's noisy and makes the logs for "Start controller/API" hard to follow.
run: sudo apt-get -y install socat && make kit
- name: Install and start K3S
env:
K8S_VERSION: ${{ matrix.k8s_version || 'max' }}
run: |
. hack/k8s-versions.sh
export INSTALL_K3S_VERSION="${K8S_VERSIONS[$K8S_VERSION]}+k3s1"
curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=stable \
INSTALL_K3S_EXEC="--docker --kubelet-arg=config=${GITHUB_WORKSPACE}/test/e2e/manifests/kubelet-configuration.yaml" \
K3S_KUBECONFIG_MODE=644 \
sh -
until kubectl --kubeconfig=/etc/rancher/k3s/k3s.yaml cluster-info ; do sleep 10s ; done
cp /etc/rancher/k3s/k3s.yaml /home/runner/.kubeconfig
echo "- name: fake_token_user" >> $KUBECONFIG
echo " user:" >> $KUBECONFIG
echo " token: xxxxxx" >> $KUBECONFIG
until kubectl cluster-info ; do sleep 10s ; done
- name: Download images
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
pattern: '*_image.tar'
path: /tmp
- name: Load images
run: |
set -eux
docker load < /tmp/argoexec_image.tar/argoexec_image.tar
docker load < /tmp/argocli_image.tar/argocli_image.tar
- name: Download controller
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: controller
path: dist/
- name: Download CLI
if: ${{matrix.use-api}}
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: cli
path: dist/
- name: Prepare binaries
run: chmod +x dist/* && make --touch dist/*
- name: Set-up /etc/hosts
run: |
echo '127.0.0.1 dex' | sudo tee -a /etc/hosts
echo '127.0.0.1 minio' | sudo tee -a /etc/hosts
echo '127.0.0.1 postgres' | sudo tee -a /etc/hosts
echo '127.0.0.1 mysql' | sudo tee -a /etc/hosts
echo '127.0.0.1 azurite' | sudo tee -a /etc/hosts
- name: Start controller/API
run: |
make start PROFILE=${{matrix.profile}} \
AUTH_MODE=client STATIC_FILES=false \
LOG_LEVEL=info \
API=${{matrix.use-api}} \
UI=false \
POD_STATUS_CAPTURE_FINALIZER=true 2>&1 | tee /tmp/argo.log &
make wait PROFILE=${{matrix.profile}} API=${{matrix.use-api}}
timeout-minutes: 5
- name: Validate release manifests
run: make manifests-validate
- name: Run tests ${{matrix.test}}
run: make ${{matrix.test}} E2E_SUITE_TIMEOUT=20m STATIC_FILES=false
# failure debugging below
- name: Failure debug - k3s logs
if: ${{ failure() }}
run: journalctl -u k3s
- name: Failure debug - describe MinIO/MySQL deployment
if: ${{ failure() }}
run: |
set -eux
kubectl get deploy
kubectl describe deploy
- name: Failure debug - describe MinIO/MySQL pods
if: ${{ failure() }}
run: |
set -eux
kubectl get pods -l '!workflows.argoproj.io/workflow'
kubectl describe pods -l '!workflows.argoproj.io/workflow'
- name: Failure debug - MinIO/MySQL logs
if: ${{ failure() }}
run: kubectl logs -l '!workflows.argoproj.io/workflow' --prefix
- name: Failure debug - Controller/API logs
if: ${{ failure() }}
run: |
[ -e /tmp/argo.log ] && cat /tmp/argo.log
- if: ${{ failure() }}
name: Failure debug - describe Workflows
run: |
set -eux
kubectl get wf
kubectl describe wf
- name: Failure debug - describe Workflow pods
if: ${{ failure() }}
run: |
set -eux
kubectl get pods -l workflows.argoproj.io/workflow
kubectl describe pods -l workflows.argoproj.io/workflow
- name: Failure debug - Workflow Pod logs
if: ${{ failure() }}
run: kubectl logs --all-containers -l workflows.argoproj.io/workflow --prefix
# workaround for status checks -- check this one job instead of each individual E2E job in the matrix
# this allows us to skip the entire matrix when it doesn't need to run while still having accurate status checks
# see https://github.com/orgs/community/discussions/9141#discussioncomment-2296809 and https://github.com/orgs/community/discussions/26822#discussioncomment-3305794
e2e-tests-composite-result:
name: E2E Tests - Composite result
needs: [ e2e-tests ]
if: ${{ always() }}
runs-on: ubuntu-24.04
steps:
- run: |
result="${{ needs.e2e-tests.result }}"
# mark as successful even if skipped
if [[ $result == "success" || $result == "skipped" ]]; then
exit 0
else
exit 1
fi
codegen:
name: Codegen
needs: [ changed-files ]
if: ${{ needs.changed-files.outputs.codegen == 'true' }}
runs-on: ubuntu-24.04
timeout-minutes: 20
env:
GOPATH: /home/runner/go
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- name: Install protoc
run: |
set -eux -o pipefail
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.19.4/protoc-3.19.4-linux-x86_64.zip
sudo unzip -o protoc-3.19.4-linux-x86_64.zip -d /usr/local bin/protoc
sudo unzip -o protoc-3.19.4-linux-x86_64.zip -d /usr/local 'include/*'
sudo chmod +x /usr/local/bin/protoc
sudo find /usr/local/include -type f | xargs sudo chmod a+r
sudo find /usr/local/include -type d | xargs sudo chmod a+rx
ls /usr/local/include/google/protobuf/
- name: Pull OpenAPI Generator CLI Docker image
run: |
docker pull openapitools/openapi-generator-cli:v5.4.0 &
docker pull openapitools/openapi-generator-cli:v5.2.1 &
- name: Create symlinks
run: |
mkdir -p /home/runner/go/src/github.com/argoproj
ln -s "$PWD" /home/runner/go/src/github.com/argoproj/argo-workflows
- run: make codegen -B STATIC_FILES=false
# if codegen makes changes that are not in the PR, fail the build
- name: Check if codegen made changes not present in the PR
run: git diff --exit-code
lint:
name: Lint
needs: [ changed-files ]
if: ${{ needs.changed-files.outputs.lint == 'true' }}
runs-on: ubuntu-24.04
timeout-minutes: 15 # must be strictly greater than the timeout in .golangci.yml
env:
GOPATH: /home/runner/go
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- run: make lint STATIC_FILES=false
# if lint makes changes that are not in the PR, fail the build
- name: Check if lint made changes not present in the PR
run: git diff --exit-code
# lint GH Actions
- name: Ensure GH Actions are pinned to SHAs
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # v3.0.3
ui:
name: UI
needs: [ changed-files ]
if: ${{ needs.changed-files.outputs.ui == 'true' }}
runs-on: ubuntu-24.04
timeout-minutes: 6
env:
NODE_OPTIONS: --max-old-space-size=4096
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1
with:
node-version: "20" # change in all GH Workflows
cache: yarn
cache-dependency-path: ui/yarn.lock
- run: yarn --cwd ui install
- run: yarn --cwd ui build
- run: yarn --cwd ui test
- run: yarn --cwd ui lint
- run: yarn --cwd ui deduplicate
# if lint or deduplicate make changes that are not in the PR, fail the build
- name: Check if lint & deduplicate made changes not present in the PR
run: git diff --exit-code
# check to see if it'll start (but not if it'll render)
- run: yarn --cwd ui start &
- run: until curl http://localhost:8080 > /dev/null ; do sleep 10s ; done
timeout-minutes: 1

@@ -0,0 +1,30 @@
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions
name: Approve and enable auto-merge for dependabot
on: pull_request
permissions:
contents: read
jobs:
review:
if: ${{ github.actor == 'dependabot[bot]' && github.repository == 'argoproj/argo-workflows'}}
permissions:
pull-requests: write # for approving a PR
contents: write # for enabling auto-merge on a PR
runs-on: ubuntu-24.04
steps:
- name: Dependabot metadata
id: metadata
uses: dependabot/fetch-metadata@c9c4182bf1b97f5224aee3906fd373f6b61b4526 # v1.6.0
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
- name: Approve PR
run: gh pr review --approve "$PR_URL"
env:
PR_URL: ${{github.event.pull_request.html_url}}
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
- name: Enable auto-merge for Dependabot PRs
run: gh pr merge --auto --squash "$PR_URL"
env:
PR_URL: ${{github.event.pull_request.html_url}}
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}

50 .github/workflows/devcontainer.yaml vendored Normal file

@@ -0,0 +1,50 @@
name: Dev Container
on:
push:
paths:
- ".devcontainer/**"
- "hack/k8s-versions.sh"
- ".github/workflows/devcontainer.yaml"
branches:
- main
pull_request:
paths:
- ".devcontainer/**"
- "hack/k8s-versions.sh"
- ".github/workflows/devcontainer.yaml"
branches:
- main
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
packages: write
jobs:
build:
runs-on: ubuntu-22.04
if: github.repository == 'argoproj/argo-workflows'
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
with:
# Workaround for segfaults under arm64:
# https://github.com/docker/setup-qemu-action/issues/198#issuecomment-2653791775
image: tonistiigi/binfmt:qemu-v7.0.0-28
- uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
- name: Login to registry
if: ${{ github.event_name == 'push' }}
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}
- name: Build dev container image
run: make devcontainer-build
env:
TARGET_PLATFORM: linux/amd64,linux/arm64
DEVCONTAINER_PUSH: ${{ github.event_name == 'push' && 'true' || 'false' }}

48 .github/workflows/docs.yaml vendored Normal file

@@ -0,0 +1,48 @@
name: Docs
on:
push:
branches:
- main
pull_request:
branches:
- main
- release/*
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
docs:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
with:
python-version: 3.9
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
- uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1
with:
node-version: "19"
# This is mainly here so the dependencies get saved to the cache by "actions/setup-go"
- name: Download go dependencies
run: go mod download
# Use the same make target both locally and on CI to make it easier to debug failures.
- name: Build & Lint docs
run: make docs
# If linters auto-fix issues, files will be changed. If so, fail the build.
- name: Check if linters made changes
run: git diff --exit-code
# Upload the site so reviewers see it.
- name: Upload Docs Site
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
with:
name: docs
path: site
if-no-files-found: error

73 .github/workflows/pr.yaml vendored Normal file

@@ -0,0 +1,73 @@
name: PR
on:
pull_request_target:
types:
- opened
- edited
- reopened
- synchronize
permissions:
contents: read
jobs:
title-check:
runs-on: ubuntu-24.04
outputs:
type: ${{ steps.semantic-pr-check.outputs.type }}
steps:
- name: Check PR Title's semantic conformance
id: semantic-pr-check
uses: amannn/action-semantic-pull-request@0723387faaf9b38adef4775cd42cfd5155ed6017 # v5.5.3
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
feature-pr-handling:
needs: title-check
runs-on: ubuntu-24.04
if: needs.title-check.outputs.type == 'feat'
env:
PR_HEAD: ${{ github.event.pull_request.head.sha }}
steps:
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
fetch-depth: 50
- name: Ensure ./.features/pending/*.md addition(s)
id: changed-files
uses: tj-actions/changed-files@cbda684547adc8c052d50711417fa61b428a9f88 # v41.1.2
with:
files: |
.features/pending/*.md
- name: No ./.features/*.md addition
if: steps.changed-files.outputs.added_files_count == 0
run: |
echo "No feature description was added to the ./.features/ directory for this feature PR."
echo "Please add a .md file to the ./.features/ directory."
echo "See docs/running-locally.md for more details."
false
- name: Validate ./.features/*.md changes
if: steps.changed-files.outputs.added_files_count > 0
run: |
echo "A feature description was added to the ./.features/ directory."
make features-validate \
|| { echo "New ./.features/*.md file failed validation."; exit 1; }
# In order to validate any links in the yaml file, render the config to markdown
- name: Render .features/*.md feature descriptions
run: make features-preview > features_preview.md
- name: Link Checker
id: lychee
uses: lycheeverse/lychee-action@f613c4a64e50d792e0b31ec34bbcbba12263c6a6 # f613c4a64e50d792e0b31ec34bbcbba12263c6a6
with:
args: "--verbose --no-progress ./features_preview.md"
failIfEmpty: false

364 .github/workflows/release.yaml vendored Normal file

@@ -0,0 +1,364 @@
name: Release
on:
push:
tags:
- v*
branches:
- main
- dev-*
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
permissions:
contents: read
jobs:
build-linux:
name: Build & push linux
if: github.repository == 'argoproj/argo-workflows'
runs-on: ubuntu-24.04
strategy:
matrix:
platform: [ linux/amd64, linux/arm64 ]
target: [ workflow-controller, argocli, argoexec, argoexec-nonroot ]
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Set up QEMU
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
with:
version: v0.10.4
- name: Cache Docker layers
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
id: cache
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-${{ matrix.platform }}-${{ matrix.target }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-${{ matrix.platform }}-${{ matrix.target }}-buildx-
- name: Docker Login
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
username: ${{ secrets.DOCKERIO_USERNAME }}
password: ${{ secrets.DOCKERIO_PASSWORD }}
- name: Login to Quay
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}
- name: Docker Buildx
env:
DOCKERIO_ORG: ${{ secrets.DOCKERIO_ORG }}
PLATFORM: ${{ matrix.platform }}
TARGET: ${{ matrix.target }}
run: |
set -eux
tag=$(basename $GITHUB_REF)
if [ $tag = "main" ]; then
tag="latest"
fi
# copied verbatim from Makefile
GIT_COMMIT=$(git rev-parse HEAD || echo unknown)
GIT_TAG=$(git describe --exact-match --tags --abbrev=0 2> /dev/null || echo untagged)
GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
tag_suffix=$(echo $PLATFORM | sed -r "s/\//-/g")
# Special handling for argoexec-nonroot to create argoexec:tag-nonroot-platform instead
if [ "$TARGET" = "argoexec-nonroot" ]; then
image_name="${DOCKERIO_ORG}/argoexec:${tag}-nonroot-${tag_suffix}"
else
image_name="${DOCKERIO_ORG}/${TARGET}:${tag}-${tag_suffix}"
fi
docker buildx build \
--cache-from "type=local,src=/tmp/.buildx-cache" \
--cache-to "type=local,dest=/tmp/.buildx-cache" \
--output "type=image,push=true" \
--build-arg GIT_COMMIT=$GIT_COMMIT \
--build-arg GIT_TAG=$GIT_TAG \
--build-arg GIT_TREE_STATE=$GIT_TREE_STATE \
--platform="${PLATFORM}" \
--target $TARGET \
--provenance=false \
--tag quay.io/$image_name .
build-windows:
name: Build & push windows
if: github.repository == 'argoproj/argo-workflows'
runs-on: windows-2022
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Docker Login
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
username: ${{ secrets.DOCKERIO_USERNAME }}
password: ${{ secrets.DOCKERIO_PASSWORD }}
- name: Login to Quay
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}
- name: Build & Push Windows Docker Images
env:
DOCKERIO_ORG: ${{ secrets.DOCKERIO_ORG }}
run: |
docker_org=$DOCKERIO_ORG
tag=$(basename $GITHUB_REF)
if [ $tag = "main" ]; then
tag="latest"
fi
targets="argoexec"
for target in $targets; do
image_name="${docker_org}/${target}:${tag}-windows"
docker build \
--build-arg GIT_COMMIT=$tag \
--build-arg GIT_BRANCH=$branch \
--build-arg GIT_TREE_STATE=$tree_state \
--target $target \
-t $image_name \
-f Dockerfile.windows \
.
docker tag $image_name quay.io/$image_name
docker push quay.io/$image_name
done
push-images:
name: Push manifest with all images
if: github.repository == 'argoproj/argo-workflows'
runs-on: ubuntu-24.04
needs: [ build-linux, build-windows ]
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Docker Login
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
username: ${{ secrets.DOCKERIO_USERNAME }}
password: ${{ secrets.DOCKERIO_PASSWORD }}
- name: Login to Quay
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}
- name: Install cosign
uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0
with:
cosign-release: 'v2.2.3'
- name: Push Multiarch Image
env:
DOCKERIO_ORG: ${{ secrets.DOCKERIO_ORG }}
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
run: |
: ${DOCKER_CONFIG:=~/.docker}
echo $(jq -c '. + { "experimental": "enabled" }' ${DOCKER_CONFIG}/config.json) > ${DOCKER_CONFIG}/config.json
docker_org=$DOCKERIO_ORG
tag=$(basename $GITHUB_REF)
if [ $tag = "main" ]; then
tag="latest"
fi
targets="workflow-controller argoexec argoexec-nonroot argocli"
for target in $targets; do
if [ "$target" = "argoexec-nonroot" ]; then
# Special handling for argoexec-nonroot: create argoexec:tag-nonroot manifest
image_name="${docker_org}/argoexec:${tag}-nonroot"
docker manifest create quay.io/$image_name quay.io/${docker_org}/argoexec:${tag}-nonroot-linux-arm64 quay.io/${docker_org}/argoexec:${tag}-nonroot-linux-amd64
elif [ "$target" = "argoexec" ]; then
image_name="${docker_org}/${target}:${tag}"
docker manifest create quay.io/$image_name quay.io/${image_name}-linux-arm64 quay.io/${image_name}-linux-amd64 quay.io/${image_name}-windows
else
image_name="${docker_org}/${target}:${tag}"
docker manifest create quay.io/$image_name quay.io/${image_name}-linux-arm64 quay.io/${image_name}-linux-amd64
fi
docker manifest push quay.io/$image_name
cosign sign -y --key env://COSIGN_PRIVATE_KEY quay.io/$image_name
done
test-images-linux-amd64:
name: Try pulling linux/amd64
if: github.repository == 'argoproj/argo-workflows'
runs-on: ubuntu-24.04
needs: [ push-images ]
strategy:
matrix:
platform: [ linux/amd64 ]
target: [ workflow-controller, argocli, argoexec, argoexec-nonroot ]
steps:
- name: Docker Login
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
username: ${{ secrets.DOCKERIO_USERNAME }}
password: ${{ secrets.DOCKERIO_PASSWORD }}
- name: Login to Quay
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}
- name: Docker Buildx
env:
DOCKERIO_ORG: ${{ secrets.DOCKERIO_ORG }}
PLATFORM: ${{ matrix.platform }}
TARGET: ${{ matrix.target }}
run: |
tag=$(basename $GITHUB_REF)
if [ $tag = "main" ]; then
tag="latest"
fi
if [ "$TARGET" = "argoexec-nonroot" ]; then
image_name="${DOCKERIO_ORG}/argoexec:${tag}-nonroot"
else
image_name="${DOCKERIO_ORG}/${TARGET}:${tag}"
fi
docker pull quay.io/$image_name
test-images-windows:
name: Try pulling windows
if: github.repository == 'argoproj/argo-workflows'
runs-on: windows-2022
needs: [ push-images ]
steps:
- name: Docker Login
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
username: ${{ secrets.DOCKERIO_USERNAME }}
password: ${{ secrets.DOCKERIO_PASSWORD }}
- name: Login to Quay
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}
- name: Try pulling
env:
DOCKERIO_ORG: ${{ secrets.DOCKERIO_ORG }}
run: |
docker_org=$DOCKERIO_ORG
tag=$(basename $GITHUB_REF)
if [ $tag = "main" ]; then
tag="latest"
fi
targets="argoexec"
for target in $targets; do
image_name="${docker_org}/${target}:${tag}"
docker pull quay.io/$image_name
done
publish-release:
permissions:
contents: write # for softprops/action-gh-release to create GitHub release
runs-on: ubuntu-24.04
if: github.repository == 'argoproj/argo-workflows'
needs: [ push-images, test-images-linux-amd64, test-images-windows ]
env:
NODE_OPTIONS: --max-old-space-size=4096
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1
with:
node-version: "20" # change in all GH Workflows
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
- name: Restore node packages cache
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v1-${{ hashFiles('**/yarn.lock') }}
- name: Install cosign
uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0
with:
cosign-release: 'v2.2.3'
# https://stackoverflow.com/questions/58033366/how-to-get-current-branch-within-github-actions
- run: |
if [ ${GITHUB_REF##*/} = main ]; then
echo "VERSION=latest" >> $GITHUB_ENV
else
echo "VERSION=${GITHUB_REF##*/}" >> $GITHUB_ENV
fi
- run: go install sigs.k8s.io/bom/cmd/bom@v0.2.0
- run: go install github.com/spdx/spdx-sbom-generator/cmd/generator@v0.0.13
- run: mkdir -p dist
- run: generator -o dist -p .
- run: yarn --cwd ui install
- run: generator -o dist -p ui
- run: bom generate --image quay.io/argoproj/workflow-controller:$VERSION -o dist/workflow-controller.spdx
- run: bom generate --image quay.io/argoproj/argocli:$VERSION -o dist/argocli.spdx
- run: bom generate --image quay.io/argoproj/argoexec:$VERSION -o dist/argoexec.spdx
- run: bom generate --image quay.io/argoproj/argoexec:$VERSION-nonroot -o dist/argoexec-nonroot.spdx
# pack the boms into one file to make it easy to download
- run: tar -zcf dist/sbom.tar.gz dist/*.spdx
- run: make release-notes VERSION=$VERSION
- run: cat release-notes
- run: make manifests VERSION=$VERSION
- name: Print image tag (please check it is not `:latest`)
run: |
grep image: dist/manifests/install.yaml
- run: go mod download
- run: make clis STATIC_FILES=true VERSION=$VERSION
- name: Print version (please check it is not dirty)
run: dist/argo-linux-amd64 version
- run: make checksums
- name: Sign checksums and create public key for release assets
run: |
cosign sign-blob -y --key env://COSIGN_PRIVATE_KEY ./dist/argo-workflows-cli-checksums.txt > ./dist/argo-workflows-cli-checksums.sig
# Retrieves the public key to release as an asset
cosign public-key --key env://COSIGN_PRIVATE_KEY > ./dist/argo-workflows-cosign.pub
# https://github.com/softprops/action-gh-release
# This will publish the release and upload assets.
# If a conflict occurs (because you are not on a tag), the release will not be updated. This is a shortcoming of this action.
# Instead, delete the release so it is re-created.
- uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v1
if: startsWith(github.ref, 'refs/tags/v')
with:
prerelease: ${{ startsWith(github.ref, 'refs/tags/v0') || contains(github.ref, 'rc') }}
body_path: release-notes
files: |
dist/argo-*.gz
dist/argo-workflows-cli-checksums.txt
dist/argo-workflows-cli-checksums.sig
dist/manifests/*.yaml
dist/argo-workflows-cosign.pub
dist/sbom.tar.gz
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

25 .github/workflows/retest.yaml vendored Normal file

@@ -0,0 +1,25 @@
name: Detect and Trigger Retest
on:
issue_comment:
types: [created]
permissions:
contents: read
jobs:
retest:
# PR comments where a Member types "/retest" exactly
if: github.event.issue.pull_request && github.event.comment.author_association == 'MEMBER' && github.event.comment.body == '/retest'
permissions:
actions: write # for re-running failed jobs: https://docs.github.com/en/rest/actions/workflow-runs?apiVersion=2022-11-28#re-run-a-job-from-a-workflow-run
runs-on: ubuntu-24.04
steps:
- name: Re-run failed jobs for this PR
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
REPO: ${{ github.repository }}
PR_NUMBER: ${{ github.event.issue.number }}
run: |
# get the head commit SHA of the PR
SHA_REF=$(gh api "/repos/$REPO/pulls/$PR_NUMBER/commits" | jq -r '.[].sha' | tail -n 1)
# find the most recent ci-build workflow run for that commit
RUN_ID=$(gh api "repos/$REPO/actions/workflows/ci-build.yaml/runs?per_page=1&event=pull_request&head_sha=$SHA_REF" | jq -r '.workflow_runs[] | .id')
# re-run only the failed jobs of that run
gh api --method POST repos/$REPO/actions/runs/$RUN_ID/rerun-failed-jobs
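The same re-run can be triggered from a local shell with the `gh` CLI; a sketch, assuming a token with `actions: write` and a known run ID (the ID below is hypothetical):
```
# substitute a real workflow run ID, e.g. from the PR's Checks tab
RUN_ID=1234567890
gh run rerun "$RUN_ID" --failed --repo argoproj/argo-workflows
```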

28
.github/workflows/sdks.yaml vendored Normal file
View File

@ -0,0 +1,28 @@
name: SDKs
on:
push:
tags:
- v*
permissions:
contents: read
jobs:
sdks:
name: Publish SDKs
if: github.repository == 'argoproj/argo-workflows'
permissions:
packages: write # for publishing packages
contents: write # for creating releases
runs-on: ubuntu-24.04
strategy:
matrix:
name:
- java
- python
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- run: make --directory sdks/${{matrix.name}} publish -B
env:
JAVA_SDK_MAVEN_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }}

47
.github/workflows/snyk.yml vendored Normal file
View File

@ -0,0 +1,47 @@
name: Snyk
on:
schedule:
- cron: "30 2 * * *"
push:
branches:
- main
- release-*
permissions:
contents: read
jobs:
# we do not scan images here; they are scanned at https://app.snyk.io/org/argoproj/projects
golang:
name: Scan Go deps
if: github.repository == 'argoproj/argo-workflows'
runs-on: ubuntu-24.04
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- run: mkdir -p ui/dist/app/ && touch ui/dist/app/placeholder
- name: Run Snyk to check for Go vulnerabilities
uses: snyk/actions/golang@b98d498629f1c368650224d6d212bf7dfa89e4bf # v0.4.0
with:
args: --severity-threshold=high
node:
name: Scan Node deps
if: github.repository == 'argoproj/argo-workflows'
runs-on: ubuntu-24.04
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1
with:
node-version: "20" # change in all GH Workflows
cache: yarn
cache-dependency-path: ui/yarn.lock
- run: yarn --cwd ui install
- name: Run Snyk to check for Node vulnerabilities
uses: snyk/actions/node@b98d498629f1c368650224d6d212bf7dfa89e4bf # v0.4.0
with:
args: --file=ui/package.json --severity-threshold=high
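Roughly the same scans can be reproduced locally; a sketch, assuming the `snyk` CLI is installed and `SNYK_TOKEN` is exported:
```
# from the repository root
snyk test --severity-threshold=high                         # Go dependencies
snyk test --file=ui/package.json --severity-threshold=high  # Node dependencies
```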

41
.github/workflows/stale.yaml vendored Normal file
View File

@ -0,0 +1,41 @@
# https://github.com/actions/stale
name: Mark stale issues and pull requests
on:
schedule:
- cron: '0 2 * * *' # once a day at 2am
permissions:
contents: read
jobs:
stale:
permissions:
issues: write # for commenting on an issue and editing labels
pull-requests: write # for commenting on a PR and editing labels
runs-on: ubuntu-24.04
steps:
- uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# timing
days-before-stale: 14 # 2 weeks of inactivity
days-before-close: 14 # 2 more weeks of inactivity
# labels to watch for, add, and remove
only-labels: 'problem/more information needed' # only mark issues/PRs as stale if they have this label
labels-to-remove-when-unstale: 'problem/more information needed' # remove label when unstale -- should be manually added back if information is insufficient
stale-issue-label: 'problem/stale'
stale-pr-label: 'problem/stale'
# automated messages to issue/PR authors
stale-issue-message: >
This issue has been automatically marked as stale because it has not had recent activity and needs more information.
It will be closed if no further activity occurs.
stale-pr-message: >
This PR has been automatically marked as stale because it has not had recent activity and needs further changes.
It will be closed if no further activity occurs.
close-issue-message: >
This issue has been closed due to inactivity and lack of information.
If you still encounter this issue, please add the requested information and re-open.
close-pr-message: >
This PR has been closed due to inactivity and lack of changes.
If you would still like to work on this PR, please address the review comments and re-open.

47
.gitignore vendored
View File

@ -1,9 +1,54 @@
Pipfile
.vscode/
.idea/
.node-version
.DS_Store
vendor/
dist/
*.iml
# delve debug binaries
cmd/**/debug
hack/**/debug
hack/featuregen/featuregen
/argo
/argoexec
release-notes
debug.test
git-ask-pass.sh
*.iml
/coverage.out
.envrc
/.vendor-new
/kustomize
/workflow-controller
/.scannerwork/
/test-results/
/package-lock.json
/pkg/apiclient/_.secondary.swagger.json
/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json
/pkg/apiclient/cronworkflow/cron-workflow.swagger.json
/pkg/apiclient/event/event.swagger.json
/pkg/apiclient/eventsource/eventsource.swagger.json
/pkg/apiclient/info/info.swagger.json
/pkg/apiclient/pipeline/pipeline.swagger.json
/pkg/apiclient/sensor/sensor.swagger.json
/pkg/apiclient/workflow/workflow.swagger.json
/pkg/apiclient/workflowarchive/workflow-archive.swagger.json
/pkg/apiclient/workflowtemplate/workflow-template.swagger.json
/site/
/.brew_home
/go-diagrams/
/.run/
sdks/python/client/dist/*
/v3/
/cmd/argoexec/commands/test.txt
/db-dumps/
.spelling.tmp
# Do not commit rendered installation manifests since they are misleading to users.
manifests/install.yaml
manifests/namespace-install.yaml
/logs
node_modules
result
.devenv
.devenv.flake.nix

94
.golangci.yml Normal file
View File

@ -0,0 +1,94 @@
version: "2"
run:
build-tags:
- api
- cli
- cron
- executor
- examples
- corefunctional
- functional
- plugins
linters:
enable:
- asasalint
- bidichk
- bodyclose
- copyloopvar
- errcheck
- gosec
- govet
- ineffassign
- misspell
- nakedret
- nosprintfhostport
- reassign
- rowserrcheck
- sqlclosecheck
- staticcheck
- testifylint
- unparam
- unused
settings:
gosec:
includes:
- G304
- G307
excludes:
# G106: Use of ssh InsecureIgnoreHostKey should be audited
- G106
# G402: TLS InsecureSkipVerify set true
- G402
staticcheck:
checks:
- all
# Capitalised error strings
- "-ST1005"
govet:
enable: [nilness]
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
rules:
- path: server/artifacts/artifact_server_test.go
text: response body must be closed
paths:
- dist
- docs
- examples
- hack
- manifests
- pkg/client
- sdks
- ui
- vendor
- third_party$
- builtin$
- examples$
formatters:
enable:
- gofmt
- goimports
settings:
goimports:
local-prefixes:
- github.com/argoproj/argo-workflows/
exclusions:
generated: lax
paths:
- dist
- docs
- examples
- hack
- manifests
- pkg/client
- sdks
- ui
- vendor
- third_party$
- builtin$
- examples$

8
.markdownlint.yaml Normal file
View File

@ -0,0 +1,8 @@
# no line-length limit
MD013: false
# mkdocs uses 4-space indents
MD007:
indent: 4
MD024:
siblings_only: true
MD034: false
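A sketch of a local lint pass using this config, assuming `markdownlint-cli` is installed:
```
markdownlint --config .markdownlint.yaml docs/
```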

11
.mlc_config.json Normal file
View File

@ -0,0 +1,11 @@
{
"ignorePatterns": [
{
"pattern": ".*localhost.*"
}
],
"aliveStatusCodes": [
200,
429
]
}
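This configures `markdown-link-check` to skip localhost links and tolerate rate-limiting (HTTP 429). A local invocation might look like this, assuming the tool is installed:
```
markdown-link-check --config .mlc_config.json README.md
```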

12
.readthedocs.yml Normal file
View File

@ -0,0 +1,12 @@
version: 2
formats: all
mkdocs:
fail_on_warning: false
configuration: mkdocs.yml
python:
install:
- requirements: docs/requirements.txt
build:
os: "ubuntu-22.04"
tools:
python: "3.12"

285
.spelling Normal file
View File

@ -0,0 +1,285 @@
# markdown-spellcheck dictionary
000s
0s
100Gi
100Mi
100s
10h
10s
120s
120sec
1Gi
1Mi
1h
1m
200Ki
2Gi
2h
30s
3min
3s
4Gi
4xx
512Mi
5m
5xx
8Ki
90m
Alexandre
Alibaba
Ang
Anthos
Approvers
ArgoLabs
Artifactory
BlackRock
Breitgand
CRD
CRDs
CloudSQL
ClusterRoleBinding
ClusterRoles
ClusterWorkflowTemplate
ClusterWorkflowTemplates
Codespaces
ConfigMap
ConfigMaps
ContainerSet
Couler
CronWorkflow
CronWorkflows
CustomResource
DataDog
Dataflow
DevOps
Devenv
Dex
EditorConfig
EtcD
EventRouter
Generator
GitOps
Github
Golang
Grafana
Grammarly
Hadoop
Heptio
Homebrew
IAM-based
IPs
InitContainer
InsideBoard
Invocators
Istio
Jemison
JetBrains
KNative
Kaniko
Katacoda
Katib
Kerberos
KeyValueEditor
Killercoda
KubectlExec
Kubeflow
Kustomize
LDFlags
Lifecycle-Hook
LitmusChaos
MLOps
Makefile
Metaflow
MinIO
Minikube
MySQL
Nagal
Nano
Nginx
Node.JS.
OAuth
OAuth2
Okta
OpenAPI
OpenTelemetry
PDBs
PProf
PVCs
Peixuan
PersistentVolumeClaims
Ploomber
PostgreSQL
Postgres
Pre-fill
PriorityClass
RCs
Risc-V
Roadmap
RoleBinding
SDKs
SageMaker
ServiceAccount
Sharding
Singer.io
Snyk
Sumit
Tekton
Traefik
Triaging
TripAdvisor
UI
VSCode
Valasek
Webhooks
Welch
WorkflowEventBinding
WorkflowTemplate
WorkflowTemplates
a.m.
anded
apis
architecting
argo
argoexec
argoproj
args
async
auth
backend
backoff
backported
boolean
booleans
buildkit
changelog
codebase
config
cpu
cron
daemoned
dependabot
dev
devenv
dockershim
docs
dropdown
e.g.
e2e
entrypoint
enum
env
errored
expr
fibonacci
finalizer
gitops
goroutine
goroutines
govaluate
grpc
gzipped
i.e.
idempotence
instantiator
instantiators
jenkins
k3d
k3s
k8s
k8s-jobs
kube
kube-apiserver
kube-scheduler
kubectl
kubelet
kubernetes
liveness
localhost
maxFailures
maxSuccess
md
memoization
memoized
memoizing
metadata
minikube
mutex
mutexes
namespace
namespaces
natively
nix.conf
non-member
p.m.
parameterizing
params
pprof
pre-commit
pytorch
qps
ray
rc2
repo
retryStrategy
roadmap
runtime
runtimes
s3
sandboxed
shortcodes
stateful
stderr
symlinks
temporality
tolerations
triaged
un-reconciled
v1
v1.0
v1.1
v1.2
v1.24
v1.3
v2
v2.0
v2.1
v2.10
v2.10.2
v2.11
v2.12
v2.35.0
v2.4
v2.5
v2.6
v2.7
v2.7.2
v2.8
v2.9
v3.0
v3.0.0
v3.1
v3.1.4
v3.2
v3.2.
v3.3
v3.3.
v3.4
v3.4.
v3.4.4
v3.5
v3.6
v3.6.0
v3.6.1
v3.6.5
v3.7
v3.7.0
validator
vendored
versioned
versioning
webHDFS
webhook
webhooks
workflow-controller-configmap
workqueue
yaml

5040
CHANGELOG-2-x-x.md Normal file

File diff suppressed because it is too large

6943
CHANGELOG.md Normal file

File diff suppressed because it is too large

1
CODEOWNERS Normal file
View File

@ -0,0 +1 @@
*.proto @joibel @terrytangyuan @sarabala1979

View File

@ -1,40 +1,2 @@
## Requirements
Make sure you have the following tools installed: [golang](https://golang.org/), [dep](https://github.com/golang/dep), [protobuf](https://developers.google.com/protocol-buffers/),
and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/).
```
$ brew install go dep protobuf kubectl
$ go get -u github.com/golang/protobuf/protoc-gen-go
```
Nice to have [gometalinter](https://github.com/alecthomas/gometalinter) and [goreman](https://github.com/mattn/goreman):
```
$ go get -u gopkg.in/alecthomas/gometalinter.v2 github.com/mattn/goreman && gometalinter.v2 --install
```
## Building
```
$ go get -u github.com/argoproj/argo-cd
$ dep ensure
$ make
```
## Running locally
You need access to a Kubernetes cluster (such as [minikube](https://kubernetes.io/docs/tasks/tools/install-minikube/) or [Docker Edge](https://docs.docker.com/docker-for-mac/install/)) in order to run Argo CD on your laptop:
* install kubectl: `brew install kubectl`
* make sure `kubectl` is connected to your cluster (e.g. `kubectl get pods` should work).
* install the application CRD using the following command:
```
$ kubectl create -f install/manifests/01_application-crd.yaml
```
* start Argo CD services using [goreman](https://github.com/mattn/goreman):
```
$ goreman start
```
<!-- markdownlint-disable-next-line MD041 -->
See [docs/CONTRIBUTING.md](docs/CONTRIBUTING.md).

View File

@ -1,13 +0,0 @@
Copyright 2017-2018 The Argo Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

132
Dockerfile Normal file
View File

@ -0,0 +1,132 @@
#syntax=docker/dockerfile:1.2
ARG GIT_COMMIT=unknown
ARG GIT_TAG=unknown
ARG GIT_TREE_STATE=unknown
FROM golang:1.24.4-alpine3.22 as builder
# libc-dev to build openapi-gen
RUN apk update && apk add --no-cache \
git \
make \
ca-certificates \
wget \
curl \
gcc \
libc-dev \
bash \
mailcap
WORKDIR /go/src/github.com/argoproj/argo-workflows
COPY go.mod .
COPY go.sum .
RUN --mount=type=cache,target=/go/pkg/mod go mod download
COPY . .
####################################################################################################
FROM node:20-alpine as argo-ui
RUN apk update && apk add --no-cache git
COPY ui/package.json ui/yarn.lock ui/
RUN --mount=type=cache,target=/root/.yarn \
YARN_CACHE_FOLDER=/root/.yarn JOBS=max \
yarn --cwd ui install --network-timeout 1000000
COPY ui ui
COPY api api
RUN --mount=type=cache,target=/root/.yarn \
YARN_CACHE_FOLDER=/root/.yarn JOBS=max \
NODE_OPTIONS="--max-old-space-size=2048" JOBS=max yarn --cwd ui build
####################################################################################################
FROM builder as argoexec-build
ARG GIT_COMMIT
ARG GIT_TAG
ARG GIT_TREE_STATE
RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build make dist/argoexec GIT_COMMIT=${GIT_COMMIT} GIT_TAG=${GIT_TAG} GIT_TREE_STATE=${GIT_TREE_STATE}
####################################################################################################
FROM builder as workflow-controller-build
ARG GIT_COMMIT
ARG GIT_TAG
ARG GIT_TREE_STATE
RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build make dist/workflow-controller GIT_COMMIT=${GIT_COMMIT} GIT_TAG=${GIT_TAG} GIT_TREE_STATE=${GIT_TREE_STATE}
####################################################################################################
FROM builder as argocli-build
ARG GIT_COMMIT
ARG GIT_TAG
ARG GIT_TREE_STATE
RUN mkdir -p ui/dist
COPY --from=argo-ui ui/dist/app ui/dist/app
# update timestamp so that `make` doesn't try to rebuild this -- it was already built in the previous stage
RUN touch ui/dist/app/index.html
RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build STATIC_FILES=true make dist/argo GIT_COMMIT=${GIT_COMMIT} GIT_TAG=${GIT_TAG} GIT_TREE_STATE=${GIT_TREE_STATE}
####################################################################################################
FROM gcr.io/distroless/static as argoexec-base
COPY --from=argoexec-build /etc/mime.types /etc/mime.types
COPY hack/ssh_known_hosts /etc/ssh/
COPY hack/nsswitch.conf /etc/
####################################################################################################
FROM argoexec-base as argoexec-nonroot
USER 8737
COPY --chown=8737 --from=argoexec-build /go/src/github.com/argoproj/argo-workflows/dist/argoexec /bin/
ENTRYPOINT [ "argoexec" ]
####################################################################################################
FROM argoexec-base as argoexec
COPY --from=argoexec-build /go/src/github.com/argoproj/argo-workflows/dist/argoexec /bin/
ENTRYPOINT [ "argoexec" ]
####################################################################################################
FROM gcr.io/distroless/static as workflow-controller
USER 8737
COPY hack/ssh_known_hosts /etc/ssh/
COPY hack/nsswitch.conf /etc/
COPY --chown=8737 --from=workflow-controller-build /go/src/github.com/argoproj/argo-workflows/dist/workflow-controller /bin/
ENTRYPOINT [ "workflow-controller" ]
####################################################################################################
FROM gcr.io/distroless/static as argocli
USER 8737
WORKDIR /home/argo
# Temporary workaround for https://github.com/grpc/grpc-go/issues/434
ENV GRPC_ENFORCE_ALPN_ENABLED=false
COPY hack/ssh_known_hosts /etc/ssh/
COPY hack/nsswitch.conf /etc/
COPY --from=argocli-build /go/src/github.com/argoproj/argo-workflows/dist/argo /bin/
ENTRYPOINT [ "argo" ]

View File

@ -1,84 +0,0 @@
FROM debian:9.3 as builder
RUN apt-get update && apt-get install -y \
git \
make \
wget \
gcc \
zip && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
# Install go
ENV GO_VERSION 1.9.3
ENV GO_ARCH amd64
ENV GOPATH /root/go
ENV PATH ${GOPATH}/bin:/usr/local/go/bin:${PATH}
RUN wget https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \
tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \
rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz
# Install protoc, dep, packr
ENV PROTOBUF_VERSION 3.5.1
RUN cd /usr/local && \
wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/protoc-${PROTOBUF_VERSION}-linux-x86_64.zip && \
unzip protoc-*.zip && \
wget https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -O /usr/local/bin/dep && \
chmod +x /usr/local/bin/dep && \
wget https://github.com/gobuffalo/packr/releases/download/v1.10.4/packr_1.10.4_linux_amd64.tar.gz && \
tar -vxf packr*.tar.gz -C /tmp/ && \
mv /tmp/packr /usr/local/bin/packr
# A dummy directory is created under $GOPATH/src/dummy so we are able to use dep
# to install all the packages of our dep lock file
COPY Gopkg.toml ${GOPATH}/src/dummy/Gopkg.toml
COPY Gopkg.lock ${GOPATH}/src/dummy/Gopkg.lock
RUN cd ${GOPATH}/src/dummy && \
dep ensure -vendor-only && \
mv vendor/* ${GOPATH}/src/ && \
rmdir vendor
# Perform the build
ARG MAKE_TARGET
WORKDIR /root/go/src/github.com/argoproj/argo-cd
COPY . .
RUN make ${MAKE_TARGET}
##############################################################
# This stage will pull in or build any CLI tooling we need for our final image
FROM golang:1.10 as cli-tooling
# NOTE: we frequently switch between tip of master ksonnet vs. official builds. Comment/uncomment
# the corresponding section to switch between the two options:
# Option 1: build ksonnet ourselves
#RUN go get -v -u github.com/ksonnet/ksonnet && mv ${GOPATH}/bin/ksonnet /ks
# Option 2: use official tagged ksonnet release
ENV KSONNET_VERSION=0.10.0-alpha.3
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /ks
RUN curl -o /kubectl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
chmod +x /kubectl
##############################################################
FROM debian:9.3
RUN apt-get update && apt-get install -y git && \
apt-get clean && \
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
ARG BINARY
COPY --from=builder /root/go/src/github.com/argoproj/argo-cd/dist/${BINARY} /${BINARY}
COPY --from=cli-tooling /ks /usr/local/bin/ks
COPY --from=cli-tooling /kubectl /usr/local/bin/kubectl
# workaround ksonnet issue https://github.com/ksonnet/ksonnet/issues/298
ENV USER=root
ENV BINARY=$BINARY
CMD /$BINARY

View File

@ -1,21 +0,0 @@
FROM golang:1.9.2
WORKDIR /tmp
RUN curl -O https://get.docker.com/builds/Linux/x86_64/docker-1.13.1.tgz && \
tar -xzf docker-1.13.1.tgz && \
mv docker/docker /usr/local/bin/docker && \
rm -rf ./docker && \
go get -u github.com/golang/dep/cmd/dep && \
go get -u gopkg.in/alecthomas/gometalinter.v2 && \
gometalinter.v2 --install
# Install kubectl
RUN curl -o /usr/local/bin/kubectl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
# Install ksonnet
ENV KSONNET_VERSION=0.10.0-alpha.3
RUN wget https://github.com/ksonnet/ksonnet/releases/download/v${KSONNET_VERSION}/ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
tar -C /tmp/ -xf ks_${KSONNET_VERSION}_linux_amd64.tar.gz && \
mv /tmp/ks_${KSONNET_VERSION}_linux_amd64/ks /usr/local/bin/ks && \
rm -rf /tmp/ks_${KSONNET_VERSION}

61
Dockerfile.windows Normal file
View File

@ -0,0 +1,61 @@
####################################################################################################
# Builder image
# Initial stage which pulls and prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs, so it needs all dependencies
####################################################################################################
ARG IMAGE_OS_VERSION=ltsc2022-amd64
ARG GIT_COMMIT=unknown
ARG GIT_TAG=unknown
ARG GIT_TREE_STATE=unknown
# had issues with the official golang image for windows so I'm using plain servercore
FROM mcr.microsoft.com/windows/servercore:${IMAGE_OS_VERSION} as builder
ENV GOLANG_VERSION=1.24
SHELL ["powershell", "-Command"]
# install chocolatey package manager
ENV chocolateyUseWindowsCompression=false
RUN iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1')); \
choco feature disable --name showDownloadProgress ; \
choco feature enable -n allowGlobalConfirmation
# install golang, dep and other tools
RUN choco install golang --version=$env:GOLANG_VERSION ; \
choco install make dep git.portable 7zip.portable
####################################################################################################
# argoexec-base
# Used as the base for both the release and development version of argoexec
####################################################################################################
FROM mcr.microsoft.com/windows/nanoserver:${IMAGE_OS_VERSION} as argoexec-base
COPY --from=builder /windows/system32/netapi32.dll /windows/system32/netapi32.dll
COPY --from=builder C:/ProgramData/chocolatey/lib/7zip.portable/tools/7z-extra/x64/7za.exe C:/app/7za.exe
# add binaries to path
USER Administrator
RUN SETX /m path C:\app;%path%
####################################################################################################
# Argo Build stage which performs the actual build of Argo binaries
####################################################################################################
FROM builder as argo-build
ARG GIT_COMMIT
ARG GIT_TAG
ARG GIT_TREE_STATE
# Perform the build
WORKDIR C:/Users/ContainerAdministrator/go/src/github.com/argoproj/argo-workflows
COPY . .
# run in git bash so that all the shell commands in the Makefile work
RUN bash -c 'make dist/argoexec GIT_COMMIT=${GIT_COMMIT} GIT_TAG=${GIT_TAG} GIT_TREE_STATE=${GIT_TREE_STATE} HACK_PKG_FILES_AS_PKGS=true'
####################################################################################################
# argoexec
####################################################################################################
FROM argoexec-base as argoexec
COPY --from=argo-build C:/Users/ContainerAdministrator/go/src/github.com/argoproj/argo-workflows/dist/argoexec C:/app/argoexec.exe
RUN argoexec version
ENTRYPOINT [ "argoexec" ]

787
Gopkg.lock generated
View File

@ -1,787 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata"]
revision = "767c40d6a2e058483c25fa193e963a22da17236d"
version = "v0.18.0"
[[projects]]
name = "github.com/GeertJohan/go.rice"
packages = [
".",
"embedded"
]
revision = "c02ca9a983da5807ddf7d796784928f5be4afd09"
[[projects]]
name = "github.com/PuerkitoBio/purell"
packages = ["."]
revision = "b938d81255b5473c57635324295cb0fe398c7a58"
[[projects]]
branch = "master"
name = "github.com/PuerkitoBio/urlesc"
packages = ["."]
revision = "de5bf2ad457846296e2031421a34e2568e304e35"
[[projects]]
name = "github.com/blang/semver"
packages = ["."]
revision = "2ee87856327ba09384cabd113bc6b5d174e9ec0f"
version = "v3.5.1"
[[projects]]
branch = "master"
name = "github.com/daaku/go.zipexe"
packages = ["."]
revision = "a5fe2436ffcb3236e175e5149162b41cd28bd27d"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
name = "github.com/emicklei/go-restful"
packages = [
".",
"log"
]
revision = "26b41036311f2da8242db402557a0dbd09dc83da"
version = "v2.6.0"
[[projects]]
name = "github.com/ghodss/yaml"
packages = ["."]
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/go-openapi/jsonpointer"
packages = ["."]
revision = "779f45308c19820f1a69e9a4cd965f496e0da10f"
[[projects]]
branch = "master"
name = "github.com/go-openapi/jsonreference"
packages = ["."]
revision = "36d33bfe519efae5632669801b180bf1a245da3b"
[[projects]]
branch = "master"
name = "github.com/go-openapi/spec"
packages = ["."]
revision = "1de3e0542de65ad8d75452a595886fdd0befb363"
[[projects]]
branch = "master"
name = "github.com/go-openapi/swag"
packages = ["."]
revision = "84f4bee7c0a6db40e3166044c7983c1c32125429"
[[projects]]
name = "github.com/gobuffalo/packr"
packages = ["."]
revision = "6434a292ac52e6964adebfdce3f9ce6d9f16be01"
version = "v1.10.4"
[[projects]]
name = "github.com/gogo/protobuf"
packages = [
"gogoproto",
"plugin/compare",
"plugin/defaultcheck",
"plugin/description",
"plugin/embedcheck",
"plugin/enumstringer",
"plugin/equal",
"plugin/face",
"plugin/gostring",
"plugin/marshalto",
"plugin/oneofcheck",
"plugin/populate",
"plugin/size",
"plugin/stringer",
"plugin/testgen",
"plugin/union",
"plugin/unmarshal",
"proto",
"protoc-gen-gofast",
"protoc-gen-gogo/descriptor",
"protoc-gen-gogo/generator",
"protoc-gen-gogo/grpc",
"protoc-gen-gogo/plugin",
"protoc-gen-gogofast",
"sortkeys",
"vanity",
"vanity/command"
]
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/golang/glog"
packages = ["."]
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = [
"jsonpb",
"proto",
"protoc-gen-go/descriptor",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/empty",
"ptypes/struct",
"ptypes/timestamp"
]
revision = "e09c5db296004fbe3f74490e84dcd62c3c5ddb1b"
[[projects]]
branch = "master"
name = "github.com/google/btree"
packages = ["."]
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
[[projects]]
name = "github.com/google/go-github"
packages = ["github"]
revision = "996760c56486beb81e91bb7bdb816f8c6f29284e"
[[projects]]
name = "github.com/google/go-jsonnet"
packages = [
".",
"ast",
"parser"
]
revision = "dfddf2b4e3aec377b0dcdf247ff92e7d078b8179"
[[projects]]
branch = "master"
name = "github.com/google/go-querystring"
packages = ["query"]
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
[[projects]]
branch = "master"
name = "github.com/google/gofuzz"
packages = ["."]
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
name = "github.com/googleapis/gnostic"
packages = [
"OpenAPIv2",
"compiler",
"extensions"
]
revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
version = "v0.1.0"
[[projects]]
branch = "master"
name = "github.com/gregjones/httpcache"
packages = [
".",
"diskcache"
]
revision = "2bcd89a1743fd4b373f7370ce8ddc14dfbd18229"
[[projects]]
branch = "master"
name = "github.com/grpc-ecosystem/go-grpc-middleware"
packages = [
".",
"auth",
"logging",
"logging/logrus",
"logging/logrus/ctxlogrus",
"tags",
"tags/logrus",
"util/metautils"
]
revision = "bc372cc64f55abd91995ba3f219b380ffbc59e9d"
[[projects]]
name = "github.com/grpc-ecosystem/grpc-gateway"
packages = [
"runtime",
"runtime/internal",
"utilities"
]
revision = "07f5e79768022f9a3265235f0db4ac8c3f675fec"
version = "v1.3.1"
[[projects]]
branch = "master"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru"
]
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
[[projects]]
branch = "master"
name = "github.com/howeyc/gopass"
packages = ["."]
revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
[[projects]]
name = "github.com/imdario/mergo"
packages = ["."]
revision = "163f41321a19dd09362d4c63cc2489db2015f1f4"
version = "0.3.2"
[[projects]]
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
name = "github.com/json-iterator/go"
packages = ["."]
revision = "e7c7f3b33712573affdcc7a107218e7926b9a05b"
version = "1.0.6"
[[projects]]
name = "github.com/juju/ratelimit"
packages = ["."]
revision = "59fac5042749a5afb9af70e813da1dd5474f0167"
version = "1.0.1"
[[projects]]
branch = "master"
name = "github.com/kardianos/osext"
packages = ["."]
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
[[projects]]
name = "github.com/ksonnet/ksonnet"
packages = [
"metadata",
"metadata/params",
"pkg/app",
"pkg/component",
"pkg/docparser",
"pkg/env",
"pkg/lib",
"pkg/node",
"pkg/params",
"pkg/parts",
"pkg/pkg",
"pkg/prototype",
"pkg/registry",
"pkg/schema",
"pkg/util/github",
"pkg/util/jsonnet",
"pkg/util/kslib",
"pkg/util/strings",
"pkg/util/yaml"
]
revision = "52f89335ef2c7c7015f4ba92cc1ba1e109310c70"
version = "v0.10.0-alpha.3"
[[projects]]
name = "github.com/ksonnet/ksonnet-lib"
packages = [
"ksonnet-gen/astext",
"ksonnet-gen/jsonnet",
"ksonnet-gen/ksonnet",
"ksonnet-gen/kubespec",
"ksonnet-gen/kubeversion",
"ksonnet-gen/nodemaker",
"ksonnet-gen/printer"
]
revision = "93eeff932dcb550d888dd7fa7e1f7c6a182108fb"
[[projects]]
branch = "master"
name = "github.com/mailru/easyjson"
packages = [
"buffer",
"jlexer",
"jwriter"
]
revision = "32fa128f234d041f196a9f3e0fea5ac9772c08e1"
[[projects]]
branch = "master"
name = "github.com/petar/GoLLRB"
packages = ["llrb"]
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
[[projects]]
name = "github.com/peterbourgon/diskv"
packages = ["."]
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
version = "v2.0.1"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/sergi/go-diff"
packages = ["diffmatchpatch"]
revision = "1744e2970ca51c86172c8190fadad617561ed6e7"
version = "v1.0.0"
[[projects]]
name = "github.com/sirupsen/logrus"
packages = ["."]
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
version = "v1.0.5"
[[projects]]
name = "github.com/soheilhy/cmux"
packages = ["."]
revision = "e09e9389d85d8492d313d73d1469c029e710623f"
version = "v0.1.4"
[[projects]]
name = "github.com/spf13/afero"
packages = [
".",
"mem"
]
revision = "9be650865eab0c12963d8753212f4f9c66cdcf12"
[[projects]]
name = "github.com/spf13/cobra"
packages = ["."]
revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
version = "v0.0.1"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
version = "v1.0.0"
[[projects]]
name = "github.com/stretchr/objx"
packages = ["."]
revision = "facf9a85c22f48d2f52f2380e4efce1768749a89"
version = "v0.1"
[[projects]]
name = "github.com/stretchr/testify"
packages = [
"assert",
"mock"
]
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
version = "v1.2.1"
[[projects]]
name = "github.com/yudai/gojsondiff"
packages = [
".",
"formatter"
]
revision = "7b1b7adf999dab73a6eb02669c3d82dbb27a3dd6"
version = "1.0.0"
[[projects]]
branch = "master"
name = "github.com/yudai/golcs"
packages = ["."]
revision = "ecda9a501e8220fae3b4b600c3db4b0ba22cfc68"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = [
"bcrypt",
"blowfish",
"ssh/terminal"
]
revision = "432090b8f568c018896cd8a0fb0345872bbac6ce"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"lex/httplex",
"trace"
]
revision = "cbe0f9307d0156177f9dd5dc85da1a31abc5f2fb"
[[projects]]
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt"
]
revision = "cce311a261e6fcf29de72ca96827bdb0b7d9c9e6"
[[projects]]
branch = "master"
name = "golang.org/x/sync"
packages = ["errgroup"]
revision = "fd80eb99c8f653c847d294a001bdf2a3a6f768f5"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows"
]
revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
"width"
]
revision = "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1"
[[projects]]
branch = "master"
name = "golang.org/x/tools"
packages = [
"go/ast/astutil",
"imports"
]
revision = "5e776fee60db37e560cee3fb46db699d2f095386"
[[projects]]
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/urlfetch",
"urlfetch"
]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/annotations",
"googleapis/rpc/status"
]
revision = "2b5a72b8730b0b16380010cfe5286c42108d88e7"
[[projects]]
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"internal",
"keepalive",
"metadata",
"naming",
"peer",
"reflection",
"reflection/grpc_reflection_v1alpha",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport"
]
revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655"
version = "v1.10.0"
[[projects]]
name = "gopkg.in/inf.v0"
packages = ["."]
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
version = "v0.9.0"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
[[projects]]
branch = "release-1.9"
name = "k8s.io/api"
packages = [
"admissionregistration/v1alpha1",
"admissionregistration/v1beta1",
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
"networking/v1",
"policy/v1beta1",
"rbac/v1",
"rbac/v1alpha1",
"rbac/v1beta1",
"scheduling/v1alpha1",
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1"
]
revision = "acf347b865f29325eb61f4cd2df11e86e073a5ee"
[[projects]]
branch = "release-1.9"
name = "k8s.io/apiextensions-apiserver"
packages = [
"pkg/apis/apiextensions",
"pkg/apis/apiextensions/v1beta1",
"pkg/client/clientset/clientset",
"pkg/client/clientset/clientset/scheme",
"pkg/client/clientset/clientset/typed/apiextensions/v1beta1"
]
revision = "b89f5ce12ce6e022fc3e9d7586d61346e694d56e"
[[projects]]
branch = "release-1.9"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/equality",
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1alpha1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
"pkg/labels",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/runtime/serializer/json",
"pkg/runtime/serializer/protobuf",
"pkg/runtime/serializer/recognizer",
"pkg/runtime/serializer/streaming",
"pkg/runtime/serializer/versioning",
"pkg/selection",
"pkg/types",
"pkg/util/cache",
"pkg/util/clock",
"pkg/util/diff",
"pkg/util/errors",
"pkg/util/framer",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/reflect"
]
revision = "19e3f5aa3adca672c153d324e6b7d82ff8935f03"
[[projects]]
branch = "release-6.0"
name = "k8s.io/client-go"
packages = [
"discovery",
"discovery/fake",
"dynamic",
"dynamic/fake",
"kubernetes",
"kubernetes/fake",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
"kubernetes/typed/admissionregistration/v1alpha1/fake",
"kubernetes/typed/admissionregistration/v1beta1",
"kubernetes/typed/admissionregistration/v1beta1/fake",
"kubernetes/typed/apps/v1",
"kubernetes/typed/apps/v1/fake",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/apps/v1beta1/fake",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/apps/v1beta2/fake",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1/fake",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authentication/v1beta1/fake",
"kubernetes/typed/authorization/v1",
"kubernetes/typed/authorization/v1/fake",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/authorization/v1beta1/fake",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/autoscaling/v1/fake",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/autoscaling/v2beta1/fake",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1/fake",
"kubernetes/typed/batch/v1beta1",
"kubernetes/typed/batch/v1beta1/fake",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/batch/v2alpha1/fake",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/certificates/v1beta1/fake",
"kubernetes/typed/core/v1",
"kubernetes/typed/core/v1/fake",
"kubernetes/typed/events/v1beta1",
"kubernetes/typed/events/v1beta1/fake",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/extensions/v1beta1/fake",
"kubernetes/typed/networking/v1",
"kubernetes/typed/networking/v1/fake",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/policy/v1beta1/fake",
"kubernetes/typed/rbac/v1",
"kubernetes/typed/rbac/v1/fake",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/rbac/v1alpha1/fake",
"kubernetes/typed/rbac/v1beta1",
"kubernetes/typed/rbac/v1beta1/fake",
"kubernetes/typed/scheduling/v1alpha1",
"kubernetes/typed/scheduling/v1alpha1/fake",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/settings/v1alpha1/fake",
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1/fake",
"kubernetes/typed/storage/v1alpha1",
"kubernetes/typed/storage/v1alpha1/fake",
"kubernetes/typed/storage/v1beta1",
"kubernetes/typed/storage/v1beta1/fake",
"pkg/version",
"plugin/pkg/client/auth/gcp",
"plugin/pkg/client/auth/oidc",
"rest",
"rest/watch",
"testing",
"third_party/forked/golang/template",
"tools/auth",
"tools/cache",
"tools/clientcmd",
"tools/clientcmd/api",
"tools/clientcmd/api/latest",
"tools/clientcmd/api/v1",
"tools/metrics",
"tools/pager",
"tools/reference",
"transport",
"util/buffer",
"util/cert",
"util/flowcontrol",
"util/homedir",
"util/integer",
"util/jsonpath",
"util/workqueue"
]
revision = "9389c055a838d4f208b699b3c7c51b70f2368861"
[[projects]]
branch = "release-1.9"
name = "k8s.io/code-generator"
packages = [
"cmd/go-to-protobuf",
"cmd/go-to-protobuf/protobuf",
"third_party/forked/golang/reflect"
]
revision = "91d3f6a57905178524105a085085901bb73bd3dc"
[[projects]]
branch = "master"
name = "k8s.io/gengo"
packages = [
"args",
"generator",
"namer",
"parser",
"types"
]
revision = "8394c995ab8fbe52216f38d0e1a37de36d820528"
[[projects]]
branch = "master"
name = "k8s.io/kube-openapi"
packages = ["pkg/common"]
revision = "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "2993ca9c549880a06dd96160711aa3d8813184e579f8ba084c5c00f7f4f0a5c5"
solver-name = "gps-cdcl"
solver-version = 1

View File

@ -1,48 +0,0 @@
required = [
"github.com/gogo/protobuf/protoc-gen-gofast",
"github.com/gogo/protobuf/protoc-gen-gogofast",
"golang.org/x/sync/errgroup",
"k8s.io/code-generator/cmd/go-to-protobuf",
]
[[constraint]]
name = "google.golang.org/grpc"
version = "1.9.2"
[[constraint]]
name = "github.com/grpc-ecosystem/grpc-gateway"
version = "v1.3.1"
# override ksonnet's release-1.8 dependency
[[override]]
branch = "release-1.9"
name = "k8s.io/apimachinery"
[[constraint]]
branch = "release-1.9"
name = "k8s.io/api"
[[constraint]]
name = "k8s.io/apiextensions-apiserver"
branch = "release-1.9"
[[constraint]]
branch = "release-1.9"
name = "k8s.io/code-generator"
[[constraint]]
branch = "release-6.0"
name = "k8s.io/client-go"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.1"
[[constraint]]
name = "github.com/ksonnet/ksonnet"
version = "v0.10.0-alpha.3"
# override ksonnet's logrus dependency
[[override]]
name = "github.com/sirupsen/logrus"
version = "v1.0.3"

View File

@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Copyright 2017-2018 The Argo Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

972
Makefile

File diff suppressed because it is too large

17
OWNERS Normal file
View File

@ -0,0 +1,17 @@
owners:
- joibel
- sarabala1979
- terrytangyuan
approvers:
- alexec
- alexmt
- edlee2121
- isubasinghe
- jessesuen
- juliev0
- tczhao
reviewers:
- jswxstw
- shuangkun

View File

@ -1,3 +0,0 @@
controller: go run ./cmd/argocd-application-controller/main.go --app-resync 10
api-server: go run ./cmd/argocd-server/main.go --insecure
repo-server: go run ./cmd/argocd-repo-server/main.go

206
README.md
View File

@ -1,91 +1,165 @@
<!-- markdownlint-disable-next-line MD041 -->
[![Security Status](https://github.com/argoproj/argo-workflows/actions/workflows/snyk.yml/badge.svg?branch=main)](https://github.com/argoproj/argo-workflows/actions/workflows/snyk.yml?query=branch%3Amain)
[![OpenSSF Best Practices](https://bestpractices.coreinfrastructure.org/projects/3830/badge)](https://bestpractices.coreinfrastructure.org/projects/3830)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-workflows/badge)](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-workflows)
[![FOSSA License Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fargoproj%2Fargo-workflows.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fargoproj%2Fargo-workflows?ref=badge_shield)
[![Slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack)
[![X Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://x.com/argoproj)
[![LinkedIn](https://img.shields.io/badge/LinkedIn-argoproj-blue.svg?logo=linkedin)](https://www.linkedin.com/company/argoproj/)
[![Release Version](https://img.shields.io/github/v/release/argoproj/argo-workflows?label=argo-workflows)](https://github.com/argoproj/argo-workflows/releases/latest)
[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-workflows)](https://artifacthub.io/packages/helm/argo/argo-workflows)
# Argo CD - GitOps Continuous Delivery for Kubernetes
## What is Argo Workflows?
## What is Argo CD?
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes.
Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).
Argo CD is a declarative continuous delivery service for Kubernetes, based on ksonnet.
* Define workflows where each step is a container.
* Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a directed acyclic graph (DAG).
* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo Workflows on Kubernetes.
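For example, a minimal hello-world Workflow, submitted with `kubectl` (a sketch; the single step runs in its own container):
```
kubectl create -f - <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: hello-world-
spec:
  entrypoint: main
  templates:
    - name: main
      container:
        image: busybox
        command: [echo, "hello world"]
EOF
```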
## Why Argo CD?
Argo is a [Cloud Native Computing Foundation (CNCF)](https://cncf.io/) graduated project.
Application definitions, configurations, and environments should be declarative and version controlled.
Application deployment and lifecycle management should be automated, auditable, and easy to understand.
## Use Cases
## Getting Started
* [Machine Learning pipelines](https://argo-workflows.readthedocs.io/en/latest/use-cases/machine-learning/)
* [Data and batch processing](https://argo-workflows.readthedocs.io/en/latest/use-cases/data-processing/)
* [Infrastructure automation](https://argo-workflows.readthedocs.io/en/latest/use-cases/infrastructure-automation/)
* [CI/CD](https://argo-workflows.readthedocs.io/en/latest/use-cases/ci-cd/)
* [Other use cases](https://argo-workflows.readthedocs.io/en/latest/use-cases/other/)
Follow our [getting started guide](docs/getting_started.md).
## Why Argo Workflows?
## How it works
* Argo Workflows is the most popular workflow execution engine for Kubernetes.
* Lightweight, scalable, and easy to use.
* Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based environments.
* Cloud agnostic and can run on any Kubernetes cluster.
Argo CD uses git repositories as the source of truth for defining the desired application state as
well as the target deployment environments. Kubernetes manifests are specified as
[ksonnet](https://ksonnet.io) applications. Argo CD automates the deployment of the desired
application states in the specified target environments.
[Read what people said in our latest survey](https://blog.argoproj.io/argo-workflows-events-2023-user-survey-results-82c53bc30543)
![Argo CD Architecture](docs/argocd_architecture.png)
## Try Argo Workflows
Application deployments can track updates to branches or tags, or be pinned to a specific version of
the manifests at a git commit. See [tracking strategies](docs/tracking_strategies.md) for additional
details about the available tracking strategies.
You can try Argo Workflows via one of the following:
Argo CD is implemented as a Kubernetes controller which continuously monitors running applications
and compares the current, live state against the desired target state (as specified in the git repo).
A deployed application whose live state deviates from the target state is considered out-of-sync.
Argo CD reports & visualizes the differences and provides facilities to automatically or
manually sync the live state back to the desired target state. Any modifications made to the desired
target state in the git repo can be automatically applied and reflected in the specified target
environments.
1. [Interactive Training Material](https://killercoda.com/argoproj/course/argo-workflows/)
1. [Access the demo environment](https://workflows.apps.argoproj.io/workflows/argo)
For additional details, see [architecture overview](docs/architecture.md).
![Screenshot](docs/assets/screenshot.png)
## Who uses Argo Workflows?
[200+ organizations are officially using Argo Workflows](USERS.md)
## Ecosystem
Just some of the projects that use or rely on Argo Workflows (complete list [here](https://github.com/akuity/awesome-argo#ecosystem-projects)):
* [Argo Events](https://github.com/argoproj/argo-events)
* [Couler](https://github.com/couler-proj/couler)
* [Hera](https://github.com/argoproj-labs/hera-workflows)
* [Katib](https://github.com/kubeflow/katib)
* [Kedro](https://kedro.readthedocs.io/en/stable/)
* [Kubeflow Pipelines](https://github.com/kubeflow/pipelines)
* [Netflix Metaflow](https://metaflow.org)
* [Onepanel](https://github.com/onepanelio/onepanel)
* [Orchest](https://github.com/orchest/orchest/)
* [Piper](https://github.com/quickube/piper)
* [Ploomber](https://github.com/ploomber/ploomber)
* [Seldon](https://github.com/SeldonIO/seldon-core)
* [SQLFlow](https://github.com/sql-machine-learning/sqlflow)
## Client Libraries
Check out our [Java, Golang and Python clients](docs/client-libraries.md).
## Quickstart
* [Get started here](https://argo-workflows.readthedocs.io/en/latest/quick-start/)
* [Walk-through examples](https://argo-workflows.readthedocs.io/en/latest/walk-through/)
## Documentation
[View the docs](https://argo-workflows.readthedocs.io/en/latest/)
## Features
* Automated deployment of applications to specified target environments
* Continuous monitoring of deployed applications
* Automated or manual syncing of applications to its target state
* Web and CLI based visualization of applications and differences between live vs. target state
* Rollback/Roll-anywhere to any application state committed in the git repository
An incomplete list of features Argo Workflows provides:
## What is ksonnet?
* UI to visualize and manage Workflows
* Artifact support (S3, Artifactory, Alibaba Cloud OSS, Azure Blob Storage, HTTP, Git, GCS, raw)
* Workflow templating to store commonly used Workflows in the cluster
* Archiving Workflows after execution for later access
* Scheduled workflows using cron
* Server interface with REST API (HTTP and GRPC)
* DAG or Steps based declaration of workflows
* Step level input & outputs (artifacts/parameters)
* Loops
* Parameterization
* Conditionals
* Timeouts (step & workflow level)
* Retry (step & workflow level)
* Resubmit (memoized)
* Suspend & Resume
* Cancellation
* K8s resource orchestration
* Exit Hooks (notifications, cleanup)
* Garbage collection of completed workflows
* Scheduling (affinity/tolerations/node selectors)
* Volumes (ephemeral/existing)
* Parallelism limits
* Daemoned steps
* DinD (docker-in-docker)
* Script steps
* Event emission
* Prometheus metrics
* Multiple executors
* Multiple pod and workflow garbage collection strategies
* Automatically calculated resource usage per step
* Java/Golang/Python SDKs
* Pod Disruption Budget support
* Single-sign on (OAuth2/OIDC)
* Webhook triggering
* CLI
* Out-of-the box and custom Prometheus metrics
* Windows container support
* Embedded widgets
* Multiplex log viewer
* [Jsonnet](http://jsonnet.org), the basis for ksonnet, is a domain-specific configuration language
which provides extreme flexibility for composing and manipulating JSON/YAML specifications.
* [Ksonnet](http://ksonnet.io) goes one step further by applying Jsonnet principles to Kubernetes
manifests. It provides an opinionated file & directory structure to organize applications into
reusable components, parameters, and environments. Environments can be hierarchical, which promotes
both re-use and granular customization of application and environment specifications.
## Community Meetings
## Why ksonnet?
We host monthly community meetings where we and the community showcase demos and discuss the current and future state of the project. Feel free to join us!
For Community Meeting information, minutes and recordings, please [see here](https://bit.ly/argo-wf-cmty-mtng).
Application configuration management is a hard problem that grows rapidly in complexity as you deploy
more applications against more and more environments. Current templating systems, such as Jinja
and Golang templating, are unnatural ways to maintain Kubernetes manifests and are not well suited to
capturing subtle configuration differences between environments. Their ability to compose and re-use
application and environment configurations is also very limited.
Participation in Argo Workflows is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)
Imagine we have a single guestbook application deployed in the following environments:
## Community Blogs and Presentations
| Environment | K8s Version | Application Image | DB Connection String | Environment Vars | Sidecars |
|---------------|-------------|------------------------|-----------------------|------------------|---------------|
| minikube      | 1.10.0      | jesse/guestbook:latest | sql://localhost/db    | DEBUG=true       |               |
| dev | 1.9.0 | app/guestbook:latest | sql://dev-test/db | DEBUG=true | |
| staging | 1.8.0 | app/guestbook:e3c0263 | sql://staging/db | | istio,dnsmasq |
| us-west-1 | 1.8.0 | app/guestbook:abc1234 | sql://prod/db | FOO_FEATURE=true | istio,dnsmasq |
| us-west-2 | 1.8.0 | app/guestbook:abc1234 | sql://prod/db | | istio,dnsmasq |
| us-east-1 | 1.9.0 | app/guestbook:abc1234 | sql://prod/db | BAR_FEATURE=true | istio,dnsmasq |
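The compose-and-override idea sketched in raw jsonnet (assumes the `jsonnet` CLI; the field names are illustrative, not an actual ksonnet schema):
```
jsonnet -e 'local base = { image: "app/guestbook:latest", db: "sql://dev-test/db" };
base + { db: "sql://staging/db", sidecars: ["istio", "dnsmasq"] }'
```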
* [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo)
* [Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts](https://youtu.be/XNXJtxkUKeY)
* [Argo Workflows and Pipelines - CI/CD, Machine Learning, and Other Kubernetes Workflows](https://youtu.be/UMaivwrAyTA)
* [Argo Ansible role: Provisioning Argo Workflows on OpenShift](https://medium.com/@marekermk/provisioning-argo-on-openshift-with-ansible-and-kustomize-340a1fda8b50)
* [Argo Workflows vs Apache Airflow](http://bit.ly/30YNIvT)
* [Beyond Prototypes: Production-Ready ML Systems with Metaflow and Argo](https://github.com/terrytangyuan/public-talks/tree/main/talks/kubecon-na-2023-metaflow-argo)
* [CI/CD with Argo on Kubernetes](https://medium.com/@bouwe.ceunen/ci-cd-with-argo-on-kubernetes-28c1a99616a9)
* [Define Your CI/CD Pipeline with Argo Workflows](https://haque-zubair.medium.com/define-your-ci-cd-pipeline-with-argo-workflows-25aefb02fa63)
* [Distributed Machine Learning Patterns from Manning Publication](https://github.com/terrytangyuan/distributed-ml-patterns)
* [Engineering Cloud Native AI Platform](https://github.com/terrytangyuan/public-talks/tree/main/talks/platform-con-2024-engineering-cloud-native-ai-platform)
* [Managing Thousands of Automatic Machine Learning Experiments with Argo and Katib](https://github.com/terrytangyuan/public-talks/blob/main/talks/argocon-automl-experiments-2022)
* [Revolutionizing Scientific Simulations with Argo Workflows](https://www.youtube.com/watch?v=BYVf7GhfiRg)
* [Running Argo Workflows Across Multiple Kubernetes Clusters](https://admiralty.io/blog/running-argo-workflows-across-multiple-kubernetes-clusters/)
* [Scaling Kubernetes: Best Practices for Managing Large-Scale Batch Jobs with Spark and Argo Workflow](https://www.youtube.com/watch?v=KqEKRPjy4aE)
* [Open Source Model Management Roundup: Polyaxon, Argo, and Seldon](https://www.anaconda.com/blog/developer-blog/open-source-model-management-roundup-polyaxon-argo-and-seldon/)
* [Producing 200 OpenStreetMap extracts in 35 minutes using a scalable data workflow](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/)
* [Production-Ready AI Platform on Kubernetes](https://github.com/terrytangyuan/public-talks/tree/main/talks/kubecon-europe-2024-production-ai-platform-on-k8s)
* [Argo integration review](http://dev.matt.hillsdon.net/2018/03/24/argo-integration-review.html)
* TGI Kubernetes with Joe Beda: [Argo workflow system](https://www.youtube.com/watch?v=M_rxPPLG8pU&start=859)
Ksonnet:
* Enables composition and re-use of common YAML specifications
* Allows overrides, additions, and subtractions of YAML sub-components specific to each environment
* Guarantees proper generation of K8s manifests suitable for the corresponding Kubernetes API version
* Provides [kubernetes-specific jsonnet libraries](https://github.com/ksonnet/ksonnet-lib) to enable
concise definition of kubernetes manifests
## Project Resources
## Development Status
* Argo CD is in early development
* [Argo Project GitHub organization](https://github.com/argoproj)
* [Argo Website](https://argoproj.github.io/)
* [Argo Slack](https://argoproj.github.io/community/join-slack)
## Roadmap
* PreSync, PostSync, OutOfSync hooks
* Customized application actions as Argo workflows
* Blue/Green & canary upgrades
* SSO Integration
* GitHub & Docker webhooks
## Security
See [SECURITY.md](SECURITY.md).

42
SECURITY.md Normal file
View File

@ -0,0 +1,42 @@
# Security
## Reporting a Vulnerability
If you find a security-related bug in Argo Workflows, we kindly ask you for responsible
disclosure and for giving us appropriate time to react, analyze, and develop a
fix to mitigate the vulnerability.
Please report vulnerabilities by:
* Opening a draft GitHub Security Advisory: https://github.com/argoproj/argo-workflows/security/advisories/new
* Sending an e-mail to the following address: cncf-argo-security@lists.cncf.io
All vulnerabilities and associated information will be treated with full confidentiality.
## Public Disclosure
We will disclose security vulnerabilities via the [release notes](CHANGELOG.md) and the
[GitHub Security Advisories](https://github.com/argoproj/argo-workflows/security/advisories)
feature to keep our community well informed, and we will credit you for your findings (unless you prefer to stay anonymous, of course).
## Vulnerability Scanning
See [static code analysis](docs/static-code-analysis.md).
## Internet Bug Bounty collaboration
We're happy to announce that the Argo project is collaborating with the great
folks over at
[HackerOne](https://hackerone.com/) and their
[Internet Bug Bounty program](https://hackerone.com/ibb)
to reward the awesome people who find security vulnerabilities in the four
main Argo projects (CD, Events, Rollouts, and Workflows) and then work with
us to fix and disclose them responsibly.
If you report a vulnerability to us as outlined in this security policy, we
will work together with you to find out whether your finding is eligible for
claiming a bounty, and also on how to claim it.
## Securing Argo Workflows
See [docs/security.md](docs/security.md) for information about securing your Argo Workflows instance.

230
USERS.md Normal file
View File

@ -0,0 +1,230 @@
# Argo Workflows User Community Surveys & Feedback
User community survey results are available: [2023](https://blog.argoproj.io/argo-workflows-events-2023-user-survey-results-82c53bc30543), [2021](https://blog.argoproj.io/argo-workflows-2021-survey-results-d6fa890030ee?gi=857daaa1faa9), and [2020](https://github.com/argoproj/argoproj/blob/main/community/user_surveys/ArgoWorkflows2020SurveySummary.pdf).
## Who uses Argo Workflows?
As the Argo Community grows, we'd like to keep track of our users. Please send a PR with your organization or project name in the following sections.
Argo Workflows is seeking more community involvement and ultimately more [Reviewers and Approvers](https://github.com/argoproj/argoproj/blob/main/community/membership.md) to help keep it viable.
See [Sustainability Effort](https://github.com/argoproj/argo-workflows/blob/main/community/sustainability_effort.md) for more information.
### Organizations Using Argo
Currently, the following organizations are **officially** using Argo Workflows:
1. [23mofang](https://www.23mofang.com/)
1. [4intelligence](https://4intelligence.com.br/)
1. [7shifts](https://www.7shifts.com)
1. [Acquia](https://www.acquia.com/)
1. [Adevinta](https://www.adevinta.com/)
1. [Admiralty](https://admiralty.io/)
1. [Adobe](https://www.adobe.com/)
1. [Adyen](https://www.adyen.com/)
1. [AKRA](https://www.akra.de/)
1. [Akuity](https://akuity.io/)
1. [Alibaba Cloud](https://www.alibabacloud.com/about)
1. [Alibaba Group](https://www.alibabagroup.com/)
1. [Anova](https://www.anova.com/)
1. [Ant Group](https://www.antgroup.com/)
1. [AppDirect](https://www.appdirect.com/)
1. [Arabesque](https://www.arabesque.com/)
1. [Argonaut](https://www.argonaut.dev/)
1. [ArthurAI](https://arthur.ai/)
1. [Astraea](https://astraea.earth/)
1. [Atlan](https://atlan.com/)
1. [BasisAI](https://basis-ai.com/)
1. [BEI.RE](https://www.bei.re/)
1. [bimspot](https://bimspot.io)
1. [BioBox Analytics](https://biobox.io)
1. [BlackRock](https://www.blackrock.com/)
1. [Bloomberg](https://www.bloomberg.com/)
1. [bonprix](https://en.bonprix.de/corporate/our-company/)
1. [Botkeeper](https://www.botkeeper.com/)
1. [ByteDance](https://www.bytedance.com/en/)
1. [Canva](https://www.canva.com/)
1. [Capact](https://capact.io/)
1. [Capital One](https://www.capitalone.com/tech/)
1. [Carrefour](https://www.carrefour.com/)
1. [CarTrack](https://www.cartrack.com/)
1. [Casavo](https://casavo.com/)
1. [CCRi](https://www.ccri.com/)
1. [Cisco](https://www.cisco.com/)
1. [Cloud Scale](https://cloudscaleinc.com/)
1. [CloudGeometry](https://www.cloudgeometry.io/)
1. [CloudSeeds](https://www.cloudseeds.de/)
1. [Codec](https://www.codec.ai/)
1. [Codefresh](https://www.codefresh.io/)
1. [Commodus Tech](https://www.commodus.tech)
1. [Concierge Render](https://www.conciergerender.com)
1. [Cookpad](https://cookpad.com/)
1. [Coralogix](https://coralogix.com)
1. [CoreFiling](https://www.corefiling.com/)
1. [CoreWeave Cloud](https://www.coreweave.com)
1. [Cratejoy](https://www.cratejoy.com/)
1. [Cruise](https://getcruise.com/)
1. [CVision AI](https://www.cvisionai.com)
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
1. [Cyrus Biotechnology](https://cyrusbio.com/)
1. [Data4Risk](https://www.data4risk.com/)
1. [Datable](https://datable.jp/)
1. [Datadog](https://www.datadoghq.com/)
1. [DataRobot](https://www.datarobot.com/)
1. [DataStax](https://www.datastax.com/)
1. [DDEV](https://www.ddev.com/)
1. [Deutsche Telekom AG](https://telekom.com)
1. [DevSamurai](https://www.devsamurai.com/)
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
1. [DLR](https://www.dlr.de/eoc/)
1. [DP Technology](https://www.dp.tech/)
1. [Dyno Therapeutics](https://dynotx.com)
1. [EBSCO Information Services](https://www.ebsco.com/)
1. [Enso Finance](https://enso.finance/)
1. [Equinor](https://www.equinor.com/)
1. [Elastic](https://www.elastic.co/)
1. [Fairwinds](https://fairwinds.com/)
1. [FOLIO](http://corp.folio-sec.com/)
1. [freee](https://corp.freee.co.jp/en/company/)
1. [FreeWheel](https://freewheel.com/)
1. [Fynd Trak](https://trak.fynd.com/)
1. [Galixir](https://www.galixir.com/)
1. [Gardener](https://gardener.cloud/)
1. [Gepardec](https://gepardec.com/)
1. [GitHub](https://github.com/)
1. [Gitpod](https://www.gitpod.io/)
1. [Gladly](https://gladly.com/)
1. [Gllue](https://gllue.com/)
1. [Glovo](https://www.glovoapp.com)
1. [Google](https://www.google.com/intl/en/about/our-company/)
1. [Graviti](https://www.graviti.com)
1. [Greenhouse](https://greenhouse.io)
1. [H2O.ai](https://h2o.ai/)
1. [Habx](https://www.habx.com/)
1. [Helio](https://helio.exchange)
1. [Hemisphere Digital](https://hemisphere.digital)
1. [HOVER](https://hover.to)
1. [HSBC](https://hsbc.com)
1. [Hydrogrid](https://hydrogrid.ai)
1. [IBM](https://ibm.com)
1. [Iflytek](https://www.iflytek.com/)
1. [Inceptio Technology](https://www.inceptio.ai/)
1. [incrmntal](https://incrmntal.com/)
1. [InsideBoard](https://www.insideboard.com)
1. [Interline Technologies](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/)
1. [Intralinks](https://www.intralinks.com/)
1. [Intuit](https://www.intuit.com/)
1. [InVision](https://www.invisionapp.com/)
1. [İşbank](https://www.isbank.com.tr/en)
1. [Jellysmack](https://www.jellysmack.com/)
1. [Jungle](https://www.jungle.ai/)
1. [Karius](https://www.kariusdx.com/)
1. [Karrot](https://www.daangn.com/)
1. [KarrotPay](https://www.daangnpay.com/)
1. [Kasa](https://www.kasa.co.kr/)
1. [KintoHub](https://www.kintohub.com/)
1. [KPMG](https://kpmg.com/uk)
1. [Localytics](https://www.localytics.com/)
1. [Lumin Digital](https://lumindigital.com/)
1. [Maersk](https://www.maersk.com/solutions/digital-solutions)
1. [MariaDB](https://mariadb.com/)
1. [Marmalade](https://www.marmalade.co/)
1. [Max Kelsen](https://maxkelsen.com/)
1. [Maya](https://www.maya.ph/)
1. [Microba](https://www.microba.com/)
1. [Microblink](https://microblink.com/)
1. [Mirantis](https://mirantis.com/)
1. [Mixpanel](https://mixpanel.com)
1. [Motus](https://www.motus.com)
1. [New Relic](https://newrelic.com/)
1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/)
1. [Norwegian Refugee Council](https://www.nrc.no/)
1. [nrd.io](https://nrd.io/)
1. [NVIDIA](https://www.nvidia.com/)
1. [One Concern](https://oneconcern.com/)
1. [Onepanel](https://docs.onepanel.ai)
1. [OpsMx](https://www.opsmx.io/)
1. [Oracle](https://www.oracle.com/)
1. [Orchest](https://www.orchest.io/)
1. [OVH](https://www.ovh.com/)
1. [PathAI](https://www.pathai.com)
1. [PayIt](https://payitgov.com/)
1. [PDOK](https://www.pdok.nl/)
1. [Peak AI](https://www.peak.ai/)
1. [Phrase](https://phrase.com)
1. [Pipekit](https://pipekit.io)
1. [Pismo](https://pismo.io/)
1. [Polarpoint.io](https://polarpoint.io)
1. [Pollination](https://pollination.cloud)
1. [Preferred Networks](https://www.preferred-networks.jp/en/)
1. [Priceline.com](https://www.priceline.com)
1. [Procore](https://www.procore.com)
1. [Promaton](https://www.promaton.com/)
1. [Prudential](https://www.prudential.com.sg/)
1. [Pvotal Technologies](https://pvotal.tech/)
1. [Quantibio](http://quantibio.com/us/en/)
1. [QuantumBlack](https://quantumblack.com/)
1. [Raccoon Digital Marketing](https://raccoon.ag/)
1. [Ramboll Shair](https://ramboll-shair.com/)
1. [Ravelin](https://www.ravelin.com/)
1. [Reco](https://reco.ai)
1. [Red Hat](https://www.redhat.com/en)
1. [Reserved AI](https://reserved.ai/)
1. [Riskified](https://www.riskified.com)
1. [Robinhood](https://robinhood.com/)
1. [Sage (Sage AI Labs)](https://sage.com/)
1. [SAP Concur](https://www.concur.com/)
1. [SAP Fieldglass](https://www.fieldglass.com/)
1. [SAP Hybris](https://cx.sap.com/)
1. [SAS](https://www.sas.com/)
1. [Salesforce](https://salesforce.com)
1. [Schlumberger](https://slb.com/)
1. [Securitas](https://securitas.com/)
1. [SegmentStream](https://segmentstream.com)
1. [Semgrep](https://semgrep.com)
1. [Sendible](https://sendible.com)
1. [Sidecar Technologies](https://hello.getsidecar.com/)
1. [smallcase](https://smallcase.com/)
1. [Softonic](https://hello.softonic.com/)
1. [Sohu](https://www.sohu.com/)
1. [SternumIOT](https://www.sternumiot.com)
1. [Stillwater Supercomputing, Inc](http://www.stillwater-sc.com/)
1. [StreamNative](https://streamnative.io)
1. [strongDM](https://www.strongdm.com/)
1. [Styra](https://www.styra.com/)
1. [Splunk](https://www.splunk.com/)
1. [Sutpc](http://www.sutpc.com/)
1. [Swissblock Technologies](https://swissblock.net/)
1. [Tessell](https://www.tessell.com/)
1. [Threekit](https://www.threekit.com/)
1. [Tiger Analytics](https://www.tigeranalytics.com/)
1. [Tradeshift](https://tradeshift.com/)
1. [Trendyol](https://trendyol.com)
1. [Tuhu](https://www.tuhu.cn/)
1. [Tulip](https://tulip.com/)
1. [Ubie](https://ubie.life/)
1. [UFirstGroup](https://www.ufirstgroup.com)
1. [Vispera](https://www.vispera.co)
1. [VMware](https://www.vmware.com/)
1. [Voyager](https://investvoyager.com/)
1. [Wavefront](https://www.wavefront.com/)
1. [Wellcome Trust](https://wellcome.ac.uk/)
1. [WooliesX](https://wooliesx.com.au/)
1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
1. [Workiva](https://www.workiva.com/)
1. [Xueqiu](https://www.xueqiu.com/)
1. [Yubo](https://www.yubo.live/)
1. [Zhihu](https://www.zhihu.com/)
### Projects Using Argo
In addition, the following projects are **officially** using Argo Workflows:
1. [Couler](https://github.com/couler-proj/couler)
1. [Hera Workflows](https://github.com/argoproj-labs/hera-workflows)
1. [Kubeflow](https://www.kubeflow.org/)
1. [Metaflow](https://www.metaflow.org)
1. [Onepanel](https://github.com/onepanelio/onepanel)
1. [SQLFlow](https://github.com/sql-machine-learning/sqlflow)
1. [BisQue](https://github.com/UCSB-VRL/bisqueUCSB)
1. [Tator](https://www.tator.io)

View File

@ -1 +0,0 @@
0.3.1

11716
api/jsonschema/schema.json generated Normal file

File diff suppressed because it is too large

15784
api/openapi-spec/swagger.json generated Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,103 @@
package openapi_spec //nolint:staticcheck
import (
"encoding/json"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
type obj = map[string]interface{}
func TestSwagger(t *testing.T) {
swagger := obj{}
data, err := os.ReadFile("swagger.json")
if err != nil {
panic(err)
}
err = json.Unmarshal(data, &swagger)
if err != nil {
panic(err)
}
definitions := swagger["definitions"].(obj)
// one definition from each API
t.Run("io.argoproj.workflow.v1alpha1.CreateCronWorkflowRequest", func(t *testing.T) {
assert.Contains(t, definitions, "io.argoproj.workflow.v1alpha1.CreateCronWorkflowRequest")
})
t.Run("io.argoproj.workflow.v1alpha1.WorkflowCreateRequest", func(t *testing.T) {
assert.Contains(t, definitions, "io.argoproj.workflow.v1alpha1.WorkflowCreateRequest")
})
t.Run("io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateCreateRequest", func(t *testing.T) {
assert.Contains(t, definitions, "io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateCreateRequest")
})
t.Run("io.argoproj.workflow.v1alpha1.WorkflowTemplateCreateRequest", func(t *testing.T) {
assert.Contains(t, definitions, "io.argoproj.workflow.v1alpha1.WorkflowTemplateCreateRequest")
})
t.Run("io.argoproj.workflow.v1alpha1.InfoResponse", func(t *testing.T) {
assert.Contains(t, definitions, "io.argoproj.workflow.v1alpha1.InfoResponse")
})
t.Run("io.argoproj.workflow.v1alpha1.ScriptTemplate", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.ScriptTemplate"].(obj)
assert.NotContains(t, definition["required"], "name")
})
t.Run("io.argoproj.workflow.v1alpha1.CronWorkflow", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.CronWorkflow"].(obj)
assert.NotContains(t, definition["required"], "status")
})
t.Run("io.argoproj.workflow.v1alpha1.Workflow", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.Workflow"].(obj)
assert.NotContains(t, definition["required"], "status")
})
t.Run("io.argoproj.workflow.v1alpha1.Parameter", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.Parameter"].(obj)
properties := definition["properties"].(obj)
assert.Equal(t, "string", properties["default"].(obj)["type"])
assert.Equal(t, "string", properties["value"].(obj)["type"])
})
t.Run("io.argoproj.workflow.v1alpha1.Histogram", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.Histogram"].(obj)
buckets := definition["properties"].(obj)["buckets"].(obj)
assert.Equal(t, "array", buckets["type"])
assert.Equal(t, obj{"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Amount"}, buckets["items"])
})
t.Run("io.argoproj.workflow.v1alpha1.Amount", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.Amount"].(obj)
assert.Equal(t, "number", definition["type"])
})
t.Run("io.argoproj.workflow.v1alpha1.Item", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.Item"].(obj)
assert.Empty(t, definition["type"])
})
t.Run("io.argoproj.workflow.v1alpha1.ParallelSteps", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.ParallelSteps"].(obj)
assert.Equal(t, "array", definition["type"])
assert.Equal(t, obj{"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowStep"}, definition["items"])
})
// this test makes sure we deal with `inline`
t.Run("io.argoproj.workflow.v1alpha1.UserContainer", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.UserContainer"].(obj)
properties := definition["properties"]
assert.Contains(t, properties, "image")
})
// yes - we actually delete this field
t.Run("io.k8s.api.core.v1.Container", func(t *testing.T) {
definition := definitions["io.k8s.api.core.v1.Container"].(obj)
required := definition["required"]
assert.Contains(t, required, "image")
assert.NotContains(t, required, "name")
})
// this test makes sure we can deal with an instance where we are wrong vs Kubernetes
t.Run("io.k8s.api.core.v1.SecretKeySelector", func(t *testing.T) {
definition := definitions["io.k8s.api.core.v1.SecretKeySelector"].(obj)
properties := definition["properties"]
assert.Contains(t, properties, "name")
})
// this test makes sure we can deal with an instance where we are wrong vs Kubernetes
t.Run("io.k8s.api.core.v1.Volume", func(t *testing.T) {
definition := definitions["io.k8s.api.core.v1.Volume"].(obj)
properties := definition["properties"]
assert.Contains(t, properties, "name")
assert.NotContains(t, properties, "volumeSource")
})
}

View File

@ -0,0 +1,38 @@
package archive
import (
"fmt"
"github.com/spf13/cobra"
client "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive"
)
func NewDeleteCommand() *cobra.Command {
command := &cobra.Command{
Use: "delete UID...",
Short: "delete a workflow in the archive",
Example: `# Delete an archived workflow by its UID:
argo archive delete abc123-def456-ghi789-jkl012
`,
RunE: func(cmd *cobra.Command, args []string) error {
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient, err := apiClient.NewArchivedWorkflowServiceClient()
if err != nil {
return err
}
for _, uid := range args {
if _, err = serviceClient.DeleteArchivedWorkflow(ctx, &workflowarchivepkg.DeleteArchivedWorkflowRequest{Uid: uid}); err != nil {
return err
}
fmt.Printf("Archived workflow '%s' deleted\n", uid)
}
return nil
},
}
return command
}

View File

@ -0,0 +1,100 @@
package archive
import (
"encoding/json"
"fmt"
"log"
"github.com/spf13/cobra"
"sigs.k8s.io/yaml"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/humanize"
)
func NewGetCommand() *cobra.Command {
var output = common.EnumFlagValue{
AllowedValues: []string{"json", "yaml", "wide"},
Value: "wide",
}
command := &cobra.Command{
Use: "get UID",
Short: "get a workflow in the archive",
Args: cobra.ExactArgs(1),
Example: `# Get information about an archived workflow by its UID:
argo archive get abc123-def456-ghi789-jkl012
# Get information about an archived workflow in YAML format:
argo archive get abc123-def456-ghi789-jkl012 -o yaml
`,
RunE: func(cmd *cobra.Command, args []string) error {
uid := args[0]
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient, err := apiClient.NewArchivedWorkflowServiceClient()
if err != nil {
return err
}
wf, err := serviceClient.GetArchivedWorkflow(ctx, &workflowarchivepkg.GetArchivedWorkflowRequest{Uid: uid})
if err != nil {
return err
}
printWorkflow(wf, output.String())
return nil
},
}
command.Flags().VarP(&output, "output", "o", "Output format. "+output.Usage())
return command
}
func printWorkflow(wf *wfv1.Workflow, output string) {
switch output {
case "json":
output, err := json.Marshal(wf)
if err != nil {
log.Fatal(err)
}
fmt.Println(string(output))
case "yaml":
output, err := yaml.Marshal(wf)
if err != nil {
log.Fatal(err)
}
fmt.Println(string(output))
default:
const fmtStr = "%-20s %v\n"
fmt.Printf(fmtStr, "Name:", wf.Name)
fmt.Printf(fmtStr, "Namespace:", wf.Namespace)
serviceAccount := wf.GetExecSpec().ServiceAccountName
if serviceAccount == "" {
// if serviceAccountName was not specified in a submitted Workflow, we will
// use the serviceAccountName provided in Workflow Defaults (if any). If that
// also isn't set, we will use the 'default' ServiceAccount in the namespace
// the workflow will run in.
serviceAccount = "unset (will run with the default ServiceAccount)"
}
fmt.Printf(fmtStr, "ServiceAccount:", serviceAccount)
fmt.Printf(fmtStr, "Status:", wf.Status.Phase)
if wf.Status.Message != "" {
fmt.Printf(fmtStr, "Message:", wf.Status.Message)
}
fmt.Printf(fmtStr, "Created:", humanize.Timestamp(wf.CreationTimestamp.Time))
if !wf.Status.StartedAt.IsZero() {
fmt.Printf(fmtStr, "Started:", humanize.Timestamp(wf.Status.StartedAt.Time))
}
if !wf.Status.FinishedAt.IsZero() {
fmt.Printf(fmtStr, "Finished:", humanize.Timestamp(wf.Status.FinishedAt.Time))
}
if !wf.Status.StartedAt.IsZero() {
fmt.Printf(fmtStr, "Duration:", humanize.RelativeDuration(wf.Status.StartedAt.Time, wf.Status.FinishedAt.Time))
}
}
}

View File

@ -0,0 +1,84 @@
package archive
import (
"context"
"os"
"sort"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/printer"
)
func NewListCommand() *cobra.Command {
var (
selector string
output = common.NewPrintWorkflowOutputValue("wide")
chunkSize int64
)
command := &cobra.Command{
Use: "list",
Short: "list workflows in the archive",
Example: `# List all archived workflows:
argo archive list
# List all archived workflows fetched in chunks of 100:
argo archive list --chunk-size 100
# List all archived workflows in YAML format:
argo archive list -o yaml
# List archived workflows that have both labels:
argo archive list -l key1=value1,key2=value2
`,
RunE: func(cmd *cobra.Command, args []string) error {
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient, err := apiClient.NewArchivedWorkflowServiceClient()
if err != nil {
return err
}
namespace := client.Namespace()
workflows, err := listArchivedWorkflows(ctx, serviceClient, namespace, selector, chunkSize)
if err != nil {
return err
}
return printer.PrintWorkflows(workflows, os.Stdout, printer.PrintOpts{Output: output.String(), Namespace: true, UID: true})
},
}
command.Flags().VarP(&output, "output", "o", "Output format. "+output.Usage())
command.Flags().StringVarP(&selector, "selector", "l", "", "Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
command.Flags().Int64VarP(&chunkSize, "chunk-size", "", 0, "Return large lists in chunks rather than all at once. Pass 0 to disable.")
return command
}
func listArchivedWorkflows(ctx context.Context, serviceClient workflowarchivepkg.ArchivedWorkflowServiceClient, namespace string, labelSelector string, chunkSize int64) (wfv1.Workflows, error) {
listOpts := &metav1.ListOptions{
LabelSelector: labelSelector,
Limit: chunkSize,
}
var workflows wfv1.Workflows
for {
log.WithField("listOpts", listOpts).Debug()
resp, err := serviceClient.ListArchivedWorkflows(ctx, &workflowarchivepkg.ListArchivedWorkflowsRequest{Namespace: namespace, ListOptions: listOpts})
if err != nil {
return nil, err
}
workflows = append(workflows, resp.Items...)
if resp.Continue == "" {
break
}
listOpts.Continue = resp.Continue
}
sort.Sort(workflows)
return workflows, nil
}
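`listArchivedWorkflows` above uses the standard Kubernetes chunked-list pattern: request up to `chunk-size` items, then re-issue the request with the returned `Continue` token until it comes back empty. A minimal, self-contained sketch of the same loop against a toy paged API (the `pagedAPI` type and its `List` method are illustrative stand-ins, not part of the Argo client):

```go
package main

import "fmt"

// page is an illustrative stand-in for a Kubernetes-style list response.
type page struct {
	Items    []string
	Continue string // empty when there are no more pages
}

// pagedAPI simulates a server that returns results in chunks.
type pagedAPI struct{ data []string }

func (a pagedAPI) List(limit int, cont string) page {
	start := 0
	fmt.Sscanf(cont, "%d", &start) // decode the opaque continue token (empty token = start at 0)
	end := start + limit
	if end > len(a.data) {
		end = len(a.data)
	}
	next := ""
	if end < len(a.data) {
		next = fmt.Sprintf("%d", end)
	}
	return page{Items: a.data[start:end], Continue: next}
}

func main() {
	api := pagedAPI{data: []string{"wf-a", "wf-b", "wf-c", "wf-d", "wf-e"}}
	var all []string
	cont := ""
	for { // same shape as the loop in listArchivedWorkflows
		resp := api.List(2, cont)
		all = append(all, resp.Items...)
		if resp.Continue == "" {
			break
		}
		cont = resp.Continue
	}
	fmt.Println(all) // [wf-a wf-b wf-c wf-d wf-e]
}
```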

View File

@ -0,0 +1,36 @@
package archive
import (
"fmt"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive"
)
func NewListLabelKeyCommand() *cobra.Command {
command := &cobra.Command{
Use: "list-label-keys",
Short: "list workflows label keys in the archive",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient, err := apiClient.NewArchivedWorkflowServiceClient()
if err != nil {
return err
}
keys, err := serviceClient.ListArchivedWorkflowLabelKeys(ctx, &workflowarchivepkg.ListArchivedWorkflowLabelKeysRequest{})
if err != nil {
return err
}
for _, str := range keys.Items {
fmt.Printf("%s\n", str)
}
return nil
},
}
return command
}

View File

@ -0,0 +1,58 @@
package archive
import (
"context"
"fmt"
"github.com/argoproj/argo-workflows/v3/util/logging"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive"
"github.com/argoproj/argo-workflows/v3/util/errors"
)
func NewListLabelValueCommand() *cobra.Command {
var (
selector string
)
command := &cobra.Command{
Use: "list-label-values",
Short: "get workflow label values in the archive",
RunE: func(cmd *cobra.Command, args []string) error {
listOpts := &metav1.ListOptions{
LabelSelector: selector,
}
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient, err := apiClient.NewArchivedWorkflowServiceClient()
if err != nil {
return err
}
labels, err := serviceClient.ListArchivedWorkflowLabelValues(ctx, &workflowarchivepkg.ListArchivedWorkflowLabelValuesRequest{ListOptions: listOpts})
if err != nil {
return err
}
for _, str := range labels.Items {
fmt.Printf("%s\n", str)
}
return nil
},
}
ctx := command.Context()
if ctx != nil {
ctx = logging.WithLogger(context.Background(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
command.SetContext(ctx)
}
command.Flags().StringVarP(&selector, "selector", "l", "", "Selector (label query) to query on, allows 1 value (e.g. -l key1)")
err := command.MarkFlagRequired("selector")
errors.CheckError(ctx, err)
return command
}

View File

@ -0,0 +1,151 @@
package archive
import (
"context"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
client "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)
type resubmitOps struct {
priority int32 // --priority
memoized bool // --memoized
namespace string // --namespace
labelSelector string // --selector
fieldSelector string // --field-selector
}
// hasSelector returns true if the CLI arguments select multiple workflows
func (o *resubmitOps) hasSelector() bool {
	return o.labelSelector != "" || o.fieldSelector != ""
}
func NewResubmitCommand() *cobra.Command {
var (
resubmitOpts resubmitOps
cliSubmitOpts = common.NewCliSubmitOpts()
)
command := &cobra.Command{
Use: "resubmit [WORKFLOW...]",
Short: "resubmit one or more workflows",
Example: `# Resubmit a workflow:
argo archive resubmit uid
# Resubmit multiple workflows:
argo archive resubmit uid another-uid
# Resubmit multiple workflows by label selector:
argo archive resubmit -l workflows.argoproj.io/test=true
# Resubmit multiple workflows by field selector:
argo archive resubmit --field-selector metadata.namespace=argo
# Resubmit and wait for completion:
argo archive resubmit --wait uid
# Resubmit and watch until completion:
argo archive resubmit --watch uid
# Resubmit and tail logs until completion:
argo archive resubmit --log uid
`,
RunE: func(cmd *cobra.Command, args []string) error {
if cmd.Flag("priority").Changed {
cliSubmitOpts.Priority = &resubmitOpts.priority
}
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient := apiClient.NewWorkflowServiceClient() // needed for wait watch or log flags
archiveServiceClient, err := apiClient.NewArchivedWorkflowServiceClient()
if err != nil {
return err
}
resubmitOpts.namespace = client.Namespace()
return resubmitArchivedWorkflows(ctx, archiveServiceClient, serviceClient, resubmitOpts, cliSubmitOpts, args)
},
}
command.Flags().StringArrayVarP(&cliSubmitOpts.Parameters, "parameter", "p", []string{}, "input parameter to override on the original workflow spec")
command.Flags().Int32Var(&resubmitOpts.priority, "priority", 0, "workflow priority")
command.Flags().VarP(&cliSubmitOpts.Output, "output", "o", "Output format. "+cliSubmitOpts.Output.Usage())
command.Flags().BoolVarP(&cliSubmitOpts.Wait, "wait", "w", false, "wait for the workflow to complete, only works when a single workflow is resubmitted")
command.Flags().BoolVar(&cliSubmitOpts.Watch, "watch", false, "watch the workflow until it completes, only works when a single workflow is resubmitted")
command.Flags().BoolVar(&cliSubmitOpts.Log, "log", false, "log the workflow until it completes")
command.Flags().BoolVar(&resubmitOpts.memoized, "memoized", false, "re-use successful steps & outputs from the previous run")
command.Flags().StringVarP(&resubmitOpts.labelSelector, "selector", "l", "", "Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
command.Flags().StringVar(&resubmitOpts.fieldSelector, "field-selector", "", "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.")
return command
}
// resubmitArchivedWorkflows resubmits workflows by the given resubmitOpts or workflow names
func resubmitArchivedWorkflows(ctx context.Context, archiveServiceClient workflowarchivepkg.ArchivedWorkflowServiceClient, serviceClient workflowpkg.WorkflowServiceClient, resubmitOpts resubmitOps, cliSubmitOpts common.CliSubmitOpts, args []string) error {
var (
wfs wfv1.Workflows
err error
)
if resubmitOpts.hasSelector() {
wfs, err = listArchivedWorkflows(ctx, archiveServiceClient, resubmitOpts.fieldSelector, resubmitOpts.labelSelector, 0)
if err != nil {
return err
}
}
for _, uid := range args {
wfs = append(wfs, wfv1.Workflow{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(uid),
Namespace: resubmitOpts.namespace,
},
})
}
var lastResubmitted *wfv1.Workflow
resubmittedUids := make(map[string]bool)
for _, wf := range wfs {
if _, ok := resubmittedUids[string(wf.UID)]; ok {
// de-duplication in case there is an overlap between the selector and given workflow names
continue
}
resubmittedUids[string(wf.UID)] = true
lastResubmitted, err = archiveServiceClient.ResubmitArchivedWorkflow(ctx, &workflowarchivepkg.ResubmitArchivedWorkflowRequest{
Uid: string(wf.UID),
Namespace: wf.Namespace,
Name: wf.Name,
Memoized: resubmitOpts.memoized,
Parameters: cliSubmitOpts.Parameters,
})
if err != nil {
return err
}
printWorkflow(lastResubmitted, cliSubmitOpts.Output.String())
}
if len(resubmittedUids) == 1 {
// watch or wait when there is only one workflow retried
return common.WaitWatchOrLog(ctx, serviceClient, lastResubmitted.Namespace, []string{lastResubmitted.Name}, cliSubmitOpts)
}
return nil
}

View File

@ -0,0 +1,156 @@
package archive
import (
"context"
"errors"
"fmt"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
client "github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
workflowarchivepkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflowarchive"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)
type retryOps struct {
nodeFieldSelector string // --node-field-selector
restartSuccessful bool // --restart-successful
namespace string // --namespace
labelSelector string // --selector
fieldSelector string // --field-selector
}
// hasSelector returns true if the CLI arguments select multiple workflows
func (o *retryOps) hasSelector() bool {
	return o.labelSelector != "" || o.fieldSelector != ""
}
func NewRetryCommand() *cobra.Command {
var (
cliSubmitOpts = common.NewCliSubmitOpts()
retryOpts retryOps
)
command := &cobra.Command{
Use: "retry [WORKFLOW...]",
Short: "retry zero or more workflows",
Example: `# Retry a workflow:
argo archive retry uid
# Retry multiple workflows:
argo archive retry uid another-uid
# Retry multiple workflows by label selector:
argo archive retry -l workflows.argoproj.io/test=true
# Retry multiple workflows by field selector:
argo archive retry --field-selector metadata.namespace=argo
# Retry and wait for completion:
argo archive retry --wait uid
# Retry and watch until completion:
argo archive retry --watch uid
# Retry and tail logs until completion:
argo archive retry --log uid
`,
Args: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 && !retryOpts.hasSelector() {
return errors.New("requires either selector or workflow")
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient := apiClient.NewWorkflowServiceClient()
archiveServiceClient, err := apiClient.NewArchivedWorkflowServiceClient()
if err != nil {
return err
}
retryOpts.namespace = client.Namespace()
return retryArchivedWorkflows(ctx, archiveServiceClient, serviceClient, retryOpts, cliSubmitOpts, args)
},
}
command.Flags().StringArrayVarP(&cliSubmitOpts.Parameters, "parameter", "p", []string{}, "input parameter to override on the original workflow spec")
command.Flags().VarP(&cliSubmitOpts.Output, "output", "o", "Output format. "+cliSubmitOpts.Output.Usage())
command.Flags().BoolVarP(&cliSubmitOpts.Wait, "wait", "w", false, "wait for the workflow to complete, only works when a single workflow is retried")
command.Flags().BoolVar(&cliSubmitOpts.Watch, "watch", false, "watch the workflow until it completes, only works when a single workflow is retried")
command.Flags().BoolVar(&cliSubmitOpts.Log, "log", false, "log the workflow until it completes")
command.Flags().BoolVar(&retryOpts.restartSuccessful, "restart-successful", false, "indicates to restart successful nodes matching the --node-field-selector")
command.Flags().StringVar(&retryOpts.nodeFieldSelector, "node-field-selector", "", "selector of nodes to reset, e.g.: --node-field-selector inputs.parameters.myparam.value=abc")
command.Flags().StringVarP(&retryOpts.labelSelector, "selector", "l", "", "Selector (label query) to filter on, not including uninitialized ones, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2)")
command.Flags().StringVar(&retryOpts.fieldSelector, "field-selector", "", "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.")
return command
}
// retryArchivedWorkflows retries workflows by the given retryOpts or workflow names
func retryArchivedWorkflows(ctx context.Context, archiveServiceClient workflowarchivepkg.ArchivedWorkflowServiceClient, serviceClient workflowpkg.WorkflowServiceClient, retryOpts retryOps, cliSubmitOpts common.CliSubmitOpts, args []string) error {
selector, err := fields.ParseSelector(retryOpts.nodeFieldSelector)
if err != nil {
return fmt.Errorf("unable to parse node field selector '%s': %s", retryOpts.nodeFieldSelector, err)
}
var wfs wfv1.Workflows
if retryOpts.hasSelector() {
wfs, err = listArchivedWorkflows(ctx, archiveServiceClient, retryOpts.fieldSelector, retryOpts.labelSelector, 0)
if err != nil {
return err
}
}
for _, uid := range args {
wfs = append(wfs, wfv1.Workflow{
ObjectMeta: metav1.ObjectMeta{
UID: types.UID(uid),
Namespace: retryOpts.namespace,
},
})
}
var lastRetried *wfv1.Workflow
retriedUids := make(map[string]bool)
for _, wf := range wfs {
if _, ok := retriedUids[string(wf.UID)]; ok {
// de-duplication in case there is an overlap between the selector and given workflow names
continue
}
retriedUids[string(wf.UID)] = true
lastRetried, err = archiveServiceClient.RetryArchivedWorkflow(ctx, &workflowarchivepkg.RetryArchivedWorkflowRequest{
Uid: string(wf.UID),
Namespace: wf.Namespace,
Name: wf.Name,
RestartSuccessful: retryOpts.restartSuccessful,
NodeFieldSelector: selector.String(),
Parameters: cliSubmitOpts.Parameters,
})
if err != nil {
return err
}
printWorkflow(lastRetried, cliSubmitOpts.Output.String())
}
if len(retriedUids) == 1 {
// watch or wait when there is only one workflow retried
return common.WaitWatchOrLog(ctx, serviceClient, lastRetried.Namespace, []string{lastRetried.Name}, cliSubmitOpts)
}
return nil
}

View File

@ -0,0 +1,24 @@
package archive
import (
"github.com/spf13/cobra"
)
func NewArchiveCommand() *cobra.Command {
command := &cobra.Command{
Use: "archive",
Short: "manage the workflow archive",
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
command.AddCommand(NewListCommand())
command.AddCommand(NewGetCommand())
command.AddCommand(NewDeleteCommand())
command.AddCommand(NewListLabelKeyCommand())
command.AddCommand(NewListLabelValueCommand())
command.AddCommand(NewResubmitCommand())
command.AddCommand(NewRetryCommand())
return command
}
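`NewArchiveCommand` follows cobra's usual parent/child wiring: the parent command only prints help, and each leaf is attached with `AddCommand`. A minimal sketch of the same structure, assuming a hypothetical root command (not the actual argo entrypoint):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "argo-sketch"}
	archive := &cobra.Command{
		Use:   "archive",
		Short: "manage the workflow archive",
		// the parent command only prints help, like NewArchiveCommand above
		RunE: func(cmd *cobra.Command, args []string) error { return cmd.Help() },
	}
	list := &cobra.Command{
		Use:   "list",
		Short: "list workflows in the archive",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("would list archived workflows here")
			return nil
		},
	}
	archive.AddCommand(list) // same pattern as AddCommand calls above
	root.AddCommand(archive)
	_ = root.Execute() // "argo-sketch archive list" reaches the leaf RunE
}
```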

View File

@ -0,0 +1,17 @@
package auth
import (
"github.com/spf13/cobra"
)
func NewAuthCommand() *cobra.Command {
command := &cobra.Command{
Use: "auth",
Short: "manage authentication settings",
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
command.AddCommand(NewTokenCommand())
return command
}

View File

@ -0,0 +1,25 @@
package auth
import (
"fmt"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
)
func NewTokenCommand() *cobra.Command {
return &cobra.Command{
Use: "token",
Short: "Print the auth token",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
authString, err := client.GetAuthString()
if err != nil {
return err
}
fmt.Println(authString)
return nil
},
}
}

View File

@ -0,0 +1,110 @@
package client
import (
"context"
"fmt"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/argo-workflows/v3"
"github.com/argoproj/argo-workflows/v3/pkg/apiclient"
"github.com/argoproj/argo-workflows/v3/util/kubeconfig"
)
var (
ArgoServerOpts = apiclient.ArgoServerOpts{}
instanceID string
)
var overrides = clientcmd.ConfigOverrides{}
var (
explicitPath string
Offline bool
OfflineFiles []string
)
func AddKubectlFlagsToCmd(cmd *cobra.Command) {
kflags := clientcmd.RecommendedConfigOverrideFlags("")
cmd.PersistentFlags().StringVar(&explicitPath, "kubeconfig", "", "Path to a kube config. Only required if out-of-cluster")
clientcmd.BindOverrideFlags(&overrides, cmd.PersistentFlags(), kflags)
}
func GetConfig() clientcmd.ClientConfig {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
loadingRules.ExplicitPath = explicitPath
return clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, &overrides, os.Stdin)
}
func AddAPIClientFlagsToCmd(cmd *cobra.Command) {
cmd.PersistentFlags().StringVar(&instanceID, "instanceid", os.Getenv("ARGO_INSTANCEID"), "submit with a specific controller's instance id label. Default to the ARGO_INSTANCEID environment variable.")
// "-s" like kubectl
cmd.PersistentFlags().StringVarP(&ArgoServerOpts.URL, "argo-server", "s", os.Getenv("ARGO_SERVER"), "API server `host:port`. e.g. localhost:2746. Defaults to the ARGO_SERVER environment variable.")
cmd.PersistentFlags().StringVar(&ArgoServerOpts.Path, "argo-base-href", os.Getenv("ARGO_BASE_HREF"), "Path to use with HTTP client due to Base HREF. Defaults to the ARGO_BASE_HREF environment variable.")
cmd.PersistentFlags().BoolVar(&ArgoServerOpts.HTTP1, "argo-http1", os.Getenv("ARGO_HTTP1") == "true", "If true, use the HTTP client. Defaults to the ARGO_HTTP1 environment variable.")
cmd.PersistentFlags().StringSliceVarP(&ArgoServerOpts.Headers, "header", "H", []string{}, "Sets additional header to all requests made by Argo CLI. (Can be repeated multiple times to add multiple headers, also supports comma separated headers) Used only when either ARGO_HTTP1 or --argo-http1 is set to true.")
// "-e" for encrypted - like zip
cmd.PersistentFlags().BoolVarP(&ArgoServerOpts.Secure, "secure", "e", os.Getenv("ARGO_SECURE") != "false", "Whether or not the server is using TLS with the Argo Server. Defaults to the ARGO_SECURE environment variable.")
// "-k" like curl
cmd.PersistentFlags().BoolVarP(&ArgoServerOpts.InsecureSkipVerify, "insecure-skip-verify", "k", os.Getenv("ARGO_INSECURE_SKIP_VERIFY") == "true", "If true, the Argo Server's certificate will not be checked for validity. This will make your HTTPS connections insecure. Defaults to the ARGO_INSECURE_SKIP_VERIFY environment variable.")
}
func NewAPIClient(ctx context.Context) (context.Context, apiclient.Client, error) {
return apiclient.NewClientFromOpts(
apiclient.Opts{
ArgoServerOpts: ArgoServerOpts,
InstanceID: instanceID,
AuthSupplier: func() string {
authString, err := GetAuthString()
if err != nil {
log.Fatal(err)
}
return authString
},
ClientConfigSupplier: func() clientcmd.ClientConfig { return GetConfig() },
Offline: Offline,
OfflineFiles: OfflineFiles,
Context: ctx,
})
}
func Namespace() string {
if Offline {
return ""
}
if overrides.Context.Namespace != "" {
return overrides.Context.Namespace
}
namespace, ok := os.LookupEnv("ARGO_NAMESPACE")
if ok {
return namespace
}
namespace, _, err := GetConfig().Namespace()
if err != nil {
log.Fatal(err)
}
return namespace
}
func GetAuthString() (string, error) {
token, ok := os.LookupEnv("ARGO_TOKEN")
if ok {
return token, nil
}
restConfig, err := GetConfig().ClientConfig()
if err != nil {
return "", err
}
version := argo.GetVersion()
restConfig = restclient.AddUserAgent(restConfig, fmt.Sprintf("argo-workflows/%s argo-cli", version.Version))
authString, err := kubeconfig.GetAuthString(restConfig, explicitPath)
if err != nil {
return "", err
}
return authString, nil
}

View File

@ -0,0 +1,43 @@
package client
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/argoproj/argo-workflows/v3/util/logging"
)
func TestGetAuthString(t *testing.T) {
t.Setenv("ARGO_TOKEN", "my-token")
authString, err := GetAuthString()
require.NoError(t, err)
assert.Equal(t, "my-token", authString)
}
func TestNamespace(t *testing.T) {
t.Setenv("ARGO_NAMESPACE", "my-ns")
assert.Equal(t, "my-ns", Namespace())
}
func TestCreateOfflineClient(t *testing.T) {
t.Run("creating an offline client with no files should not fail", func(t *testing.T) {
Offline = true
OfflineFiles = []string{}
ctx := logging.WithLogger(context.TODO(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
_, _, err := NewAPIClient(ctx)
assert.NoError(t, err)
})
t.Run("creating an offline client with a non-existing file should fail", func(t *testing.T) {
Offline = true
OfflineFiles = []string{"non-existing-file"}
ctx := logging.WithLogger(context.TODO(), logging.NewSlogLogger(logging.GetGlobalLevel(), logging.GetGlobalFormat()))
_, _, err := NewAPIClient(ctx)
assert.Error(t, err)
})
}

View File

@ -0,0 +1,69 @@
package clustertemplate
import (
"context"
"fmt"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
"github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate"
)
type cliCreateOpts struct {
output common.EnumFlagValue // --output
strict bool // --strict
}
func NewCreateCommand() *cobra.Command {
var cliCreateOpts = cliCreateOpts{output: common.NewPrintWorkflowOutputValue("")}
command := &cobra.Command{
Use: "create FILE1 FILE2...",
Short: "create a cluster workflow template",
Example: `# Create a Cluster Workflow Template:
argo cluster-template create FILE1
# Create a Cluster Workflow Template and print it as YAML:
argo cluster-template create FILE1 --output yaml
# Create a Cluster Workflow Template with relaxed validation:
argo cluster-template create FILE1 --strict false
`,
Args: cobra.MinimumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return createClusterWorkflowTemplates(cmd.Context(), args, &cliCreateOpts)
},
}
command.Flags().VarP(&cliCreateOpts.output, "output", "o", "Output format. "+cliCreateOpts.output.Usage())
command.Flags().BoolVar(&cliCreateOpts.strict, "strict", true, "perform strict workflow validation")
return command
}
func createClusterWorkflowTemplates(ctx context.Context, filePaths []string, cliOpts *cliCreateOpts) error {
if cliOpts == nil {
cliOpts = &cliCreateOpts{}
}
ctx, apiClient, err := client.NewAPIClient(ctx)
if err != nil {
return err
}
serviceClient, err := apiClient.NewClusterWorkflowTemplateServiceClient()
if err != nil {
return err
}
clusterWorkflowTemplates := generateClusterWorkflowTemplates(filePaths, cliOpts.strict)
for _, wftmpl := range clusterWorkflowTemplates {
created, err := serviceClient.CreateClusterWorkflowTemplate(ctx, &clusterworkflowtemplate.ClusterWorkflowTemplateCreateRequest{
Template: &wftmpl,
})
if err != nil {
return fmt.Errorf("Failed to create cluster workflow template: %s, %v", wftmpl.Name, err)
}
printClusterWorkflowTemplate(created, cliOpts.output.String())
}
return nil
}

View File

@ -0,0 +1,62 @@
package clustertemplate
import (
"context"
"fmt"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate"
)
// NewDeleteCommand returns a new instance of an `argo delete` command
func NewDeleteCommand() *cobra.Command {
var all bool
command := &cobra.Command{
Use: "delete WORKFLOW_TEMPLATE",
Short: "delete a cluster workflow template",
RunE: func(cmd *cobra.Command, args []string) error {
return apiServerDeleteClusterWorkflowTemplates(cmd.Context(), all, args)
},
}
command.Flags().BoolVar(&all, "all", false, "Delete all cluster workflow templates")
return command
}
func apiServerDeleteClusterWorkflowTemplates(ctx context.Context, allWFs bool, wfTmplNames []string) error {
ctx, apiClient, err := client.NewAPIClient(ctx)
if err != nil {
return err
}
serviceClient, err := apiClient.NewClusterWorkflowTemplateServiceClient()
if err != nil {
return err
}
var delWFTmplNames []string
if allWFs {
cwftmplList, err := serviceClient.ListClusterWorkflowTemplates(ctx, &clusterworkflowtemplate.ClusterWorkflowTemplateListRequest{})
if err != nil {
return err
}
for _, cwfTmpl := range cwftmplList.Items {
delWFTmplNames = append(delWFTmplNames, cwfTmpl.Name)
}
} else {
delWFTmplNames = wfTmplNames
}
for _, cwfTmplName := range delWFTmplNames {
_, err := serviceClient.DeleteClusterWorkflowTemplate(ctx, &clusterworkflowtemplate.ClusterWorkflowTemplateDeleteRequest{
Name: cwfTmplName,
})
if err != nil {
return err
}
fmt.Printf("ClusterWorkflowTemplate '%s' deleted\n", cwfTmplName)
}
return nil
}

View File

@ -0,0 +1,41 @@
package clustertemplate
import (
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
clusterworkflowtmplpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate"
)
func NewGetCommand() *cobra.Command {
var output = common.NewPrintWorkflowOutputValue("")
command := &cobra.Command{
Use: "get CLUSTER WORKFLOW_TEMPLATE...",
Short: "display details about a cluster workflow template",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient, err := apiClient.NewClusterWorkflowTemplateServiceClient()
if err != nil {
return err
}
for _, name := range args {
wftmpl, err := serviceClient.GetClusterWorkflowTemplate(ctx, &clusterworkflowtmplpkg.ClusterWorkflowTemplateGetRequest{
Name: name,
})
if err != nil {
return err
}
printClusterWorkflowTemplate(wftmpl, output.String())
}
return nil
},
}
command.Flags().VarP(&output, "output", "o", "Output format. "+output.Usage())
return command
}

View File

@ -0,0 +1,46 @@
package clustertemplate
import (
"os"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
"github.com/argoproj/argo-workflows/v3/cmd/argo/lint"
wf "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow"
)
func NewLintCommand() *cobra.Command {
var (
strict bool
output = common.EnumFlagValue{
AllowedValues: []string{"pretty", "simple"},
Value: "pretty",
}
)
command := &cobra.Command{
Use: "lint FILE...",
Short: "validate files or directories of cluster workflow template manifests",
Args: cobra.MinimumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
opts := lint.LintOptions{
Files: args,
DefaultNamespace: client.Namespace(),
Strict: strict,
Printer: os.Stdout,
}
return lint.RunLint(ctx, apiClient, []string{wf.ClusterWorkflowTemplatePlural}, output.String(), false, opts)
},
}
command.Flags().VarP(&output, "output", "o", "Linting results output format. "+output.Usage())
command.Flags().BoolVar(&strict, "strict", true, "perform strict workflow validation")
return command
}

View File

@ -0,0 +1,72 @@
package clustertemplate
import (
"fmt"
"os"
"text/tabwriter"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
"github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)
func NewListCommand() *cobra.Command {
var output = common.EnumFlagValue{
AllowedValues: []string{"wide", "name"},
}
command := &cobra.Command{
Use: "list",
Short: "list cluster workflow templates",
Example: `# List Cluster Workflow Templates:
argo cluster-template list
# List Cluster Workflow Templates with additional details such as labels, annotations, and status:
argo cluster-template list --output wide
# List Cluster Workflow Templates by name only:
argo cluster-template list -o name
`,
RunE: func(cmd *cobra.Command, args []string) error {
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient, err := apiClient.NewClusterWorkflowTemplateServiceClient()
if err != nil {
return err
}
cwftmplList, err := serviceClient.ListClusterWorkflowTemplates(ctx, &clusterworkflowtemplate.ClusterWorkflowTemplateListRequest{})
if err != nil {
return err
}
switch output.String() {
case "", "wide":
printTable(cwftmplList.Items)
case "name":
for _, cwftmp := range cwftmplList.Items {
fmt.Println(cwftmp.Name)
}
default:
return fmt.Errorf("Unknown output mode: %s", output.String())
}
return nil
},
}
command.Flags().VarP(&output, "output", "o", "Output format. "+output.Usage())
return command
}
func printTable(wfList []wfv1.ClusterWorkflowTemplate) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
_, _ = fmt.Fprint(w, "NAME")
_, _ = fmt.Fprint(w, "\n")
for _, wf := range wfList {
_, _ = fmt.Fprintf(w, "%s\t", wf.Name)
_, _ = fmt.Fprintf(w, "\n")
}
_ = w.Flush()
}

View File

@ -0,0 +1,25 @@
package clustertemplate
import (
"github.com/spf13/cobra"
)
func NewClusterTemplateCommand() *cobra.Command {
command := &cobra.Command{
Use: "cluster-template",
Aliases: []string{"cwftmpl", "cwft"},
Short: "manipulate cluster workflow templates",
RunE: func(cmd *cobra.Command, args []string) error {
return cmd.Help()
},
}
command.AddCommand(NewGetCommand())
command.AddCommand(NewListCommand())
command.AddCommand(NewCreateCommand())
command.AddCommand(NewDeleteCommand())
command.AddCommand(NewLintCommand())
command.AddCommand(NewUpdateCommand())
return command
}

View File

@ -0,0 +1,75 @@
package clustertemplate
import (
"context"
"fmt"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
"github.com/argoproj/argo-workflows/v3/pkg/apiclient/clusterworkflowtemplate"
)
type cliUpdateOpts struct {
output common.EnumFlagValue // --output
strict bool // --strict
}
func NewUpdateCommand() *cobra.Command {
var cliUpdateOpts = cliUpdateOpts{output: common.NewPrintWorkflowOutputValue("")}
command := &cobra.Command{
Use: "update FILE1 FILE2...",
Short: "update a cluster workflow template",
Example: `# Update a Cluster Workflow Template:
argo cluster-template update FILE1
# Update a Cluster Workflow Template and print it as YAML:
argo cluster-template update FILE1 --output yaml
# Update a Cluster Workflow Template with relaxed validation:
argo cluster-template update FILE1 --strict false
`,
Args: cobra.MinimumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
return updateClusterWorkflowTemplates(cmd.Context(), args, &cliUpdateOpts)
},
}
command.Flags().VarP(&cliUpdateOpts.output, "output", "o", "Output format. "+cliUpdateOpts.output.Usage())
command.Flags().BoolVar(&cliUpdateOpts.strict, "strict", true, "perform strict workflow validation")
return command
}
func updateClusterWorkflowTemplates(ctx context.Context, filePaths []string, cliOpts *cliUpdateOpts) error {
if cliOpts == nil {
cliOpts = &cliUpdateOpts{}
}
ctx, apiClient, err := client.NewAPIClient(ctx)
if err != nil {
return err
}
serviceClient, err := apiClient.NewClusterWorkflowTemplateServiceClient()
if err != nil {
return err
}
clusterWorkflowTemplates := generateClusterWorkflowTemplates(filePaths, cliOpts.strict)
for _, wftmpl := range clusterWorkflowTemplates {
current, err := serviceClient.GetClusterWorkflowTemplate(ctx, &clusterworkflowtemplate.ClusterWorkflowTemplateGetRequest{
Name: wftmpl.Name,
})
if err != nil {
return fmt.Errorf("Failed to get existing cluster workflow template %q to update: %v", wftmpl.Name, err)
}
wftmpl.ResourceVersion = current.ResourceVersion
updated, err := serviceClient.UpdateClusterWorkflowTemplate(ctx, &clusterworkflowtemplate.ClusterWorkflowTemplateUpdateRequest{
Template: &wftmpl,
})
if err != nil {
return fmt.Errorf("Failed to update cluster workflow template: %s, %v", wftmpl.Name, err)
}
printClusterWorkflowTemplate(updated, cliOpts.output.String())
}
return nil
}

View File

@ -0,0 +1,78 @@
package clustertemplate
import (
"encoding/json"
"fmt"
"log"
"sigs.k8s.io/yaml"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/humanize"
argoJson "github.com/argoproj/argo-workflows/v3/util/json"
"github.com/argoproj/argo-workflows/v3/workflow/common"
"github.com/argoproj/argo-workflows/v3/workflow/util"
)
func generateClusterWorkflowTemplates(filePaths []string, strict bool) []wfv1.ClusterWorkflowTemplate {
fileContents, err := util.ReadManifest(filePaths...)
if err != nil {
log.Fatal(err)
}
var clusterWorkflowTemplates []wfv1.ClusterWorkflowTemplate
for _, body := range fileContents {
cwftmpls, err := unmarshalClusterWorkflowTemplates(body, strict)
if err != nil {
log.Fatalf("Failed to parse cluster workflow template: %v", err)
}
clusterWorkflowTemplates = append(clusterWorkflowTemplates, cwftmpls...)
}
if len(clusterWorkflowTemplates) == 0 {
log.Fatalln("No cluster workflow template found in given files")
}
return clusterWorkflowTemplates
}
// unmarshalClusterWorkflowTemplates unmarshals the input bytes as either json or yaml
func unmarshalClusterWorkflowTemplates(wfBytes []byte, strict bool) ([]wfv1.ClusterWorkflowTemplate, error) {
var cwft wfv1.ClusterWorkflowTemplate
var jsonOpts []argoJson.JSONOpt
if strict {
jsonOpts = append(jsonOpts, argoJson.DisallowUnknownFields)
}
err := argoJson.Unmarshal(wfBytes, &cwft, jsonOpts...)
if err == nil {
return []wfv1.ClusterWorkflowTemplate{cwft}, nil
}
yamlWfs, err := common.SplitClusterWorkflowTemplateYAMLFile(wfBytes, strict)
if err == nil {
return yamlWfs, nil
}
return nil, err
}
func printClusterWorkflowTemplate(wf *wfv1.ClusterWorkflowTemplate, outFmt string) {
switch outFmt {
case "name":
fmt.Println(wf.Name)
case "json":
outBytes, _ := json.MarshalIndent(wf, "", " ")
fmt.Println(string(outBytes))
case "yaml":
outBytes, _ := yaml.Marshal(wf)
fmt.Print(string(outBytes))
case "wide", "":
printClusterWorkflowTemplateHelper(wf)
default:
log.Fatalf("Unknown output format: %s", outFmt)
}
}
func printClusterWorkflowTemplateHelper(wf *wfv1.ClusterWorkflowTemplate) {
const fmtStr = "%-20s %v\n"
fmt.Printf(fmtStr, "Name:", wf.Name)
fmt.Printf(fmtStr, "Created:", humanize.Timestamp(wf.CreationTimestamp.Time))
}

View File

@ -0,0 +1,46 @@
package clustertemplate
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const cwfts = `
apiVersion: argoproj.io/v1alpha1
kind: ClusterWorkflowTemplate
metadata:
name: cluster-workflow-template-whalesay-template
spec:
templates:
- name: whalesay-template
inputs:
parameters:
- name: message
container:
image: docker/whalesay
command: [cowsay]
args: ["{{inputs.parameters.message}}"]
---
apiVersion: argoproj.io/v1alpha1
kind: ClusterWorkflowTemplate
metadata:
name: cluster-workflow-template-whalesay-template
spec:
templates:
- name: whalesay-template
inputs:
parameters:
- name: message
container:
image: docker/whalesay
command: [cowsay]
args: ["{{inputs.parameters.message}}"]
`
func TestUnmarshalCWFT(t *testing.T) {
clusterwfts, err := unmarshalClusterWorkflowTemplates([]byte(cwfts), false)
require.NoError(t, err)
assert.Len(t, clusterwfts, 2)
}

View File

@ -0,0 +1,110 @@
package common
import (
"fmt"
"os"
"strconv"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
)
// Global variables
var (
JobStatusIconMap map[wfv1.NodePhase]string
NodeTypeIconMap map[wfv1.NodeType]string
WorkflowConditionIconMap map[wfv1.ConditionType]string
NoColor bool
NoUtf8 bool
)
func init() {
cobra.OnInitialize(initializeSession)
}
// ANSI escape codes
const (
escape = "\x1b"
noFormat = 0
Bold = 1
FgBlack = 30
FgRed = 31
FgGreen = 32
FgYellow = 33
FgBlue = 34
FgMagenta = 35
FgCyan = 36
FgWhite = 37
FgDefault = 39
)
func initializeSession() {
log.SetFormatter(&log.TextFormatter{
TimestampFormat: "2006-01-02T15:04:05.000Z",
FullTimestamp: true,
})
if NoUtf8 {
JobStatusIconMap = map[wfv1.NodePhase]string{
wfv1.NodePending: ansiFormat("Pending", FgYellow),
wfv1.NodeRunning: ansiFormat("Running", FgCyan),
wfv1.NodeSucceeded: ansiFormat("Succeeded", FgGreen),
wfv1.NodeSkipped: ansiFormat("Skipped", FgDefault),
wfv1.NodeFailed: ansiFormat("Failed", FgRed),
wfv1.NodeError: ansiFormat("Error", FgRed),
}
NodeTypeIconMap = map[wfv1.NodeType]string{
wfv1.NodeTypeSuspend: ansiFormat("Suspend", FgCyan),
}
WorkflowConditionIconMap = map[wfv1.ConditionType]string{
wfv1.ConditionTypeMetricsError: ansiFormat("Error", FgRed),
wfv1.ConditionTypeSpecWarning: ansiFormat("Warning", FgYellow),
}
} else {
JobStatusIconMap = map[wfv1.NodePhase]string{
wfv1.NodePending: ansiFormat("◷", FgYellow),
wfv1.NodeRunning: ansiFormat("●", FgCyan),
wfv1.NodeSucceeded: ansiFormat("✔", FgGreen),
wfv1.NodeSkipped: ansiFormat("○", FgDefault),
wfv1.NodeFailed: ansiFormat("✖", FgRed),
wfv1.NodeError: ansiFormat("⚠", FgRed),
}
NodeTypeIconMap = map[wfv1.NodeType]string{
wfv1.NodeTypeSuspend: ansiFormat("ǁ", FgCyan),
}
WorkflowConditionIconMap = map[wfv1.ConditionType]string{
wfv1.ConditionTypeMetricsError: ansiFormat("✖", FgRed),
wfv1.ConditionTypeSpecWarning: ansiFormat("⚠", FgYellow),
}
}
}
func ansiColorCode(s string) int {
i := 0
for _, c := range s {
i += int(c)
}
colors := []int{FgGreen, FgYellow, FgBlue, FgMagenta, FgCyan, FgWhite}
return colors[i%len(colors)]
}
// ansiFormat wraps ANSI escape codes around a string to format it in a desired color.
// NOTE: we still apply formatting even if there is no color formatting desired.
// We do this because ANSI color escape sequences in our output confuse the tabwriter
// library, which miscalculates column widths and misaligns columns. By always applying
// an ANSI escape sequence (even when we don't want color), we get more consistent
// string lengths so that tabwriter can calculate widths correctly.
func ansiFormat(s string, codes ...int) string {
if NoColor || os.Getenv("TERM") == "dumb" || len(codes) == 0 {
return s
}
codeStrs := make([]string, len(codes))
for i, code := range codes {
codeStrs[i] = strconv.Itoa(code)
}
sequence := strings.Join(codeStrs, ";")
return fmt.Sprintf("%s[%sm%s%s[%dm", escape, sequence, s, escape, noFormat)
}
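
For reference, the escape sequences ansiFormat produces look like the following; a standalone sketch using nothing beyond the standard library:

package main

import "fmt"

func main() {
	const escape = "\x1b"
	// "\x1b[32m" switches the foreground to green (code 32);
	// "\x1b[0m" resets all formatting (code 0).
	fmt.Printf("%s[%dm%s%s[%dm\n", escape, 32, "Succeeded", escape, 0)
}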

View File

@ -0,0 +1,14 @@
package common
import (
"testing"
"github.com/stretchr/testify/assert"
)
func Test_ansiColorCode(t *testing.T) {
// check we get a nice range of colours
assert.Equal(t, FgGreen, ansiColorCode("foo"))
assert.Equal(t, FgMagenta, ansiColorCode("bar"))
assert.Equal(t, FgWhite, ansiColorCode("baz"))
}

View File

@ -0,0 +1,48 @@
package common
import (
"errors"
"fmt"
"slices"
"strings"
"github.com/spf13/pflag"
)
// EnumFlagValue represents a CLI flag that can take one of a fixed set of values, and validates
// that the provided value is one of the allowed values.
// There are several libraries for this (e.g. https://github.com/thediveo/enumflag), but they're overkill.
type EnumFlagValue struct {
AllowedValues []string
Value string
}
func (e *EnumFlagValue) Usage() string {
return fmt.Sprintf("One of: %s", strings.Join(e.AllowedValues, "|"))
}
func (e *EnumFlagValue) String() string {
return e.Value
}
func (e *EnumFlagValue) Set(v string) error {
if slices.Contains(e.AllowedValues, v) {
e.Value = v
return nil
} else {
return errors.New(e.Usage())
}
}
func (e *EnumFlagValue) Type() string {
return "string"
}
var _ pflag.Value = &EnumFlagValue{}
func NewPrintWorkflowOutputValue(value string) EnumFlagValue {
return EnumFlagValue{
AllowedValues: []string{"name", "json", "yaml", "wide"},
Value: value,
}
}
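
A usage sketch of wiring this enum flag into a cobra command (the command itself is illustrative; the cron create command later in this changeset registers its --output flag the same way):

output := NewPrintWorkflowOutputValue("")
cmd := &cobra.Command{Use: "example"}
cmd.Flags().VarP(&output, "output", "o", "Output format. "+output.Usage())
// --output now rejects anything outside name|json|yaml|wide at parse time,
// because pflag calls Set on every provided value.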

View File

@ -0,0 +1,35 @@
package common
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestEnumFlagValue(t *testing.T) {
e := EnumFlagValue{
AllowedValues: []string{"name", "json", "yaml", "wide"},
Value: "json",
}
t.Run("Usage", func(t *testing.T) {
assert.Equal(t, "One of: name|json|yaml|wide", e.Usage())
})
t.Run("String", func(t *testing.T) {
assert.Equal(t, "json", e.String())
})
t.Run("Type", func(t *testing.T) {
assert.Equal(t, "string", e.Type())
})
t.Run("Set", func(t *testing.T) {
err := e.Set("name")
require.NoError(t, err)
assert.Equal(t, "name", e.Value)
err = e.Set("invalid")
require.EqualError(t, err, "One of: name|json|yaml|wide")
})
}

View File

@ -0,0 +1,567 @@
package common
import (
"bytes"
"fmt"
"log"
"strings"
"text/tabwriter"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
argoutil "github.com/argoproj/argo-workflows/v3/util"
"github.com/argoproj/argo-workflows/v3/util/humanize"
"github.com/argoproj/argo-workflows/v3/util/printer"
"github.com/argoproj/argo-workflows/v3/workflow/util"
)
const onExitSuffix = "onExit"
type GetFlags struct {
Output EnumFlagValue
NodeFieldSelectorString string
// Only used for backwards compatibility
Status string
}
func statusToNodeFieldSelector(status string) string {
return fmt.Sprintf("phase=%s", status)
}
func (g GetFlags) shouldPrint(node wfv1.NodeStatus) bool {
if g.Status != "" {
// Adapt --status to a node field selector for compatibility
if g.NodeFieldSelectorString != "" {
log.Fatalf("cannot use both --status and --node-field-selector")
}
g.NodeFieldSelectorString = statusToNodeFieldSelector(g.Status)
}
if g.NodeFieldSelectorString != "" {
selector, err := fields.ParseSelector(g.NodeFieldSelectorString)
if err != nil {
log.Fatalf("selector is invalid: %s", err)
}
return util.SelectorMatchesNode(selector, node)
}
return true
}
func PrintWorkflowHelper(wf *wfv1.Workflow, getArgs GetFlags) string {
const fmtStr = "%-20s %v\n"
out := ""
out += fmt.Sprintf(fmtStr, "Name:", wf.Name)
out += fmt.Sprintf(fmtStr, "Namespace:", wf.Namespace)
serviceAccount := wf.GetExecSpec().ServiceAccountName
if serviceAccount == "" {
// if serviceAccountName was not specified in a submitted Workflow, we will
// use the serviceAccountName provided in Workflow Defaults (if any). If that
// also isn't set, we will use the 'default' ServiceAccount in the namespace
// the workflow will run in.
if wf.Spec.WorkflowTemplateRef != nil {
serviceAccount = "unset"
} else {
serviceAccount = "unset (will run with the default ServiceAccount)"
}
}
out += fmt.Sprintf(fmtStr, "ServiceAccount:", serviceAccount)
out += fmt.Sprintf(fmtStr, "Status:", printer.WorkflowStatus(wf))
if wf.Status.Message != "" {
out += fmt.Sprintf(fmtStr, "Message:", wf.Status.Message)
}
if len(wf.Status.Conditions) > 0 {
out += wf.Status.Conditions.DisplayString(fmtStr, WorkflowConditionIconMap)
}
out += fmt.Sprintf(fmtStr, "Created:", humanize.Timestamp(wf.CreationTimestamp.Time))
if !wf.Status.StartedAt.IsZero() {
out += fmt.Sprintf(fmtStr, "Started:", humanize.Timestamp(wf.Status.StartedAt.Time))
}
if !wf.Status.FinishedAt.IsZero() {
out += fmt.Sprintf(fmtStr, "Finished:", humanize.Timestamp(wf.Status.FinishedAt.Time))
}
if !wf.Status.StartedAt.IsZero() {
out += fmt.Sprintf(fmtStr, "Duration:", humanize.RelativeDuration(wf.Status.StartedAt.Time, wf.Status.FinishedAt.Time))
}
if wf.Status.Phase == wfv1.WorkflowRunning {
if wf.Status.EstimatedDuration > 0 {
out += fmt.Sprintf(fmtStr, "EstimatedDuration:", humanize.Duration(wf.Status.EstimatedDuration.ToDuration()))
}
}
out += fmt.Sprintf(fmtStr, "Progress:", wf.Status.Progress)
if !wf.Status.ResourcesDuration.IsZero() {
out += fmt.Sprintf(fmtStr, "ResourcesDuration:", wf.Status.ResourcesDuration)
}
if len(wf.GetExecSpec().Arguments.Parameters) > 0 {
out += fmt.Sprintf(fmtStr, "Parameters:", "")
for _, param := range wf.GetExecSpec().Arguments.Parameters {
if param.Value == nil {
continue
}
out += fmt.Sprintf(fmtStr, " "+param.Name+":", *param.Value)
}
}
if wf.Status.Outputs != nil {
if len(wf.Status.Outputs.Parameters) > 0 {
out += fmt.Sprintf(fmtStr, "Output Parameters:", "")
for _, param := range wf.Status.Outputs.Parameters {
if param.HasValue() {
out += fmt.Sprintf(fmtStr, " "+param.Name+":", param.GetValue())
}
}
}
if len(wf.Status.Outputs.Artifacts) > 0 {
out += fmt.Sprintf(fmtStr, "Output Artifacts:", "")
for _, art := range wf.Status.Outputs.Artifacts {
if art.S3 != nil {
out += fmt.Sprintf(fmtStr, " "+art.Name+":", art.S3.String())
} else if art.Git != nil {
out += fmt.Sprintf(fmtStr, " "+art.Name+":", art.Git.String())
} else if art.HTTP != nil {
out += fmt.Sprintf(fmtStr, " "+art.Name+":", art.HTTP.String())
} else if art.Artifactory != nil {
out += fmt.Sprintf(fmtStr, " "+art.Name+":", art.Artifactory.String())
} else if art.HDFS != nil {
out += fmt.Sprintf(fmtStr, " "+art.Name+":", art.HDFS.String())
} else if art.Raw != nil {
out += fmt.Sprintf(fmtStr, " "+art.Name+":", art.Raw.String())
} else if art.OSS != nil {
out += fmt.Sprintf(fmtStr, " "+art.Name+":", art.OSS.String())
} else if art.GCS != nil {
out += fmt.Sprintf(fmtStr, " "+art.Name+":", art.GCS.String())
} else if art.Azure != nil {
out += fmt.Sprintf(fmtStr, " "+art.Name+":", art.Azure.String())
}
}
}
}
printTree := true
if wf.Status.Nodes == nil {
printTree = false
} else if _, ok := wf.Status.Nodes[wf.Name]; !ok {
printTree = false
}
if printTree {
writerBuffer := new(bytes.Buffer)
w := tabwriter.NewWriter(writerBuffer, 0, 0, 2, ' ', 0)
out += "\n"
// apply a dummy FgDefault format to align tab writer with the rest of the columns
if getArgs.Output.String() == "wide" {
_, _ = fmt.Fprintf(w, "%s\tTEMPLATE\tPODNAME\tDURATION\tARTIFACTS\tMESSAGE\tRESOURCESDURATION\tNODENAME\n", ansiFormat("STEP", FgDefault))
} else if getArgs.Output.String() == "short" {
_, _ = fmt.Fprintf(w, "%s\tTEMPLATE\tPODNAME\tDURATION\tMESSAGE\tNODENAME\n", ansiFormat("STEP", FgDefault))
} else {
_, _ = fmt.Fprintf(w, "%s\tTEMPLATE\tPODNAME\tDURATION\tMESSAGE\n", ansiFormat("STEP", FgDefault))
}
// Convert Nodes to Render Trees
roots := convertToRenderTrees(wf)
// Print main and onExit Trees
mainRoot := roots[wf.Name]
if mainRoot == nil {
panic("failed to get the entrypoint node")
}
mainRoot.renderNodes(w, wf, 0, " ", " ", getArgs)
onExitID := wf.NodeID(wf.Name + "." + onExitSuffix)
if onExitRoot, ok := roots[onExitID]; ok {
_, _ = fmt.Fprintf(w, "\t\t\t\t\t\n")
onExitRoot.renderNodes(w, wf, 0, " ", " ", getArgs)
}
_ = w.Flush()
if getArgs.Output.String() == "short" {
out = writerBuffer.String()
} else {
out += writerBuffer.String()
}
}
writerBuffer := new(bytes.Buffer)
out += writerBuffer.String()
return out
}
type nodeInfoInterface interface {
getID() string
getNodeStatus(wf *wfv1.Workflow) wfv1.NodeStatus
getStartTime(wf *wfv1.Workflow) metav1.Time
}
type nodeInfo struct {
id string
}
func (n *nodeInfo) getID() string {
return n.id
}
func (n *nodeInfo) getNodeStatus(wf *wfv1.Workflow) wfv1.NodeStatus {
return wf.Status.Nodes[n.id]
}
func (n *nodeInfo) getStartTime(wf *wfv1.Workflow) metav1.Time {
return wf.Status.Nodes[n.id].StartedAt
}
// Interface to represent Nodes in render form types
type renderNode interface {
// Render this renderNode and its children
renderNodes(w *tabwriter.Writer, wf *wfv1.Workflow, depth int, nodePrefix string, childPrefix string, getArgs GetFlags)
nodeInfoInterface
}
// Currently this is Pod or Resource Nodes
type executionNode struct {
nodeInfo
}
// Currently this is the step groups or retry nodes
type nonBoundaryParentNode struct {
nodeInfo
children []renderNode // Can be boundaryNode or executionNode
}
// Currently this is the virtual Template node
type boundaryNode struct {
nodeInfo
boundaryContained []renderNode // Can be nonBoundaryParent or executionNode or boundaryNode
}
func isBoundaryNode(node wfv1.NodeType) bool {
return (node == wfv1.NodeTypeDAG) || (node == wfv1.NodeTypeSteps)
}
func isNonBoundaryParentNode(node wfv1.NodeType) bool {
return (node == wfv1.NodeTypeStepGroup) || (node == wfv1.NodeTypeRetry)
}
func isExecutionNode(node wfv1.NodeType) bool {
return (node == wfv1.NodeTypePod) || (node == wfv1.NodeTypeSkipped) || (node == wfv1.NodeTypeSuspend) || (node == wfv1.NodeTypeHTTP) || (node == wfv1.NodeTypePlugin)
}
func insertSorted(wf *wfv1.Workflow, sortedArray []renderNode, item renderNode) []renderNode {
insertTime := item.getStartTime(wf)
var index int
for index = 0; index < len(sortedArray); index++ {
existingItem := sortedArray[index]
t := existingItem.getStartTime(wf)
if insertTime.Before(&t) {
break
} else if insertTime.Equal(&t) {
// If they are equal apply alphabetical order so we
// get some consistent printing
insertName := item.getNodeStatus(wf).DisplayName
equalName := existingItem.getNodeStatus(wf).DisplayName
// If they are both elements of a list (e.g. withParams, withSequence, etc.) order by index number instead of
// alphabetical order
insertIndex := argoutil.RecoverIndexFromNodeName(insertName)
equalIndex := argoutil.RecoverIndexFromNodeName(equalName)
if insertIndex >= 0 && equalIndex >= 0 {
if insertIndex < equalIndex {
break
}
} else {
if insertName < equalName {
break
}
}
}
}
sortedArray = append(sortedArray, nil)
copy(sortedArray[index+1:], sortedArray[index:])
sortedArray[index] = item
return sortedArray
}
// attachToParent attaches render node n to its parent based on what has been parsed previously.
// In some cases it adds n to the list of nodes that still need to be attached to a parent.
// Returns whether n is a possible root.
func attachToParent(wf *wfv1.Workflow, n renderNode,
nonBoundaryParentChildrenMap map[string]*nonBoundaryParentNode, boundaryID string,
boundaryNodeMap map[string]*boundaryNode, parentBoundaryMap map[string][]renderNode) bool {
// Check first if I am a child of a nonBoundaryParent
// that implies I attach to that instead of my boundary. This was already
// figured out in Pass 1
if nonBoundaryParent, ok := nonBoundaryParentChildrenMap[n.getID()]; ok {
nonBoundaryParent.children = insertSorted(wf, nonBoundaryParent.children, n)
return false
}
// If I am not attached to a nonBoundaryParent and I have no Boundary ID then
// I am a possible root
if boundaryID == "" {
return true
}
if parentBoundary, ok := boundaryNodeMap[boundaryID]; ok {
parentBoundary.boundaryContained = insertSorted(wf, parentBoundary.boundaryContained, n)
} else {
// put ourselves to be added by the parent when we get to it later
if _, ok := parentBoundaryMap[boundaryID]; !ok {
parentBoundaryMap[boundaryID] = make([]renderNode, 0)
}
parentBoundaryMap[boundaryID] = append(parentBoundaryMap[boundaryID], n)
}
return false
}
// convertToRenderTrees takes the map of NodeStatus and converts it into a forest
// of renderNode trees, returning the set of roots, one per tree
func convertToRenderTrees(wf *wfv1.Workflow) map[string]renderNode {
renderTreeRoots := make(map[string]renderNode)
// Used to store all boundary nodes so future render children can attach
// Maps node Name -> *boundaryNode
boundaryNodeMap := make(map[string]*boundaryNode)
// Used to store children of a boundary node that has not been parsed yet
// Maps boundary Node name -> array of render Children
parentBoundaryMap := make(map[string][]renderNode)
// Used to store Non Boundary Parent nodes so render children can attach
// Maps non Boundary Parent Node name -> *nonBoundaryParentNode
nonBoundaryParentMap := make(map[string]*nonBoundaryParentNode)
// Used to store children which have a Non Boundary Parent from rendering perspective
// Maps non Boundary render Children name -> *nonBoundaryParentNode
nonBoundaryParentChildrenMap := make(map[string]*nonBoundaryParentNode)
// We have to do a 2-pass approach because, for anything that is a child of a
// nonBoundaryParent and also has a boundaryID, we may not know which parent to
// attach it to unless we have already seen the nonBoundaryParent; a 1-pass
// strategy cannot guarantee that.
// 1st Pass Process enough of nonBoundaryParent nodes to know all their children
for id, status := range wf.Status.Nodes {
if status.Type == "" {
log.Fatal("Missing node type in status node. Cannot get workflows created with Argo <= 2.0 using the default or wide output option.")
return nil
}
if isNonBoundaryParentNode(status.Type) {
n := nonBoundaryParentNode{nodeInfo: nodeInfo{id: id}}
nonBoundaryParentMap[id] = &n
for _, child := range status.Children {
nonBoundaryParentChildrenMap[child] = &n
}
}
}
// 2nd Pass process everything
for id, status := range wf.Status.Nodes {
switch {
case isBoundaryNode(status.Type):
n := boundaryNode{nodeInfo: nodeInfo{id: id}}
boundaryNodeMap[id] = &n
// Attach to my parent if needed
if attachToParent(wf, &n, nonBoundaryParentChildrenMap,
status.BoundaryID, boundaryNodeMap, parentBoundaryMap) {
renderTreeRoots[n.getID()] = &n
}
// Attach nodes who are in my boundary already seen before me to me
for _, val := range parentBoundaryMap[id] {
n.boundaryContained = insertSorted(wf, n.boundaryContained, val)
}
case isNonBoundaryParentNode(status.Type):
nPtr, ok := nonBoundaryParentMap[id]
if !ok {
log.Fatal("Unable to lookup node " + id)
return nil
}
// Attach to my parent if needed
if attachToParent(wf, nPtr, nonBoundaryParentChildrenMap,
status.BoundaryID, boundaryNodeMap, parentBoundaryMap) {
renderTreeRoots[nPtr.getID()] = nPtr
}
// All children attach directly to the nonBoundaryParents since they are already created
// in pass 1 so no need to do that here
case isExecutionNode(status.Type):
n := executionNode{nodeInfo: nodeInfo{id: id}}
// Attach to my parent if needed
if attachToParent(wf, &n, nonBoundaryParentChildrenMap,
status.BoundaryID, boundaryNodeMap, parentBoundaryMap) {
renderTreeRoots[n.getID()] = &n
}
// Execution nodes don't have other render nodes as children
}
}
return renderTreeRoots
}
// filterNode decides whether a node will be filtered from rendering and returns
// two values: the first tells whether the node is filtered, the second whether
// its children need special indentation due to the filtering.
// Return values: (is node filtered, do children need special indent)
func filterNode(node wfv1.NodeStatus, getArgs GetFlags) (bool, bool) {
if node.Type == wfv1.NodeTypeRetry && len(node.Children) == 1 {
return true, false
} else if node.Type == wfv1.NodeTypeStepGroup {
return true, true
} else if node.Type == wfv1.NodeTypeSkipped && node.Phase == wfv1.NodeOmitted {
return true, false
} else if !getArgs.shouldPrint(node) {
return true, false
}
return false, false
}
// renderChild renders the child of a given node based on information about the
// parent, such as whether it was filtered and whether this child needs special indentation.
func renderChild(w *tabwriter.Writer, wf *wfv1.Workflow, nInfo renderNode, depth int,
nodePrefix string, childPrefix string, parentFiltered bool,
childIndex int, maxIndex int, childIndent bool, getArgs GetFlags) {
var part, subp string
if NoUtf8 {
if parentFiltered && childIndent {
if maxIndex == 0 {
part = "--"
subp = " "
} else if childIndex == 0 {
part = "+-"
subp = "| "
} else if childIndex == maxIndex {
part = "`-"
subp = " "
} else {
part = "|-"
subp = "| "
}
} else if !parentFiltered {
if childIndex == maxIndex {
part = "`-"
subp = " "
} else {
part = "|-"
subp = "| "
}
}
} else {
if parentFiltered && childIndent {
if maxIndex == 0 {
part = "──"
subp = " "
} else if childIndex == 0 {
part = "┬─"
subp = "│ "
} else if childIndex == maxIndex {
part = "└─"
subp = " "
} else {
part = "├─"
subp = "│ "
}
} else if !parentFiltered {
if childIndex == maxIndex {
part = "└─"
subp = " "
} else {
part = "├─"
subp = "│ "
}
}
}
var childNodePrefix, childChldPrefix string
if !parentFiltered {
depth = depth + 1
childNodePrefix = childPrefix + part
childChldPrefix = childPrefix + subp
} else {
if childIndex == 0 {
childNodePrefix = nodePrefix + part
} else {
childNodePrefix = childPrefix + part
}
childChldPrefix = childPrefix + subp
}
nInfo.renderNodes(w, wf, depth, childNodePrefix, childChldPrefix, getArgs)
}
// printNode is the main method for printing a node's information in 'argo get' output
func printNode(w *tabwriter.Writer, node wfv1.NodeStatus, wfName, nodePrefix string, getArgs GetFlags, podNameVersion util.PodNameVersion) {
nodeName := node.Name
fmtNodeName := fmt.Sprintf("%s %s", JobStatusIconMap[node.Phase], node.DisplayName)
if node.IsActiveSuspendNode() {
fmtNodeName = fmt.Sprintf("%s %s", NodeTypeIconMap[node.Type], node.DisplayName)
}
templateName := util.GetTemplateFromNode(node)
fmtTemplateName := ""
if node.TemplateRef != nil {
fmtTemplateName = fmt.Sprintf("%s/%s", node.TemplateRef.Name, node.TemplateRef.Template)
} else if node.TemplateName != "" {
fmtTemplateName = node.TemplateName
}
var args []interface{}
duration := humanize.RelativeDurationShort(node.StartedAt.Time, node.FinishedAt.Time)
if node.Type == wfv1.NodeTypePod {
podName := util.GeneratePodName(wfName, nodeName, templateName, node.ID, podNameVersion)
args = []interface{}{nodePrefix, fmtNodeName, fmtTemplateName, podName, duration, node.Message, ""}
} else {
args = []interface{}{nodePrefix, fmtNodeName, fmtTemplateName, "", "", node.Message, ""}
}
if getArgs.Output.String() == "wide" {
msg := args[len(args)-2]
args[len(args)-2] = getArtifactsString(node)
args[len(args)-1] = msg
args = append(args, node.ResourcesDuration, "")
if node.Type == wfv1.NodeTypePod {
args[len(args)-1] = node.HostNodeName
}
_, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", args...)
} else if getArgs.Output.String() == "short" {
if node.Type == wfv1.NodeTypePod {
args[len(args)-1] = node.HostNodeName
}
_, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\t%s\t%s\n", args...)
} else {
_, _ = fmt.Fprintf(w, "%s%s\t%s\t%s\t%s\t%s\t%s\n", args...)
}
}
// renderNodes for each renderNode Type
// boundaryNode
func (nodeInfo *boundaryNode) renderNodes(w *tabwriter.Writer, wf *wfv1.Workflow, depth int, nodePrefix string, childPrefix string, getArgs GetFlags) {
filtered, childIndent := filterNode(nodeInfo.getNodeStatus(wf), getArgs)
if !filtered {
version := util.GetWorkflowPodNameVersion(wf)
printNode(w, nodeInfo.getNodeStatus(wf), wf.Name, nodePrefix, getArgs, version)
}
for i, nInfo := range nodeInfo.boundaryContained {
renderChild(w, wf, nInfo, depth, nodePrefix, childPrefix, filtered, i,
len(nodeInfo.boundaryContained)-1, childIndent, getArgs)
}
}
// nonBoundaryParentNode
func (nodeInfo *nonBoundaryParentNode) renderNodes(w *tabwriter.Writer, wf *wfv1.Workflow, depth int, nodePrefix string, childPrefix string, getArgs GetFlags) {
filtered, childIndent := filterNode(nodeInfo.getNodeStatus(wf), getArgs)
if !filtered {
version := util.GetWorkflowPodNameVersion(wf)
printNode(w, nodeInfo.getNodeStatus(wf), wf.Name, nodePrefix, getArgs, version)
}
for i, nInfo := range nodeInfo.children {
renderChild(w, wf, nInfo, depth, nodePrefix, childPrefix, filtered, i,
len(nodeInfo.children)-1, childIndent, getArgs)
}
}
// executionNode
func (nodeInfo *executionNode) renderNodes(w *tabwriter.Writer, wf *wfv1.Workflow, _ int, nodePrefix string, _ string, getArgs GetFlags) {
filtered, _ := filterNode(nodeInfo.getNodeStatus(wf), getArgs)
if !filtered {
version := util.GetWorkflowPodNameVersion(wf)
printNode(w, nodeInfo.getNodeStatus(wf), wf.Name, nodePrefix, getArgs, version)
}
}
func getArtifactsString(node wfv1.NodeStatus) string {
if node.Outputs == nil {
return ""
}
var artNames []string
for _, art := range node.Outputs.Artifacts {
artNames = append(artNames, art.Name)
}
return strings.Join(artNames, ",")
}
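
shouldPrint above delegates to Kubernetes field selectors; a minimal sketch of parsing and matching one directly with k8s.io/apimachinery/pkg/fields (the field values are illustrative):

selector, err := fields.ParseSelector("phase=Running,templateName!=init")
if err != nil {
	log.Fatalf("selector is invalid: %s", err)
}
// SelectorMatchesNode builds a fields.Set from the node; conceptually it reduces to:
fmt.Println(selector.Matches(fields.Set{"phase": "Running", "templateName": "main"})) // true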

View File

@ -0,0 +1,430 @@
package common
import (
"bytes"
"fmt"
"testing"
"text/tabwriter"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/workflow/util"
)
var workflowName = "testWF"
func init() {
// these values get used as part of determining node names and would normally
// be set when running the application
JobStatusIconMap = map[wfv1.NodePhase]string{
wfv1.NodePending: ansiFormat("Pending", FgYellow),
wfv1.NodeRunning: ansiFormat("Running", FgCyan),
wfv1.NodeSucceeded: ansiFormat("Succeeded", FgGreen),
wfv1.NodeSkipped: ansiFormat("Skipped", FgDefault),
wfv1.NodeFailed: ansiFormat("Failed", FgRed),
wfv1.NodeError: ansiFormat("Error", FgRed),
}
NodeTypeIconMap = map[wfv1.NodeType]string{
wfv1.NodeTypeSuspend: ansiFormat("Suspend", FgCyan),
}
}
func testPrintNodeImpl(t *testing.T, expected string, node wfv1.NodeStatus, getArgs GetFlags) {
var result bytes.Buffer
w := tabwriter.NewWriter(&result, 0, 8, 1, '\t', 0)
filtered, _ := filterNode(node, getArgs)
if !filtered {
printNode(w, node, workflowName, "", getArgs, util.GetPodNameVersion())
}
err := w.Flush()
require.NoError(t, err)
assert.Equal(t, expected, result.String())
}
// TestPrintNode
func TestPrintNode(t *testing.T) {
nodeName := "testNode"
kubernetesNodeName := "testKnodeName"
nodeTemplateName := "testTemplate"
nodeTemplateRefName := "testTemplateRef"
nodeID := "testID"
nodeMessage := "test"
getArgs := GetFlags{}
timestamp := metav1.Time{
Time: time.Now(),
}
// Node without TemplateRef
node := wfv1.NodeStatus{
Name: nodeName,
Phase: wfv1.NodeRunning,
DisplayName: nodeName,
Type: wfv1.NodeTypePod,
ID: nodeID,
StartedAt: timestamp,
FinishedAt: timestamp,
Message: nodeMessage,
TemplateName: nodeTemplateName,
}
node.HostNodeName = kubernetesNodeName
// derive expected pod name:
templateName := util.GetTemplateFromNode(node)
expectedPodName := util.GeneratePodName(workflowName, nodeName, templateName, nodeID, util.GetPodNameVersion())
t.Log(expectedPodName)
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateName, expectedPodName, "0s", nodeMessage, ""), node, getArgs)
// Compatibility test
getArgs.Status = "Running"
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateName, expectedPodName, "0s", nodeMessage, ""), node, getArgs)
getArgs.Status = ""
getArgs.NodeFieldSelectorString = "phase=Running"
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateName, expectedPodName, "0s", nodeMessage, ""), node, getArgs)
getArgs.NodeFieldSelectorString = "phase!=foobar"
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateName, expectedPodName, "0s", nodeMessage, ""), node, getArgs)
getArgs.NodeFieldSelectorString = "phase!=Running"
testPrintNodeImpl(t, "", node, getArgs)
// Compatibility test
getArgs.NodeFieldSelectorString = ""
getArgs.Status = "foobar"
testPrintNodeImpl(t, "", node, getArgs)
getArgs.Status = ""
getArgs.NodeFieldSelectorString = "phase=foobar"
testPrintNodeImpl(t, "", node, getArgs)
getArgs = GetFlags{
Output: EnumFlagValue{AllowedValues: []string{"short", "wide"}},
}
node.TemplateName = nodeTemplateName
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateName, expectedPodName, "0s", nodeMessage, ""), node, getArgs)
node.Type = wfv1.NodeTypeSuspend
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s\t%s\t%s\t%s\t%s\n", NodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateName, "", "", nodeMessage, ""), node, getArgs)
// Node with templateRef
node.TemplateName = ""
node.TemplateRef = &wfv1.TemplateRef{
Name: nodeTemplateRefName,
Template: nodeTemplateRefName,
}
templateName = util.GetTemplateFromNode(node)
expectedPodName = util.GeneratePodName(workflowName, nodeName, templateName, nodeID, util.GetPodNameVersion())
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\n", NodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateRefName, nodeTemplateRefName, "", "", nodeMessage, ""), node, getArgs)
require.NoError(t, getArgs.Output.Set("wide"))
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\t%s\t\n", NodeTypeIconMap[wfv1.NodeTypeSuspend], nodeName, nodeTemplateRefName, nodeTemplateRefName, "", "", getArtifactsString(node), nodeMessage, ""), node, getArgs)
node.Type = wfv1.NodeTypePod
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateRefName, nodeTemplateRefName, expectedPodName, "0s", getArtifactsString(node), nodeMessage, "", kubernetesNodeName), node, getArgs)
require.NoError(t, getArgs.Output.Set("short"))
testPrintNodeImpl(t, fmt.Sprintf("%s %s\t%s/%s\t%s\t%s\t%s\t%s\n", JobStatusIconMap[wfv1.NodeRunning], nodeName, nodeTemplateRefName, nodeTemplateRefName, expectedPodName, "0s", nodeMessage, kubernetesNodeName), node, getArgs)
getArgs.Status = "foobar"
testPrintNodeImpl(t, "", node, getArgs)
}
func TestStatusToNodeFieldSelector(t *testing.T) {
one := statusToNodeFieldSelector("Running")
assert.Equal(t, "phase=Running", one)
}
func Test_printWorkflowHelper(t *testing.T) {
t.Run("Progress", func(t *testing.T) {
var wf wfv1.Workflow
wfv1.MustUnmarshal(`
status:
phase: Running
progress: 1/2
`, &wf)
output := PrintWorkflowHelper(&wf, GetFlags{})
assert.Regexp(t, `Progress: *1/2`, output)
})
t.Run("EstimatedDuration", func(t *testing.T) {
var wf wfv1.Workflow
wfv1.MustUnmarshal(`
status:
estimatedDuration: 1
phase: Running
`, &wf)
output := PrintWorkflowHelper(&wf, GetFlags{})
assert.Regexp(t, `EstimatedDuration: *1 second`, output)
})
t.Run("IndexOrdering", func(t *testing.T) {
var wf wfv1.Workflow
wfv1.MustUnmarshal(`apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
creationTimestamp: "2020-06-02T16:04:21Z"
generateName: many-items-
generation: 32
labels:
workflows.argoproj.io/completed: "true"
workflows.argoproj.io/phase: Succeeded
name: many-items-z26lj
namespace: argo
resourceVersion: "5102"
selfLink: /apis/argoproj.io/v1alpha1/namespaces/argo/workflows/many-items-z26lj
uid: d21f092a-f659-4300-bd69-983a9912a379
spec:
entrypoint: parallel-sleep
templates:
- name: parallel-sleep
steps:
- - name: sleep
template: sleep
withItems:
- zero
- one
- two
- three
- four
- five
- six
- seven
- eight
- nine
- ten
- eleven
- twelve
- container:
command:
- sh
- -c
- sleep 10
image: alpine:latest
name: sleep
status:
conditions:
- status: "True"
type: Completed
finishedAt: "2020-06-02T16:05:01Z"
nodes:
many-items-z26lj:
children:
- many-items-z26lj-1414877240
displayName: many-items-z26lj
finishedAt: "2020-06-02T16:05:01Z"
id: many-items-z26lj
name: many-items-z26lj
outboundNodes:
- many-items-z26lj-1939921510
- many-items-z26lj-2156977535
- many-items-z26lj-3409403178
- many-items-z26lj-1774150289
- many-items-z26lj-3491220632
- many-items-z26lj-1942531647
- many-items-z26lj-3178865096
- many-items-z26lj-3031375822
- many-items-z26lj-753834747
- many-items-z26lj-2619926859
- many-items-z26lj-1052882686
- many-items-z26lj-3011405271
- many-items-z26lj-3126938806
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: parallel-sleep
type: Steps
many-items-z26lj-753834747:
boundaryID: many-items-z26lj
displayName: sleep(8:eight)
finishedAt: "2020-06-02T16:04:42Z"
id: many-items-z26lj-753834747
name: many-items-z26lj[0].sleep(8:eight)
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
many-items-z26lj-1052882686:
boundaryID: many-items-z26lj
displayName: sleep(10:ten)
finishedAt: "2020-06-02T16:04:45Z"
id: many-items-z26lj-1052882686
name: many-items-z26lj[0].sleep(10:ten)
phase: Succeeded
startedAt: "2020-06-02T16:04:22Z"
templateName: sleep
type: Pod
many-items-z26lj-1414877240:
boundaryID: many-items-z26lj
children:
- many-items-z26lj-1939921510
- many-items-z26lj-2156977535
- many-items-z26lj-3409403178
- many-items-z26lj-1774150289
- many-items-z26lj-3491220632
- many-items-z26lj-1942531647
- many-items-z26lj-3178865096
- many-items-z26lj-3031375822
- many-items-z26lj-753834747
- many-items-z26lj-2619926859
- many-items-z26lj-1052882686
- many-items-z26lj-3011405271
- many-items-z26lj-3126938806
displayName: '[0]'
finishedAt: "2020-06-02T16:05:01Z"
id: many-items-z26lj-1414877240
name: many-items-z26lj[0]
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: parallel-sleep
type: StepGroup
many-items-z26lj-1774150289:
boundaryID: many-items-z26lj
displayName: sleep(3:three)
finishedAt: "2020-06-02T16:04:54Z"
id: many-items-z26lj-1774150289
name: many-items-z26lj[0].sleep(3:three)
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
many-items-z26lj-1939921510:
boundaryID: many-items-z26lj
displayName: sleep(0:zero)
finishedAt: "2020-06-02T16:04:48Z"
id: many-items-z26lj-1939921510
name: many-items-z26lj[0].sleep(0:zero)
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
many-items-z26lj-1942531647:
boundaryID: many-items-z26lj
displayName: sleep(5:five)
finishedAt: "2020-06-02T16:04:47Z"
id: many-items-z26lj-1942531647
name: many-items-z26lj[0].sleep(5:five)
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
many-items-z26lj-2156977535:
boundaryID: many-items-z26lj
displayName: sleep(1:one)
finishedAt: "2020-06-02T16:04:53Z"
id: many-items-z26lj-2156977535
name: many-items-z26lj[0].sleep(1:one)
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
many-items-z26lj-2619926859:
boundaryID: many-items-z26lj
displayName: sleep(9:nine)
finishedAt: "2020-06-02T16:04:40Z"
id: many-items-z26lj-2619926859
name: many-items-z26lj[0].sleep(9:nine)
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
many-items-z26lj-3011405271:
boundaryID: many-items-z26lj
displayName: sleep(11:eleven)
finishedAt: "2020-06-02T16:04:44Z"
id: many-items-z26lj-3011405271
name: many-items-z26lj[0].sleep(11:eleven)
phase: Succeeded
startedAt: "2020-06-02T16:04:22Z"
templateName: sleep
type: Pod
many-items-z26lj-3031375822:
boundaryID: many-items-z26lj
displayName: sleep(7:seven)
finishedAt: "2020-06-02T16:04:57Z"
id: many-items-z26lj-3031375822
name: many-items-z26lj[0].sleep(7:seven)
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
many-items-z26lj-3126938806:
boundaryID: many-items-z26lj
displayName: sleep(12:twelve)
finishedAt: "2020-06-02T16:04:59Z"
id: many-items-z26lj-3126938806
name: many-items-z26lj[0].sleep(12:twelve)
phase: Succeeded
startedAt: "2020-06-02T16:04:22Z"
templateName: sleep
type: Pod
many-items-z26lj-3178865096:
boundaryID: many-items-z26lj
displayName: sleep(6:six)
finishedAt: "2020-06-02T16:04:56Z"
id: many-items-z26lj-3178865096
name: many-items-z26lj[0].sleep(6:six)
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
many-items-z26lj-3409403178:
boundaryID: many-items-z26lj
displayName: sleep(2:two)
finishedAt: "2020-06-02T16:04:51Z"
id: many-items-z26lj-3409403178
name: many-items-z26lj[0].sleep(2:two)
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
many-items-z26lj-3491220632:
boundaryID: many-items-z26lj
displayName: sleep(4:four)
finishedAt: "2020-06-02T16:04:50Z"
id: many-items-z26lj-3491220632
name: many-items-z26lj[0].sleep(4:four)
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
templateName: sleep
type: Pod
phase: Succeeded
startedAt: "2020-06-02T16:04:21Z"
`, &wf)
output := PrintWorkflowHelper(&wf, GetFlags{})
// derive expected pod name:
expectedPodName := util.GeneratePodName(wf.GetObjectMeta().GetName(), "many-items-z26lj[0].sleep(9:nine)", "sleep", "many-items-z26lj-2619926859", util.GetPodNameVersion())
assert.Contains(t, output, fmt.Sprintf("sleep(9:nine) sleep %s 19s", expectedPodName))
expectedPodName = util.GeneratePodName(wf.GetObjectMeta().GetName(), "many-items-z26lj[0].sleep(10:ten)", "sleep", "many-items-z26lj-1052882686", util.GetPodNameVersion())
assert.Contains(t, output, fmt.Sprintf("sleep(10:ten) sleep %s 23s", expectedPodName))
expectedPodName = util.GeneratePodName(wf.GetObjectMeta().GetName(), "many-items-z26lj[0].sleep(11:eleven)", "sleep", "many-items-z26lj-3011405271", util.GetPodNameVersion())
assert.Contains(t, output, fmt.Sprintf("sleep(11:eleven) sleep %s 22s", expectedPodName))
})
}
func Test_printWorkflowHelperNudges(t *testing.T) {
securedWf := wfv1.Workflow{
ObjectMeta: metav1.ObjectMeta{},
Spec: wfv1.WorkflowSpec{
SecurityContext: &corev1.PodSecurityContext{},
},
}
insecureWf := securedWf
insecureWf.Spec.SecurityContext = nil
securityNudges := "This workflow does not have security context set. " +
"You can run your workflow pods more securely by setting it.\n" +
"Learn more at https://argo-workflows.readthedocs.io/en/latest/workflow-pod-security-context/\n"
t.Run("SecuredWorkflow", func(t *testing.T) {
output := PrintWorkflowHelper(&securedWf, GetFlags{})
assert.NotContains(t, output, securityNudges)
})
}

View File

@ -0,0 +1,38 @@
package common
import (
"context"
"fmt"
"io"
corev1 "k8s.io/api/core/v1"
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
)
func LogWorkflow(ctx context.Context, serviceClient workflowpkg.WorkflowServiceClient, namespace, workflow, podName, grep, selector string, logOptions *corev1.PodLogOptions) error {
// logs
stream, err := serviceClient.WorkflowLogs(ctx, &workflowpkg.WorkflowLogRequest{
Name: workflow,
Namespace: namespace,
PodName: podName,
LogOptions: logOptions,
Selector: selector,
Grep: grep,
})
if err != nil {
return err
}
// loop on log lines
for {
event, err := stream.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
fmt.Println(ansiFormat(fmt.Sprintf("%s: %s", event.PodName, event.Content), ansiColorCode(event.PodName)))
}
}
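
The Recv loop above is the standard way to drain a gRPC server stream. A generic sketch of the same pattern, assuming only the standard library (the stream is abstracted as a function):

// recvAll drains any stream whose Recv returns (T, error) until io.EOF,
// which gRPC uses to signal that the server closed the stream normally.
func recvAll[T any](recv func() (T, error), handle func(T)) error {
	for {
		item, err := recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		handle(item)
	}
}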

View File

@ -0,0 +1,53 @@
package common
import (
"context"
corev1 "k8s.io/api/core/v1"
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
"github.com/argoproj/argo-workflows/v3/workflow/common"
)
// CliSubmitOpts holds submission options specific to CLI submission (e.g. controlling output)
type CliSubmitOpts struct {
Output EnumFlagValue // --output
Wait bool // --wait
Watch bool // --watch
Log bool // --log
Strict bool // --strict
Priority *int32 // --priority
GetArgs GetFlags
ScheduledTime string // --scheduled-time
Parameters []string // --parameter
}
func NewCliSubmitOpts() CliSubmitOpts {
return CliSubmitOpts{
Output: NewPrintWorkflowOutputValue(""),
}
}
func WaitWatchOrLog(ctx context.Context, serviceClient workflowpkg.WorkflowServiceClient, namespace string, workflowNames []string, cliSubmitOpts CliSubmitOpts) error {
if cliSubmitOpts.Log {
for _, workflow := range workflowNames {
if err := LogWorkflow(ctx, serviceClient, namespace, workflow, "", "", "", &corev1.PodLogOptions{
Container: common.MainContainerName,
Follow: true,
Previous: false,
}); err != nil {
return err
}
}
}
if cliSubmitOpts.Wait {
WaitWorkflows(ctx, serviceClient, namespace, workflowNames, false, cliSubmitOpts.Output.String() != "" && cliSubmitOpts.Output.String() != "wide")
} else if cliSubmitOpts.Watch {
for _, workflow := range workflowNames {
if err := WatchWorkflow(ctx, serviceClient, namespace, workflow, cliSubmitOpts.GetArgs); err != nil {
return err
}
}
}
return nil
}

View File

@ -0,0 +1,84 @@
package common
import (
"context"
"fmt"
"io"
"os"
"sync"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util"
)
// WaitWorkflows waits for the given workflowNames.
func WaitWorkflows(ctx context.Context, serviceClient workflowpkg.WorkflowServiceClient, namespace string, workflowNames []string, ignoreNotFound, quiet bool) {
var wg sync.WaitGroup
wfSuccessStatus := true
for _, name := range workflowNames {
wg.Add(1)
go func(name string) {
if ok, _ := waitOnOne(serviceClient, ctx, name, namespace, ignoreNotFound, quiet); !ok {
wfSuccessStatus = false
}
wg.Done()
}(name)
}
wg.Wait()
if !wfSuccessStatus {
os.Exit(1)
}
}
func waitOnOne(serviceClient workflowpkg.WorkflowServiceClient, ctx context.Context, wfName, namespace string, ignoreNotFound, quiet bool) (bool, error) {
req := &workflowpkg.WatchWorkflowsRequest{
Namespace: namespace,
ListOptions: &metav1.ListOptions{
FieldSelector: util.GenerateFieldSelectorFromWorkflowName(wfName),
ResourceVersion: "0",
},
}
stream, err := serviceClient.WatchWorkflows(ctx, req)
if err != nil {
if status.Code(err) == codes.NotFound && ignoreNotFound {
return true, nil
}
return false, err
}
for {
event, err := stream.Recv()
if err == io.EOF {
log.Debug("Re-establishing workflow watch")
stream, err = serviceClient.WatchWorkflows(ctx, req)
if err != nil {
return false, err
}
continue
}
if err != nil {
return false, err
}
if event == nil {
continue
}
wf := event.Object
if wf != nil && !wf.Status.FinishedAt.IsZero() {
if !quiet {
fmt.Printf("%s %s at %v\n", wfName, wf.Status.Phase, wf.Status.FinishedAt)
}
if wf.Status.Phase == wfv1.WorkflowFailed || wf.Status.Phase == wfv1.WorkflowError {
return false, nil
}
return true, nil
}
}
}
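
Note that wfSuccessStatus above is written by several goroutines without synchronization. A sketch of the same fan-out with the flag made race-free via sync/atomic (Go 1.19+; names are illustrative):

var failed atomic.Bool
var wg sync.WaitGroup
for _, name := range workflowNames {
	wg.Add(1)
	go func(name string) {
		defer wg.Done()
		if ok, _ := waitOnOne(serviceClient, ctx, name, namespace, ignoreNotFound, quiet); !ok {
			failed.Store(true)
		}
	}(name)
}
wg.Wait()
if failed.Load() {
	os.Exit(1)
}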

View File

@ -0,0 +1,88 @@
package common
import (
"context"
"fmt"
"io"
"time"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util"
"github.com/argoproj/argo-workflows/v3/util/errors"
"github.com/argoproj/argo-workflows/v3/workflow/packer"
)
func WatchWorkflow(ctx context.Context, serviceClient workflowpkg.WorkflowServiceClient, namespace string, workflow string, getArgs GetFlags) error {
req := &workflowpkg.WatchWorkflowsRequest{
Namespace: namespace,
ListOptions: &metav1.ListOptions{
FieldSelector: util.GenerateFieldSelectorFromWorkflowName(workflow),
ResourceVersion: "0",
},
}
stream, err := serviceClient.WatchWorkflows(ctx, req)
if err != nil {
return err
}
wfChan := make(chan *wfv1.Workflow)
go func() {
for {
event, err := stream.Recv()
if err == io.EOF {
log.Debug("Re-establishing workflow watch")
stream, err = serviceClient.WatchWorkflows(ctx, req)
errors.CheckError(ctx, err)
continue
}
errors.CheckError(ctx, err)
if event == nil {
continue
}
wfChan <- event.Object
}
}()
var wf *wfv1.Workflow
ticker := time.NewTicker(time.Second)
for {
select {
case newWf := <-wfChan:
// If we get a new event, update our workflow
if newWf == nil {
return nil
}
wf = newWf
case <-ticker.C:
// If we don't, refresh the workflow screen every second
case <-ctx.Done():
// When the context gets canceled
return nil
}
err := printWorkflowStatus(ctx, wf, getArgs)
if err != nil {
return err
}
if wf != nil && !wf.Status.FinishedAt.IsZero() {
return nil
}
}
}
func printWorkflowStatus(ctx context.Context, wf *wfv1.Workflow, getArgs GetFlags) error {
if wf == nil {
return nil
}
if err := packer.DecompressWorkflow(ctx, wf); err != nil {
return err
}
print("\033[H\033[2J")
print("\033[0;0H")
fmt.Print(PrintWorkflowHelper(wf, getArgs))
return nil
}
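
printWorkflowStatus clears the terminal with raw ANSI sequences ("\033[H" homes the cursor, "\033[2J" clears the screen) before each redraw. A sketch of the watch loop reduced to its essentials:

ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
	select {
	case <-ticker.C:
		fmt.Print("\033[H\033[2J") // home the cursor, then clear the screen
		fmt.Println(time.Now().Format(time.RFC3339))
	case <-ctx.Done():
		return
	}
}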

View File

@ -0,0 +1,162 @@
package commands
import (
"fmt"
"io"
"os"
"github.com/spf13/cobra"
)
const (
bashCompletionFunc = `
__argo_get_workflow() {
local status="$1"
local -a argo_out
if argo_out=($(argo list --status="$status" --output name 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argo_out[*]}" -- "$cur" ) )
fi
}
__argo_get_workflow_template() {
local -a argo_out
if argo_out=($(argo template list --output name 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argo_out[*]}" -- "$cur" ) )
fi
}
__argo_get_cluster_workflow_template() {
local -a argo_out
if argo_out=($(argo cluster-template list --output name 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argo_out[*]}" -- "$cur" ) )
fi
}
__argo_get_cron_workflow() {
local -a argo_out
if argo_out=($(argo cron list --output name 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${argo_out[*]}" -- "$cur" ) )
fi
}
__argo_get_logs() {
# Determine if we're completing a workflow or not.
if [[ $prev == "logs" ]]; then
__argo_get_workflow && return $?
fi
local workflow=$prev
# Otherwise, complete the list of pods
local -a kubectl_out
if kubectl_out=($(kubectl get pods --no-headers --selector=workflows.argoproj.io/workflow="${workflow}" 2>/dev/null | awk '{print $1}' 2>/dev/null)); then
COMPREPLY+=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) )
fi
}
__argo_list_files() {
COMPREPLY+=( $( compgen -f -o plusdirs -X '!*.@(yaml|yml|json)' -- "$cur" ) )
}
__argo_custom_func() {
case ${last_command} in
argo_delete | argo_get | argo_resubmit)
__argo_get_workflow
return
;;
argo_suspend | argo_terminate | argo_wait | argo_watch)
__argo_get_workflow "Running,Pending"
return
;;
argo_resume)
__argo_get_workflow "Running"
return
;;
argo_retry)
__argo_get_workflow "Failed"
return
;;
argo_logs)
__argo_get_logs
return
;;
argo_submit | argo_lint)
__argo_list_files
return
;;
argo_template_get | argo_template_delete)
__argo_get_workflow_template
return
;;
argo_template_create | argo_template_lint)
__argo_list_files
return
;;
argo_cluster-template_get | argo_cluster-template_delete)
__argo_get_cluster_workflow_template
return
;;
argo_cluster-template_create | argo_cluster-template_lint)
__argo_list_files
return
;;
argo_cron_get | argo_cron_delete | argo_cron_resume | argo_cron_suspend)
__argo_get_cron_workflow
return
;;
argo_cron_create | argo_cron_lint)
__argo_list_files
return
;;
*)
;;
esac
}
`
)
func NewCompletionCommand() *cobra.Command {
command := &cobra.Command{
Use: "completion SHELL",
Short: "output shell completion code for the specified shell (bash, zsh or fish)",
Long: `Write bash, zsh or fish shell completion code to standard output.
For bash, ensure you have bash completions installed and enabled.
To access completions in your current shell, run
$ source <(argo completion bash)
Alternatively, write it to a file and source it in .bash_profile
For zsh, output to a file in a directory referenced by the $fpath shell
variable.
For fish, output to a file in ~/.config/fish/completions
`,
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
shell := args[0]
rootCommand := NewCommand()
rootCommand.BashCompletionFunction = bashCompletionFunc
availableCompletions := map[string]func(out io.Writer, cmd *cobra.Command) error{
"bash": runCompletionBash,
"zsh": runCompletionZsh,
"fish": runCompletionFish,
}
completion, ok := availableCompletions[shell]
if !ok {
return fmt.Errorf("invalid shell '%s': the supported shells are bash, zsh and fish", shell)
}
return completion(os.Stdout, rootCommand)
},
}
return command
}
func runCompletionBash(out io.Writer, cmd *cobra.Command) error {
return cmd.GenBashCompletion(out)
}
func runCompletionZsh(out io.Writer, cmd *cobra.Command) error {
return cmd.GenZshCompletion(out)
}
func runCompletionFish(out io.Writer, cmd *cobra.Command) error {
return cmd.GenFishCompletion(out, true)
}
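
A hypothetical usage sketch for the zsh case, writing the generated script to a file that can be placed on $fpath (file name and error handling are illustrative):

f, err := os.Create("_argo")
if err != nil {
	return err
}
defer f.Close()
return NewCommand().GenZshCompletion(f)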

148
cmd/argo/commands/cp.go Normal file
View File

@ -0,0 +1,148 @@
package commands
import (
"crypto/tls"
"fmt"
"io"
"log"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/pkg/apiclient"
workflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
wfutil "github.com/argoproj/argo-workflows/v3/workflow/util"
)
func NewCpCommand() *cobra.Command {
var (
namespace string // --namespace
nodeID string // --node-id
templateName string // --template-name
artifactName string // --artifact-name
customPath string // --path
)
command := &cobra.Command{
Use: "cp my-wf output-directory ...",
Short: "copy artifacts from workflow",
Example: `# Copy a workflow's artifacts to a local output directory:
argo cp my-wf output-directory
# Copy artifacts from a specific node in a workflow to a local output directory:
argo cp my-wf output-directory --node-id=my-wf-node-id-123
`,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) != 2 {
cmd.HelpFunc()(cmd, args)
return fmt.Errorf("incorrect number of arguments")
}
workflowName := args[0]
outputDir := args[1]
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient := apiClient.NewWorkflowServiceClient()
if len(namespace) == 0 {
namespace = client.Namespace()
}
workflow, err := serviceClient.GetWorkflow(ctx, &workflowpkg.WorkflowGetRequest{
Name: workflowName,
Namespace: namespace,
})
if err != nil {
return fmt.Errorf("failed to get workflow: %w", err)
}
workflowName = workflow.Name
artifactSearchQuery := v1alpha1.ArtifactSearchQuery{
ArtifactName: artifactName,
TemplateName: templateName,
NodeId: nodeID,
}
artifactSearchResults := workflow.SearchArtifacts(&artifactSearchQuery)
c := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: client.ArgoServerOpts.InsecureSkipVerify,
},
},
}
for _, artifact := range artifactSearchResults {
// This deliberately shadows the outer customPath so each artifact re-derives
// its path from the original --path template.
customPath := filepath.Join(outputDir, customPath)
nodeInfo := workflow.Status.Nodes.Find(func(n v1alpha1.NodeStatus) bool { return n.ID == artifact.NodeID })
if nodeInfo == nil {
return fmt.Errorf("could not get node status for node ID %s", artifact.NodeID)
}
customPath = strings.Replace(customPath, "{templateName}", wfutil.GetTemplateFromNode(*nodeInfo), 1)
customPath = strings.Replace(customPath, "{namespace}", namespace, 1)
customPath = strings.Replace(customPath, "{workflowName}", workflowName, 1)
customPath = strings.Replace(customPath, "{nodeId}", artifact.NodeID, 1)
customPath = strings.Replace(customPath, "{artifactName}", artifact.Name, 1)
err = os.MkdirAll(customPath, os.ModePerm)
if err != nil {
return fmt.Errorf("failed to create folder path: %w", err)
}
key, err := artifact.GetKey()
if err != nil {
return fmt.Errorf("error getting key for artifact: %w", err)
}
err = getAndStoreArtifactData(namespace, workflowName, artifact.NodeID, artifact.Name, path.Base(key), customPath, c, client.ArgoServerOpts)
if err != nil {
return fmt.Errorf("failed to get and store artifact data: %w", err)
}
}
return nil
},
}
command.Flags().StringVarP(&namespace, "namespace", "n", "", "namespace of workflow")
command.Flags().StringVar(&nodeID, "node-id", "", "id of node in workflow")
command.Flags().StringVar(&templateName, "template-name", "", "name of template in workflow")
command.Flags().StringVar(&artifactName, "artifact-name", "", "name of output artifact in workflow")
command.Flags().StringVar(&customPath, "path", "{namespace}/{workflowName}/{nodeId}/outputs/{artifactName}", "use variables {workflowName}, {nodeId}, {templateName}, {artifactName}, and {namespace} to create a customized path to store the artifacts; example: {workflowName}/{templateName}/{artifactName}")
return command
}
func getAndStoreArtifactData(namespace string, workflowName string, nodeID string, artifactName string, fileName string, customPath string, c *http.Client, argoServerOpts apiclient.ArgoServerOpts) error {
request, err := http.NewRequest("GET", fmt.Sprintf("%s/artifacts/%s/%s/%s/%s", argoServerOpts.GetURL(), namespace, workflowName, nodeID, artifactName), nil)
if err != nil {
return fmt.Errorf("failed to create request: %w", err)
}
authString, err := client.GetAuthString()
if err != nil {
return err
}
request.Header.Set("Authorization", authString)
resp, err := c.Do(request)
if err != nil {
return fmt.Errorf("request failed with: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return fmt.Errorf("request failed %s", resp.Status)
}
artifactFilePath := filepath.Join(customPath, fileName)
fileWriter, err := os.Create(artifactFilePath)
if err != nil {
return fmt.Errorf("creating file failed: %w", err)
}
defer fileWriter.Close()
_, err = io.Copy(fileWriter, resp.Body)
if err != nil {
return fmt.Errorf("copying file contents failed: %w", err)
}
log.Printf("Created %q", fileName)
return nil
}
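
The chain of strings.Replace calls above substitutes each path template variable once. A sketch of the same substitution using strings.NewReplacer, with illustrative placeholder values:

r := strings.NewReplacer(
	"{templateName}", "my-template",
	"{namespace}", "argo",
	"{workflowName}", "my-wf",
	"{nodeId}", "my-wf-123",
	"{artifactName}", "main-logs",
)
fmt.Println(r.Replace("{namespace}/{workflowName}/{nodeId}/outputs/{artifactName}"))
// argo/my-wf/my-wf-123/outputs/main-logs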

View File

@ -0,0 +1,255 @@
package cron
import (
"context"
"encoding/json"
"fmt"
"math"
"os"
"time"
"github.com/argoproj/argo-workflows/v3/workflow/util"
cron "github.com/robfig/cron/v3"
"github.com/spf13/cobra"
"sigs.k8s.io/yaml"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/pkg/apiclient/cronworkflow"
"github.com/argoproj/argo-workflows/v3/pkg/apiclient/workflow"
"github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/rand"
"github.com/argoproj/argo-workflows/v3/workflow/common"
)
type backfillOpts struct {
cronWfName string
name string
startDate string
endDate string
parallel bool
argName string
dateFormat string
maxWorkflowCount int
}
func NewBackfillCommand() *cobra.Command {
var (
cliOps backfillOpts
)
var command = &cobra.Command{
Use: "backfill cronwf",
Short: "create a cron backfill (new alpha feature)",
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
cmd.HelpFunc()(cmd, args)
os.Exit(0)
}
if cliOps.name == "" {
name, err := rand.RandString(5)
if err != nil {
return err
}
cliOps.name = name
}
cliOps.cronWfName = args[0]
return backfillCronWorkflow(cmd.Context(), args[0], cliOps)
},
}
command.Flags().StringVar(&cliOps.name, "name", "", "Backfill name")
command.Flags().StringVar(&cliOps.startDate, "start", "", "Start date")
command.Flags().StringVar(&cliOps.endDate, "end", "", "End Date")
command.Flags().BoolVar(&cliOps.parallel, "parallel", false, "Enable all backfill workflows to run in parallel")
command.Flags().StringVar(&cliOps.argName, "argname", "cronScheduleTime", "Schedule time argument name for workflow")
command.Flags().StringVar(&cliOps.dateFormat, "format", time.RFC1123, "Date format for Schedule time value")
command.Flags().IntVar(&cliOps.maxWorkflowCount, "maxworkflowcount", 1000, "Maximum number of generated backfill workflows")
return command
}
func backfillCronWorkflow(ctx context.Context, cronWFName string, cliOps backfillOpts) error {
if cliOps.startDate == "" {
return fmt.Errorf("start date should not be empty")
}
startTime, err := time.Parse(cliOps.dateFormat, cliOps.startDate)
if err != nil {
return err
}
var endTime time.Time
if cliOps.endDate != "" {
endTime, err = time.Parse(cliOps.dateFormat, cliOps.endDate)
if err != nil {
return err
}
} else {
endTime = time.Now()
cliOps.endDate = endTime.Format(time.RFC1123)
}
ctx, apiClient, err := client.NewAPIClient(ctx)
if err != nil {
return err
}
cronClient, err := apiClient.NewCronWorkflowServiceClient()
if err != nil {
return err
}
wfClient := apiClient.NewWorkflowServiceClient()
req := cronworkflow.GetCronWorkflowRequest{
Name: cronWFName,
Namespace: client.Namespace(),
}
cronWF, err := cronClient.GetCronWorkflow(ctx, &req)
if err != nil {
return err
}
cronTab, err := cron.ParseStandard(cronWF.Spec.Schedule)
if err != nil {
return err
}
scheTime := startTime
priority := int32(math.MaxInt32)
var scheList []string
wf := common.ConvertCronWorkflowToWorkflow(cronWF)
paramArg := `{{inputs.parameters.backfillscheduletime}}`
wf.GenerateName = util.GenerateBackfillWorkflowPrefix(cronWF.Name, cliOps.name) + "-"
param := v1alpha1.Parameter{
Name: cliOps.argName,
Value: v1alpha1.AnyStringPtr(paramArg),
}
if !cliOps.parallel {
wf.Spec.Priority = &priority
wf.Spec.Synchronization = &v1alpha1.Synchronization{
Mutex: &v1alpha1.Mutex{Name: cliOps.name},
}
}
wf.Spec.Arguments.Parameters = append(wf.Spec.Arguments.Parameters, param)
for {
scheTime = cronTab.Next(scheTime)
if endTime.Before(scheTime) {
break
}
timeStr := scheTime.String()
scheList = append(scheList, timeStr)
}
wfJSONByte, err := json.Marshal(wf)
if err != nil {
return err
}
yamlbyte, err := yaml.JSONToYAML(wfJSONByte)
if err != nil {
return err
}
wfYamlStr := "apiVersion: argoproj.io/v1alpha1 \n" + string(yamlbyte)
if len(scheList) > 0 {
return CreateMonitorWf(ctx, wfYamlStr, client.Namespace(), cronWFName, scheList, wfClient, cliOps)
} else {
fmt.Print("There is no suitable scheduling time.")
}
return nil
}
const backfillWf = `{
"apiVersion": "argoproj.io/v1alpha1",
"kind": "Workflow",
"metadata": {
"generateName": "backfill-wf-"
},
"spec": {
"entrypoint": "main",
"templates": [
{
"name": "main",
"steps": [
[
{
"name": "create-workflow",
"template": "create-workflow",
"arguments": {
"parameters": [
{
"name": "backfillscheduletime",
"value": "{{item}}"
}
],
"withParam": "{{workflows.parameters.cronscheduletime}}"
}
}
]
]
},
{
"name": "create-workflow",
"inputs": {
"parameters": [
{
"name": "backfillscheduletime"
}
]
},
"resource": {
"successCondition": "status.phase == Succeeded",
"action": "create"
}
}
]
}
}
`
func CreateMonitorWf(ctx context.Context, wf, namespace, cronWFName string, scheTime []string, wfClient workflow.WorkflowServiceClient, cliOps backfillOpts) error {
var monitorWfObj v1alpha1.Workflow
err := json.Unmarshal([]byte(backfillWf), &monitorWfObj)
if monitorWfObj.Labels == nil {
monitorWfObj.Labels = make(map[string]string)
}
monitorWfObj.Labels[common.LabelKeyCronWorkflowBackfill] = cronWFName
if err != nil {
return err
}
totalScheCount := len(scheTime)
iterCount := totalScheCount/cliOps.maxWorkflowCount + 1
startIdx := 0
var endIdx int
var wfNames []string
for i := 0; i < iterCount; i++ {
tmpl := monitorWfObj.GetTemplateByName("create-workflow")
if (totalScheCount - i*cliOps.maxWorkflowCount) < cliOps.maxWorkflowCount {
endIdx = TotalScheCount
} else {
endIdx = startIdx + cliOps.maxWorkflowCount
}
scheTimeByte, err := json.Marshal(scheTime[startIdx:endIdx])
startIdx = endIdx
if err != nil {
return err
}
tmpl.Resource.Manifest = wf // wf is already a YAML string
stepTmpl := monitorWfObj.GetTemplateByName("main")
stepTmpl.Steps[0].Steps[0].WithParam = string(scheTimeByte)
c, err := wfClient.CreateWorkflow(ctx, &workflow.WorkflowCreateRequest{Namespace: namespace, Workflow: &monitorWfObj})
if err != nil {
return err
}
wfNames = append(wfNames, c.Name)
}
printBackFillOutput(wfNames, len(scheTime), cliOps)
return nil
}
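// printBackFillOutput summarizes the backfill period and lists the monitor
// workflows that were created.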
func printBackFillOutput(wfNames []string, totalSches int, cliOps backfillOpts) {
fmt.Printf("Created %s Backfill task for Cronworkflow %s \n", cliOps.name, cliOps.cronWfName)
fmt.Printf("==================================================\n")
fmt.Printf("Backfill Period :\n")
fmt.Printf("Start Time : %s \n", cliOps.startDate)
fmt.Printf(" End Time : %s \n", cliOps.endDate)
fmt.Printf("Total Backfill Schedule: %d \n", totalSches)
fmt.Printf("==================================================\n")
fmt.Printf("Backfill Workflows: \n")
fmt.Printf(" NAMESPACE\t WORKFLOW: \n")
namespace := client.Namespace()
for idx, wfName := range wfNames {
fmt.Printf("%d. %s \t %s \n", idx+1, namespace, wfName)
}
}

View File

@ -0,0 +1,97 @@
package cron
import (
"context"
"fmt"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
cronworkflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/cronworkflow"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/workflow/util"
)
type cliCreateOpts struct {
output common.EnumFlagValue // --output
schedule string // --schedule
strict bool // --strict
}
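// NewCreateCommand returns a new instance of an `argo cron create` command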
func NewCreateCommand() *cobra.Command {
var (
cliCreateOpts = cliCreateOpts{output: common.NewPrintWorkflowOutputValue("")}
submitOpts wfv1.SubmitOpts
parametersFile string
)
command := &cobra.Command{
Use: "create FILE1 FILE2...",
Short: "create a cron workflow",
Args: cobra.MinimumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
if parametersFile != "" {
err := util.ReadParametersFile(parametersFile, &submitOpts)
if err != nil {
return err
}
}
return CreateCronWorkflows(cmd.Context(), args, &cliCreateOpts, &submitOpts)
},
}
util.PopulateSubmitOpts(command, &submitOpts, &parametersFile, false)
command.Flags().VarP(&cliCreateOpts.output, "output", "o", "Output format. "+cliCreateOpts.output.Usage())
command.Flags().BoolVar(&cliCreateOpts.strict, "strict", true, "perform strict workflow validation")
command.Flags().StringVar(&cliCreateOpts.schedule, "schedule", "", "override cron workflow schedule")
return command
}
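// CreateCronWorkflows reads cron workflow manifests from the given files,
// applies the submit options and any schedule override, and creates each cron
// workflow via the API server.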
func CreateCronWorkflows(ctx context.Context, filePaths []string, cliOpts *cliCreateOpts, submitOpts *wfv1.SubmitOpts) error {
ctx, apiClient, err := client.NewAPIClient(ctx)
if err != nil {
return err
}
serviceClient, err := apiClient.NewCronWorkflowServiceClient()
if err != nil {
return err
}
cronWorkflows := generateCronWorkflows(filePaths, cliOpts.strict)
for _, cronWf := range cronWorkflows {
if cliOpts.schedule != "" {
cronWf.Spec.Schedule = cliOpts.schedule
}
newWf := wfv1.Workflow{Spec: cronWf.Spec.WorkflowSpec}
err := util.ApplySubmitOpts(&newWf, submitOpts)
if err != nil {
return err
}
cronWf.Spec.WorkflowSpec = newWf.Spec
// We have only copied the workflow spec to the cron workflow but not the metadata
// that includes name and generateName. Here we copy the metadata to the cron
// workflow's metadata and remove the unnecessary and mutually exclusive part.
if generateName := newWf.GenerateName; generateName != "" {
cronWf.GenerateName = generateName
cronWf.Name = ""
}
if name := newWf.Name; name != "" {
cronWf.Name = name
cronWf.GenerateName = ""
}
if cronWf.Namespace == "" {
cronWf.Namespace = client.Namespace()
}
created, err := serviceClient.CreateCronWorkflow(ctx, &cronworkflowpkg.CreateCronWorkflowRequest{
Namespace: cronWf.Namespace,
CronWorkflow: &cronWf,
})
if err != nil {
return fmt.Errorf("Failed to create cron workflow: %v", err)
}
fmt.Print(getCronWorkflowGet(ctx, created))
}
return nil
}

View File

@ -0,0 +1,52 @@
package cron
import (
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
cronworkflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/cronworkflow"
)
// NewDeleteCommand returns a new instance of an `argo delete` command
func NewDeleteCommand() *cobra.Command {
var all bool
command := &cobra.Command{
Use: "delete [CRON_WORKFLOW... | --all]",
Short: "delete a cron workflow",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient, err := apiClient.NewCronWorkflowServiceClient()
if err != nil {
return err
}
if all {
cronWfList, err := serviceClient.ListCronWorkflows(ctx, &cronworkflowpkg.ListCronWorkflowsRequest{
Namespace: client.Namespace(),
})
if err != nil {
return err
}
for _, cronWf := range cronWfList.Items {
args = append(args, cronWf.Name)
}
}
for _, name := range args {
_, err := serviceClient.DeleteCronWorkflow(ctx, &cronworkflowpkg.DeleteCronWorkflowRequest{
Name: name,
Namespace: client.Namespace(),
})
if err != nil {
return err
}
}
return nil
},
}
command.Flags().BoolVar(&all, "all", false, "Delete all cron workflows")
return command
}

View File

@ -0,0 +1,45 @@
package cron
import (
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
"github.com/argoproj/argo-workflows/v3/pkg/apiclient/cronworkflow"
)
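// NewGetCommand returns a new instance of an `argo cron get` command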
func NewGetCommand() *cobra.Command {
var output = common.NewPrintWorkflowOutputValue("")
command := &cobra.Command{
Use: "get CRON_WORKFLOW...",
Short: "display details about a cron workflow",
Args: cobra.MinimumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient, err := apiClient.NewCronWorkflowServiceClient()
if err != nil {
return err
}
namespace := client.Namespace()
for _, arg := range args {
cronWf, err := serviceClient.GetCronWorkflow(ctx, &cronworkflow.GetCronWorkflowRequest{
Name: arg,
Namespace: namespace,
})
if err != nil {
return err
}
printCronWorkflow(ctx, cronWf, output.String())
}
return nil
},
}
command.Flags().VarP(&output, "output", "o", "Output format. "+output.Usage())
return command
}

View File

@ -0,0 +1,42 @@
package cron
import (
"os"
"github.com/spf13/cobra"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
"github.com/argoproj/argo-workflows/v3/cmd/argo/lint"
wf "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow"
)
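// NewLintCommand returns a new instance of an `argo cron lint` command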
func NewLintCommand() *cobra.Command {
var (
strict bool
output = common.EnumFlagValue{AllowedValues: []string{"pretty", "simple"}, Value: "pretty"}
)
command := &cobra.Command{
Use: "lint FILE...",
Short: "validate files or directories of cron workflow manifests",
Args: cobra.MinimumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
opts := lint.LintOptions{
Files: args,
Strict: strict,
DefaultNamespace: client.Namespace(),
Printer: os.Stdout,
}
return lint.RunLint(ctx, apiClient, []string{wf.CronWorkflowPlural}, output.String(), false, opts)
},
}
command.Flags().VarP(&output, "output", "o", "Linting results output format. "+output.Usage())
command.Flags().BoolVar(&strict, "strict", true, "perform strict validation")
return command
}

View File

@ -0,0 +1,101 @@
package cron
import (
"context"
"fmt"
"os"
"text/tabwriter"
"time"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/client"
"github.com/argoproj/argo-workflows/v3/cmd/argo/commands/common"
cronworkflowpkg "github.com/argoproj/argo-workflows/v3/pkg/apiclient/cronworkflow"
wfv1 "github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo-workflows/v3/util/humanize"
)
type listFlags struct {
allNamespaces bool // --all-namespaces
output common.EnumFlagValue // --output
labelSelector string // --selector
}
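// NewListCommand returns a new instance of an `argo cron list` command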
func NewListCommand() *cobra.Command {
var listArgs = listFlags{
output: common.EnumFlagValue{AllowedValues: []string{"wide", "name"}},
}
command := &cobra.Command{
Use: "list",
Short: "list cron workflows",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, apiClient, err := client.NewAPIClient(cmd.Context())
if err != nil {
return err
}
serviceClient, err := apiClient.NewCronWorkflowServiceClient()
if err != nil {
return err
}
namespace := client.Namespace()
if listArgs.allNamespaces {
namespace = ""
}
listOpts := metav1.ListOptions{}
listOpts.LabelSelector = listArgs.labelSelector
cronWfList, err := serviceClient.ListCronWorkflows(ctx, &cronworkflowpkg.ListCronWorkflowsRequest{
Namespace: namespace,
ListOptions: &listOpts,
})
if err != nil {
return err
}
switch listArgs.output.String() {
case "", "wide":
printTable(ctx, cronWfList.Items, &listArgs)
case "name":
for _, cronWf := range cronWfList.Items {
fmt.Println(cronWf.Name)
}
default:
return fmt.Errorf("Unknown output mode: %s", listArgs.output.String())
}
return nil
},
}
command.Flags().BoolVarP(&listArgs.allNamespaces, "all-namespaces", "A", false, "Show workflows from all namespaces")
command.Flags().VarP(&listArgs.output, "output", "o", "Output format. "+listArgs.output.Usage())
command.Flags().StringVarP(&listArgs.labelSelector, "selector", "l", "", "Selector (label query) to filter on, supports '=', '==', and '!='.(e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.")
return command
}
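// printTable renders the cron workflows as a tab-aligned table, adding a
// NAMESPACE column when --all-namespaces is set.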
func printTable(ctx context.Context, wfList []wfv1.CronWorkflow, listArgs *listFlags) {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 3, ' ', 0)
if listArgs.allNamespaces {
_, _ = fmt.Fprint(w, "NAMESPACE\t")
}
_, _ = fmt.Fprint(w, "NAME\tAGE\tLAST RUN\tNEXT RUN\tSCHEDULES\tTIMEZONE\tSUSPENDED")
_, _ = fmt.Fprint(w, "\n")
for _, cwf := range wfList {
if listArgs.allNamespaces {
_, _ = fmt.Fprintf(w, "%s\t", cwf.Namespace)
}
var cleanLastScheduledTime string
if cwf.Status.LastScheduledTime != nil {
cleanLastScheduledTime = humanize.RelativeDurationShort(cwf.Status.LastScheduledTime.Time, time.Now())
} else {
cleanLastScheduledTime = "N/A"
}
var cleanNextScheduledTime string
if next, err := GetNextRuntime(ctx, &cwf); err == nil {
cleanNextScheduledTime = humanize.RelativeDurationShort(next, time.Now())
} else {
cleanNextScheduledTime = "N/A"
}
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%t", cwf.Name, humanize.RelativeDurationShort(cwf.CreationTimestamp.Time, time.Now()), cleanLastScheduledTime, cleanNextScheduledTime, cwf.Spec.GetScheduleString(), cwf.Spec.Timezone, cwf.Spec.Suspend)
_, _ = fmt.Fprintf(w, "\n")
}
_ = w.Flush()
}

Some files were not shown because too many files have changed in this diff.