Compare commits

No commits in common. "1.1.0" and "main" have entirely different histories.
1.1.0 ... main

6702 changed files with 875229 additions and 475956 deletions

@@ -1,21 +0,0 @@
---
type: container
version: 1
name: argo-approval
description: Base step for Argo Approval container
image: argoproj/argoapproval:v2.1
resources:
mem_mib: 128
cpu_cores: 0.1
command: ["sh", "-c"]
args: [
/ax/bin/axapproval.py --required_list "%%inputs.parameters.REQUIRED_APPROVALS%%"
--optional_list "%%inputs.parameters.OPTIONAL_APPROVALS%%"
--number_optional "%%inputs.parameters.NUMBER_OF_OPTIONAL_APPROVALS_NEEDED%%"
--timeout "%%inputs.parameters.TIMEOUT_IN_MINUTES%%"]
inputs:
parameters:
REQUIRED_APPROVALS:
OPTIONAL_APPROVALS:
NUMBER_OF_OPTIONAL_APPROVALS_NEEDED:
TIMEOUT_IN_MINUTES:

@@ -1,64 +0,0 @@
---
type: workflow
version: 1
name: Argo Simple Build
description: Argo build in a single step
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
NAMESPACE:
default: testing
REGISTRY:
default: docker.applatix.net
REG_USER:
default: "%%config.admin@internal.docker.user%%"
REG_PASSWORD:
default: "%%config.admin@internal.docker.password%%"
VERSION_OPTION:
default: latest
PY_BUILD_OPTIONS:
default: ""
GO_BUILD_OPTIONS:
default: ""
steps:
- CHECKOUT:
template: argo-checkout
- ARGO-BUILD:
template: argo-build-all
arguments:
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"
---
type: container
version: 1
name: argo-build-all
description: Base container for building platform and devops containers
image: argoproj/argobuild2:latest
command: ["bash", "-c"]
args: ["
REGISTRY=`echo %%inputs.parameters.REGISTRY%% | sed \"s/^docker.io$//g\"` &&
docker login -u %%inputs.parameters.REG_USER%% -p %%inputs.parameters.REG_PASSWORD%% $REGISTRY &&
/src/build/build_platform.py -r %%inputs.parameters.REGISTRY%% -n %%inputs.parameters.NAMESPACE%% -v %%inputs.parameters.VERSION_OPTION%% %%inputs.parameters.PY_BUILD_OPTIONS%% &&
/src/build/build_devops.py -r %%inputs.parameters.REGISTRY%% -n %%inputs.parameters.NAMESPACE%% -v %%inputs.parameters.VERSION_OPTION%% %%inputs.parameters.PY_BUILD_OPTIONS%% &&
/src/build/build_saas.py -bl -r %%inputs.parameters.REGISTRY%% -n %%inputs.parameters.NAMESPACE%% -v %%inputs.parameters.VERSION_OPTION%% %%inputs.parameters.GO_BUILD_OPTIONS%%
"]
resources:
mem_mib: 600
cpu_cores: 0.2
inputs:
parameters:
NAMESPACE:
REGISTRY:
REG_USER:
REG_PASSWORD:
VERSION_OPTION:
PY_BUILD_OPTIONS:
GO_BUILD_OPTIONS:
artifacts:
CODE:
path: /src
annotations:
ax_ea_docker_enable: '{ "graph-storage-name": "axbuildstorage", "graph-storage-size": "25Gi", "cpu_cores":0.9, "mem_mib":1000}'

@@ -1,21 +0,0 @@
---
type: container
version: 1
name: argo-checkout
description: Checks out a source repository to /src
resources:
mem_mib: 500
cpu_cores: 0.1
image: argoproj/argoscm:v2.0
command: ["axscm"]
args: ["clone", "%%inputs.parameters.REPO%%", "/src", "--commit", "%%inputs.parameters.COMMIT%%"]
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
outputs:
artifacts:
CODE:
path: /src

@@ -1,303 +0,0 @@
---
type: workflow
version: 1
name: Argo CI
description: Argo continuous test and build
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
NAMESPACE:
default: staging
REGISTRY:
default: docker.applatix.net
REG_USER:
default: "%%config.admin@internal.docker.user%%"
REG_PASSWORD:
default: "%%config.admin@internal.docker.password%%"
VERSION_OPTION:
default: -v latest
BUILD_OPTIONS:
default: ""
steps:
- CHECKOUT:
template: argo-checkout
- ARGO-TEST:
template: argo-test
arguments:
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"
- ARGO-BUILD:
template: argo-build
arguments:
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"
---
type: workflow
version: 1
name: Argo Build Standalone
description: Argo standalone build
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
NAMESPACE:
default: staging
REGISTRY:
default: docker.applatix.net
REG_USER:
default: "%%config.admin@internal.docker.user%%"
REG_PASSWORD:
default: "%%config.admin@internal.docker.password%%"
VERSION_OPTION:
default: -v latest
BUILD_OPTIONS:
default: ""
steps:
- CHECKOUT:
template: argo-checkout
- ARGO-BUILD:
template: argo-build
arguments:
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"
---
type: workflow
version: 1
name: argo-test
description: Workflow for testing argo
inputs:
parameters:
COMMIT:
REPO:
artifacts:
CODE:
steps:
- COMMON-LIB-TEST:
template: argo-common-lib-tests
arguments:
artifacts.CODE: "%%inputs.artifacts.CODE%%"
DEVOPS-TEST:
template: argo-devops-unit-tests
arguments:
artifacts.CODE: "%%inputs.artifacts.CODE%%"
PLATFORM-TEST:
template: argo-platform-unit-tests
arguments:
artifacts.CODE: "%%inputs.artifacts.CODE%%"
SAAS-TEST:
template: argo-saas-unit-tests
arguments:
artifacts.CODE: "%%inputs.artifacts.CODE%%"
termination_policy:
time_seconds: 2700
spending_cents: 20
---
type: workflow
version: 1
name: argo-build
description: Workflow for building argo
inputs:
parameters:
COMMIT:
REPO:
NAMESPACE:
REGISTRY:
REG_USER:
REG_PASSWORD:
VERSION_OPTION:
BUILD_OPTIONS:
artifacts:
CODE:
steps:
- PLATFORM-BUILD:
template: argo-build-base
arguments:
parameters.COMPONENT: platform
DEVOPS-BUILD:
template: argo-build-base
arguments:
parameters.COMPONENT: devops
SAAS-BUILD:
template: argo-saas-build
termination_policy:
time_seconds: 2700
spending_cents: 20
---
type: container
version: 1
name: argo-build-base
description: Base container for building platform and devops containers
image: argoproj/argobuild2:latest
command: ["sh", "-c"]
args: [
REGISTRY=`echo %%inputs.parameters.REGISTRY%% | sed "s/^docker.io$//g"` &&
docker login -u %%inputs.parameters.REG_USER%% -p %%inputs.parameters.REG_PASSWORD%% $REGISTRY &&
/src/build/build_%%inputs.parameters.COMPONENT%%.py -r %%inputs.parameters.REGISTRY%% -n %%inputs.parameters.NAMESPACE%% %%inputs.parameters.VERSION_OPTION%% %%inputs.parameters.BUILD_OPTIONS%%
]
resources:
mem_mib: 300
cpu_cores: 0.1
inputs:
parameters:
COMPONENT:
NAMESPACE:
REGISTRY:
REG_USER:
REG_PASSWORD:
VERSION_OPTION:
default: -v latest
BUILD_OPTIONS:
default: ""
artifacts:
CODE:
path: /src
annotations:
ax_ea_docker_enable: '{ "graph-storage-name": "axbuildstorage", "graph-storage-size": "25Gi", "cpu_cores":0.4, "mem_mib":750}'
---
type: workflow
version: 1
name: argo-saas-build
description: Workflow step for building all SaaS components
inputs:
parameters:
NAMESPACE:
REGISTRY:
REG_USER:
REG_PASSWORD:
VERSION_OPTION:
default: -v latest
BUILD_OPTIONS:
default: ""
artifacts:
CODE:
steps:
- AXAMM:
template: argo-saas-build-base
arguments:
parameters.SERVICE: axamm
AXDB:
template: argo-saas-build-base
arguments:
parameters.SERVICE: axdb
AXNC:
template: argo-saas-build-base
arguments:
parameters.SERVICE: axnc
AXOPS:
template: argo-axops-build
ARGOCLI:
template: argo-saas-build-base
arguments:
parameters.SERVICE: argocli
KAFKA:
template: argo-saas-build-base
arguments:
parameters.SERVICE: kafka
ZOOKEEPER:
template: argo-saas-build-base
arguments:
parameters.SERVICE: zookeeper
---
type: workflow
version: 1
name: argo-axops-build
description: Workflow step for building all SaaS components
inputs:
parameters:
NAMESPACE:
REGISTRY:
REG_USER:
REG_PASSWORD:
VERSION_OPTION:
default: -v latest
BUILD_OPTIONS:
default: ""
artifacts:
CODE:
steps:
- AXUI:
image: node:6.3.1
command: ["bash", "-c"]
args: ["cd /src/saas/axops/src/ui && npm install && npm run build:prod"]
resources:
mem_mib: 1536
cpu_cores: 0.2
inputs:
artifacts:
CODE:
from: "%%inputs.artifacts.CODE%%"
path: /src
outputs:
artifacts:
CODE:
path: /src
- AXOPS:
template: argo-saas-build-base
arguments:
parameters.SERVICE: axops
artifacts.CODE: "%%steps.AXUI.outputs.artifacts.CODE%%"
---
type: container
version: 1
name: argo-saas-build-base
description: Base template for building SaaS component
image: argobase/saasbuilder:v1
command: ["sh", "-c"]
args: [
REGISTRY=`echo %%inputs.parameters.REGISTRY%% | sed "s/^docker.io$//g"` &&
docker login -u %%inputs.parameters.REG_USER%% -p %%inputs.parameters.REG_PASSWORD%% $REGISTRY &&
/src/build/build_saas.py -bl -r %%inputs.parameters.REGISTRY%% -n %%inputs.parameters.NAMESPACE%%
-s %%inputs.parameters.SERVICE%% %%inputs.parameters.VERSION_OPTION%% %%inputs.parameters.BUILD_OPTIONS%%
]
resources:
mem_mib: 600
cpu_cores: 0.2
inputs:
parameters:
SERVICE:
NAMESPACE:
REGISTRY:
REG_USER:
REG_PASSWORD:
VERSION_OPTION:
default: -v latest
BUILD_OPTIONS:
default: ""
artifacts:
CODE:
path: /src
annotations:
ax_ea_docker_enable: '{ "graph-storage-name": "axbuildstorage", "graph-storage-size": "25Gi", "cpu_cores":0.3, "mem_mib":600}'
---
type: policy
version: 1
name: Argo CI Policy
description: Policy to trigger build for all events
template: Argo CI
arguments:
parameters.NAMESPACE: staging
parameters.VERSION_OPTION: -v latest
parameters.BUILD_OPTIONS: --no-push
notifications:
- when:
- on_failure
whom:
- committer
- author
- prod@slack
when:
- event: on_push
- event: on_pull_request
- event: on_cron
schedule: "0 0 * * *"
timezone: "US/Pacific"

@@ -1,35 +0,0 @@
---
type: workflow
version: 1
name: argo-common-lib-tests
description: This is the workflow step for running the common library tests
inputs:
artifacts:
CODE:
steps:
- PYTHON-LIB-TESTS:
template: argo-common-python-unit-test-container
arguments:
parameters.COMMAND:
pytest -vv /src/common/python/argo/
---
type: container
version: 1
name: argo-common-python-unit-test-container
description: Common python library unit tests
inputs:
artifacts:
CODE:
path: "/src"
parameters:
COMMAND:
image: argobase/axplatbuilder:v16
command: ["sh", "-c"]
args: ["%%inputs.parameters.COMMAND%%"]
env:
- name: PYTHONPATH
value: /src/common/python
resources:
mem_mib: 256
cpu_cores: 0.1

@@ -1,86 +0,0 @@
---
type: workflow
version: 1
name: argo-devops-unit-tests
description: Argo devops unit tests
inputs:
artifacts:
CODE:
steps:
- SCM-TESTS:
template: argo-devops-unit-test-base
arguments:
parameters.COMMAND:
git config --global user.email "argouser@argoproj" &&
git config --global user.name "argouser" &&
pytest -vv --maxfail=5 /src/devops/test/ax/test/devops/unit/scm
#WORKFLOW-EXECUTOR-TESTS:
# template: argo-devops-unit-test-base
# arguments:
# parameters.COMMAND: pytest -vv --maxfail=5 /src/devops/test/ax/test/devops/e2e/executor
FIXTUREMANAGER-TESTS:
template: fixturemanager-unit-tests
ARTIFACTMANAGER-TESTS:
template: argo-devops-unit-test-base
arguments:
parameters.COMMAND: pytest -vv --maxfail=5 /src/devops/test/ax/test/devops/unit/artifact_manager
---
type: container
version: 1
name: argo-devops-unit-test-base
description: Base template for DevOps unit tests
resources:
mem_mib: 128
cpu_cores: 0.1
image: argobase/axplatbuilder:v16
command: ["sh", "-c"]
args: ["%%inputs.parameters.COMMAND%%"]
env:
- name: PYTHONPATH
value: /src/common/python
inputs:
artifacts:
CODE:
path: "/src"
parameters:
COMMAND:
---
type: workflow
version: 1
name: fixturemanager-unit-tests
description: fixturemanager unit tests
inputs:
artifacts:
CODE:
fixtures:
- MONGODB:
template: test-mongodb
REDISDB:
template: test-redis
steps:
- FIXTUREMANAGER-TESTS:
template: argo-devops-unit-test-base
arguments:
parameters.COMMAND: pytest -vv --maxfail=5 /src/devops/test/ax/test/devops/unit/fixture --redis %%fixtures.REDISDB.ip%% --mongodb %%fixtures.MONGODB.ip%%
---
type: container
version: 1
name: test-mongodb
description: MongoDB container for purposes of unit tests
image: mongo:3.2.9
resources:
mem_mib: 128
cpu_cores: 0.1
---
type: container
version: 1
name: test-redis
description: Redis container for purposes of unit tests
image: redis:3.0.5
resources:
mem_mib: 128
cpu_cores: 0.1

@@ -1,57 +0,0 @@
---
type: workflow
version: 1
name: argo-platform-unit-tests
description: This is the workflow step for running platform unit tests.
inputs:
artifacts:
CODE:
steps:
- MISC-TESTS:
template: argo-platform-unit-test-base
arguments:
parameters.COMMAND:
pytest -vv /src/platform/tests/resource_config/ &&
pytest -vv /src/platform/tests/apiclient/ &&
pytest -vv /src/platform/tests/kubeobject/ &&
pytest -vv /src/platform/tests/util/ &&
pytest -vv /src/platform/tests/lib/ax/platform/ax_asg_test.py &&
pytest -vv /src/platform/tests/axmon/operations_test.py &&
python2 -m pytest -s -vv /src/platform/tests/cluster_state_machine/ &&
python2 -m pytest -s -vv /src/platform/tests/minion_manager/aws/aws_minion_manager_test.py &&
python2 -m pytest -s -vv /src/platform/tests/minion_manager/aws/aws_bid_advisor_test.py &&
python2 -m pytest -s -vv /src/platform/tests/minion_manager/broker_test.py &&
python2 -m pytest -s -vv /src/platform/tests/spot_instance_option_manager_test.py
KUBE-MONITOR-TESTS:
template: argo-platform-unit-test-base
arguments:
parameters.COMMAND: pytest -s -vv /src/platform/tests/axkubemonitor/
HANDSHAKE-SERVER-TESTS:
template: argo-platform-unit-test-base
arguments:
parameters.COMMAND: pytest -vv /src/platform/tests/handshake_server/
APPLET-TESTS:
template: argo-platform-unit-test-base
arguments:
parameters.COMMAND: pytest -vv /src/platform/tests/applet/
---
type: container
version: 1
name: argo-platform-unit-test-base
description: Base template for platform unit tests
inputs:
artifacts:
CODE:
path: "/src"
parameters:
COMMAND:
image: argobase/axplatbuilder:v15
command: ["sh", "-c"]
args: ["%%inputs.parameters.COMMAND%%"]
env:
- name: PYTHONPATH
value: /src/common/python
resources:
mem_mib: 256
cpu_cores: 0.1

@@ -1,33 +0,0 @@
---
type: workflow
version: 1
name: Argo Release
description: Argo release workflow
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
NAMESPACE:
default: axsys
REGISTRY:
default: docker.applatix.net
REG_USER:
default: "%%config.admin@internal.docker.user%%"
REG_PASSWORD:
default: "%%config.admin@internal.docker.password%%"
BUILD_OPTIONS:
default: ""
steps:
- CHECKOUT:
template: argo-checkout
- ARGO-TEST:
template: argo-test
arguments:
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"
- ARGO-BUILD:
template: argo-build
arguments:
parameters.VERSION_OPTION: ""
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"

@@ -1,79 +0,0 @@
---
type: workflow
version: 1
name: argo-saas-unit-tests
description: Workflow step for running all SaaS unit tests
inputs:
artifacts:
CODE:
steps:
- AXDB-TEST:
template: saas-unit-test-base
arguments:
parameters.COMMAND: /src/saas/axdb/run-test.sh
AXOPS-TEST:
template: saas-unit-test-base
arguments:
parameters.COMMAND: /src/saas/axops/run-test-axops.sh
TEMPLATE-TEST:
template: saas-unit-test-base
arguments:
parameters.COMMAND: /src/saas/common/run-test.sh
AXAMM-TEST:
template: saas-unit-test-base
arguments:
parameters.COMMAND: /src/saas/axamm/run-test.sh
AXNC-TEST:
template: saas-unit-test-base
arguments:
parameters.COMMAND: /src/saas/axnc/run-test.sh
EVENT-TEST:
template: saas-unit-test-base
arguments:
parameters.COMMAND: /src/saas/axops/run-test-event.sh
MODULES-TEST-1:
template: saas-unit-test-base
arguments:
parameters.COMMAND: /src/saas/axops/run-test-modules-1.sh
MODULES-TEST-2:
template: saas-unit-test-base
arguments:
parameters.COMMAND: /src/saas/axops/run-test-modules-2.sh
outputs:
artifacts:
coverage-report-axops:
from: "%%steps.AXOPS-TEST.outputs.artifacts.COVERAGE-REPORT%%"
coverage-report-axdb:
from: "%%steps.AXDB-TEST.outputs.artifacts.COVERAGE-REPORT%%"
coverage-report-axamm:
from: "%%steps.AXAMM-TEST.outputs.artifacts.COVERAGE-REPORT%%"
coverage-report-axnc:
from: "%%steps.AXNC-TEST.outputs.artifacts.COVERAGE-REPORT%%"
---
type: container
version: 1
name: saas-unit-test-base
description: Base template for running SaaS unit tests
image: argobase/saasbuilder:v1
inputs:
artifacts:
CODE:
path: /src
parameters:
COMMAND:
command: ["sh", "-c"]
args: ["
/src/saas/common/config/cassandra-test-config.sh /etc/cassandra &&
/src/saas/common/config/kafka-test-config.sh &&
%%inputs.parameters.COMMAND%%"]
resources:
mem_mib: 1536
cpu_cores: 0.5
outputs:
artifacts:
COVERAGE-REPORT:
path: /src/saas/test
meta_data:
- browsable

@@ -1,42 +0,0 @@
---
type: container
version: 1
name: junit-reporting-test
description: This is the base template for the JUnit reporting test
image: argobase/axplatbuilder:v16
resources:
mem_mib: 128
cpu_cores: 0.1
command: ["%%inputs.parameters.CMD%%"]
inputs:
parameters:
CMD:
PATH:
artifacts:
CODE:
path: /src
outputs:
artifacts:
REPORT:
      path: "%%inputs.parameters.PATH%%"
---
type: workflow
version: 1
name: Junit Reporting Example
description: This is the workflow for the JUnit reporting example
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- CHECKOUT:
template: argo-checkout
- REPORT:
template: junit-reporting-test
arguments:
parameters.CMD: "pytest -vv /src/devops/test/junit/ --junitxml=/tmp/junit_report.xml"
parameters.PATH: "/tmp/junit_report.xml"
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"

@@ -1,148 +0,0 @@
---
type: container
version: 1
name: argo-workflow-test
description: Base ARGO test container
image: argobase/axplatbuilder:v16
resources:
mem_mib: 1024
cpu_cores: 0.1
command: ["%%inputs.parameters.TEST_COMMAND%%"]
inputs:
parameters:
TEST_COMMAND: {}
artifacts:
CODE:
path: /src
---
type: container
version: 1
name: argo-deploy
description: Deploy a fixture
image: argoproj/argoclustermanager:v1
resources:
mem_mib: 1000
cpu_cores: 1.0
command: ["bash", "-c"]
args: [
export AX_NAMESPACE=%%inputs.parameters.NAMESPACE%% &&
export AX_VERSION=%%inputs.parameters.VERSION%% &&
export AWS_CROSS_ACCOUNT_ARN=%%inputs.parameters.ARN%% &&
export AX_CUSTOMER_ID=%%inputs.parameters.CUSTOMER_ID%% &&
export AX_UPGRADE_FOR_CUSTOMER=true &&
/ax/bin/ax-kube-upgrade %%inputs.parameters.CLUSTER_NAME%% reset
]
inputs:
parameters:
ARN:
default: ""
CLUSTER_NAME:
default: ""
CLUSTER_ID:
default: ""
CUSTOMER_ID:
default: ""
NAMESPACE:
default: ""
VERSION:
default: ""
artifacts:
CODE:
path: /src
annotations:
"ax_ea_docker_enable": '{ "graph-storage-name": "axdeploy2storage", "graph-storage-size": "25Gi", "cpu_cores":"0.5", "mem_mib":"500"}'
---
type: workflow
version: 1
name: AX Workflow Test
description: AX Workflow Test
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
NAMESPACE:
default: staging
REGISTRY:
default: docker.applatix.net
REG_USER:
default: "%%config.admin@internal.docker.user%%"
REG_PASSWORD:
default: "%%config.admin@internal.docker.password%%"
VERSION_OPTION:
default: -v latest
BUILD_OPTIONS:
default: --no-push
fixtures:
- target_cluster:
class: "Cluster"
steps:
- CHECKOUT:
template: argo-checkout
- ARGO_BUILD:
template: argo-build
arguments:
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"
- DEPLOY:
template: argo-deploy
arguments:
parameters.CLUSTER_NAME: "%%fixtures.target_cluster.name%%"
parameters.CLUSTER_ID: "%%fixtures.target_cluster.cluster_id%%"
parameters.CUSTOMER_ID: "%%fixtures.target_cluster.customer_id%%"
parameters.ARN: ""
parameters.NAMESPACE: ""
parameters.VERSION: ""
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"
- BASIC_WORKFLOW_TEST_NEGATIVE:
template: argo-workflow-test
arguments:
parameters.TEST_COMMAND: sh -c 'cd /src && export PYTHONPATH=./common/python &&
python3 devops/test/ax/test/devops/e2e/workflow/basic_workflow.py -c %%fixtures.target_cluster.axops_dnsname%% -u %%fixtures.target_cluster.admin_username%% -p %%fixtures.target_cluster.admin_password%% --negative-test'
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"
BASIC_WORKFLOW_TEST_POSITIVE:
template: argo-workflow-test
arguments:
parameters.TEST_COMMAND: sh -c 'cd /src && export PYTHONPATH=./common/python &&
python3 devops/test/ax/test/devops/e2e/workflow/basic_workflow.py -c %%fixtures.target_cluster.axops_dnsname%% -u %%fixtures.target_cluster.admin_username%% -p %%fixtures.target_cluster.admin_password%%'
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"
NESTED_WORKFLOW_TEST_NEGATIVE:
template: argo-workflow-test
arguments:
parameters.TEST_COMMAND: sh -c 'cd /src && export PYTHONPATH=./common/python &&
python3 devops/test/ax/test/devops/e2e/workflow/nested_workflow.py -c %%fixtures.target_cluster.axops_dnsname%% -u %%fixtures.target_cluster.admin_username%% -p %%fixtures.target_cluster.admin_password%% --negative-test'
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"
NESTED_WORKFLOW_TEST_POSITIVE:
template: argo-workflow-test
arguments:
parameters.TEST_COMMAND: sh -c 'cd /src && export PYTHONPATH=./common/python &&
python3 devops/test/ax/test/devops/e2e/workflow/nested_workflow.py -c %%fixtures.target_cluster.axops_dnsname%% -u %%fixtures.target_cluster.admin_username%% -p %%fixtures.target_cluster.admin_password%%'
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"
---
type: policy
version: 1
name: AX Workflow Test Policy
description: Policy to trigger test with randomized workflows
template: AX Workflow Test
arguments:
parameters.NAMESPACE: axsys
parameters.VERSION_OPTION: -v latest
notifications:
- when:
- on_start
- on_success
- on_failure
whom:
- committer
- author
- when:
- on_change
whom:
- j7l5a4s7j9c5t9m3@applatix.slack.com
when:
- event: on_cron
schedule: "0 * * * *"
timezone: "US/Pacific"

@@ -1,21 +0,0 @@
---
type: container
version: 1
name: git-checkout
description: Checks out a source repository to /src
resources:
mem_mib: 500
cpu_cores: 0.1
image: indiehosters/git
command: ["bash", "-c"]
args: ["git clone %%inputs.parameters.REPO%% /src && cd /src && git checkout %%inputs.parameters.COMMIT%%"]
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
outputs:
artifacts:
CODE:
path: /src

@@ -1,111 +0,0 @@
---
type: workflow
version: 1
name: Argo Lite Release
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
IMAGE_URL:
default: "argoproj/argo-lite:staging"
REG_USER:
default: "%%config.amatyushentsev@gmail.com.dockerhub.user%%"
REG_PASSWORD:
default: "%%config.amatyushentsev@gmail.com.dockerhub.password%%"
steps:
- build:
template: Argo Lite CI
- publish:
template: lite-publish
arguments:
artifacts.API: "%%steps.build.outputs.artifacts.API%%"
artifacts.UI: "%%steps.build.outputs.artifacts.UI%%"
parameters.IMAGE_URL: "%%inputs.parameters.IMAGE_URL%%"
parameters.REG_USER: "%%inputs.parameters.REG_USER%%"
parameters.REG_PASSWORD: "%%inputs.parameters.REG_PASSWORD%%"
---
type: workflow
version: 1
name: Argo Lite CI
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- checkout:
template: git-checkout
- build-api:
template: lite-build-api
arguments:
artifacts.CODE: "%%steps.checkout.outputs.artifacts.CODE%%"
build-ui:
template: gui-build
arguments:
artifacts.CODE: "%%steps.checkout.outputs.artifacts.CODE%%"
outputs:
artifacts:
UI:
from: "%%steps.build-ui.outputs.artifacts.CODE%%"
API:
from: "%%steps.build-api.outputs.artifacts.CODE%%"
---
type: container
version: 1
name: lite-build-api
inputs:
artifacts:
CODE:
path: /src
outputs:
artifacts:
CODE:
path: /src
image: node:6.3.1
resources:
mem_mib: 1024
cpu_cores: 0.1
command: ["sh", "-c"]
args: [cd /src/lite && npm install -g yarn && yarn install && npm run build]
---
type: container
version: 1
name: lite-publish
inputs:
parameters:
IMAGE_URL:
REG_USER:
REG_PASSWORD:
artifacts:
UI:
path: /ui
API:
path: /api
image: docker
resources:
mem_mib: 512
cpu_cores: 0.1
command: ["sh", "-c"]
args: [
docker login -u %%inputs.parameters.REG_USER%% -p %%inputs.parameters.REG_PASSWORD%% $REGISTRY &&
cd /api/lite && cp -r /ui/saas/axops/src/ui/dist/ ./dist/ui && docker build -t %%inputs.parameters.IMAGE_URL%% . && docker push %%inputs.parameters.IMAGE_URL%%
]
annotations:
ax_ea_docker_enable: '{"graph-storage-size": "10Gi", "cpu_cores":0.1, "mem_mib":200}'

@@ -1,65 +0,0 @@
---
type: container
version: 1
name: switch-to-all-spot-base
description: This is the base template for switching-to-all-spot
inputs:
artifacts:
CHECKOUT_ARTIFACT:
path: "/src"
parameters:
BID:
ASG_NAME:
image: argobase/axplatbuilder:v15
command: ['sh', -c]
args: ['/src/platform/source/tools/update_lc.py --bid %%inputs.parameters.BID%% --asg %%inputs.parameters.ASG_NAME%%']
env:
- name: PYTHONPATH
value: /src/common/python
resources:
mem_mib: 128
cpu_cores: 0.1
---
type: workflow
version: 1
name: switch-to-all-spot-steps
description: This is the workflow step for updating the launch-config
inputs:
artifacts:
CHECKOUT_ARTIFACT:
parameters:
BID:
ASG_NAME:
steps:
- UPDATING-LC:
template: switch-to-all-spot-base
---
type: workflow
version: 1
name: Switch to all spot
description: This is the workflow for switching an ASG to all spot instances
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
BID:
default: '0.150'
ASG_NAME:
default: 'my-asg-name'
steps:
- CHECKOUT:
template: argo-checkout
- SWITCH-TO-ALL-SPOT:
template: switch-to-all-spot-steps
arguments:
artifacts.CHECKOUT_ARTIFACT: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"

@@ -1,294 +0,0 @@
---
type: "policy"
version: 1
name: "Argo Stress Every 20 minutes #1"
description: "Trigger build every 20 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/20 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 20 minutes #2"
description: "Trigger build every 20 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/20 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 20 minutes #3"
description: "Trigger build every 20 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/20 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 20 minutes #4"
description: "Trigger build every 20 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/20 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 20 minutes #5"
description: "Trigger build every 20 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/20 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 30 minutes"
description: "Trigger build every 30 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/30 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every Hour"
description: "Trigger build every hour"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "37 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 2 Hours"
description: "Trigger build every 2 hours"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "23 */2 * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 10 Minutes"
description: "Trigger build every 10 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/10 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 5 Minutes #1"
description: "Trigger build every 5 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/5 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 5 Minutes #2"
description: "Trigger build every 5 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/5 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 5 Minutes #3"
description: "Trigger build every 5 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/5 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 5 Minutes #4"
description: "Trigger build every 5 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/5 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 5 Minutes #5"
description: "Trigger build every 5 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/5 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 1 Minutes #1"
description: "Trigger build every 1 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/1 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 1 Minutes #2"
description: "Trigger build every 1 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/1 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 1 Minutes #3"
description: "Trigger build every 1 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/1 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 1 Minutes #4"
description: "Trigger build every 1 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/1 * * * *"
timezone: "US/Pacific"
---
type: "policy"
version: 1
name: "Argo Stress Every 1 Minutes #5"
description: "Trigger build every 1 minutes"
template: "Argo CI"
arguments:
parameters.NAMESPACE: "stresstest"
parameters.VERSION_OPTION: "-v test"
parameters.BUILD_OPTIONS: "--no-push"
when:
- event: "on_cron"
schedule: "*/1 * * * *"
timezone: "US/Pacific"

@@ -1,20 +0,0 @@
---
type: workflow
version: 1
name: argo-aws-s3-regional-test
description: Test the S3 wrapper against a real AWS server in a randomly chosen region
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- checkout:
template: argo-checkout
- aws-s3:
template: argo-platform-unit-test-base
arguments:
artifacts.CODE: "%%steps.checkout.outputs.artifacts.CODE%%"
parameters.COMMAND: "pytest -vv /src/platform/tests/aws_s3/"

@@ -1,71 +0,0 @@
---
type: container
version: 1
name: noop-container
description: Container which does nothing but sleep for 2 minutes
image: alpine:latest
command: ["sh", "-c"]
args: ["echo 'sleeping for 2 minutes' ; sleep 120; echo 'done'"]
---
type: container
version: 1
name: test-container-with-input-parameter
description: Container which has a required input parameter
image: alpine:latest
command: ["sh", "-c"]
args: ["echo 'sleeping for %%inputs.parameters.SLEEP%% seconds' ; sleep %%inputs.parameters.SLEEP%%; echo 'done'"]
inputs:
parameters:
SLEEP:
---
type: container
version: 1
name: test-container-with-default-input-parameter
description: Container which has an input parameter with a default value
image: alpine:latest
command: ["sh", "-c"]
args: ["echo 'sleeping for %%inputs.parameters.SLEEP%% seconds' ; sleep %%inputs.parameters.SLEEP%%; echo 'done'"]
inputs:
parameters:
SLEEP:
default: 60
---
type: container
version: 1
name: test-container-with-env-var
description: Container expects an environment variable to be set
image: alpine:latest
command: ["sh", "-c"]
args: ['env && [ "$FOO" == "bar" ]']
env:
- name: FOO
value: "%%inputs.parameters.FOO_VAL%%"
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
FOO_VAL:
default: bar
---
type: container
version: 1
name: test-container-resource-parameterization
description: Container which parameterizes resource requests
inputs:
parameters:
CPU_CORES:
default: "0.1"
MEM_MIB:
default: "32"
image: alpine:latest
command: ["sh", "-c"]
args: ["sleep 300"]
resources:
cpu_cores: "%%inputs.parameters.CPU_CORES%%"
mem_mib: "%%inputs.parameters.MEM_MIB%%"

@@ -1,10 +0,0 @@
---
type: container
version: 1
name: container-with-input-volume
description: Container which accepts an input volume
image: httpd:latest
inputs:
volumes:
DATA:
mount_path: /data

@@ -1,87 +0,0 @@
---
type: workflow
version: 1
name: test-deployment-with-artifacts-launcher
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- PRODUCE:
image: alpine:latest
command: [sh, -c]
args: ["echo 'sleeping for 20 seconds' && sleep 20 && echo 'done'"]
outputs:
artifacts:
DATA:
path: /bin
- DEP_WITH_ART:
template: test-deployment-with-input-artifact
arguments:
artifacts.DATA: "%%steps.PRODUCE.outputs.artifacts.DATA%%"
DEP_WITH_ART_INLINED:
template: test-deployment-with-input-artifact-inlined
arguments:
artifacts.DATA: "%%steps.PRODUCE.outputs.artifacts.DATA%%"
---
type: deployment
version: 1
name: test-deployment-with-input-artifact
inputs:
artifacts:
DATA:
application_name: test-app
deployment_name: dep-with-input-art
external_routes:
- target_port: 80
ip_white_list:
- 0.0.0.0/0
containers:
WEB:
template: test-nginx-with-artifact
---
type: container
version: 1
name: test-nginx-with-artifact
inputs:
artifacts:
DATA:
path: /data
image: nginx:latest
command: ["sh", "-c"]
args: ["find /data && nginx -g 'daemon off;'"]
resources:
cpu_cores: 0.05
mem_mib: 32
---
type: deployment
version: 1
name: test-deployment-with-input-artifact-inlined
inputs:
artifacts:
DATA:
application_name: test-app
deployment_name: dep-with-input-art-inlined
external_routes:
- target_port: 80
ip_white_list:
- 0.0.0.0/0
containers:
WEB:
inputs:
artifacts:
DATA:
from: "%%inputs.artifacts.DATA%%"
path: /data
image: nginx:latest
command: ["sh", "-c"]
args: ["find /data && nginx -g 'daemon off;'"]
resources:
cpu_cores: 0.05
mem_mib: 32

@@ -1,46 +0,0 @@
---
type: deployment
version: 1
name: test-deployment-basic
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
application_name: test-app
deployment_name: dep-basic
external_routes:
- target_port: 80
ip_white_list:
- 0.0.0.0/0
containers:
WEB:
template: nginx-server
---
type: container
version: 1
name: nginx-server
image: nginx:latest
---
type: deployment
version: 1
name: test-deployment-basic-inlined
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
application_name: test-app
deployment_name: dep-basic-inlined
external_routes:
- target_port: 80
ip_white_list:
- 0.0.0.0/0
containers:
WEB:
image: nginx:latest

@@ -1,88 +0,0 @@
---
type: deployment
version: 1
name: test-deployment-with-fixtures
description: Deployment which requests a fixture
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
application_name: test-app
deployment_name: dep-with-fixtures
external_routes:
- target_port: 80
ip_white_list:
- 0.0.0.0/0
fixtures:
- fix1:
class: test-fixture
attributes:
instance_type: m3.large
fix2:
class: test-fixture
attributes:
group: prod
containers:
WEB:
template: test-container-with-fixture
arguments:
parameters.FIXTURE_NAME: "%%fixtures.fix1.name%%"
parameters.FIXTURE_MEM: "%%fixtures.fix2.memory_gib%%"
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
---
type: container
version: 1
name: test-container-with-fixture
inputs:
parameters:
FIXTURE_NAME:
FIXTURE_MEM:
image: nginx:latest
command: ["sh", "-c"]
args: ["echo %%inputs.parameters.FIXTURE_NAME%% %%inputs.parameters.FIXTURE_MEM%% && nginx -g 'daemon off;'"]
resources:
cpu_cores: 0.05
mem_mib: 32
---
type: deployment
version: 1
name: test-deployment-with-fixtures-inlined
description: Deployment which requests a fixture (inlined)
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
application_name: test-app
deployment_name: dep-with-fixtures-inlined
external_routes:
- target_port: 80
ip_white_list:
- 0.0.0.0/0
fixtures:
- fix1:
class: test-fixture
attributes:
instance_type: m3.large
fix2:
class: test-fixture
attributes:
group: prod
containers:
WEB:
image: nginx:latest
command: ["sh", "-c"]
args: ["echo %%fixtures.fix1.name%% %%fixtures.fix2.memory_gib%% && nginx -g 'daemon off;'"]
resources:
cpu_cores: 0.05
mem_mib: 32

@@ -1,102 +0,0 @@
---
type: deployment
version: 1
name: test-deployment-with-named-volume
description: Deployment which requests a named volume
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
VOLUME_NAME:
default: my-test-vol
application_name: test-app
deployment_name: dep-with-named-vol
external_routes:
- target_port: 80
ip_white_list:
- 0.0.0.0/0
volumes:
data:
name: "%%inputs.parameters.VOLUME_NAME%%"
containers:
WEB:
template: test-container-with-volume
arguments:
volumes.DATA: "%%volumes.data%%"
---
type: container
version: 1
name: test-container-with-volume
image: nginx:latest
resources:
cpu_cores: 0.05
mem_mib: 32
inputs:
volumes:
DATA:
mount_path: /data
---
type: deployment
version: 1
name: test-deployment-with-named-volume-inlined
description: Deployment which requests a named volume (inlined)
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
VOLUME_NAME:
default: my-test-vol
application_name: test-app
deployment_name: dep-with-named-vol-inlined
external_routes:
- target_port: 80
ip_white_list:
- 0.0.0.0/0
volumes:
data:
name: "%%inputs.parameters.VOLUME_NAME%%"
containers:
WEB:
image: nginx:latest
inputs:
volumes:
DATA:
from: "%%volumes.data%%"
mount_path: /data
resources:
cpu_cores: 0.05
mem_mib: 32
---
type: deployment
version: 1
name: test-deployment-with-anon-volume
description: Deployment which requests an anonymous volume
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
application_name: test-app
deployment_name: dep-with-anon-vol
external_routes:
- target_port: 80
ip_white_list:
- 0.0.0.0/0
volumes:
data:
storage_class: ssd
size_gb: 1
containers:
WEB:
template: test-container-with-volume
arguments:
volumes.DATA: "%%volumes.data%%"

@@ -1,26 +0,0 @@
---
type: workflow
version: 1
name: dind-test-wf
description: This is the workflow for testing dind
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- dind-test-step:
template: dind-test-container
---
type: container
version: 1
name: dind-test-container
description: dind testing
image: docker:latest
command: ["sh", "-c", "sleep 600"]
resources:
mem_mib: 300
cpu_cores: 0.1
annotations:
ax_ea_docker_enable: '{"graph-storage-size": "10Gi", "cpu_cores":0.1, "mem_mib":200}'

@@ -1,148 +0,0 @@
---
type: workflow
version: 1
name: test-fixtures-dynamic
description: Workflow which utilizes dynamic fixtures
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
fixtures:
- NGINX:
template: test-nginx
steps:
- WEB-CLIENT:
template: test-web-client
arguments:
parameters.URL: "%%fixtures.NGINX.ip%%"
WEB-CLIENT-INLINED:
image: alpine:latest
command: ["sh", "-c"]
args: ["
apk update &&
apk add curl &&
for i in $(seq 1 100); do
curl http://%%fixtures.NGINX.ip%% && echo success! && exit 0;
sleep 3;
done;
echo failed!;
exit 1
"]
resources:
cpu_cores: 0.05
mem_mib: 64
---
type: container
version: 1
name: test-nginx
image: nginx:latest
resources:
cpu_cores: 0.05
mem_mib: 64
---
type: container
version: 1
name: test-web-client
image: alpine:latest
inputs:
parameters:
URL:
command: ["sh", "-c"]
args: ["
apk update &&
apk add curl &&
for i in $(seq 1 100); do
curl http://%%inputs.parameters.URL%% && echo success! && exit 0;
sleep 3;
done;
echo failed!;
exit 1
"]
resources:
cpu_cores: 0.05
mem_mib: 64
---
type: workflow
version: 1
name: test-fixtures-dynamic-outputs
description: Workflow which exports artifacts produced by dynamic fixtures
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
fixtures:
- DYN_FIX_WITH_OUTPUTS:
template: test-dynamic-fixture-container-with-outputs
steps:
- SLEEPER:
image: alpine:latest
command: [sh, -c]
args: ["sleep 60"]
resources:
cpu_cores: 0.05
mem_mib: 64
outputs:
artifacts:
WF_OUTPUTS:
from: "%%fixtures.DYN_FIX_WITH_OUTPUTS.outputs.artifacts.BIN-DIR%%"
---
type: container
version: 1
name: test-dynamic-fixture-container-with-outputs
image: alpine:latest
command: [sh, -c]
args: ["sleep 999999"]
resources:
cpu_cores: 0.05
mem_mib: 64
outputs:
artifacts:
BIN-DIR:
path: /bin
---
type: workflow
version: 1
name: test-fixtures-dynamic-inputs
description: Workflow which supplies input parameters to dynamic fixtures
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
ECHO:
default: hello world
fixtures:
- DYN_FIX_WITH_INPUTS:
template: test-dynamic-fixture-container-with-inputs
steps:
- SLEEPER:
image: alpine:latest
command: [sh, -c]
args: ["sleep 60"]
resources:
cpu_cores: 0.05
mem_mib: 64
---
type: container
version: 1
name: test-dynamic-fixture-container-with-inputs
image: alpine:latest
command: [sh, -c]
args: ["echo %%inputs.parameters.ECHO%%; sleep 999999"]
resources:
cpu_cores: 0.05
mem_mib: 64
inputs:
parameters:
ECHO:

@@ -1,146 +0,0 @@
---
type: fixture
version: 1
name: test-fixture
description: fixture for test purposes
attributes:
instance_type:
type: string
flags: required
options:
- m3.medium
- m3.large
- m3.xlarge
- m3.2xlarge
default: m3.large
memory_gib:
type: int
default: 4
cpu_cores:
type: int
default: 1
options: [1, 2, 4, 8]
ip_address:
type: string
group:
type: string
flags: required
options: [dev, qa, prod]
tags:
type: string
flags: array
disable_nightly:
type: bool
actions:
create:
template: test-fixture-action
arguments:
parameters.INSTANCE_TYPE: "%%attributes.instance_type%%"
parameters.ACTION: create
delete:
template: test-fixture-action
arguments:
parameters.ACTION: delete
suspend:
template: test-fixture-action
arguments:
parameters.ACTION: suspend
on_success: disable
resume:
template: test-fixture-action
arguments:
parameters.ACTION: resume
on_success: enable
upgrade:
template: test-fixture-action
arguments:
parameters.ACTION: upgrade
parameters.UPGRADE_VERSION:
health_check_fail:
template: test-fixture-action
arguments:
parameters.ACTION: fail
on_failure: disable
bad_attributes:
template: test-fixture-action
arguments:
parameters.ACTION: success
parameters.ATTRIBUTES: "{\"memory_gib\": \"foo\"}"
snapshot:
template: test-fixture-action
arguments:
parameters.ACTION: snapshot
---
type: container
version: 1
name: test-fixture-action
resources:
mem_mib: 64
cpu_cores: 0.02
image: debian:8.5
command: ["sh", "-c"]
args: ["echo 'performing action %%inputs.parameters.ACTION%% instance_type: %%inputs.parameters.INSTANCE_TYPE%%'; sleep 30; echo '%%inputs.parameters.ATTRIBUTES%%' > /tmp/fix_attrs.json; if [ %%inputs.parameters.ACTION%% = fail ] ; then exit 1; fi"]
inputs:
parameters:
ACTION:
INSTANCE_TYPE:
default: ""
ATTRIBUTES:
default: "{\"ip_address\": \"1.2.3.4\"}"
UPGRADE_VERSION:
default: ""
outputs:
artifacts:
attributes:
path: /tmp/fix_attrs.json
---
type: workflow
version: 1
name: test-workflow-fixture-request
description: Workflow which will utilize a fixture
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
fixtures:
- fix1:
class: test-fixture
attributes:
instance_type: m3.large
fix2:
class: test-fixture
attributes:
group: qa
steps:
- SLEEP:
template: echo-and-sleep
arguments:
parameters.MESSAGE: "%%fixtures.fix1.name%% %%fixtures.fix1.instance_type%% %%fixtures.fix2.name%% %%fixtures.fix2.group%%"
SLEEP-INLINE:
image: alpine:latest
command: [sh, -c]
args: ['echo %%fixtures.fix1.name%% %%fixtures.fix1.instance_type%% %%fixtures.fix2.name%% %%fixtures.fix2.group%% && sleep 60']
resources:
mem_mib: 64
cpu_cores: 0.02
---
type: container
version: 1
name: echo-and-sleep
description: Does nothing but echo a supplied string and sleep
image: alpine:latest
command: [sh, -c]
args: ['echo %%inputs.parameters.MESSAGE%% && sleep %%inputs.parameters.SLEEP_SEC%%']
inputs:
parameters:
MESSAGE:
SLEEP_SEC:
default: 60
resources:
mem_mib: 64
cpu_cores: 0.02

@@ -1,31 +0,0 @@
---
type: fixture
version: 1
name: test-fixture-no-actions
description: fixture with no actions
attributes:
instance_type:
type: string
flags: required
options:
- m3.medium
- m3.large
- m3.xlarge
- m3.2xlarge
default: m3.large
memory_gib:
type: int
default: 4
cpu_cores:
type: int
default: 1
options: [1, 2, 4, 8]
ip_address:
type: string
group:
type: string
flags: required
options: [dev, qa, prod]
tags:
type: string
flags: array

@@ -1,9 +0,0 @@
---
type: container
version: 1
name: hello-world-commands
description: hello world by commands
image: ubuntu
command: ["/bin/echo", "hello"]
args: ["world", "!"]

@@ -1,223 +0,0 @@
type: container
version: 1
name: quick-exit
description: does nothing and exits quickly
resources:
mem_mib: 32
cpu_cores: 0.01
image: alpine:latest
command: ["sh", "-c", "exit 0"]
---
type: workflow
version: 1
name: pod-throughput-test
description: Test for measuring pod creation and deletion overhead, which can be used to gauge pod throughput
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- step0:
template: quick-exit
step1:
template: quick-exit
step2:
template: quick-exit
step3:
template: quick-exit
step4:
template: quick-exit
step5:
template: quick-exit
step6:
template: quick-exit
step7:
template: quick-exit
step8:
template: quick-exit
step9:
template: quick-exit
step10:
template: quick-exit
step11:
template: quick-exit
step12:
template: quick-exit
step13:
template: quick-exit
step14:
template: quick-exit
step15:
template: quick-exit
step16:
template: quick-exit
step17:
template: quick-exit
step18:
template: quick-exit
step19:
template: quick-exit
step20:
template: quick-exit
step21:
template: quick-exit
step22:
template: quick-exit
step23:
template: quick-exit
step24:
template: quick-exit
step25:
template: quick-exit
step26:
template: quick-exit
step27:
template: quick-exit
step28:
template: quick-exit
step29:
template: quick-exit
step30:
template: quick-exit
step31:
template: quick-exit
step32:
template: quick-exit
step33:
template: quick-exit
step34:
template: quick-exit
step35:
template: quick-exit
step36:
template: quick-exit
step37:
template: quick-exit
step38:
template: quick-exit
step39:
template: quick-exit
step40:
template: quick-exit
step41:
template: quick-exit
step42:
template: quick-exit
step43:
template: quick-exit
step44:
template: quick-exit
step45:
template: quick-exit
step46:
template: quick-exit
step47:
template: quick-exit
step48:
template: quick-exit
step49:
template: quick-exit
step50:
template: quick-exit
step51:
template: quick-exit
step52:
template: quick-exit
step53:
template: quick-exit
step54:
template: quick-exit
step55:
template: quick-exit
step56:
template: quick-exit
step57:
template: quick-exit
step58:
template: quick-exit
step59:
template: quick-exit
step60:
template: quick-exit
step61:
template: quick-exit
step62:
template: quick-exit
step63:
template: quick-exit
step64:
template: quick-exit
step65:
template: quick-exit
step66:
template: quick-exit
step67:
template: quick-exit
step68:
template: quick-exit
step69:
template: quick-exit
step70:
template: quick-exit
step71:
template: quick-exit
step72:
template: quick-exit
step73:
template: quick-exit
step74:
template: quick-exit
step75:
template: quick-exit
step76:
template: quick-exit
step77:
template: quick-exit
step78:
template: quick-exit
step79:
template: quick-exit
step80:
template: quick-exit
step81:
template: quick-exit
step82:
template: quick-exit
step83:
template: quick-exit
step84:
template: quick-exit
step85:
template: quick-exit
step86:
template: quick-exit
step87:
template: quick-exit
step88:
template: quick-exit
step89:
template: quick-exit
step90:
template: quick-exit
step91:
template: quick-exit
step92:
template: quick-exit
step93:
template: quick-exit
step94:
template: quick-exit
step95:
template: quick-exit
step96:
template: quick-exit
step97:
template: quick-exit
step98:
template: quick-exit
step99:
template: quick-exit

@@ -1,39 +0,0 @@
---
type: policy
version: 1
name: test-policy
description: Policy for test purposes (performs checkout)
template: test-policy-checkout
notifications:
- when:
- on_failure
whom:
- committer
- author
- prod@slack
when:
- event: on_push
- event: on_pull_request
- event: on_pull_request_merge
---
type: container
version: 1
name: test-policy-checkout
description: Checks out a source repository to /src
resources:
mem_mib: 500
cpu_cores: 0.1
image: argoproj/argoscm:v2.0
command: ["axscm"]
args: ["clone", "%%inputs.parameters.REPO%%", "/src", "--commit", "%%inputs.parameters.COMMIT%%"]
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
outputs:
artifacts:
CODE:
path: /src

@@ -1,107 +0,0 @@
---
type: "workflow"
version: 1
name: "AX Workflow Stress Test"
description: "AX Workflow Stress Test"
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- STRESS_TEST_1:
template: "ax-workflow-stress-test-step-1"
- STRESS_TEST_2:
template: "ax-workflow-stress-test-step-2"
- STRESS_TEST_3:
template: "ax-workflow-stress-test-step-3"
- STRESS_TEST_4:
template: "ax-workflow-stress-test-step-4"
---
type: "container"
version: 1
name: "ax-workflow-stress-test-step-1"
description: "ax workflow stess test step"
resources:
mem_mib: 400
cpu_cores: 0.4
image: "ubuntu:latest"
command: ["sh"]
args: ["-c", "sleep 300"]
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
---
type: "container"
version: 1
name: "ax-workflow-stress-test-step-2"
description: "ax workflow stess test step"
resources:
mem_mib: 300
cpu_cores: 0.3
image: "ubuntu:latest"
command: ["sh"]
args: ["-c", "sleep 300"]
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
---
type: "container"
version: 1
name: "ax-workflow-stress-test-step-3"
description: "ax workflow stess test step"
resources:
mem_mib: 200
cpu_cores: 0.2
image: "ubuntu:latest"
command: ["sh"]
args: ["-c", "sleep 300"]
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
---
type: "container"
version: 1
name: "ax-workflow-stress-test-step-4"
description: "ax workflow stess test step"
resources:
mem_mib: 100
cpu_cores: 0.1
image: "ubuntu:latest"
command: ["sh"]
args: ["-c", "sleep 300"]
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
---
type: policy
version: 1
name: AX Workflow Stress Test Policy
description: Policy to trigger stress workflows that overload Kubernetes
template: AX Workflow Stress Test
when:
  - event: on_cron
    schedule: "* * * * *"
    timezone: "US/Pacific"

@@ -1,32 +0,0 @@
---
type: workflow
version: 1
name: Argo Approval
description: This is a test workflow for Argo Approval
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
NAMESPACE:
default: axsys
VERSION:
default: staging
REQUIRED_APPROVALS:
default: ""
OPTIONAL_APPROVALS:
default: " "
NUMBER_OF_OPTIONAL_APPROVALS_NEEDED:
default: "0"
TIMEOUT_IN_MINUTES:
default: "0"
steps:
- CHECKOUT:
template: argo-checkout
- ARGO-APPROVAL:
template: argo-approval
- ARGO-TEST:
template: argo-test
arguments:
artifacts.CODE: "%%steps.CHECKOUT.outputs.artifacts.CODE%%"

@@ -1,68 +0,0 @@
---
type: workflow
version: 1
name: test-secret-management-workflow
description: This is the workflow to test whether secret encryption works
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
MYPASSWORD:
default: "%%config.admin@internal.testprivate.password%%"
MYTOKEN:
default: "%%config.admin@internal.testprivate.token%%"
MYUSERNAME:
default: "%%config.admin@internal.testpublic.username%%"
steps:
- SECRET-MANAGEMENT:
template: test-secret-management
---
type: container
version: 1
name: test-secret-management
description: Test whether secret substitution works properly in various places
inputs:
parameters:
MYPASSWORD:
MYUSERNAME:
MYTOKEN:
image: alpine:latest
env:
- name: MYSECRETPASSWORD1
value: "%%inputs.parameters.MYPASSWORD%%"
- name: MYSECRETPASSWORD2
value: "%%config.admin@internal.testprivate.password%%"
- name: MYSECRETTOKEN1
value: "%%inputs.parameters.MYTOKEN%%"
- name: MYSECRETTOKEN2
value: "%%config.admin@internal.testprivate.token%%"
- name: MYPUBLICUSERNAME1
value: "%%inputs.parameters.MYUSERNAME%%"
- name: MYPUBLICUSERNAME2
value: "%%config.admin@internal.testpublic.username%%"
command: [sh, -c]
args: ["
env &&
echo %%config.admin@internal.testprivate.password%% &&
echo %%config.admin@internal.testprivate.token%% &&
echo %%config.admin@internal.testprivate.password%%%%config.admin@internal.testprivate.token%% &&
test $MYSECRETPASSWORD1 = abc123 &&
test $MYSECRETPASSWORD2 = abc123 &&
test %%inputs.parameters.MYPASSWORD%% = abc123 &&
test %%config.admin@internal.testprivate.password%% = abc123 &&
test $MYSECRETTOKEN1 = xyz456 &&
    test $MYSECRETTOKEN2 = xyz456 &&
test %%inputs.parameters.MYTOKEN%% = xyz456 &&
test %%config.admin@internal.testprivate.token%% = xyz456 &&
test %%config.admin@internal.testprivate.password%%%%config.admin@internal.testprivate.token%% = abc123xyz456 &&
test $MYPUBLICUSERNAME1 = user &&
test $MYPUBLICUSERNAME2 = user &&
test %%inputs.parameters.MYUSERNAME%% = user &&
test %%config.admin@internal.testpublic.username%% = user
"]
resources:
mem_mib: 32
cpu_cores: 0.05

@@ -1,67 +0,0 @@
---
type: container
version: 1
name: test-container-with-output-artifact-a
description: Container which produces an output artifact
image: alpine:latest
command: ["sh", "-c"]
args: ["echo 'sleeping for 20 seconds' ; sleep 20; echo 'done'"]
outputs:
artifacts:
BIN-OUTPUT:
path: /bin
---
type: container
version: 1
name: test-container-with-input-artifact-a
description: Container which accepts an input artifact
image: alpine:latest
command: ["sh", "-c"]
args: ["find /root/bin; echo 'sleeping for 20 seconds'; sleep 20; echo 'done'"]
inputs:
artifacts:
BIN-INPUT:
path: /root/bin
---
type: workflow
version: 1
name: test-workflow-passing-artifacts-a
description: Basic workflow with artifacts between steps
steps:
- STEP1:
template: test-container-with-output-artifact-a
- STEP2:
template: test-container-with-input-artifact-a
arguments:
artifacts.BIN-INPUT: "%%steps.STEP1.outputs.artifacts.BIN-OUTPUT%%"
outputs:
artifacts:
BIN-OUTPUT:
from: "%%steps.STEP1.outputs.artifacts.BIN-OUTPUT%%"
---
type: workflow
version: 1
name: test-workflow-passing-artifacts-outer-a
description: Basic workflow using artifact from a workflow
steps:
- STEP1:
template: test-container-with-output-artifact-a
- STEP2:
template: test-container-with-input-artifact-a
arguments:
artifacts.BIN-INPUT: "%%steps.STEP1.outputs.artifacts.BIN-OUTPUT%%"
- STEP3:
template: test-workflow-passing-artifacts-a
- STEP4:
template: test-container-with-input-artifact
arguments:
artifacts.BIN-INPUT: "%%steps.STEP1.outputs.artifacts.BIN-OUTPUT%%"
outputs:
artifacts:
BIN-OUTPUT1:
from: "%%steps.STEP1.outputs.artifacts.BIN-OUTPUT%%"
BIN-OUTPUT2:
from: "%%steps.STEP3.outputs.artifacts.BIN-OUTPUT%%"

@@ -1,148 +0,0 @@
---
type: container
version: 1
name: test-container-with-output-artifact
description: Container which produces an output artifact
image: alpine:latest
command: ["sh", "-c"]
args: ["echo 'sleeping for 20 seconds' && sleep 20 && echo 'done'"]
outputs:
artifacts:
BIN-OUTPUT:
path: /bin
---
type: container
version: 1
name: test-container-with-input-artifact
description: Container which accepts an input artifact
image: alpine:latest
command: ["sh", "-c"]
args: ["find /root/bin && echo 'sleeping for 20 seconds' && sleep 20 && echo 'done'"]
inputs:
artifacts:
BIN-INPUT:
path: /root/bin
---
type: workflow
version: 1
name: test-workflow-passing-artifacts
description: Basic workflow with artifacts between steps
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- STEP1:
template: test-container-with-output-artifact
- STEP2:
template: test-container-with-input-artifact
arguments:
artifacts.BIN-INPUT: "%%steps.STEP1.outputs.artifacts.BIN-OUTPUT%%"
---
type: workflow
version: 1
name: test-workflow-passing-artifacts-inlined
description: Basic workflow with artifacts between inlined steps
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- STEP1:
image: alpine:latest
command: ["sh", "-c"]
args: ["echo 'sleeping for 20 seconds' && sleep 20 && echo 'done'"]
outputs:
artifacts:
BIN-OUTPUT:
path: /bin
- STEP2:
image: alpine:latest
command: ["sh", "-c"]
args: ["find /root/bin && echo 'sleeping for 20 seconds' && sleep 20 && echo 'done'"]
inputs:
artifacts:
BIN-INPUT:
from: "%%steps.STEP1.outputs.artifacts.BIN-OUTPUT%%"
path: /root/bin
---
type: workflow
version: 1
name: test-workflow-artifact-tag-export
description: Export an artifact and add tag
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- GENERATE:
template: argo-checkout
outputs:
artifacts:
CODE:
from: "%%steps.GENERATE.outputs.artifacts.CODE%%"
artifact_tags:
- my_export_artifact
---
type: workflow
version: 1
name: test-workflow-artifact-tag-consume
description: Use an artifact exported by a top-level workflow
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- CONSUME:
template: test-container-consume-artifact
arguments:
artifacts.ARTIFACT_TO_CONSUME: "%%artifacts.tag.my_export_artifact.CODE%%"
CONSUME_INLINE:
image: alpine:latest
command: ["sh", "-c"]
args: ["find /src && sleep 5"]
inputs:
artifacts:
ARTIFACT_TO_CONSUME:
from: "%%artifacts.tag.my_export_artifact.CODE%%"
path: /src
---
type: container
version: 1
name: test-container-consume-artifact
description: Container which consumes an artifact
image: alpine:latest
command: ["sh", "-c"]
args: ["find /src && sleep 5"]
inputs:
artifacts:
ARTIFACT_TO_CONSUME:
path: /src
---
type: container
version: 1
name: test-container-consume-artifact-tag
description: Container which consumes an artifact by its tag
image: alpine:latest
command: ["sh", "-c"]
args: ["find /src && sleep 5"]
inputs:
artifacts:
ARTIFACT_TO_CONSUME:
from: "%%artifacts.tag.my_export_artifact.CODE%%"
path: /src


@ -1,33 +0,0 @@
---
type: workflow
version: 1
name: test-basic-sequential-workflow
description: Basic workflow with two sequential steps
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- STEP1:
template: noop-container
- STEP2:
template: noop-container
---
type: workflow
version: 1
name: test-basic-parallel-workflow
description: Basic workflow with two parallel steps
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- STEP1A:
template: noop-container
STEP1B:
template: noop-container


@ -1,44 +0,0 @@
---
type: workflow
version: 1
name: test-basic-workflow-inline
description: Basic workflow with inlined containers
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- STEP1:
image: alpine:latest
command: ["sh", "-c"]
args: ["echo 'sleeping for 2 minutes' ; sleep 120; echo 'done'"]
- STEP2:
image: alpine:latest
command: ["sh", "-c"]
args: ["echo 'sleeping for 2 minutes' ; sleep 120; echo 'done'"]
---
type: workflow
version: 1
name: test-basic-workflow-inline-strange-names
description: Basic workflow with inlined containers and strange step names
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- with-CAPITAL-LETTERS:
image: alpine:latest
command: ["sh", "-c"]
args: ["echo 'sleeping for 2 minutes' ; sleep 120; echo 'done'"]
12345:
image: alpine:latest
command: ["sh", "-c"]
args: ["echo 'sleeping for 2 minutes' ; sleep 120; echo 'done'"]
letters-endingwith-:
image: alpine:latest
command: ["sh", "-c"]
args: ["echo 'sleeping for 2 minutes' ; sleep 120; echo 'done'"]


@ -1,10 +0,0 @@
---
type: workflow
version: 1
name: test-workflow-with-list-expansion
description: Workflow with list expansion
steps:
- STEP1:
template: echo-container
arguments:
parameters.MESSAGE: "$${a,b,c,d}$$"


@ -1,43 +0,0 @@
---
type: workflow
version: 1
name: test-workflow-with-output-artifact
description: Workflow with an output artifact
steps:
- STEP1:
template: test-container-with-output-artifact
outputs:
artifacts:
BIN-OUTPUT:
from: "%%steps.STEP1.outputs.artifacts.BIN-OUTPUT%%"
---
type: workflow
version: 1
name: test-workflow-with-input-artifact
description: Workflow which accepts an input artifact
steps:
- STEP1:
template: test-container-with-input-artifact
inputs:
artifacts:
BIN-INPUT:
---
type: workflow
version: 1
name: test-workflow-nested-artifacts
description: Workflow which passes output from a workflow as an input to another workflow
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- STEP1:
template: test-workflow-with-output-artifact
- STEP2:
template: test-workflow-with-input-artifact
arguments:
artifacts.BIN-INPUT: "%%steps.STEP1.outputs.artifacts.BIN-OUTPUT%%"


@ -1,16 +0,0 @@
---
type: workflow
version: 1
name: test-nested-workflow-basic
description: Workflow which nests two other workflows
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
steps:
- STEP1:
template: test-basic-sequential-workflow
- STEP2:
template: test-basic-parallel-workflow


@ -1,38 +0,0 @@
---
type: container
version: 1
name: echo-container
description: Container which echoes the supplied message and sleeps for a default time
image: alpine:latest
command: ["sh", "-c"]
args: ["echo '%%inputs.parameters.MESSAGE%%'; sleep %%inputs.parameters.SLEEP%%"]
inputs:
parameters:
MESSAGE:
SLEEP:
default: 60
---
type: workflow
version: 1
name: test-workflow-with-child-argument
description: Basic workflow passing an argument to a child step
steps:
- STEP1:
template: echo-container
arguments:
parameters.MESSAGE: "hello world"
---
type: workflow
version: 1
name: test-workflow-with-input-parameter
description: Basic workflow passing an input parameter to a child step
inputs:
parameters:
ECHO:
steps:
- STEP1:
template: echo-container
arguments:
parameters.MESSAGE: "%%inputs.parameters.ECHO%%"


@ -1,77 +0,0 @@
---
type: container
version: 1
name: volume-no-op
description: Container which mounts two volumes
resources:
mem_mib: 64
cpu_cores: 0.1
image: alpine:latest
command: ["sh", "-c"]
args: ["ls /ax/data && ls /ax/log && echo 'sleeping' && sleep 600"]
inputs:
volumes:
DATA:
mount_path: /ax/data
LOG:
mount_path: /ax/log
---
type: workflow
version: 1
name: test-workflow-with-volume
description: Test workflow which requests a named and anonymous volume
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
VOLUME_NAME:
volumes:
named-data-vol:
name: "%%inputs.parameters.VOLUME_NAME%%"
anon-log-vol: # anonymous
storage_class: ssd
size_gb: 10
steps:
- test:
template: volume-no-op
arguments:
volumes.DATA: "%%volumes.named-data-vol%%"
volumes.LOG: "%%volumes.anon-log-vol%%"
---
type: workflow
version: 1
name: test-workflow-with-volume-inlined
description: Test workflow which requests a named and anonymous volume (inlined)
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
VOLUME_NAME:
volumes:
named-data-vol:
name: "%%inputs.parameters.VOLUME_NAME%%"
anon-log-vol: # anonymous
storage_class: ssd
size_gb: 10
steps:
- test:
resources:
mem_mib: 64
cpu_cores: 0.1
image: alpine:latest
command: ["sh", "-c"]
args: ["ls /ax/data && ls /ax/log && echo 'sleeping' && sleep 600"]
inputs:
volumes:
DATA:
from: "%%volumes.named-data-vol%%"
mount_path: /ax/data
LOG:
from: "%%volumes.anon-log-vol%%"
mount_path: /ax/log


@ -1,101 +0,0 @@
---
type: workflow
version: 1
name: gui-proxy
description: "Workflow which builds and starts UI on existing cluster. Useful for testing UI changes without redeploying whole cluster."
inputs:
parameters:
COMMIT:
default: "%%session.commit%%"
REPO:
default: "%%session.repo%%"
APPNAME:
default: "%%session.branch%%"
DOMAIN:
default: " "
steps:
- checkout:
template: argo-checkout
- build:
template: gui-build
arguments:
artifacts.CODE: "%%steps.checkout.outputs.artifacts.CODE%%"
- deploy:
template: gui-proxy-server-deployment
arguments:
artifacts.CODE: "%%steps.build.outputs.artifacts.CODE%%"
---
type: container
version: 1
name: gui-build
inputs:
artifacts:
CODE:
path: /src
outputs:
artifacts:
CODE:
path: /src
image: node:6.3.1
command: ["bash", "-c"]
args: ["cd /src/saas/axops/src/ui && npm set progress=false && npm install --unsafe-perm && npm run build:prod"]
resources:
mem_mib: 1536
cpu_cores: 0.4
---
type: container
version: 1
name: gui-proxy-server
resources:
mem_mib: 2048
cpu_cores: 0.4
image: node:6.3.1
command: ["sh", "-c"]
args: ["cd /src/saas/axops/src/ui && AX_CLUSTER_HOST=axops.axsys npm run server:prod"]
inputs:
artifacts:
CODE:
path: "/src"
---
type: deployment
version: 1
name: gui-proxy-server-deployment
inputs:
parameters:
APPNAME:
DOMAIN:
artifacts:
CODE:
application_name: "%%inputs.parameters.APPNAME%%"
deployment_name: "argo-gui-proxy"
scale:
min: 1
external_routes:
- dns_prefix: "%%inputs.parameters.APPNAME%%"
dns_domain: "%%inputs.parameters.DOMAIN%%"
target_port: 3000
ip_white_list:
- 0.0.0.0/0
containers:
SERVER:
template: gui-proxy-server
---
type: policy
version: 1
name: gui-proxy-policy
template: gui-proxy
notifications:
-
when:
- on_change
whom:
- committer
- submitter
- author
when:
-
event: on_push

.clang-format Normal file

@ -0,0 +1,2 @@
# Allow unlimited column length, rather than 80. This prevents word-wrapping comments, which end up in Swagger.
ColumnLimit: 0

.codecov.yml Normal file

@ -0,0 +1,17 @@
ignore:
- "**/*.pb.go"
- "**/*.pb.gw.go"
- "**/*generated.go"
- "**/*generated.deepcopy.go"
- "**/*_test.go"
- "pkg/apis/client/.*"
- "pkg/client/.*"
- "vendor/.*"
coverage:
status:
# we've found this not to be useful
patch: off
project:
default:
# allow test coverage to drop by 2%, assume that it's typically due to CI problems
threshold: 2


@ -0,0 +1,24 @@
# TODO: Upgrade to Ubuntu 24.04 when https://bugs.launchpad.net/ubuntu/+source/curl/+bug/2073448 is addressed
FROM mcr.microsoft.com/vscode/devcontainers/base:ubuntu-22.04
# k3d version: https://github.com/k3d-io/k3d/releases
ARG K3D_VERSION=v5.8.3
# TARGETARCH is automatically set by BuildKit to the architecture (e.g. "amd64" or "arm64")
# Docs: https://docs.docker.com/reference/dockerfile/#automatic-platform-args-in-the-global-scope
ARG TARGETARCH
# Use bash to allow us to source hack/k8s-versions.sh
SHELL ["/bin/bash", "-c"]
# install protocol buffer tools
RUN apt-get update && apt-get install -y protobuf-compiler clang-format
# install k3d
RUN wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG=${K3D_VERSION} bash
# install kubectl
COPY hack/k8s-versions.sh /tmp/
RUN . /tmp/k8s-versions.sh && \
wget -O /usr/local/bin/kubectl "https://dl.k8s.io/release/${K8S_VERSIONS[min]}/bin/linux/${TARGETARCH}/kubectl" && \
chmod +x /usr/local/bin/kubectl


@ -0,0 +1,24 @@
{
"features": {
"ghcr.io/devcontainers/features/docker-in-docker:2": {
"version": "2.12.0",
"resolved": "ghcr.io/devcontainers/features/docker-in-docker@sha256:5f3e2005aad161ce3ff7700b2603f11935348c039f9166960efd050d69cd3014",
"integrity": "sha256:5f3e2005aad161ce3ff7700b2603f11935348c039f9166960efd050d69cd3014"
},
"ghcr.io/devcontainers/features/go:1": {
"version": "1.3.1",
"resolved": "ghcr.io/devcontainers/features/go@sha256:a485a757492868d4ee3b9dca0b9bb1cbeaef21763e7812a1a804f84720bc5ab5",
"integrity": "sha256:a485a757492868d4ee3b9dca0b9bb1cbeaef21763e7812a1a804f84720bc5ab5"
},
"ghcr.io/devcontainers/features/node:1": {
"version": "1.6.1",
"resolved": "ghcr.io/devcontainers/features/node@sha256:71590121aaf7b2040f3e1e2dfc4bb9a1389277fd5a88a7199094542b82ce5340",
"integrity": "sha256:71590121aaf7b2040f3e1e2dfc4bb9a1389277fd5a88a7199094542b82ce5340"
},
"ghcr.io/devcontainers/features/python:1": {
"version": "1.7.0",
"resolved": "ghcr.io/devcontainers/features/python@sha256:8452f39db0852420728c9f7503dd94b3fc71aa558b5e7c8f6f9ce6687e494ae3",
"integrity": "sha256:8452f39db0852420728c9f7503dd94b3fc71aa558b5e7c8f6f9ce6687e494ae3"
}
}
}


@ -0,0 +1,25 @@
{
// Dev container definition for building the image used by .devcontainer/devcontainer.json.
// Docs: https://containers.dev/guide/prebuild#how-to
"name": "Argo Workflows (builder)",
"build": {
"dockerfile": "Dockerfile",
"context": "../..",
// Override this hardcoded param from the devcontainer CLI because it bloats
// the image and we export the cache to a separate image:
// https://github.com/devcontainers/cli/blob/2fafdcc8a8dee5a922616325f3726043f1ea92c3/src/spec-node/singleContainer.ts#L208
"args": { "BUILDKIT_INLINE_CACHE": "0" }
},
"features": {
"ghcr.io/devcontainers/features/go:1": {
"version": "1.24"
},
"ghcr.io/devcontainers/features/node:1": {
"version": "20"
},
"ghcr.io/devcontainers/features/docker-in-docker:2": {},
"ghcr.io/devcontainers/features/python:1": {}
}
}


@ -0,0 +1,56 @@
{
// Dev container definition. Reference: https://containers.dev/implementors/json_reference/
"name": "Argo Workflows (pre-built)",
// This image is built and pushed by .github/workflows/devcontainer.yaml using .devcontainer/builder/devcontainer.json
"image": "quay.io/argoproj/argo-workflows-devcontainer",
"forwardPorts": [9000, 9001, 9090, 2746, 8080, 5556, 6060, 9091, 3306, 5432, 10000, 8000],
"hostRequirements": {
"cpus": 4
},
"runArgs": [
"--add-host=host.docker.internal:host-gateway",
"--add-host=dex:127.0.0.1",
"--add-host=minio:127.0.0.1",
"--add-host=postgres:127.0.0.1",
"--add-host=mysql:127.0.0.1",
"--add-host=azurite:127.0.0.1"
],
"postCreateCommand": ".devcontainer/pre-build.sh",
"workspaceMount": "source=${localWorkspaceFolder},target=/home/vscode/go/src/github.com/argoproj/argo-workflows,type=bind",
"workspaceFolder": "/home/vscode/go/src/github.com/argoproj/argo-workflows",
"remoteEnv": {
"PATH": "${containerEnv:PATH}:/home/vscode/go/bin",
"GOPATH": "/home/vscode/go"
},
"customizations": {
"codespaces": {
"openFiles": [
"docs/running-locally.md"
]
},
"vscode": {
"settings": {
"launch": {
"configurations": [
{
"name": "Attach to argo server",
"type": "go",
"request": "attach",
"mode": "local",
"processId": "argo"
},
{
"name": "Attach to workflow controller",
"type": "go",
"request": "attach",
"mode": "local",
"processId": "workflow-controller"
}
]
}
}
}
}
}

.devcontainer/pre-build.sh Executable file

@ -0,0 +1,16 @@
#!/usr/bin/env bash
set -eux
# create cluster using the minimum tested Kubernetes version
. hack/k8s-versions.sh
k3d cluster get k3s-default || k3d cluster create --image "rancher/k3s:${K8S_VERSIONS[min]}-k3s1" --wait
k3d kubeconfig merge --kubeconfig-merge-default
kubectl cluster-info
# Make sure go path is owned by vscode
sudo chown vscode:vscode /home/vscode/go || true
sudo chown vscode:vscode /home/vscode/go/src || true
sudo chown vscode:vscode /home/vscode/go/src/github.com || true
# Patch CoreDNS to have host.docker.internal inside the cluster available
kubectl get cm coredns -n kube-system -o yaml | sed "s/ NodeHosts: |/ NodeHosts: |\n `grep host.docker.internal /etc/hosts`/" | kubectl apply -f -

.dockerignore Normal file

@ -0,0 +1,28 @@
# Prevent vendor directory from being copied to ensure we are not pulling unexpected cruft from
# a user's workspace, and are only building off of what is locked by dep.
*.iml
*.md
*.yaml
.devcontainer
# The .git folder gets modified every time git runs preventing caching of builds. Once removed, we must pass GIT_COMMIT as Docker build-args.
.git
.github
.idea
.run
assets
community
coverage.out
dist
docs
examples
logs
manifests
plugins
sdks
site
tasks.yaml
test/e2e
ui/dist
ui/node_modules
v3
vendor

.features/TEMPLATE.md Normal file

@ -0,0 +1,16 @@
<!-- Required: All of these fields are required, including at least one issue -->
Description: <!-- A brief one line description of the feature -->
Author: <!-- Author name and GitHub link in markdown format e.g. [Alan Clucas](https://github.com/Joibel) -->
Component: <!-- component name here, see hack/featuregen/components.go for the list -->
Issues: <!-- Space separated list of issues 1234 5678 -->
<!--
Optional
Additional details about the feature written in markdown, aimed at users who want to learn about it
* Explain when you would want to use the feature
* Include code examples if applicable
* Provide working examples
* Format code using back-ticks
* Use Kubernetes style
* One sentence per line of markdown
-->


@ -0,0 +1,12 @@
Component: General
Issues: 14069
Description: Name filter parameter for prefix/contains/exact search in `/archived-workflows`
Author: [Armin Friedl](https://github.com/arminfriedl)
A new `nameFilter` parameter was added to the `GET /archived-workflows` endpoint.
The filter works analogously to the one in `GET /workflows`.
It specifies how a search for `?listOptions.fieldSelector=metadata.name=<search-string>` in these endpoints is interpreted.
Possible values are `Prefix`, `Contains` and `Exact`.
The `metadata.name` field is matched accordingly against the value of `<search-string>`.
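For illustration, a minimal sketch of calling the endpoint with the new parameter. The server address is a placeholder and the `/api/v1` prefix is an assumption; only the endpoint name and parameter names come from the description above.

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	// Search for archived workflows whose names start with "my-workflow".
	q.Set("listOptions.fieldSelector", "metadata.name=my-workflow")
	q.Set("nameFilter", "Prefix") // or "Contains", "Exact"

	// "argo.example.com" is a hypothetical server; auth is omitted.
	resp, err := http.Get("https://argo.example.com/api/v1/archived-workflows?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```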


@ -0,0 +1,9 @@
Component: General
Issues: 11120
Description: This migrates most of the logging off logrus and onto a custom logger.
Author: [Isitha Subasinghe](https://github.com/isubasinghe)
Currently it is quite hard to associate a log line with its corresponding workflow.
This change propagates a context object, carrying an annotated logging object, down the call hierarchy.
This allows context-aware logging from deep within the codebase.
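The general pattern, as a minimal sketch using Go's standard `log/slog` rather than the project's actual custom logger (whose API is not shown here); `WithLogger` and `LoggerFrom` are illustrative helper names, not the project's own.

```go
package main

import (
	"context"
	"log/slog"
	"os"
)

type loggerKey struct{}

// WithLogger stores an annotated logger in the context.
func WithLogger(ctx context.Context, l *slog.Logger) context.Context {
	return context.WithValue(ctx, loggerKey{}, l)
}

// LoggerFrom retrieves the logger from the context, falling back to the default.
func LoggerFrom(ctx context.Context) *slog.Logger {
	if l, ok := ctx.Value(loggerKey{}).(*slog.Logger); ok {
		return l
	}
	return slog.Default()
}

func reconcile(ctx context.Context) {
	// Deep in the call hierarchy, log lines still carry the workflow fields.
	LoggerFrom(ctx).Info("reconciling workflow")
}

func main() {
	base := slog.New(slog.NewTextHandler(os.Stderr, nil))
	// Annotate the logger once near the top of the call stack, then pass ctx down.
	ctx := WithLogger(context.Background(), base.With("workflow", "my-wf", "namespace", "argo"))
	reconcile(ctx)
}
```

Every log line emitted under `reconcile` then carries the workflow fields without threading them through each call explicitly.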


@ -0,0 +1,6 @@
Component: Build and Development
Issues: 14155
Description: Document features as they are created
Author: [Alan Clucas](https://github.com/Joibel)
To assist with creating release documentation and blog posts, all features now require a document in .features/pending explaining what they do for users.


@ -0,0 +1,14 @@
Component: UI
Issues: 13114
Description: Support opening custom links in a new tab automatically.
Author: [Shuangkun Tian](https://github.com/shuangkun)
Support configuring a custom link to open in a new tab by default.
If `target` is `_blank`, the link opens in a new tab; if `target` is null or `_self`, it opens in the current tab. For example:
```
- name: Pod Link
scope: pod
target: _blank
url: http://logging-facility?namespace=${metadata.namespace}&podName=${metadata.name}&startedAt=${status.startedAt}&finishedAt=${status.finishedAt}
```

.gitattributes vendored Normal file

@ -0,0 +1,7 @@
sdks/python/client/** linguist-generated
sdks/java/client/** linguist-generated
manifests/base/crds/*/argoproj.io*.yaml linguist-generated
manifests/quick-start-*.yaml linguist-generated
api/jsonschema/schema.json linguist-generated
api/openapi-spec/swagger.json linguist-generated
pkg/client/** linguist-generated

.github/ISSUE_TEMPLATE/bug_report.yaml vendored Normal file

@ -0,0 +1,61 @@
name: Reproducible bug report
description: Create a reproducible bug report. Not for support requests.
type: Bug
body:
- type: checkboxes
id: terms
attributes:
label: Pre-requisites
options:
- label: I have double-checked my configuration
required: true
- label: I have tested with the `:latest` image tag (i.e. `quay.io/argoproj/workflow-controller:latest`) and can confirm the issue still exists on `:latest`. If not, I have explained why, **in detail**, in my description below.
required: true
- label: I have searched existing issues and could not find a match for this bug
required: true
- label: I'd like to contribute the fix myself (see [contributing guide](https://github.com/argoproj/argo-workflows/blob/main/docs/CONTRIBUTING.md))
- type: textarea
id: description
attributes:
label: What happened? What did you expect to happen?
validations:
required: true
- type: input
id: version
attributes:
label: Version(s)
description: What versions did you experience this on? Please provide all versions. When testing `:latest`, please provide a SHA
placeholder: v3.5.7, v3.5.8, 3ece3b30f0c445204fec468fd437e77283cab913
validations:
required: true
- type: textarea
id: failing-workflow
attributes:
label: Paste a minimal workflow that reproduces the issue. We must be able to run the workflow; don't enter a workflow that uses private images.
description: A [minimal reproduction](https://stackoverflow.com/help/minimal-reproducible-example) is essential to debugging and prioritizing your issue
render: YAML
validations:
required: true
- type: textarea
id: controller-logs
attributes:
label: Logs from the workflow controller
render: text
value: kubectl logs -n argo deploy/workflow-controller | grep ${workflow}
validations:
required: true
- type: textarea
id: wait-logs
attributes:
label: Logs from your workflow's wait container
render: text
value: kubectl logs -n argo -c wait -l workflows.argoproj.io/workflow=${workflow},workflow.argoproj.io/phase!=Succeeded
validations:
required: true
- type: markdown
attributes:
value: |
Thanks for submitting this issue! Are you a contributor? If not, have you thought about it?
Argo Workflows is seeking more community involvement and ultimately more [Reviewers and Approvers](https://github.com/argoproj/argoproj/blob/main/community/membership.md) to help keep it viable.
See [Sustainability Effort](https://github.com/argoproj/argo-workflows/blob/main/community/sustainability_effort.md) for more information.

.github/ISSUE_TEMPLATE/config.yml vendored Normal file

@ -0,0 +1,12 @@
blank_issues_enabled: false
contact_links:
- name: Have you read the docs?
url: https://argo-workflows.readthedocs.io/en/latest/
about: Much help can be found in the docs
- name: Ask a question
url: https://github.com/argoproj/argo-workflows/discussions/new
about: Ask a question or start a discussion about workflows
- name: Chat on Slack
url: https://argoproj.github.io/community/join-slack
about: Maybe chatting with the community can help


@ -0,0 +1,27 @@
---
name: Feature
about: Propose a feature for this project
type: Feature
---
# Summary
What change needs making?
## Use Cases
When would you use this?
---
<!-- Issue Author: Don't delete this message to encourage other users to support your issue! -->
**Message from the maintainers**:
Love this feature request? Give it a 👍. We prioritise the proposals with the most 👍.
<!--
**Beyond this issue**:
Are you a contributor? If not, have you thought about it?
Argo Workflows is seeking more community involvement and ultimately more [Reviewers and Approvers](https://github.com/argoproj/argoproj/blob/main/community/membership.md) to help keep it viable.
See [Sustainability Effort](https://github.com/argoproj/argo-workflows/blob/main/community/sustainability_effort.md) for more information.
-->

.github/ISSUE_TEMPLATE/regression.yaml vendored Normal file

@ -0,0 +1,64 @@
name: Regression report
description: Create a regression report. Not for support requests.
type: Bug
labels: [ type/regression ]
body:
- type: checkboxes
id: terms
attributes:
label: Pre-requisites
options:
- label: I have double-checked my configuration
required: true
- label: I have tested with the `:latest` image tag (i.e. `quay.io/argoproj/workflow-controller:latest`) and can confirm the issue still exists on `:latest`. If not, I have explained why, **in detail**, in my description below.
required: true
- label: I have searched existing issues and could not find a match for this bug
required: true
- label: I'd like to contribute the fix myself (see [contributing guide](https://github.com/argoproj/argo-workflows/blob/main/docs/CONTRIBUTING.md))
- type: textarea
id: description
attributes:
label: What happened? What did you expect to happen?
validations:
required: true
- type: input
id: version
attributes:
label: Version(s)
description: What versions did you experience this on? Please provide all versions. When testing `:latest`, please provide a SHA
placeholder: v3.5.7, v3.5.8, 3ece3b30f0c445204fec468fd437e77283cab913
validations:
required: true
- type: textarea
id: failing-workflow
attributes:
label: Paste a minimal workflow that reproduces the issue. We must be able to run the workflow; don't enter a workflow that uses private images.
description: A [minimal reproduction](https://stackoverflow.com/help/minimal-reproducible-example) is essential to debugging and prioritizing your issue
render: YAML
validations:
required: true
- type: textarea
id: controller-logs
attributes:
label: Logs from the workflow controller
render: text
value: kubectl logs -n argo deploy/workflow-controller | grep ${workflow}
validations:
required: true
- type: textarea
id: wait-logs
attributes:
label: Logs from your workflow's wait container
render: text
value: kubectl logs -n argo -c wait -l workflows.argoproj.io/workflow=${workflow},workflow.argoproj.io/phase!=Succeeded
validations:
required: true
- type: markdown
attributes:
value: |
Thanks for submitting this issue! Are you a contributor? If not, have you thought about it?
Argo Workflows is seeking more community involvement and ultimately more [Reviewers and Approvers](https://github.com/argoproj/argoproj/blob/main/community/membership.md) to help keep it viable.
See [Sustainability Effort](https://github.com/argoproj/argo-workflows/blob/main/community/sustainability_effort.md) for more information.

.github/cherry-pick-bot.yml vendored Normal file

@ -0,0 +1,2 @@
enabled: true
preservePullRequestTitle: true

.github/dependabot.yml vendored Normal file

@ -0,0 +1,87 @@
version: 2
updates:
# prod dependencies
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "weekly"
day: "saturday"
ignore:
- dependency-name: k8s.io/*
- dependency-name: github.com/grpc-ecosystem/*
- dependency-name: google.golang.org/grpc
# ignore all non-security updates: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit
open-pull-requests-limit: 0
labels:
- type/dependencies
- go
commit-message:
prefix: chore(deps)
- package-ecosystem: "npm"
directory: "/ui"
schedule:
interval: "weekly"
day: "saturday"
# split devDeps and prod deps as typically only prod deps need security backports
groups:
devDeps:
applies-to: security-updates
dependency-type: "development"
deps:
applies-to: security-updates
dependency-type: "production"
ignore:
- dependency-name: raw-loader
- dependency-name: style-loader
- dependency-name: react-router-dom
- dependency-name: "@types/react-router-dom"
# ignore all non-security updates: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit
open-pull-requests-limit: 0
labels:
- type/dependencies
- javascript
commit-message:
prefix: chore(deps)
prefix-development: chore(deps-dev)
# build / CI dependencies
- package-ecosystem: "pip"
directory: "/docs"
schedule:
interval: "weekly"
day: "saturday"
# ignore all non-security updates: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit
open-pull-requests-limit: 0
labels:
- type/dependencies
- python
commit-message:
prefix: chore(deps)
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
day: "saturday"
# ignore all non-security updates: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit
open-pull-requests-limit: 0
labels:
- type/dependencies
- github_actions
commit-message:
prefix: chore(deps-dev)
# Docs: https://containers.dev/guide/dependabot
- package-ecosystem: "devcontainers"
directory: "/"
schedule:
interval: "weekly"
day: "saturday"
# ignore all non-security updates: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#open-pull-requests-limit
open-pull-requests-limit: 0
labels:
- type/dependencies
- devcontainer
commit-message:
prefix: chore(deps-dev)

.github/pull_request_template.md vendored Normal file

@ -0,0 +1,52 @@
<!-- markdownlint-disable MD041 -- this is rendered within existing HTML, so allow starting without an H1 -->
<!--
### Before you open your PR
- Run `make pre-commit -B` to fix codegen and lint problems (build will fail).
- [Sign off your commits](https://github.com/apps/dco/) (otherwise the DCO check will fail).
- Used [a conventional commit message](https://www.conventionalcommits.org/en/v1.0.0/).
### When you open your PR
- PR title format should also conform to [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/).
- "Fixes #" is in both the PR title (for release notes) and this description (to automatically link and close the issue).
- Create the PR as draft.
- Once builds are green, mark your PR "Ready for review".
When changes are requested, please address them and then dismiss the review to get it reviewed again.
-->
<!-- Does this PR fix an issue -->
Fixes #TODO
### Motivation
<!-- TODO: Say why you made your changes. -->
### Modifications
<!-- TODO: Say what changes you made. -->
<!-- TODO: Attach screenshots if you changed the UI. -->
### Verification
<!-- TODO: Say how you tested your changes. -->
### Documentation
<!-- TODO: Say how you have updated the documentation or explain why this isn't needed here -->
<!-- Required for features: Explain how the user will discover this feature through documentation and examples -->
<!--
### Beyond this PR
Thank you for submitting this! Have you ever thought of becoming a Reviewer or Approver on the project?
Argo Workflows is seeking more community involvement and ultimately more [Reviewers and Approvers](https://github.com/argoproj/argoproj/blob/main/community/membership.md) to help keep it viable.
See [Sustainability Effort](https://github.com/argoproj/argo-workflows/blob/main/community/sustainability_effort.md) for more information.
-->

.github/workflows/changelog.yaml vendored Normal file

@ -0,0 +1,34 @@
name: Changelog
on:
push:
tags:
- v*
- "!v0.0.0"
permissions:
contents: read
jobs:
generate_changelog:
name: Generate changelog
if: github.repository == 'argoproj/argo-workflows'
permissions:
contents: write # for peter-evans/create-pull-request to create branch
pull-requests: write # for peter-evans/create-pull-request to create a PR
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
ref: main
fetch-depth: 0
- run: git fetch --prune --prune-tags
- run: git tag -l 'v*'
# avoid invoking `make` to reduce the risk of a Makefile bug failing this workflow
- run: ./hack/changelog.sh > CHANGELOG.md
- uses: peter-evans/create-pull-request@a4f52f8033a6168103c2538976c07b467e8163bc # v6.0.1
with:
title: 'docs: update CHANGELOG.md for ${{ github.ref_name }}'
commit-message: 'docs: update CHANGELOG.md for ${{ github.ref_name }}'
branch: create-pull-request/changelog
signoff: true

.github/workflows/ci-build.yaml vendored Normal file

@ -0,0 +1,521 @@
name: CI
on:
push:
branches:
- "main"
- "release-*"
- "!release-2.8"
pull_request:
branches:
- "main"
- "release-*"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
changed-files:
name: Get changed files
outputs:
# reference: https://github.com/tj-actions/changed-files#outputs-
tests: ${{ steps.changed-files.outputs.tests_any_modified == 'true' }}
e2e-tests: ${{ steps.changed-files.outputs.e2e-tests_any_modified == 'true' }}
codegen: ${{ steps.changed-files.outputs.codegen_any_modified == 'true' }}
lint: ${{ steps.changed-files.outputs.lint_any_modified == 'true' }}
ui: ${{ steps.changed-files.outputs.ui_any_modified == 'true' }}
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
fetch-depth: 50 # assume PRs are less than 50 commits
- name: Get relevant files changed per group
id: changed-files
uses: tj-actions/changed-files@cbda684547adc8c052d50711417fa61b428a9f88 # v41.1.2
with:
files_yaml: |
common: &common
- .github/workflows/ci-build.yaml
- Makefile
- tasks.yaml
tests: &tests
- *common
- cmd/**
- config/**
- errors/**
- persist/**
- pkg/**
- server/**
- test/**
- util/**
- workflow/**
- go.mod
- go.sum
e2e-tests:
- *tests
# plus manifests and SDKs that are used in E2E tests
- Dockerfile
- manifests/**
- sdks/**
# example test suite
- examples/**
codegen:
- *common
# generated files
- api/**
- docs/fields.md
- docs/executor_swagger.md
- docs/cli/**
- pkg/**
- sdks/java/**
- sdks/python/**
# files that generation is based off
- pkg/**
- cmd/**
- examples/** # examples are used within the fields lists
- manifests/** # a few of these are generated and committed
# generation scripts
- hack/api/**
- hack/docs/**
- hack/manifests/**
- .clang-format
lint:
- *tests
- .features/**
# plus lint config
- .golangci.yml
# all GH workflows / actions
- .github/workflows/**
# docs files below
- docs/**
# generated files are covered by codegen
- '!docs/fields.md'
- '!docs/executor_swagger.md'
- '!docs/cli/**'
# proposals live only on GH as pure markdown
- '!docs/proposals/**'
# docs scripts & tools from `make docs`
- hack/docs/copy-readme.sh
- hack/docs/check-env-doc.sh
- hack/featuregen/**
- .markdownlint.yaml
- .mlc_config.json
- .spelling
- mkdocs.yml
ui:
- *common
- ui/**
tests:
name: Unit Tests
needs: [ changed-files ]
if: ${{ needs.changed-files.outputs.tests == 'true' }}
runs-on: ubuntu-24.04
timeout-minutes: 20
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- run: make test STATIC_FILES=false GOTEST='go test -p 20 -covermode=atomic -coverprofile=coverage.out'
- name: Upload coverage report
# engineers just ignore this in PRs, so let's not even run it
if: github.ref == 'refs/heads/main'
uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed # v4.3.0
with:
fail_ci_if_error: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
tests-windows:
name: Windows Unit Tests
needs: [ changed-files ]
if: ${{ needs.changed-files.outputs.tests == 'true' }}
runs-on: windows-2022
timeout-minutes: 20
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
# the Windows run does not use the Makefile target because that target does a lot more than just testing and is not cross-platform compatible
- run: if (!(Test-Path "ui/dist/app/index.html")) { New-Item -ItemType Directory -Force -Path "ui/dist/app" | Out-Null; New-Item -ItemType File -Path "ui/dist/app/placeholder" | Out-Null }; go test -p 20 -covermode=atomic -coverprofile='coverage.out' $(go list ./... | select-string -Pattern 'github.com/argoproj/argo-workflows/v3/workflow/controller' , 'github.com/argoproj/argo-workflows/v3/server' -NotMatch)
env:
KUBECONFIG: /dev/null
- name: Upload coverage report
# engineers just ignore this in PRs, so let's not even run it
if: github.ref == 'refs/heads/main'
uses: codecov/codecov-action@84508663e988701840491b86de86b666e8a86bed # v4.3.0
with:
fail_ci_if_error: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
build-binaries:
name: Build Binaries
needs: [ changed-files ]
if: ${{ needs.changed-files.outputs.e2e-tests == 'true' }}
runs-on: ubuntu-24.04
timeout-minutes: 10
strategy:
fail-fast: false
matrix:
include:
- target: controller
- target: cli
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- name: Build
run: make ${{matrix.target}}
- name: Upload
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
with:
name: ${{matrix.target}}
path: dist
if-no-files-found: error
argo-images:
name: argo-images
# needs: [ lint ]
runs-on: ubuntu-24.04
timeout-minutes: 10
strategy:
fail-fast: false
matrix:
include:
- image: argoexec
- image: argocli
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
- name: Build and export
uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0
with:
context: .
tags: quay.io/argoproj/${{matrix.image}}:latest
outputs: type=docker,dest=/tmp/${{matrix.image}}_image.tar
target: ${{matrix.image}}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Upload
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
with:
name: ${{matrix.image}}_image.tar
path: /tmp/${{matrix.image}}_image.tar
if-no-files-found: error
e2e-tests:
name: E2E Tests
needs: [ changed-files, argo-images, build-binaries ]
if: ${{ needs.changed-files.outputs.e2e-tests == 'true' }}
runs-on: ubuntu-24.04
# These tests usually finish in ~25m, but occasionally they take much longer due to resource
# contention on the runner, which we have no control over.
timeout-minutes: 60
env:
KUBECONFIG: /home/runner/.kubeconfig
E2E_ENV_FACTOR: 2
strategy:
fail-fast: false
matrix:
include:
- test: test-executor
profile: minimal
use-api: false
- test: test-corefunctional
profile: minimal
use-api: false
- test: test-functional
profile: minimal
use-api: false
- test: test-api
profile: mysql
use-api: true
- test: test-api
profile: postgres
use-api: true
- test: test-cli
profile: mysql
use-api: true
- test: test-cron
profile: minimal
use-api: false
- test: test-examples
profile: minimal
use-api: false
- test: test-plugins
profile: plugins
use-api: false
- test: test-java-sdk
profile: minimal
use-api: true
- test: test-python-sdk
profile: minimal
use-api: true
- test: test-executor
k8s_version: min
profile: minimal
use-api: false
- test: test-corefunctional
k8s_version: min
profile: minimal
use-api: false
- test: test-functional
k8s_version: min
profile: minimal
use-api: false
- test: test-dbsemaphore
k8s_version: min
profile: mysql
use-api: false
- test: test-dbsemaphore
k8s_version: min
profile: postgres
use-api: false
steps:
- name: Free up unused disk space
run: |
printf "==> Available space before cleanup\n"
df -h
# these directories are not used by E2E tests
sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc /usr/local/.ghcup /opt/hostedtoolcache/CodeQL
printf "==> Available space after cleanup\n"
df -h
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- name: Install Java for the SDK
if: ${{matrix.test == 'test-java-sdk'}}
uses: actions/setup-java@387ac29b308b003ca37ba93a6cab5eb57c8f5f93 # v4.0.0
with:
java-version: '8'
distribution: adopt
cache: maven
- name: Install Python for the SDK
if: ${{matrix.test == 'test-python-sdk'}}
uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
with:
python-version: '3.x'
cache: pip
- name: Install socat (needed by Kubernetes) and kit
# socat is needed for "kubectl port-forward" to work when using cri-dockerd: https://github.com/k3s-io/cri-dockerd/blob/4995f339edcffdf890406b3f1477d34e38477f18/streaming/streaming_others.go#L46
# Both cri-o and containerd removed it as a dependency a while ago, but that hasn't been ported to cri-dockerd.
# Running "make kit" isn't strictly necessary, since it would be installed automatically by "make start",
# but it's noisy and makes the logs for "Start controller/API" hard to follow.
run: sudo apt-get -y install socat && make kit
- name: Install and start K3S
env:
K8S_VERSION: ${{ matrix.k8s_version || 'max' }}
run: |
. hack/k8s-versions.sh
export INSTALL_K3S_VERSION="${K8S_VERSIONS[$K8S_VERSION]}+k3s1"
curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=stable \
INSTALL_K3S_EXEC="--docker --kubelet-arg=config=${GITHUB_WORKSPACE}/test/e2e/manifests/kubelet-configuration.yaml" \
K3S_KUBECONFIG_MODE=644 \
sh -
until kubectl --kubeconfig=/etc/rancher/k3s/k3s.yaml cluster-info ; do sleep 10s ; done
cp /etc/rancher/k3s/k3s.yaml /home/runner/.kubeconfig
echo "- name: fake_token_user" >> $KUBECONFIG
echo " user:" >> $KUBECONFIG
echo " token: xxxxxx" >> $KUBECONFIG
until kubectl cluster-info ; do sleep 10s ; done
- name: Download images
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
pattern: '*_image.tar'
path: /tmp
- name: Load images
run: |
set -eux
docker load < /tmp/argoexec_image.tar/argoexec_image.tar
docker load < /tmp/argocli_image.tar/argocli_image.tar
- name: Download controller
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: controller
path: dist/
- name: Download CLI
if: ${{matrix.use-api}}
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: cli
path: dist/
- name: Prepare binaries
run: chmod +x dist/* && make --touch dist/*
- name: Set-up /etc/hosts
run: |
echo '127.0.0.1 dex' | sudo tee -a /etc/hosts
echo '127.0.0.1 minio' | sudo tee -a /etc/hosts
echo '127.0.0.1 postgres' | sudo tee -a /etc/hosts
echo '127.0.0.1 mysql' | sudo tee -a /etc/hosts
echo '127.0.0.1 azurite' | sudo tee -a /etc/hosts
- name: Start controller/API
run: |
make start PROFILE=${{matrix.profile}} \
AUTH_MODE=client STATIC_FILES=false \
LOG_LEVEL=info \
API=${{matrix.use-api}} \
UI=false \
POD_STATUS_CAPTURE_FINALIZER=true 2>&1 | tee /tmp/argo.log &
make wait PROFILE=${{matrix.profile}} API=${{matrix.use-api}}
timeout-minutes: 5
- name: Validate release manifests
run: make manifests-validate
- name: Run tests ${{matrix.test}}
run: make ${{matrix.test}} E2E_SUITE_TIMEOUT=20m STATIC_FILES=false
# failure debugging below
- name: Failure debug - k3s logs
if: ${{ failure() }}
run: journalctl -u k3s
- name: Failure debug - describe MinIO/MySQL deployment
if: ${{ failure() }}
run: |
set -eux
kubectl get deploy
kubectl describe deploy
- name: Failure debug - describe MinIO/MySQL pods
if: ${{ failure() }}
run: |
set -eux
kubectl get pods -l '!workflows.argoproj.io/workflow'
kubectl describe pods -l '!workflows.argoproj.io/workflow'
- name: Failure debug - MinIO/MySQL logs
if: ${{ failure() }}
run: kubectl logs -l '!workflows.argoproj.io/workflow' --prefix
- name: Failure debug - Controller/API logs
if: ${{ failure() }}
run: |
[ -e /tmp/argo.log ] && cat /tmp/argo.log
- if: ${{ failure() }}
name: Failure debug - describe Workflows
run: |
set -eux
kubectl get wf
kubectl describe wf
- name: Failure debug - describe Workflow pods
if: ${{ failure() }}
run: |
set -eux
kubectl get pods -l workflows.argoproj.io/workflow
kubectl describe pods -l workflows.argoproj.io/workflow
- name: Failure debug - Workflow Pod logs
if: ${{ failure() }}
run: kubectl logs --all-containers -l workflows.argoproj.io/workflow --prefix
# workaround for status checks -- check this one job instead of each individual E2E job in the matrix
# this allows us to skip the entire matrix when it doesn't need to run while still having accurate status checks
# see https://github.com/orgs/community/discussions/9141#discussioncomment-2296809 and https://github.com/orgs/community/discussions/26822#discussioncomment-3305794
e2e-tests-composite-result:
name: E2E Tests - Composite result
needs: [ e2e-tests ]
if: ${{ always() }}
runs-on: ubuntu-24.04
steps:
- run: |
result="${{ needs.e2e-tests.result }}"
# mark as successful even if skipped
if [[ $result == "success" || $result == "skipped" ]]; then
exit 0
else
exit 1
fi
codegen:
name: Codegen
needs: [ changed-files ]
if: ${{ needs.changed-files.outputs.codegen == 'true' }}
runs-on: ubuntu-24.04
timeout-minutes: 20
env:
GOPATH: /home/runner/go
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- name: Install protoc
run: |
set -eux -o pipefail
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.19.4/protoc-3.19.4-linux-x86_64.zip
sudo unzip -o protoc-3.19.4-linux-x86_64.zip -d /usr/local bin/protoc
sudo unzip -o protoc-3.19.4-linux-x86_64.zip -d /usr/local 'include/*'
sudo chmod +x /usr/local/bin/protoc
sudo find /usr/local/include -type f | xargs sudo chmod a+r
sudo find /usr/local/include -type d | xargs sudo chmod a+rx
ls /usr/local/include/google/protobuf/
- name: Pull OpenAPI Generator CLI Docker image
run: |
docker pull openapitools/openapi-generator-cli:v5.4.0 &
docker pull openapitools/openapi-generator-cli:v5.2.1 &
- name: Create symlinks
run: |
mkdir -p /home/runner/go/src/github.com/argoproj
ln -s "$PWD" /home/runner/go/src/github.com/argoproj/argo-workflows
- run: make codegen -B STATIC_FILES=false
# if codegen makes changes that are not in the PR, fail the build
- name: Check if codegen made changes not present in the PR
run: git diff --exit-code
lint:
name: Lint
needs: [ changed-files ]
if: ${{ needs.changed-files.outputs.lint == 'true' }}
runs-on: ubuntu-24.04
timeout-minutes: 15 # must be strictly greater than the timeout in .golangci.yml
env:
GOPATH: /home/runner/go
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- run: make lint STATIC_FILES=false
# if lint makes changes that are not in the PR, fail the build
- name: Check if lint made changes not present in the PR
run: git diff --exit-code
# lint GH Actions
- name: Ensure GH Actions are pinned to SHAs
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@ba37328d4ea95eaf8b3bd6c6cef308f709a5f2ec # v3.0.3
ui:
name: UI
needs: [ changed-files ]
if: ${{ needs.changed-files.outputs.ui == 'true' }}
runs-on: ubuntu-24.04
timeout-minutes: 6
env:
NODE_OPTIONS: --max-old-space-size=4096
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1
with:
node-version: "20" # change in all GH Workflows
cache: yarn
cache-dependency-path: ui/yarn.lock
- run: yarn --cwd ui install
- run: yarn --cwd ui build
- run: yarn --cwd ui test
- run: yarn --cwd ui lint
- run: yarn --cwd ui deduplicate
# if lint or deduplicate make changes that are not in the PR, fail the build
- name: Check if lint & deduplicate made changes not present in the PR
run: git diff --exit-code
# check to see if it'll start (but not if it'll render)
- run: yarn --cwd ui start &
- run: until curl http://localhost:8080 > /dev/null ; do sleep 10s ; done
timeout-minutes: 1


@ -0,0 +1,30 @@
# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions
name: Approve and enable auto-merge for dependabot
on: pull_request
permissions:
contents: read
jobs:
review:
if: ${{ github.actor == 'dependabot[bot]' && github.repository == 'argoproj/argo-workflows'}}
permissions:
pull-requests: write # for approving a PR
contents: write # for enabling auto-merge on a PR
runs-on: ubuntu-24.04
steps:
- name: Dependabot metadata
id: metadata
uses: dependabot/fetch-metadata@c9c4182bf1b97f5224aee3906fd373f6b61b4526 # v1.6.0
with:
github-token: "${{ secrets.GITHUB_TOKEN }}"
- name: Approve PR
run: gh pr review --approve "$PR_URL"
env:
PR_URL: ${{github.event.pull_request.html_url}}
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
- name: Enable auto-merge for Dependabot PRs
run: gh pr merge --auto --squash "$PR_URL"
env:
PR_URL: ${{github.event.pull_request.html_url}}
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}

50
.github/workflows/devcontainer.yaml vendored Normal file
View File

@ -0,0 +1,50 @@
name: Dev Container
on:
push:
paths:
- ".devcontainer/**"
- "hack/k8s-versions.sh"
- ".github/workflows/devcontainer.yaml"
branches:
- main
pull_request:
paths:
- ".devcontainer/**"
- "hack/k8s-versions.sh"
- ".github/workflows/devcontainer.yaml"
branches:
- main
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
packages: write
jobs:
build:
runs-on: ubuntu-22.04
if: github.repository == 'argoproj/argo-workflows'
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
with:
# Workaround for segfaults under arm64:
# https://github.com/docker/setup-qemu-action/issues/198#issuecomment-2653791775
image: tonistiigi/binfmt:qemu-v7.0.0-28
- uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
- name: Login to registry
if: ${{ github.event_name == 'push' }}
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}
- name: Build dev container image
run: make devcontainer-build
env:
TARGET_PLATFORM: linux/amd64,linux/arm64
DEVCONTAINER_PUSH: ${{ github.event_name == 'push' && 'true' || 'false' }}

.github/workflows/docs.yaml vendored Normal file

@ -0,0 +1,48 @@
name: Docs
on:
push:
branches:
- main
pull_request:
branches:
- main
- release/*
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
jobs:
docs:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0
with:
python-version: 3.9
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
- uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1
with:
node-version: "19"
# This is mainly here so the dependencies get saved to the cache by "actions/setup-go"
- name: Download go dependencies
run: go mod download
# Use the same make target both locally and on CI to make it easier to debug failures.
- name: Build & Lint docs
run: make docs
# If linters auto-fix issues, files will be changed. If so, fail the build.
- name: Check if linters made changes
run: git diff --exit-code
# Upload the site so reviewers see it.
- name: Upload Docs Site
uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
with:
name: docs
path: site
if-no-files-found: error

73
.github/workflows/pr.yaml vendored Normal file
View File

@ -0,0 +1,73 @@
name: PR
on:
pull_request_target:
types:
- opened
- edited
- reopened
- synchronize
permissions:
contents: read
jobs:
title-check:
runs-on: ubuntu-24.04
outputs:
type: ${{ steps.semantic-pr-check.outputs.type }}
steps:
- name: Check PR Title's semantic conformance
id: semantic-pr-check
uses: amannn/action-semantic-pull-request@0723387faaf9b38adef4775cd42cfd5155ed6017 # v5.5.3
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
feature-pr-handling:
needs: title-check
runs-on: ubuntu-24.04
if: needs.title-check.outputs.type == 'feat'
env:
PR_HEAD: ${{ github.event.pull_request.head.sha }}
steps:
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
cache: true
- name: Checkout
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
fetch-depth: 50
- name: Ensure ./.features/pending/*.md addition(s)
id: changed-files
uses: tj-actions/changed-files@cbda684547adc8c052d50711417fa61b428a9f88 # v41.1.2
with:
files: |
.features/pending/*.md
- name: No ./.features/*.md addition
if: steps.changed-files.outputs.added_files_count == 0
run: |
echo "No feature description was added to the ./.features/ directory for this feature PR."
echo "Please add a .md file to the ./.features/ directory."
echo "See docs/running-locally.md for more details."
false
- name: Validate ./.features/*.md changes
if: steps.changed-files.outputs.added_files_count > 0
run: |
echo "A feature description was added to the ./.features/ directory."
make features-validate \
|| { echo "New ./.features/*.md file failed validation."; exit 1; }
# In order to validate any links in the feature files, render them to markdown
- name: Render .features/*.md feature descriptions
run: make features-preview > features_preview.md
- name: Link Checker
id: lychee
uses: lycheeverse/lychee-action@f613c4a64e50d792e0b31ec34bbcbba12263c6a6 # f613c4a64e50d792e0b31ec34bbcbba12263c6a6
with:
args: "--verbose --no-progress ./features_preview.md"
failIfEmpty: false

.github/workflows/release.yaml vendored Normal file

@ -0,0 +1,364 @@
name: Release
on:
push:
tags:
- v*
branches:
- main
- dev-*
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
defaults:
run:
shell: bash
permissions:
contents: read
jobs:
build-linux:
name: Build & push linux
if: github.repository == 'argoproj/argo-workflows'
runs-on: ubuntu-24.04
strategy:
matrix:
platform: [ linux/amd64, linux/arm64 ]
target: [ workflow-controller, argocli, argoexec, argoexec-nonroot ]
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Set up QEMU
uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
with:
version: v0.10.4
- name: Cache Docker layers
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
id: cache
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-${{ matrix.platform }}-${{ matrix.target }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-${{ matrix.platform }}-${{ matrix.target }}-buildx-
- name: Docker Login
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
username: ${{ secrets.DOCKERIO_USERNAME }}
password: ${{ secrets.DOCKERIO_PASSWORD }}
- name: Login to Quay
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}
- name: Docker Buildx
env:
DOCKERIO_ORG: ${{ secrets.DOCKERIO_ORG }}
PLATFORM: ${{ matrix.platform }}
TARGET: ${{ matrix.target }}
run: |
set -eux
tag=$(basename $GITHUB_REF)
if [ $tag = "main" ]; then
tag="latest"
fi
# copied verbatim from Makefile
GIT_COMMIT=$(git rev-parse HEAD || echo unknown)
GIT_TAG=$(git describe --exact-match --tags --abbrev=0 2> /dev/null || echo untagged)
GIT_TREE_STATE=$(if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
tag_suffix=$(echo $PLATFORM | sed -r "s/\//-/g")
# Special handling for argoexec-nonroot to create argoexec:tag-nonroot-platform instead
if [ "$TARGET" = "argoexec-nonroot" ]; then
image_name="${DOCKERIO_ORG}/argoexec:${tag}-nonroot-${tag_suffix}"
else
image_name="${DOCKERIO_ORG}/${TARGET}:${tag}-${tag_suffix}"
fi
docker buildx build \
--cache-from "type=local,src=/tmp/.buildx-cache" \
--cache-to "type=local,dest=/tmp/.buildx-cache" \
--output "type=image,push=true" \
--build-arg GIT_COMMIT=$GIT_COMMIT \
--build-arg GIT_TAG=$GIT_TAG \
--build-arg GIT_TREE_STATE=$GIT_TREE_STATE \
--platform="${PLATFORM}" \
--target $TARGET \
--provenance=false \
--tag quay.io/$image_name .
build-windows:
name: Build & push windows
if: github.repository == 'argoproj/argo-workflows'
runs-on: windows-2022
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Docker Login
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
username: ${{ secrets.DOCKERIO_USERNAME }}
password: ${{ secrets.DOCKERIO_PASSWORD }}
- name: Login to Quay
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}
- name: Build & Push Windows Docker Images
env:
DOCKERIO_ORG: ${{ secrets.DOCKERIO_ORG }}
run: |
docker_org=$DOCKERIO_ORG
tag=$(basename $GITHUB_REF)
if [ $tag = "main" ]; then
tag="latest"
fi
targets="argoexec"
for target in $targets; do
image_name="${docker_org}/${target}:${tag}-windows"
docker build \
--build-arg GIT_COMMIT=$tag \
--build-arg GIT_BRANCH=$branch \
--build-arg GIT_TREE_STATE=$tree_state \
--target $target \
-t $image_name \
-f Dockerfile.windows \
.
docker tag $image_name quay.io/$image_name
docker push quay.io/$image_name
done
push-images:
name: Push manifest with all images
if: github.repository == 'argoproj/argo-workflows'
runs-on: ubuntu-24.04
needs: [ build-linux, build-windows ]
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- name: Docker Login
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
username: ${{ secrets.DOCKERIO_USERNAME }}
password: ${{ secrets.DOCKERIO_PASSWORD }}
- name: Login to Quay
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}
- name: Install cosign
uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0
with:
cosign-release: 'v2.2.3'
- name: Push Multiarch Image
env:
DOCKERIO_ORG: ${{ secrets.DOCKERIO_ORG }}
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
run: |
: ${DOCKER_CONFIG:=~/.docker}
echo $(jq -c '. + { "experimental": "enabled" }' ${DOCKER_CONFIG}/config.json) > ${DOCKER_CONFIG}/config.json
docker_org=$DOCKERIO_ORG
tag=$(basename $GITHUB_REF)
if [ $tag = "main" ]; then
tag="latest"
fi
targets="workflow-controller argoexec argoexec-nonroot argocli"
for target in $targets; do
if [ "$target" = "argoexec-nonroot" ]; then
# Special handling for argoexec-nonroot: create argoexec:tag-nonroot manifest
image_name="${docker_org}/argoexec:${tag}-nonroot"
docker manifest create quay.io/$image_name quay.io/${docker_org}/argoexec:${tag}-nonroot-linux-arm64 quay.io/${docker_org}/argoexec:${tag}-nonroot-linux-amd64
elif [ "$target" = "argoexec" ]; then
image_name="${docker_org}/${target}:${tag}"
docker manifest create quay.io/$image_name quay.io/${image_name}-linux-arm64 quay.io/${image_name}-linux-amd64 quay.io/${image_name}-windows
else
image_name="${docker_org}/${target}:${tag}"
docker manifest create quay.io/$image_name quay.io/${image_name}-linux-arm64 quay.io/${image_name}-linux-amd64
fi
docker manifest push quay.io/$image_name
cosign sign -y --key env://COSIGN_PRIVATE_KEY quay.io/$image_name
done
test-images-linux-amd64:
name: Try pulling linux/amd64
if: github.repository == 'argoproj/argo-workflows'
runs-on: ubuntu-24.04
needs: [ push-images ]
strategy:
matrix:
platform: [ linux/amd64 ]
target: [ workflow-controller, argocli, argoexec, argoexec-nonroot ]
steps:
- name: Docker Login
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
username: ${{ secrets.DOCKERIO_USERNAME }}
password: ${{ secrets.DOCKERIO_PASSWORD }}
- name: Login to Quay
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}
- name: Docker Buildx
env:
DOCKERIO_ORG: ${{ secrets.DOCKERIO_ORG }}
PLATFORM: ${{ matrix.platform }}
TARGET: ${{ matrix.target }}
run: |
tag=$(basename $GITHUB_REF)
if [ $tag = "main" ]; then
tag="latest"
fi
if [ "$TARGET" = "argoexec-nonroot" ]; then
image_name="${DOCKERIO_ORG}/argoexec:${tag}-nonroot"
else
image_name="${DOCKERIO_ORG}/${TARGET}:${tag}"
fi
docker pull quay.io/$image_name
test-images-windows:
name: Try pulling windows
if: github.repository == 'argoproj/argo-workflows'
runs-on: windows-2022
needs: [ push-images ]
steps:
- name: Docker Login
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
username: ${{ secrets.DOCKERIO_USERNAME }}
password: ${{ secrets.DOCKERIO_PASSWORD }}
- name: Login to Quay
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: quay.io
username: ${{ secrets.QUAYIO_USERNAME }}
password: ${{ secrets.QUAYIO_PASSWORD }}
- name: Try pulling
env:
DOCKERIO_ORG: ${{ secrets.DOCKERIO_ORG }}
run: |
docker_org=$DOCKERIO_ORG
tag=$(basename $GITHUB_REF)
if [ $tag = "main" ]; then
tag="latest"
fi
targets="argoexec"
for target in $targets; do
image_name="${docker_org}/${target}:${tag}"
docker pull quay.io/$image_name
done
publish-release:
permissions:
contents: write # for softprops/action-gh-release to create GitHub release
runs-on: ubuntu-24.04
if: github.repository == 'argoproj/argo-workflows'
needs: [ push-images, test-images-linux-amd64, test-images-windows ]
env:
NODE_OPTIONS: --max-old-space-size=4096
COSIGN_PRIVATE_KEY: ${{secrets.COSIGN_PRIVATE_KEY}}
COSIGN_PASSWORD: ${{secrets.COSIGN_PASSWORD}}
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1
with:
node-version: "20" # change in all GH Workflows
- uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
go-version: "1.24"
- name: Restore node packages cache
uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
with:
path: ui/node_modules
key: ${{ runner.os }}-node-dep-v1-${{ hashFiles('**/yarn.lock') }}
- name: Install cosign
uses: sigstore/cosign-installer@e1523de7571e31dbe865fd2e80c5c7c23ae71eb4 # v3.4.0
with:
cosign-release: 'v2.2.3'
# https://stackoverflow.com/questions/58033366/how-to-get-current-branch-within-github-actions
- run: |
if [ ${GITHUB_REF##*/} = main ]; then
echo "VERSION=latest" >> $GITHUB_ENV
else
echo "VERSION=${GITHUB_REF##*/}" >> $GITHUB_ENV
fi
- run: go install sigs.k8s.io/bom/cmd/bom@v0.2.0
- run: go install github.com/spdx/spdx-sbom-generator/cmd/generator@v0.0.13
- run: mkdir -p dist
- run: generator -o dist -p .
- run: yarn --cwd ui install
- run: generator -o dist -p ui
- run: bom generate --image quay.io/argoproj/workflow-controller:$VERSION -o dist/workflow-controller.spdx
- run: bom generate --image quay.io/argoproj/argocli:$VERSION -o dist/argocli.spdx
- run: bom generate --image quay.io/argoproj/argoexec:$VERSION -o dist/argoexec.spdx
- run: bom generate --image quay.io/argoproj/argoexec:$VERSION-nonroot -o dist/argoexec-nonroot.spdx
# pack the boms into one file to make it easy to download
- run: tar -zcf dist/sbom.tar.gz dist/*.spdx
- run: make release-notes VERSION=$VERSION
- run: cat release-notes
- run: make manifests VERSION=$VERSION
- name: Print image tag (please check it is not `:latest`)
run: |
grep image: dist/manifests/install.yaml
- run: go mod download
- run: make clis STATIC_FILES=true VERSION=$VERSION
- name: Print version (please check it is not dirty)
run: dist/argo-linux-amd64 version
- run: make checksums
- name: Sign checksums and create public key for release assets
run: |
cosign sign-blob -y --key env://COSIGN_PRIVATE_KEY ./dist/argo-workflows-cli-checksums.txt > ./dist/argo-workflows-cli-checksums.sig
# Retrieves the public key to release as an asset
cosign public-key --key env://COSIGN_PRIVATE_KEY > ./dist/argo-workflows-cosign.pub
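# A hedged verification sketch for consumers of the release assets (run alongside the downloaded files):
#   cosign verify-blob --key argo-workflows-cosign.pub \
#     --signature argo-workflows-cli-checksums.sig argo-workflows-cli-checksums.txt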
# https://github.com/softprops/action-gh-release
# This will publish the release and upload assets.
# If a conflict occurs (because you are not on a tag), the release will not be updated. This is a shortcoming
# of this action.
# Instead, delete the release so it is re-created.
- uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # v1
if: startsWith(github.ref, 'refs/tags/v')
with:
prerelease: ${{ startsWith(github.ref, 'refs/tags/v0') || contains(github.ref, 'rc') }}
body_path: release-notes
files: |
dist/argo-*.gz
dist/argo-workflows-cli-checksums.txt
dist/argo-workflows-cli-checksums.sig
dist/manifests/*.yaml
dist/argo-workflows-cosign.pub
dist/sbom.tar.gz
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
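The tag arithmetic in the build-linux and push-images jobs above is easiest to audit with concrete values. A minimal sketch, assuming a hypothetical org `example` and release tag `v9.9.9`:

PLATFORM=linux/arm64 TARGET=argoexec-nonroot DOCKERIO_ORG=example tag=v9.9.9
tag_suffix=$(echo $PLATFORM | sed -r "s/\//-/g")                    # linux/arm64 -> linux-arm64
image_name="${DOCKERIO_ORG}/argoexec:${tag}-nonroot-${tag_suffix}"
echo "quay.io/$image_name"                                          # quay.io/example/argoexec:v9.9.9-nonroot-linux-arm64

push-images then stitches the per-platform tags into a single quay.io/example/argoexec:v9.9.9-nonroot manifest and signs it with cosign.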

25
.github/workflows/retest.yaml vendored Normal file
View File

@ -0,0 +1,25 @@
name: Detect and Trigger Retest
on:
issue_comment:
types: [created]
permissions:
contents: read
jobs:
retest:
# PR comments where a Member types "/retest" exactly
if: github.event.issue.pull_request && github.event.comment.author_association == 'MEMBER' && github.event.comment.body == '/retest'
permissions:
actions: write # for re-running failed jobs: https://docs.github.com/en/rest/actions/workflow-runs?apiVersion=2022-11-28#re-run-a-job-from-a-workflow-run
runs-on: ubuntu-24.04
steps:
- name: Re-run failed jobs for this PR
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
REPO: ${{ github.repository }}
PR_NUMBER: ${{ github.event.issue.number }}
run: |
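# Find the PR's head commit, locate the most recent ci-build run for it, then re-run only its failed jobs.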
SHA_REF=$(gh api "/repos/$REPO/pulls/$PR_NUMBER/commits" | jq -r '.[].sha' | tail -n 1)
RUN_ID=$(gh api "repos/$REPO/actions/workflows/ci-build.yaml/runs?per_page=1&event=pull_request&head_sha=$SHA_REF" | jq -r '.workflow_runs[] | .id')
gh api --method POST repos/$REPO/actions/runs/$RUN_ID/rerun-failed-jobs

28
.github/workflows/sdks.yaml vendored Normal file
View File

@ -0,0 +1,28 @@
name: SDKs
on:
push:
tags:
- v*
permissions:
contents: read
jobs:
sdks:
name: Publish SDKs
if: github.repository == 'argoproj/argo-workflows'
permissions:
packages: write # for publishing packages
contents: write # for creating releases
runs-on: ubuntu-24.04
strategy:
matrix:
name:
- java
- python
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- run: make --directory sdks/${{matrix.name}} publish -B
env:
JAVA_SDK_MAVEN_PASSWORD: ${{ secrets.GITHUB_TOKEN }}
PYPI_API_TOKEN: ${{ secrets.PYPI_API_TOKEN }}

47
.github/workflows/snyk.yml vendored Normal file
View File

@ -0,0 +1,47 @@
name: Snyk
on:
schedule:
- cron: "30 2 * * *"
push:
branches:
- main
- release-*
permissions:
contents: read
jobs:
# we do not scan images in this workflow; they're scanned at https://app.snyk.io/org/argoproj/projects
golang:
name: Scan Go deps
if: github.repository == 'argoproj/argo-workflows'
runs-on: ubuntu-24.04
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- run: mkdir -p ui/dist/app/ && touch ui/dist/app/placeholder
- name: Run Snyk to check for Go vulnerabilities
uses: snyk/actions/golang@b98d498629f1c368650224d6d212bf7dfa89e4bf # v0.4.0
with:
args: --severity-threshold=high
node:
name: Scan Node deps
if: github.repository == 'argoproj/argo-workflows'
runs-on: ubuntu-24.04
env:
SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
- uses: actions/setup-node@b39b52d1213e96004bfcb1c61a8a6fa8ab84f3e8 # v4.0.1
with:
node-version: "20" # change in all GH Workflows
cache: yarn
cache-dependency-path: ui/yarn.lock
- run: yarn --cwd ui install
- name: Run Snyk to check for Node vulnerabilities
uses: snyk/actions/node@b98d498629f1c368650224d6d212bf7dfa89e4bf # v0.4.0
with:
args: --file=ui/package.json --severity-threshold=high
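The same scans can be reproduced locally with the Snyk CLI; a sketch, assuming `snyk` is installed and SNYK_TOKEN is exported:

snyk test --severity-threshold=high                              # Go deps, from the repo root
snyk test --file=ui/package.json --severity-threshold=high       # Node deps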

41
.github/workflows/stale.yaml vendored Normal file
View File

@ -0,0 +1,41 @@
# https://github.com/actions/stale
name: Mark stale issues and pull requests
on:
schedule:
- cron: '0 2 * * *' # once a day at 2am
permissions:
contents: read
jobs:
stale:
permissions:
issues: write # for commenting on an issue and editing labels
pull-requests: write # for commenting on a PR and editing labels
runs-on: ubuntu-24.04
steps:
- uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# timing
days-before-stale: 14 # 2 weeks of inactivity
days-before-close: 14 # 2 more weeks of inactivity
# labels to watch for, add, and remove
only-labels: 'problem/more information needed' # only mark issues/PRs as stale if they have this label
labels-to-remove-when-unstale: 'problem/more information needed' # remove label when unstale -- should be manually added back if information is insufficient
stale-issue-label: 'problem/stale'
stale-pr-label: 'problem/stale'
# automated messages to issue/PR authors
stale-issue-message: >
This issue has been automatically marked as stale because it has not had recent activity and needs more information.
It will be closed if no further activity occurs.
stale-pr-message: >
This PR has been automatically marked as stale because it has not had recent activity and needs further changes.
It will be closed if no further activity occurs.
close-issue-message: >
This issue has been closed due to inactivity and lack of information.
If you still encounter this issue, please add the requested information and re-open.
close-pr-message: >
This PR has been closed due to inactivity and lack of changes.
If you would like to still work on this PR, please address the review comments and re-open.

90
.gitignore vendored
View File

@ -1,38 +1,54 @@
*.iml
.*swp
*.pyc
.idea
Pipfile
.vscode/
.idea/
.node-version
.DS_Store
.vagrant
.cache/
.vscode
.project
.pydevproject
.settings/
.sw?
tags
cscope.*
docker_build
devops/builds/*/*/src
Dockerfile.*.tmp
common/python/ax/_version.py
common/error/gen-js
saas/*/pkg/
saas/*/bin/
saas/common/src/git.apache.org/
saas/common/src/github.com/
saas/common/src/golang.org/
saas/common/src/gopkg.in/
saas/*/github.com/
saas/*/golang.org/
saas/*/gopkg.in/
saas/axops/src/applatix.io/axops/axops_server/docs.go
platform/source/go/src/github.com
platform/source/go/src/golang.org
platform/source/go/bin
platform/source/go/pkg
*.db
.coverage
htmlcov/
/bin/
.classpath
vendor/
dist/
# delve debug binaries
cmd/**/debug
hack/**/debug
hack/featuregen/featuregen
/argo
/argoexec
release-notes
debug.test
git-ask-pass.sh
*.iml
/coverage.out
.envrc
/.vendor-new
/kustomize
/workflow-controller
/.scannerwork/
/test-results/
/package-lock.json
/pkg/apiclient/_.secondary.swagger.json
/pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json
/pkg/apiclient/cronworkflow/cron-workflow.swagger.json
/pkg/apiclient/event/event.swagger.json
/pkg/apiclient/eventsource/eventsource.swagger.json
/pkg/apiclient/info/info.swagger.json
/pkg/apiclient/pipeline/pipeline.swagger.json
/pkg/apiclient/sensor/sensor.swagger.json
/pkg/apiclient/workflow/workflow.swagger.json
/pkg/apiclient/workflowarchive/workflow-archive.swagger.json
/pkg/apiclient/workflowtemplate/workflow-template.swagger.json
/site/
/.brew_home
/go-diagrams/
/.run/
sdks/python/client/dist/*
/v3/
/cmd/argoexec/commands/test.txt
/db-dumps/
.spelling.tmp
# Do not commit rendered installation manifests since they are misleading to users.
manifests/install.yaml
manifests/namespace-install.yaml
/logs
node_modules
result
.devenv
.devenv.flake.nix

94
.golangci.yml Normal file
View File

@ -0,0 +1,94 @@
version: "2"
run:
build-tags:
- api
- cli
- cron
- executor
- examples
- corefunctional
- functional
- plugins
linters:
enable:
- asasalint
- bidichk
- bodyclose
- copyloopvar
- errcheck
- gosec
- govet
- ineffassign
- misspell
- nakedret
- nosprintfhostport
- reassign
- rowserrcheck
- sqlclosecheck
- staticcheck
- testifylint
- unparam
- unused
settings:
gosec:
includes:
- G304
- G307
excludes:
# G106: Use of ssh InsecureIgnoreHostKey should be audited
- G106
# G402: TLS InsecureSkipVerify set true
- G402
staticcheck:
checks:
- all
# Capitalised error strings
- "-ST1005"
govet:
enable: [nilness]
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
rules:
- path: server/artifacts/artifact_server_test.go
text: response body must be closed
paths:
- dist
- docs
- examples
- hack
- manifests
- pkg/client
- sdks
- ui
- vendor
- third_party$
- builtin$
- examples$
formatters:
enable:
- gofmt
- goimports
settings:
goimports:
local-prefixes:
- github.com/argoproj/argo-workflows/
exclusions:
generated: lax
paths:
- dist
- docs
- examples
- hack
- manifests
- pkg/client
- sdks
- ui
- vendor
- third_party$
- builtin$
- examples$

8
.markdownlint.yaml Normal file
View File

@ -0,0 +1,8 @@
# don't enforce line length
MD013: false
# mkdocs uses 4-space indents
MD007:
indent: 4
MD024:
siblings_only: true
MD034: false

11
.mlc_config.json Normal file
View File

@ -0,0 +1,11 @@
{
"ignorePatterns": [
{
"pattern": ".*localhost.*"
}
],
"aliveStatusCodes": [
200,
429
]
}
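This file configures markdown-link-check: links to localhost are skipped, and HTTP 200/429 responses count as alive. A local run against a single file might look like (assuming the tool is installed):

markdown-link-check --config .mlc_config.json README.md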

12
.readthedocs.yml Normal file
View File

@ -0,0 +1,12 @@
version: 2
formats: all
mkdocs:
fail_on_warning: false
configuration: mkdocs.yml
python:
install:
- requirements: docs/requirements.txt
build:
os: "ubuntu-22.04"
tools:
python: "3.12"

285
.spelling Normal file
View File

@ -0,0 +1,285 @@
# markdown-spellcheck dictionary
000s
0s
100Gi
100Mi
100s
10h
10s
120s
120sec
1Gi
1Mi
1h
1m
200Ki
2Gi
2h
30s
3min
3s
4Gi
4xx
512Mi
5m
5xx
8Ki
90m
Alexandre
Alibaba
Ang
Anthos
Approvers
ArgoLabs
Artifactory
BlackRock
Breitgand
CRD
CRDs
CloudSQL
ClusterRoleBinding
ClusterRoles
ClusterWorkflowTemplate
ClusterWorkflowTemplates
Codespaces
ConfigMap
ConfigMaps
ContainerSet
Couler
CronWorkflow
CronWorkflows
CustomResource
DataDog
Dataflow
DevOps
Devenv
Dex
EditorConfig
EtcD
EventRouter
Generator
GitOps
Github
Golang
Grafana
Grammarly
Hadoop
Heptio
Homebrew
IAM-based
IPs
InitContainer
InsideBoard
Invocators
Istio
Jemison
JetBrains
KNative
Kaniko
Katacoda
Katib
Kerberos
KeyValueEditor
Killercoda
KubectlExec
Kubeflow
Kustomize
LDFlags
Lifecycle-Hook
LitmusChaos
MLOps
Makefile
Metaflow
MinIO
Minikube
MySQL
Nagal
Nano
Nginx
Node.JS.
OAuth
OAuth2
Okta
OpenAPI
OpenTelemetry
PDBs
PProf
PVCs
Peixuan
PersistentVolumeClaims
Ploomber
PostgreSQL
Postgres
Pre-fill
PriorityClass
RCs
Risc-V
Roadmap
RoleBinding
SDKs
SageMaker
ServiceAccount
Sharding
Singer.io
Snyk
Sumit
Tekton
Traefik
Triaging
TripAdvisor
UI
VSCode
Valasek
Webhooks
Welch
WorkflowEventBinding
WorkflowTemplate
WorkflowTemplates
a.m.
anded
apis
architecting
argo
argoexec
argoproj
args
async
auth
backend
backoff
backported
boolean
booleans
buildkit
changelog
codebase
config
cpu
cron
daemoned
dependabot
dev
devenv
dockershim
docs
dropdown
e.g.
e2e
entrypoint
enum
env
errored
expr
fibonacci
finalizer
gitops
goroutine
goroutines
govaluate
grpc
gzipped
i.e.
idempotence
instantiator
instantiators
jenkins
k3d
k3s
k8s
k8s-jobs
kube
kube-apiserver
kube-scheduler
kubectl
kubelet
kubernetes
liveness
localhost
maxFailures
maxSuccess
md
memoization
memoized
memoizing
metadata
minikube
mutex
mutexes
namespace
namespaces
natively
nix.conf
non-member
p.m.
parameterizing
params
pprof
pre-commit
pytorch
qps
ray
rc2
repo
retryStrategy
roadmap
runtime
runtimes
s3
sandboxed
shortcodes
stateful
stderr
symlinks
temporality
tolerations
triaged
un-reconciled
v1
v1.0
v1.1
v1.2
v1.24
v1.3
v2
v2.0
v2.1
v2.10
v2.10.2
v2.11
v2.12
v2.35.0
v2.4
v2.5
v2.6
v2.7
v2.7.2
v2.8
v2.9
v3.0
v3.0.0
v3.1
v3.1.4
v3.2
v3.2.
v3.3
v3.3.
v3.4
v3.4.
v3.4.4
v3.5
v3.6
v3.6.0
v3.6.1
v3.6.5
v3.7
v3.7.0
validator
vendored
versioned
versioning
webHDFS
webhook
webhooks
workflow-controller-configmap
workqueue
yaml
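The dictionary above is read by mdspell (markdown-spellcheck) from the repository root; a sketch of a non-interactive local check:

mdspell --en-us --ignore-numbers --ignore-acronyms --report 'docs/**/*.md'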

5040
CHANGELOG-2-x-x.md Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

1
CODEOWNERS Normal file
View File

@ -0,0 +1 @@
*.proto @joibel @terrytangyuan @sarabala1979

View File

@ -1,23 +0,0 @@
# Code of Conduct
We welcome participation from individuals and groups of all backgrounds who want to benefit the broader open source community
through participation in this project. We are dedicated to ensuring a productive, safe and educational experience for all.
# Guidelines
Be welcoming
* Make it easy for new members to learn and contribute. Help them along the path. Don't make them jump through hoops.
Be considerate
* There is a live person at the other end of the Internet. Consider how your comments will affect them. It is often better to give a quick but useful reply than to delay to compose a more thorough reply.
Be respectful
* Not everyone is Linus Torvalds, and this is probably a good thing :) but everyone is deserving of respect and consideration for wanting to benefit the broader community. Criticize ideas but respect the person. Saying something positive before you criticize lets the other person know that your criticism is not personal.
Be patient
* We have diverse backgrounds. It will take time and effort to understand each others' points of view. Some of us have day jobs and other responsibilities and may take time to respond to requests.
# Relevant References
* https://www.djangoproject.com/conduct/
* http://contributor-covenant.org/version/1/4/code_of_conduct.md
* http://contributor-covenant.org/

View File

@ -1,27 +1,2 @@
# How to contribute (Work in progress)
## How to report a bug
Open an issue at https://github.com/argoproj/
* What did you do? (how to reproduce)
* What did you see? (include logs and screenshots as appropriate)
* What did you expect?
## How to contribute a bug fix
Go to https://github.com/argoproj/
* Open an issue and discuss it.
* Create a pull request for your fix.
## How to suggest a new feature
Go to https://groups.google.com/forum/#!forum/argoproj
* Create a new topic to discuss your feature.
## How to setup your dev environment
xxx
## Most needed contributions
* TBD
<!-- markdownlint-disable-next-line MD041 -->
See [docs/CONTRIBUTING.md](docs/CONTRIBUTING.md).

132
Dockerfile Normal file
View File

@ -0,0 +1,132 @@
#syntax=docker/dockerfile:1.2
ARG GIT_COMMIT=unknown
ARG GIT_TAG=unknown
ARG GIT_TREE_STATE=unknown
FROM golang:1.24.4-alpine3.22 as builder
# libc-dev to build openapi-gen
RUN apk update && apk add --no-cache \
git \
make \
ca-certificates \
wget \
curl \
gcc \
libc-dev \
bash \
mailcap
WORKDIR /go/src/github.com/argoproj/argo-workflows
COPY go.mod .
COPY go.sum .
RUN --mount=type=cache,target=/go/pkg/mod go mod download
COPY . .
####################################################################################################
FROM node:20-alpine as argo-ui
RUN apk update && apk add --no-cache git
COPY ui/package.json ui/yarn.lock ui/
RUN --mount=type=cache,target=/root/.yarn \
YARN_CACHE_FOLDER=/root/.yarn JOBS=max \
yarn --cwd ui install --network-timeout 1000000
COPY ui ui
COPY api api
RUN --mount=type=cache,target=/root/.yarn \
YARN_CACHE_FOLDER=/root/.yarn JOBS=max \
NODE_OPTIONS="--max-old-space-size=2048" JOBS=max yarn --cwd ui build
####################################################################################################
FROM builder as argoexec-build
ARG GIT_COMMIT
ARG GIT_TAG
ARG GIT_TREE_STATE
RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build make dist/argoexec GIT_COMMIT=${GIT_COMMIT} GIT_TAG=${GIT_TAG} GIT_TREE_STATE=${GIT_TREE_STATE}
####################################################################################################
FROM builder as workflow-controller-build
ARG GIT_COMMIT
ARG GIT_TAG
ARG GIT_TREE_STATE
RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build make dist/workflow-controller GIT_COMMIT=${GIT_COMMIT} GIT_TAG=${GIT_TAG} GIT_TREE_STATE=${GIT_TREE_STATE}
####################################################################################################
FROM builder as argocli-build
ARG GIT_COMMIT
ARG GIT_TAG
ARG GIT_TREE_STATE
RUN mkdir -p ui/dist
COPY --from=argo-ui ui/dist/app ui/dist/app
# update timestamp so that `make` doesn't try to rebuild this -- it was already built in the previous stage
RUN touch ui/dist/app/index.html
RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build STATIC_FILES=true make dist/argo GIT_COMMIT=${GIT_COMMIT} GIT_TAG=${GIT_TAG} GIT_TREE_STATE=${GIT_TREE_STATE}
####################################################################################################
FROM gcr.io/distroless/static as argoexec-base
COPY --from=argoexec-build /etc/mime.types /etc/mime.types
COPY hack/ssh_known_hosts /etc/ssh/
COPY hack/nsswitch.conf /etc/
####################################################################################################
FROM argoexec-base as argoexec-nonroot
USER 8737
COPY --chown=8737 --from=argoexec-build /go/src/github.com/argoproj/argo-workflows/dist/argoexec /bin/
ENTRYPOINT [ "argoexec" ]
####################################################################################################
FROM argoexec-base as argoexec
COPY --from=argoexec-build /go/src/github.com/argoproj/argo-workflows/dist/argoexec /bin/
ENTRYPOINT [ "argoexec" ]
####################################################################################################
FROM gcr.io/distroless/static as workflow-controller
USER 8737
COPY hack/ssh_known_hosts /etc/ssh/
COPY hack/nsswitch.conf /etc/
COPY --chown=8737 --from=workflow-controller-build /go/src/github.com/argoproj/argo-workflows/dist/workflow-controller /bin/
ENTRYPOINT [ "workflow-controller" ]
####################################################################################################
FROM gcr.io/distroless/static as argocli
USER 8737
WORKDIR /home/argo
# Temporary workaround for https://github.com/grpc/grpc-go/issues/434
ENV GRPC_ENFORCE_ALPN_ENABLED=false
COPY hack/ssh_known_hosts /etc/ssh/
COPY hack/nsswitch.conf /etc/
COPY --from=argocli-build /go/src/github.com/argoproj/argo-workflows/dist/argo /bin/
ENTRYPOINT [ "argo" ]

61
Dockerfile.windows Normal file
View File

@ -0,0 +1,61 @@
####################################################################################################
# Builder image
# Initial stage which prepares build dependencies and CLI tooling we need for our final image
# Also used as the image in CI jobs so needs all dependencies
####################################################################################################
ARG IMAGE_OS_VERSION=ltsc2022-amd64
ARG GIT_COMMIT=unknown
ARG GIT_TAG=unknown
ARG GIT_TREE_STATE=unknown
# had issues with the official golang image for windows so I'm using plain servercore
FROM mcr.microsoft.com/windows/servercore:${IMAGE_OS_VERSION} as builder
ENV GOLANG_VERSION=1.24
SHELL ["powershell", "-Command"]
# install chocolatey package manager
ENV chocolateyUseWindowsCompression=false
RUN iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1')); \
choco feature disable --name showDownloadProgress ; \
choco feature enable -n allowGlobalConfirmation
# install golang, dep and other tools
RUN choco install golang --version=$env:GOLANG_VERSION ; \
choco install make dep git.portable 7zip.portable
####################################################################################################
# argoexec-base
# Used as the base for both the release and development version of argoexec
####################################################################################################
FROM mcr.microsoft.com/windows/nanoserver:${IMAGE_OS_VERSION} as argoexec-base
COPY --from=builder /windows/system32/netapi32.dll /windows/system32/netapi32.dll
COPY --from=builder C:/ProgramData/chocolatey/lib/7zip.portable/tools/7z-extra/x64/7za.exe C:/app/7za.exe
# add binaries to path
USER Administrator
RUN SETX /m path C:\app;%path%
####################################################################################################
# Argo Build stage which performs the actual build of Argo binaries
####################################################################################################
FROM builder as argo-build
ARG GIT_COMMIT
ARG GIT_TAG
ARG GIT_TREE_STATE
# Perform the build
WORKDIR C:/Users/ContainerAdministrator/go/src/github.com/argoproj/argo-workflows
COPY . .
# run in git bash for all the shell commands in Makefile to work
RUN bash -c 'make dist/argoexec GIT_COMMIT=${GIT_COMMIT} GIT_TAG=${GIT_TAG} GIT_TREE_STATE=${GIT_TREE_STATE} HACK_PKG_FILES_AS_PKGS=true'
####################################################################################################
# argoexec
####################################################################################################
FROM argoexec-base as argoexec
COPY --from=argo-build C:/Users/ContainerAdministrator/go/src/github.com/argoproj/argo-workflows/dist/argoexec C:/app/argoexec.exe
RUN argoexec version
ENTRYPOINT [ "argoexec" ]

202
LICENSE Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-2018 The Argo Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,13 +0,0 @@
Copyright 2017 Applatix, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

907
Makefile Normal file
View File

@ -0,0 +1,907 @@
export SHELL:=bash
export SHELLOPTS:=$(if $(SHELLOPTS),$(SHELLOPTS):)pipefail:errexit
# NOTE: Please ensure dependencies are synced with the flake.nix file in dev/nix/flake.nix before upgrading
# any external dependency. There is documentation on how to do this under the Developer Guide
USE_NIX := false
# https://stackoverflow.com/questions/4122831/disable-make-builtin-rules-and-variables-from-inside-the-make-file
MAKEFLAGS += --no-builtin-rules
.SUFFIXES:
# -- build metadata
BUILD_DATE := $(shell date -u +'%Y-%m-%dT%H:%M:%SZ')
# below 3 are copied verbatim to release.yaml
GIT_COMMIT := $(shell git rev-parse HEAD || echo unknown)
GIT_TAG := $(shell git describe --exact-match --tags --abbrev=0 2> /dev/null || echo untagged)
GIT_TREE_STATE := $(shell if [ -z "`git status --porcelain`" ]; then echo "clean" ; else echo "dirty"; fi)
GIT_REMOTE := origin
GIT_BRANCH := $(shell git rev-parse --symbolic-full-name --verify --quiet --abbrev-ref HEAD)
RELEASE_TAG := $(shell if [[ "$(GIT_TAG)" =~ ^v[0-9]+\.[0-9]+\.[0-9]+.*$$ ]]; then echo "true"; else echo "false"; fi)
DEV_BRANCH := $(shell [ "$(GIT_BRANCH)" = main ] || [ `echo $(GIT_BRANCH) | cut -c -8` = release- ] || [ `echo $(GIT_BRANCH) | cut -c -4` = dev- ] || [ $(RELEASE_TAG) = true ] && echo false || echo true)
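# DEV_BRANCH is false on main, release-* and dev-* branches and on release tags, and true everywhere else;
# it drives the STATIC_FILES default below, so dev branches skip the slow UI build.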
SRC := $(GOPATH)/src/github.com/argoproj/argo-workflows
VERSION := latest
# VERSION is the version to be used for files in manifests and should always be latest unless we are releasing
# we assume HEAD means you are on a tag
ifeq ($(RELEASE_TAG),true)
VERSION := $(GIT_TAG)
endif
# -- docker image publishing options
IMAGE_NAMESPACE ?= quay.io/argoproj
DOCKER_PUSH ?= false
TARGET_PLATFORM ?= linux/$(shell go env GOARCH)
K3D_CLUSTER_NAME ?= k3s-default # declares which cluster to import to in case it's not the default name
# -- dev container options
DEVCONTAINER_PUSH ?= false
# Extract image name from devcontainer.json
DEVCONTAINER_IMAGE ?= $(shell sed --quiet 's/^ *"image": "\([^"]*\)",/\1/p' .devcontainer/devcontainer.json)
ifeq ($(DEVCONTAINER_PUSH),true)
# Export both image and cache to the registry using zstd, since that produces much smaller images than gzip.
# Docs: https://docs.docker.com/build/exporters/image-registry/ and https://docs.docker.com/build/cache/backends/registry/
DEVCONTAINER_EXPORTER_COMMON_FLAGS ?= type=registry,compression=zstd,force-compression=true,oci-mediatypes=true
DEVCONTAINER_FLAGS ?= --output $(DEVCONTAINER_EXPORTER_COMMON_FLAGS) \
--cache-to $(DEVCONTAINER_EXPORTER_COMMON_FLAGS),ref=$(DEVCONTAINER_IMAGE):cache,mode=max
else
DEVCONTAINER_FLAGS ?= --output type=cacheonly
endif
# -- test options
E2E_WAIT_TIMEOUT ?= 90s # timeout for wait conditions
E2E_PARALLEL ?= 20
E2E_SUITE_TIMEOUT ?= 15m
GOTEST ?= go test -v -p 20
ALL_BUILD_TAGS ?= api,cli,cron,executor,examples,corefunctional,functional,plugins
BENCHMARK_COUNT ?= 6
# should we build the static files?
ifneq (,$(filter $(MAKECMDGOALS),codegen lint test docs start))
STATIC_FILES := false
else
STATIC_FILES ?= $(shell [ $(DEV_BRANCH) = true ] && echo false || echo true)
endif
# -- install & run options
PROFILE ?= minimal
KUBE_NAMESPACE ?= argo # namespace where Kubernetes resources/RBAC will be installed
PLUGINS ?= $(shell [ $(PROFILE) = plugins ] && echo true || echo false)
UI ?= false # start the UI with HTTP
UI_SECURE ?= false # start the UI with HTTPS
API ?= $(UI) # start the Argo Server
TASKS := controller
ifeq ($(API),true)
TASKS := controller server
endif
ifeq ($(UI_SECURE),true)
TASKS := controller server ui
endif
ifeq ($(UI),true)
TASKS := controller server ui
endif
# Which mode to run in:
# * `local` run the workflowcontroller and argo-server as single replicas on the local machine (default)
# * `kubernetes` run the workflow-controller and argo-server on the Kubernetes cluster
RUN_MODE := local
KUBECTX := $(shell [[ "`which kubectl`" != '' ]] && kubectl config current-context || echo none)
K3D := $(shell [[ "$(KUBECTX)" == "k3d-"* ]] && echo true || echo false)
ifeq ($(PROFILE),prometheus)
RUN_MODE := kubernetes
endif
ifeq ($(PROFILE),stress)
RUN_MODE := kubernetes
endif
# -- controller + server + executor env vars
LOG_LEVEL := debug
UPPERIO_DB_DEBUG := 0
DEFAULT_REQUEUE_TIME ?= 1s # by keeping this short we speed up tests
ALWAYS_OFFLOAD_NODE_STATUS := false
POD_STATUS_CAPTURE_FINALIZER ?= true
NAMESPACED := true
MANAGED_NAMESPACE ?= $(KUBE_NAMESPACE)
SECURE := false # whether or not to start Argo in TLS mode
AUTH_MODE := hybrid
ifeq ($(PROFILE),sso)
AUTH_MODE := sso
endif
ifndef $(GOPATH)
GOPATH:=$(shell go env GOPATH)
export GOPATH
endif
# Makefile managed tools
TOOL_MOCKERY := $(GOPATH)/bin/mockery
TOOL_CONTROLLER_GEN := $(GOPATH)/bin/controller-gen
TOOL_GO_TO_PROTOBUF := $(GOPATH)/bin/go-to-protobuf
TOOL_PROTOC_GEN_GOGO := $(GOPATH)/bin/protoc-gen-gogo
TOOL_PROTOC_GEN_GOGOFAST := $(GOPATH)/bin/protoc-gen-gogofast
TOOL_PROTOC_GEN_GRPC_GATEWAY:= $(GOPATH)/bin/protoc-gen-grpc-gateway
TOOL_PROTOC_GEN_SWAGGER := $(GOPATH)/bin/protoc-gen-swagger
TOOL_OPENAPI_GEN := $(GOPATH)/bin/openapi-gen
TOOL_SWAGGER := $(GOPATH)/bin/swagger
TOOL_GOIMPORTS := $(GOPATH)/bin/goimports
TOOL_GOLANGCI_LINT := $(GOPATH)/bin/golangci-lint
# `npm bin -g` would do this on newer npm versions than we require
NVM_BIN ?= $(shell npm config get prefix)/bin
TOOL_CLANG_FORMAT := /usr/local/bin/clang-format
TOOL_MDSPELL := $(NVM_BIN)/mdspell
TOOL_MARKDOWN_LINK_CHECK := $(NVM_BIN)/markdown-link-check
TOOL_MARKDOWNLINT := $(NVM_BIN)/markdownlint
TOOL_DEVCONTAINER := $(NVM_BIN)/devcontainer
TOOL_MKDOCS_DIR := $(HOME)/.venv/mkdocs
TOOL_MKDOCS := $(TOOL_MKDOCS_DIR)/bin/mkdocs
$(info GIT_COMMIT=$(GIT_COMMIT) GIT_BRANCH=$(GIT_BRANCH) GIT_TAG=$(GIT_TAG) GIT_TREE_STATE=$(GIT_TREE_STATE) RELEASE_TAG=$(RELEASE_TAG) DEV_BRANCH=$(DEV_BRANCH) VERSION=$(VERSION))
$(info KUBECTX=$(KUBECTX) K3D=$(K3D) DOCKER_PUSH=$(DOCKER_PUSH) TARGET_PLATFORM=$(TARGET_PLATFORM))
$(info RUN_MODE=$(RUN_MODE) PROFILE=$(PROFILE) AUTH_MODE=$(AUTH_MODE) SECURE=$(SECURE) STATIC_FILES=$(STATIC_FILES) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) LOG_LEVEL=$(LOG_LEVEL) NAMESPACED=$(NAMESPACED))
override LDFLAGS += \
-X github.com/argoproj/argo-workflows/v3.version=$(VERSION) \
-X github.com/argoproj/argo-workflows/v3.buildDate=$(BUILD_DATE) \
-X github.com/argoproj/argo-workflows/v3.gitCommit=$(GIT_COMMIT) \
-X github.com/argoproj/argo-workflows/v3.gitTreeState=$(GIT_TREE_STATE)
ifneq ($(GIT_TAG),)
override LDFLAGS += -X github.com/argoproj/argo-workflows/v3.gitTag=${GIT_TAG}
endif
# -- file lists
# These variables are only used as prereqs for the below targets, and we don't want to run them for other targets
# because the "go list" calls are very slow
ifneq (,$(filter dist/argoexec dist/workflow-controller dist/argo dist/argo-% docs/cli/argo.md,$(MAKECMDGOALS)))
HACK_PKG_FILES_AS_PKGS ?= false
ifeq ($(HACK_PKG_FILES_AS_PKGS),false)
ARGOEXEC_PKG_FILES := $(shell go list -f '{{ join .Deps "\n" }}' ./cmd/argoexec/ | grep 'argoproj/argo-workflows/v3/' | xargs go list -f '{{ range $$file := .GoFiles }}{{ print $$.ImportPath "/" $$file "\n" }}{{ end }}' | cut -c 39-)
CLI_PKG_FILES := $(shell [ -f ui/dist/app/index.html ] || (mkdir -p ui/dist/app && touch ui/dist/app/placeholder); go list -f '{{ join .Deps "\n" }}' ./cmd/argo/ | grep 'argoproj/argo-workflows/v3/' | xargs go list -f '{{ range $$file := .GoFiles }}{{ print $$.ImportPath "/" $$file "\n" }}{{ end }}' | cut -c 39-)
CONTROLLER_PKG_FILES := $(shell go list -f '{{ join .Deps "\n" }}' ./cmd/workflow-controller/ | grep 'argoproj/argo-workflows/v3/' | xargs go list -f '{{ range $$file := .GoFiles }}{{ print $$.ImportPath "/" $$file "\n" }}{{ end }}' | cut -c 39-)
else
# Building argoexec on Windows cannot rebuild the openapi, so we fall back to the old
# behaviour where we fake the dependencies and therefore don't rebuild
ARGOEXEC_PKG_FILES := $(shell echo cmd/argoexec && go list -f '{{ join .Deps "\n" }}' ./cmd/argoexec/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-)
CLI_PKG_FILES := $(shell echo cmd/argo && go list -f '{{ join .Deps "\n" }}' ./cmd/argo/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-)
CONTROLLER_PKG_FILES := $(shell echo cmd/workflow-controller && go list -f '{{ join .Deps "\n" }}' ./cmd/workflow-controller/ | grep 'argoproj/argo-workflows/v3/' | cut -c 39-)
endif
else
ARGOEXEC_PKG_FILES :=
CLI_PKG_FILES :=
CONTROLLER_PKG_FILES :=
endif
TYPES := $(shell find pkg/apis/workflow/v1alpha1 -type f -name '*.go' -not -name openapi_generated.go -not -name '*generated*' -not -name '*test.go')
CRDS := $(shell find manifests/base/crds -type f -name 'argoproj.io_*.yaml')
SWAGGER_FILES := pkg/apiclient/_.primary.swagger.json \
pkg/apiclient/_.secondary.swagger.json \
pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json \
pkg/apiclient/cronworkflow/cron-workflow.swagger.json \
pkg/apiclient/event/event.swagger.json \
pkg/apiclient/eventsource/eventsource.swagger.json \
pkg/apiclient/info/info.swagger.json \
pkg/apiclient/sensor/sensor.swagger.json \
pkg/apiclient/workflow/workflow.swagger.json \
pkg/apiclient/workflowarchive/workflow-archive.swagger.json \
pkg/apiclient/workflowtemplate/workflow-template.swagger.json
PROTO_BINARIES := $(TOOL_PROTOC_GEN_GOGO) $(TOOL_PROTOC_GEN_GOGOFAST) $(TOOL_GOIMPORTS) $(TOOL_PROTOC_GEN_GRPC_GATEWAY) $(TOOL_PROTOC_GEN_SWAGGER) $(TOOL_CLANG_FORMAT)
GENERATED_DOCS := docs/fields.md docs/cli/argo.md docs/workflow-controller-configmap.md
# protoc,my.proto
define protoc
# protoc $(1)
[ -e ./vendor ] || go mod vendor
protoc \
-I /usr/local/include \
-I $(CURDIR) \
-I $(CURDIR)/vendor \
-I $(GOPATH)/src \
-I $(GOPATH)/pkg/mod/github.com/gogo/protobuf@v1.3.2/gogoproto \
-I $(GOPATH)/pkg/mod/github.com/grpc-ecosystem/grpc-gateway@v1.16.0/third_party/googleapis \
--gogofast_out=plugins=grpc:$(GOPATH)/src \
--grpc-gateway_out=logtostderr=true:$(GOPATH)/src \
--swagger_out=logtostderr=true,fqn_for_swagger_name=true:. \
$(1)
perl -i -pe 's|argoproj/argo-workflows/|argoproj/argo-workflows/v3/|g' `echo "$(1)" | sed 's/proto/pb.go/g'`
endef
# cli
.PHONY: cli
cli: dist/argo
ui/dist/app/index.html: $(shell find ui/src -type f && find ui -maxdepth 1 -type f)
ifeq ($(STATIC_FILES),true)
# `yarn install` is fast (~2s), so you can call it safely.
JOBS=max yarn --cwd ui install
# `yarn build` is slow, so we guard it with an up-to-date check.
JOBS=max yarn --cwd ui build
else
@mkdir -p ui/dist/app
touch ui/dist/app/index.html
endif
dist/argo-linux-amd64: GOARGS = GOOS=linux GOARCH=amd64
dist/argo-linux-arm64: GOARGS = GOOS=linux GOARCH=arm64
dist/argo-linux-ppc64le: GOARGS = GOOS=linux GOARCH=ppc64le
dist/argo-linux-riscv64: GOARGS = GOOS=linux GOARCH=riscv64
dist/argo-linux-s390x: GOARGS = GOOS=linux GOARCH=s390x
dist/argo-darwin-amd64: GOARGS = GOOS=darwin GOARCH=amd64
dist/argo-darwin-arm64: GOARGS = GOOS=darwin GOARCH=arm64
dist/argo-windows-amd64: GOARGS = GOOS=windows GOARCH=amd64
dist/argo-windows-%.gz: dist/argo-windows-%
gzip --force --keep dist/argo-windows-$*.exe
dist/argo-windows-%: ui/dist/app/index.html $(CLI_PKG_FILES) go.sum
CGO_ENABLED=0 $(GOARGS) go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@.exe ./cmd/argo
dist/argo-%.gz: dist/argo-%
gzip --force --keep dist/argo-$*
dist/argo-%: ui/dist/app/index.html $(CLI_PKG_FILES) go.sum
CGO_ENABLED=0 $(GOARGS) go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argo
dist/argo: ui/dist/app/index.html $(CLI_PKG_FILES) go.sum
ifeq ($(shell uname -s),Darwin)
# if local, then build fast: use CGO and dynamic-linking
go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS}' -o $@ ./cmd/argo
else
CGO_ENABLED=0 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argo
endif
argocli-image:
.PHONY: clis
clis: dist/argo-linux-amd64.gz dist/argo-linux-arm64.gz dist/argo-linux-ppc64le.gz dist/argo-linux-riscv64.gz dist/argo-linux-s390x.gz dist/argo-darwin-amd64.gz dist/argo-darwin-arm64.gz dist/argo-windows-amd64.gz
# controller
.PHONY: controller
controller: dist/workflow-controller
dist/workflow-controller: $(CONTROLLER_PKG_FILES) go.sum
ifeq ($(shell uname -s),Darwin)
# if local, then build fast: use CGO and dynamic-linking
go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS}' -o $@ ./cmd/workflow-controller
else
CGO_ENABLED=0 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/workflow-controller
endif
workflow-controller-image:
# argoexec
dist/argoexec: $(ARGOEXEC_PKG_FILES) go.sum
ifeq ($(shell uname -s),Darwin)
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -gcflags '${GCFLAGS}' -v -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argoexec
else
CGO_ENABLED=0 go build -v -gcflags '${GCFLAGS}' -ldflags '${LDFLAGS} -extldflags -static' -o $@ ./cmd/argoexec
endif
argoexec-image:
argoexec-nonroot-image:
%-image:
[ ! -e dist/$* ] || mv dist/$* .
# Special handling for argoexec-nonroot to create argoexec:VERSION-nonroot instead of argoexec-nonroot:VERSION
if [ "$*" = "argoexec-nonroot" ]; then \
image_name="$(IMAGE_NAMESPACE)/argoexec:$(VERSION)-nonroot"; \
else \
image_name="$(IMAGE_NAMESPACE)/$*:$(VERSION)"; \
fi; \
docker buildx build \
--platform $(TARGET_PLATFORM) \
--build-arg GIT_COMMIT=$(GIT_COMMIT) \
--build-arg GIT_TAG=$(GIT_TAG) \
--build-arg GIT_TREE_STATE=$(GIT_TREE_STATE) \
-t $$image_name \
--target $* \
--load \
.; \
[ ! -e $* ] || mv $* dist/; \
docker run --rm -t $$image_name version; \
if [ $(K3D) = true ]; then \
k3d image import -c $(K3D_CLUSTER_NAME) $$image_name; \
fi; \
if [ $(DOCKER_PUSH) = true ] && [ $(IMAGE_NAMESPACE) != argoproj ] ; then \
docker push $$image_name; \
fi
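# e.g. `make argoexec-nonroot-image IMAGE_NAMESPACE=example VERSION=v9.9.9` (hypothetical values) builds
# example/argoexec:v9.9.9-nonroot for TARGET_PLATFORM, loads it locally, and imports it into k3d if active.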
.PHONY: codegen
codegen: types swagger manifests $(TOOL_MOCKERY) $(GENERATED_DOCS)
go generate ./...
# The generated markdown contains links to nowhere for interfaces, so remove them
sed -i.bak 's/\[interface{}\](#interface)/`interface{}`/g' docs/executor_swagger.md && rm -f docs/executor_swagger.md.bak
make --directory sdks/java USE_NIX=$(USE_NIX) generate
make --directory sdks/python USE_NIX=$(USE_NIX) generate
.PHONY: check-pwd
check-pwd:
ifneq ($(SRC),$(PWD))
	@echo "⚠️ Code generation will not work if code is not checked out into $(SRC)" >&2
endif
.PHONY: types
types: check-pwd pkg/apis/workflow/v1alpha1/generated.proto pkg/apis/workflow/v1alpha1/openapi_generated.go pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go
.PHONY: swagger
swagger: \
pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json \
pkg/apiclient/cronworkflow/cron-workflow.swagger.json \
pkg/apiclient/event/event.swagger.json \
pkg/apiclient/eventsource/eventsource.swagger.json \
pkg/apiclient/info/info.swagger.json \
pkg/apiclient/sensor/sensor.swagger.json \
pkg/apiclient/workflow/workflow.swagger.json \
pkg/apiclient/workflowarchive/workflow-archive.swagger.json \
pkg/apiclient/workflowtemplate/workflow-template.swagger.json \
manifests/base/crds/full/argoproj.io_workflows.yaml \
manifests \
api/openapi-spec/swagger.json \
api/jsonschema/schema.json
$(TOOL_MOCKERY): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
go install github.com/vektra/mockery/v2@v2.53.3
endif
$(TOOL_CONTROLLER_GEN): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.18.0
endif
$(TOOL_GO_TO_PROTOBUF): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
# TODO: currently fails on v0.30.3 with
# Unable to clean package k8s.io.api.core.v1: remove /home/runner/go/pkg/mod/k8s.io/api@v0.30.3/core/v1/generated.proto: permission denied
go install k8s.io/code-generator/cmd/go-to-protobuf@v0.21.5
endif
$(GOPATH)/src/github.com/gogo/protobuf: Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
[ -e $@ ] || git clone --depth 1 https://github.com/gogo/protobuf.git -b v1.3.2 $@
endif
$(TOOL_PROTOC_GEN_GOGO): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
go install github.com/gogo/protobuf/protoc-gen-gogo@v1.3.2
endif
$(TOOL_PROTOC_GEN_GOGOFAST): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
go install github.com/gogo/protobuf/protoc-gen-gogofast@v1.3.2
endif
$(TOOL_PROTOC_GEN_GRPC_GATEWAY): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v1.16.0
endif
$(TOOL_PROTOC_GEN_SWAGGER): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@v1.16.0
endif
$(TOOL_OPENAPI_GEN): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
go install k8s.io/kube-openapi/cmd/openapi-gen@v0.0.0-20220124234850-424119656bbf
endif
$(TOOL_SWAGGER): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
go install github.com/go-swagger/go-swagger/cmd/swagger@v0.31.0
endif
$(TOOL_GOIMPORTS): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
go install golang.org/x/tools/cmd/goimports@v0.1.7
endif
$(TOOL_CLANG_FORMAT):
ifeq (, $(shell which clang-format))
ifeq ($(shell uname),Darwin)
brew install clang-format
else
sudo apt update
sudo apt install -y clang-format
endif
endif
# go-to-protobuf fails with mysterious errors on code that doesn't compile, hence lint-go as a dependency here
pkg/apis/workflow/v1alpha1/generated.proto: $(TOOL_GO_TO_PROTOBUF) $(PROTO_BINARIES) $(TYPES) $(GOPATH)/src/github.com/gogo/protobuf lint-go
# The tool generates these files under a v3/ folder, so symlink v3/ to the root folder
[ -e ./v3 ] || ln -s . v3
# Format proto files. Formatting changes generated code, so we do it here, rather than at lint time.
# Why clang-format? Google uses it.
find pkg/apiclient -name '*.proto'|xargs clang-format -i
$(TOOL_GO_TO_PROTOBUF) \
--go-header-file=./hack/custom-boilerplate.go.txt \
--packages=github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \
--apimachinery-packages=+k8s.io/apimachinery/pkg/util/intstr,+k8s.io/apimachinery/pkg/api/resource,k8s.io/apimachinery/pkg/runtime/schema,+k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/api/core/v1,k8s.io/api/policy/v1 \
--proto-import $(GOPATH)/src
# Delete the link
[ -e ./v3 ] && rm -rf v3
touch pkg/apis/workflow/v1alpha1/generated.proto
# this target will also create a .pb.go and a .pb.gw.go file, but in Make 3 we cannot use _grouped targets_, so instead we must choose
# one file to represent all of them
pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto
$(call protoc,pkg/apiclient/clusterworkflowtemplate/cluster-workflow-template.proto)
pkg/apiclient/cronworkflow/cron-workflow.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/cronworkflow/cron-workflow.proto
$(call protoc,pkg/apiclient/cronworkflow/cron-workflow.proto)
pkg/apiclient/event/event.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/event/event.proto
$(call protoc,pkg/apiclient/event/event.proto)
pkg/apiclient/eventsource/eventsource.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/eventsource/eventsource.proto
$(call protoc,pkg/apiclient/eventsource/eventsource.proto)
pkg/apiclient/info/info.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/info/info.proto
$(call protoc,pkg/apiclient/info/info.proto)
pkg/apiclient/sensor/sensor.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/sensor/sensor.proto
$(call protoc,pkg/apiclient/sensor/sensor.proto)
pkg/apiclient/workflow/workflow.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflow/workflow.proto
$(call protoc,pkg/apiclient/workflow/workflow.proto)
pkg/apiclient/workflowarchive/workflow-archive.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflowarchive/workflow-archive.proto
$(call protoc,pkg/apiclient/workflowarchive/workflow-archive.proto)
pkg/apiclient/workflowtemplate/workflow-template.swagger.json: $(PROTO_BINARIES) $(TYPES) pkg/apiclient/workflowtemplate/workflow-template.proto
$(call protoc,pkg/apiclient/workflowtemplate/workflow-template.proto)
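# Example (illustrative): each rule above can be invoked directly to regenerate a
# single client's swagger (plus its .pb.go and .pb.gw.go siblings), e.g.:
#   make pkg/apiclient/workflow/workflow.swagger.json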
# generate other files for other CRDs
manifests/base/crds/full/argoproj.io_workflows.yaml: $(TOOL_CONTROLLER_GEN) $(TYPES) ./hack/manifests/crdgen.sh ./hack/manifests/crds.go
./hack/manifests/crdgen.sh
.PHONY: manifests
manifests: \
manifests/install.yaml \
manifests/namespace-install.yaml \
manifests/quick-start-minimal.yaml \
manifests/quick-start-mysql.yaml \
manifests/quick-start-postgres.yaml \
dist/manifests/install.yaml \
dist/manifests/namespace-install.yaml \
dist/manifests/quick-start-minimal.yaml \
dist/manifests/quick-start-mysql.yaml \
dist/manifests/quick-start-postgres.yaml
.PHONY: manifests/install.yaml
manifests/install.yaml: /dev/null
kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/cluster-install | ./hack/manifests/auto-gen-msg.sh > manifests/install.yaml
.PHONY: manifests/namespace-install.yaml
manifests/namespace-install.yaml: /dev/null
kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/namespace-install | ./hack/manifests/auto-gen-msg.sh > manifests/namespace-install.yaml
.PHONY: manifests/quick-start-minimal.yaml
manifests/quick-start-minimal.yaml: /dev/null
kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/minimal | ./hack/manifests/auto-gen-msg.sh > manifests/quick-start-minimal.yaml
.PHONY: manifests/quick-start-mysql.yaml
manifests/quick-start-mysql.yaml: /dev/null
kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/mysql | ./hack/manifests/auto-gen-msg.sh > manifests/quick-start-mysql.yaml
.PHONY: manifests/quick-start-postgres.yaml
manifests/quick-start-postgres.yaml: /dev/null
kubectl kustomize --load-restrictor=LoadRestrictionsNone manifests/quick-start/postgres | ./hack/manifests/auto-gen-msg.sh > manifests/quick-start-postgres.yaml
dist/manifests/%: manifests/%
@mkdir -p dist/manifests
sed 's/:latest/:$(VERSION)/' manifests/$* > $@
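# Example (sketch): the pattern rule above copies any manifest into dist/ with the
# image tag pinned to $(VERSION); the VERSION value below is hypothetical:
#   make dist/manifests/install.yaml VERSION=v9.9.9
#   grep 'image:' dist/manifests/install.yaml   # tags now read :v9.9.9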
# lint/test/etc
.PHONY: manifests-validate
manifests-validate:
kubectl apply --server-side --validate=strict --dry-run=server -f 'manifests/*.yaml'
$(TOOL_GOLANGCI_LINT): Makefile
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v2.1.6
.PHONY: lint lint-go lint-ui
lint: lint-go lint-ui features-validate
lint-go: $(TOOL_GOLANGCI_LINT) ui/dist/app/index.html
rm -Rf v3 vendor
# If you're using `woc.wf.Spec` or `woc.execWf.Status` your code probably won't work with WorkflowTemplate.
# * Change `woc.wf.Spec` to `woc.execWf.Spec`.
# * Change `woc.execWf.Status` to `woc.wf.Status`.
@awk '(/woc.wf.Spec/ || /woc.execWf.Status/) && !/not-woc-misuse/ {print FILENAME ":" FNR "\t" $$0 ; exit 1}' $(shell find workflow/controller -type f -name '*.go' -not -name '*test*')
# Tidy Go modules
go mod tidy
# Lint Go files
$(TOOL_GOLANGCI_LINT) run --fix --verbose
lint-ui: ui/dist/app/index.html
# Lint the UI
if [ -e ui/node_modules ]; then yarn --cwd ui lint ; fi
# Deduplicate Node modules
if [ -e ui/node_modules ]; then yarn --cwd ui deduplicate ; fi
# for local development we have a faster target that prints to stdout, does not use JSON, and can cache because it has no coverage
.PHONY: test
test: ui/dist/app/index.html util/telemetry/metrics_list.go util/telemetry/attributes.go
go build ./...
env KUBECONFIG=/dev/null $(GOTEST) ./...
# marker file; based on its modification time, we know how long ago this target was run
@mkdir -p dist
touch dist/test
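# Example (hedged): run the full local unit-test suite; the UI bundle and telemetry
# files are (re)generated first because they are prerequisites of the test target:
#   make test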
.PHONY: install
install: githooks
kubectl get ns $(KUBE_NAMESPACE) || kubectl create ns $(KUBE_NAMESPACE)
kubectl config set-context --current --namespace=$(KUBE_NAMESPACE)
@echo "installing PROFILE=$(PROFILE)"
kubectl kustomize --load-restrictor=LoadRestrictionsNone test/e2e/manifests/$(PROFILE) \
| sed 's|quay.io/argoproj/|$(IMAGE_NAMESPACE)/|' \
| sed 's/namespace: argo/namespace: $(KUBE_NAMESPACE)/' \
| KUBECTL_APPLYSET=true kubectl -n $(KUBE_NAMESPACE) apply --applyset=configmaps/install --server-side --prune -f -
ifeq ($(PROFILE),stress)
kubectl -n $(KUBE_NAMESPACE) apply -f test/stress/massive-workflow.yaml
endif
ifeq ($(UI_SECURE)$(PROFILE),truesso)
KUBE_NAMESPACE=$(KUBE_NAMESPACE) ./hack/update-sso-redirect-url.sh
endif
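# Example (illustrative): install the e2e manifests with a non-default profile and
# namespace; both values below are examples only:
#   make install PROFILE=minimal KUBE_NAMESPACE=argo-dev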
.PHONY: argosay
argosay:
ifeq ($(DOCKER_PUSH),true)
cd test/e2e/images/argosay/v2 && \
docker buildx build \
--platform linux/amd64,linux/arm64 \
-t argoproj/argosay:v2 \
--push \
.
else
cd test/e2e/images/argosay/v2 && \
docker build . -t argoproj/argosay:v2
endif
ifeq ($(K3D),true)
k3d image import -c $(K3D_CLUSTER_NAME) argoproj/argosay:v2
endif
.PHONY: argosayv1
argosayv1:
ifeq ($(DOCKER_PUSH),true)
cd test/e2e/images/argosay/v1 && \
docker buildx build \
--platform linux/amd64,linux/arm64 \
-t argoproj/argosay:v1 \
--push \
.
else
cd test/e2e/images/argosay/v1 && \
docker build . -t argoproj/argosay:v1
endif
dist/argosay:
mkdir -p dist
cp test/e2e/images/argosay/v2/argosay dist/
.PHONY: kit
kit: Makefile
go install github.com/kitproj/kit@v0.1.79
.PHONY: start
ifeq ($(RUN_MODE),local)
start: kit
else
start: install kit
endif
@echo "starting STATIC_FILES=$(STATIC_FILES) (DEV_BRANCH=$(DEV_BRANCH), GIT_BRANCH=$(GIT_BRANCH)), AUTH_MODE=$(AUTH_MODE), RUN_MODE=$(RUN_MODE), MANAGED_NAMESPACE=$(MANAGED_NAMESPACE)"
ifneq ($(API),true)
@echo "⚠️️ not starting API. If you want to test the API, use 'make start API=true' to start it"
endif
ifneq ($(UI),true)
@echo "⚠️ not starting UI. If you want to test the UI, run 'make start UI=true' to start it"
endif
ifneq ($(PLUGINS),true)
@echo "⚠️ not starting plugins. If you want to test plugins, run 'make start PROFILE=plugins' to start it"
endif
# Check that dex, azurite, minio, postgres and mysql are in the hosts file
ifeq ($(AUTH_MODE),sso)
grep '127.0.0.1.*dex' /etc/hosts
endif
grep '127.0.0.1.*azurite' /etc/hosts
grep '127.0.0.1.*minio' /etc/hosts
grep '127.0.0.1.*postgres' /etc/hosts
grep '127.0.0.1.*mysql' /etc/hosts
ifeq ($(RUN_MODE),local)
env DEFAULT_REQUEUE_TIME=$(DEFAULT_REQUEUE_TIME) ARGO_SECURE=$(SECURE) ALWAYS_OFFLOAD_NODE_STATUS=$(ALWAYS_OFFLOAD_NODE_STATUS) ARGO_LOGLEVEL=$(LOG_LEVEL) UPPERIO_DB_DEBUG=$(UPPERIO_DB_DEBUG) ARGO_AUTH_MODE=$(AUTH_MODE) ARGO_NAMESPACED=$(NAMESPACED) ARGO_NAMESPACE=$(KUBE_NAMESPACE) ARGO_MANAGED_NAMESPACE=$(MANAGED_NAMESPACE) ARGO_EXECUTOR_PLUGINS=$(PLUGINS) ARGO_POD_STATUS_CAPTURE_FINALIZER=$(POD_STATUS_CAPTURE_FINALIZER) ARGO_UI_SECURE=$(UI_SECURE) PROFILE=$(PROFILE) kit $(TASKS)
endif
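# Example (sketch): bring up a local dev stack with both the API and the UI, using
# the flags checked above:
#   make start API=true UI=true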
.PHONY: wait
wait:
# Wait for workflow controller
until lsof -i :9090 > /dev/null ; do sleep 10s ; done
ifeq ($(API),true)
# Wait for Argo Server
until lsof -i :2746 > /dev/null ; do sleep 10s ; done
endif
ifeq ($(PROFILE),mysql)
# Wait for MySQL
until (: < /dev/tcp/localhost/3306) ; do sleep 10s ; done
endif
.PHONY: postgres-cli
postgres-cli:
kubectl exec -ti svc/postgres -- psql -U postgres
.PHONY: postgres-dump
postgres-dump:
@mkdir -p db-dumps
kubectl exec svc/postgres -- pg_dump --clean -U postgres > "db-dumps/postgres-$(BUILD_DATE).sql"
.PHONY: mysql-cli
mysql-cli:
kubectl exec -ti svc/mysql -- mysql -u mysql -ppassword argo
.PHONY: mysql-dump
mysql-dump:
@mkdir -p db-dumps
kubectl exec svc/mysql -- mysqldump --no-tablespaces -u mysql -ppassword argo > "db-dumps/mysql-$(BUILD_DATE).sql"
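# Example (hedged): snapshot both databases into db-dumps/ before a risky change:
#   make postgres-dump mysql-dump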
test-cli: ./dist/argo
test-%:
E2E_WAIT_TIMEOUT=$(E2E_WAIT_TIMEOUT) go test -failfast -v -timeout $(E2E_SUITE_TIMEOUT) -count 1 --tags $* -parallel $(E2E_PARALLEL) ./test/e2e
.PHONY: test-%-sdk
test-%-sdk:
make --directory sdks/$* install test -B
Test%:
E2E_WAIT_TIMEOUT=$(E2E_WAIT_TIMEOUT) go test -failfast -v -timeout $(E2E_SUITE_TIMEOUT) -count 1 --tags $(ALL_BUILD_TAGS) -parallel $(E2E_PARALLEL) ./test/e2e -run='.*/$*'
Benchmark%:
go test --tags $(ALL_BUILD_TAGS) ./test/e2e -run='$@' -benchmem -count=$(BENCHMARK_COUNT) -bench .
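# Example (illustrative): the pattern targets above run an e2e suite by build tag,
# a single test by name, or a benchmark; all names below are examples only:
#   make test-functional
#   make TestHelloWorld
#   make BenchmarkWorkflowArchive BENCHMARK_COUNT=3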
# clean
.PHONY: clean
clean:
go clean
rm -Rf test-results node_modules vendor v2 v3 argoexec-linux-amd64 dist/* ui/dist
# Build telemetry files
TELEMETRY_BUILDER := $(shell find util/telemetry/builder -type f -name '*.go')
docs/metrics.md: $(TELEMETRY_BUILDER) util/telemetry/builder/values.yaml
@echo Rebuilding $@
go run ./util/telemetry/builder --metricsDocs $@
util/telemetry/metrics_list.go: $(TELEMETRY_BUILDER) util/telemetry/builder/values.yaml
@echo Rebuilding $@
go run ./util/telemetry/builder --metricsListGo $@
util/telemetry/attributes.go: $(TELEMETRY_BUILDER) util/telemetry/builder/values.yaml
@echo Rebuilding $@
go run ./util/telemetry/builder --attributesGo $@
# swagger
pkg/apis/workflow/v1alpha1/openapi_generated.go: $(TOOL_OPENAPI_GEN) $(TYPES)
# The tool generates these files under a v3/ folder, so symlink v3/ to the repo root
[ -e ./v3 ] || ln -s . v3
$(TOOL_OPENAPI_GEN) \
--go-header-file ./hack/custom-boilerplate.go.txt \
--input-dirs github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \
--output-package github.com/argoproj/argo-workflows/v3/pkg/apis/workflow/v1alpha1 \
--report-filename pkg/apis/api-rules/violation_exceptions.list
# Force the timestamp to be up to date
touch $@
# Delete the link
[ -e ./v3 ] && rm -rf v3
# generates many other files (listers, informers, client etc).
.PRECIOUS: pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go
pkg/apis/workflow/v1alpha1/zz_generated.deepcopy.go: $(TOOL_GO_TO_PROTOBUF) $(TYPES)
# The tool generates these files under a v3/ folder, so symlink v3/ to the repo root
[ -e ./v3 ] || ln -s . v3
bash $(GOPATH)/pkg/mod/k8s.io/code-generator@v0.21.5/generate-groups.sh \
"deepcopy,client,informer,lister" \
github.com/argoproj/argo-workflows/v3/pkg/client github.com/argoproj/argo-workflows/v3/pkg/apis \
workflow:v1alpha1 \
--go-header-file ./hack/custom-boilerplate.go.txt
# Force the timestamp to be up to date
touch $@
# Delete the link
[ -e ./v3 ] && rm -rf v3
dist/kubernetes.swagger.json: Makefile
@mkdir -p dist
# recurl will only fetch if the file doesn't exist, so delete it
rm -f $@
./hack/recurl.sh $@ https://raw.githubusercontent.com/kubernetes/kubernetes/v1.33.1/api/openapi-spec/swagger.json
pkg/apiclient/_.secondary.swagger.json: hack/api/swagger/secondaryswaggergen.go pkg/apis/workflow/v1alpha1/openapi_generated.go dist/kubernetes.swagger.json
rm -Rf v3 vendor
# We have `hack/api/swagger` so that most hack scripts do not depend on the whole code base and are therefore fast.
go run ./hack/api/swagger secondaryswaggergen
# we always ignore the conflicts, so let's automate figuring out how many there will be and just use that
dist/swagger-conflicts: $(TOOL_SWAGGER) $(SWAGGER_FILES)
$(TOOL_SWAGGER) mixin $(SWAGGER_FILES) 2>&1 | grep -c skipping > dist/swagger-conflicts || true
dist/mixed.swagger.json: $(TOOL_SWAGGER) $(SWAGGER_FILES) dist/swagger-conflicts
$(TOOL_SWAGGER) mixin -c $(shell cat dist/swagger-conflicts) $(SWAGGER_FILES) -o dist/mixed.swagger.json
dist/swaggifed.swagger.json: dist/mixed.swagger.json hack/api/swagger/swaggify.sh
cat dist/mixed.swagger.json | ./hack/api/swagger/swaggify.sh > dist/swaggifed.swagger.json
dist/kubeified.swagger.json: dist/swaggifed.swagger.json dist/kubernetes.swagger.json
go run ./hack/api/swagger kubeifyswagger dist/swaggifed.swagger.json dist/kubeified.swagger.json
dist/swagger.0.json: $(TOOL_SWAGGER) dist/kubeified.swagger.json
$(TOOL_SWAGGER) flatten --with-flatten minimal --with-flatten remove-unused dist/kubeified.swagger.json -o dist/swagger.0.json
api/openapi-spec/swagger.json: $(TOOL_SWAGGER) dist/swagger.0.json
$(TOOL_SWAGGER) flatten --with-flatten remove-unused dist/swagger.0.json -o api/openapi-spec/swagger.json
api/jsonschema/schema.json: api/openapi-spec/swagger.json hack/api/jsonschema/main.go
go run ./hack/api/jsonschema
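# Example (sketch): regenerating the final JSON schema walks the whole chain above
# (mixin -> swaggify -> kubeify -> flatten -> schema) via normal Make dependencies:
#   make api/jsonschema/schema.json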
go-diagrams/diagram.dot: ./hack/docs/diagram.go
rm -Rf go-diagrams
go run ./hack/docs diagram
docs/assets/diagram.png: go-diagrams/diagram.dot
cd go-diagrams && dot -Tpng diagram.dot -o ../docs/assets/diagram.png
docs/fields.md: api/openapi-spec/swagger.json $(shell find examples -type f) ui/dist/app/index.html hack/docs/fields.go
env ARGO_SECURE=false ARGO_INSECURE_SKIP_VERIFY=false ARGO_SERVER= ARGO_INSTANCEID= go run ./hack/docs fields
docs/workflow-controller-configmap.md: config/*.go hack/docs/workflow-controller-configmap.md hack/docs/configdoc.go
go run ./hack/docs configdoc
# generates several other files
docs/cli/argo.md: $(CLI_PKG_FILES) go.sum ui/dist/app/index.html hack/docs/cli.go
go run ./hack/docs cli
$(TOOL_MDSPELL): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
npm list -g markdown-spellcheck@1.3.1 > /dev/null || npm i -g markdown-spellcheck@1.3.1
endif
.PHONY: docs-spellcheck
docs-spellcheck: $(TOOL_MDSPELL) docs/metrics.md
# check docs for spelling mistakes
$(TOOL_MDSPELL) --ignore-numbers --ignore-acronyms --en-us --no-suggestions --report $(shell find docs -name '*.md' -not -name upgrading.md -not -name README.md -not -name fields.md -not -name workflow-controller-configmap.md -not -name executor_swagger.md -not -path '*/cli/*' -not -name tested-kubernetes-versions.md)
# alphabetize spelling file -- ignore first line (comment), then sort the rest case-sensitive and remove duplicates
$(shell cat .spelling | awk 'NR<2{ print $$0; next } { print $$0 | "LC_COLLATE=C sort" }' | uniq > .spelling.tmp && mv .spelling.tmp .spelling)
$(TOOL_MARKDOWN_LINK_CHECK): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
npm list -g markdown-link-check@3.11.1 > /dev/null || npm i -g markdown-link-check@3.11.1
endif
.PHONY: docs-linkcheck
docs-linkcheck: $(TOOL_MARKDOWN_LINK_CHECK)
# check docs for broken links
$(TOOL_MARKDOWN_LINK_CHECK) -q -c .mlc_config.json $(shell find docs -name '*.md' -not -name fields.md -not -name executor_swagger.md)
$(TOOL_MARKDOWNLINT): Makefile
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
npm list -g markdownlint-cli@0.33.0 > /dev/null || npm i -g markdownlint-cli@0.33.0
endif
.PHONY: docs-lint
docs-lint: $(TOOL_MARKDOWNLINT) docs/metrics.md
# lint docs
$(TOOL_MARKDOWNLINT) docs --fix --ignore docs/fields.md --ignore docs/executor_swagger.md --ignore docs/cli --ignore docs/walk-through/the-structure-of-workflow-specs.md --ignore docs/tested-kubernetes-versions.md
$(TOOL_MKDOCS): docs/requirements.txt
# update this in Nix when upgrading it here
ifneq ($(USE_NIX), true)
python3 -m venv $(TOOL_MKDOCS_DIR)
$(TOOL_MKDOCS_DIR)/bin/pip install --no-cache-dir -r $<
endif
.PHONY: docs
docs: $(TOOL_MKDOCS) \
docs-spellcheck \
docs-lint \
# TODO: This is temporarily disabled to unblock merging PRs.
# docs-linkcheck
# copy README.md to docs/README.md
./hack/docs/copy-readme.sh
# check environment-variables.md contains all variables mentioned in the code
./hack/docs/check-env-doc.sh
# build the docs
ifeq ($(shell echo $(GIT_BRANCH) | head -c 8),release-)
./hack/docs/tested-versions.sh > docs/tested-kubernetes-versions.md
endif
TZ=UTC $(TOOL_MKDOCS) build --strict
# tell the user the fastest way to edit docs
@echo " If you want to preview your docs, open site/index.html. If you want to edit them with hot-reload, run 'make docs-serve' to start mkdocs on port 8000"
.PHONY: docs-serve
docs-serve: docs
$(TOOL_MKDOCS) serve
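# Example (hedged): build the docs strictly and then serve them with hot-reload:
#   make docs-serve   # runs the docs target first, then mkdocs on port 8000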
# pre-commit checks
.git/hooks/%: hack/git/hooks/%
@mkdir -p .git/hooks
cp hack/git/hooks/$* .git/hooks/$*
.PHONY: githooks
githooks: .git/hooks/pre-commit .git/hooks/commit-msg
.PHONY: pre-commit
pre-commit: codegen lint docs
# marker file; based on its modification time, we know how long ago this target was run
touch dist/pre-commit
# release
release-notes: /dev/null
version=$(VERSION) envsubst '$$version' < hack/release-notes.md > release-notes
.PHONY: checksums
checksums:
sha256sum ./dist/argo-*.gz | awk -F './dist/' '{print $$1 $$2}' > ./dist/argo-workflows-cli-checksums.txt
# feature notes
FEATURE_FILENAME?=$(shell git branch --show-current)
.PHONY: feature-new
feature-new: hack/featuregen/featuregen
# Create a new feature documentation file in .features/pending/ ready for editing
# Uses the current branch name as the filename by default, or specify with FEATURE_FILENAME=name
$< new --filename $(FEATURE_FILENAME)
.PHONY: features-validate
features-validate: hack/featuregen/featuregen
# Validate all pending feature documentation files
$< validate
.PHONY: features-preview
features-preview: hack/featuregen/featuregen
# Preview how the features will appear in the documentation (dry run)
# Output to stdout
$< update --dry
.PHONY: features-update
features-update: hack/featuregen/featuregen
# Update the features documentation, but keep the feature files in the pending directory
# Updates docs/new-features.md for release-candidates
$< update --version $(VERSION)
.PHONY: features-release
features-release: hack/featuregen/featuregen
# Update the features documentation AND move the feature files to the released directory
# Use this for the final update when releasing a version
$< update --version $(VERSION) --final
hack/featuregen/featuregen: hack/featuregen/main.go hack/featuregen/contents.go hack/featuregen/contents_test.go hack/featuregen/main_test.go
go test ./hack/featuregen
go build -o $@ ./hack/featuregen
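# Example (illustrative): document a feature on the current branch and validate it;
# the FEATURE_FILENAME value is an example only:
#   make feature-new FEATURE_FILENAME=my-feature
#   make features-validate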
# dev container
$(TOOL_DEVCONTAINER): Makefile
npm list -g @devcontainers/cli@0.75.0 > /dev/null || npm i -g @devcontainers/cli@0.75.0
.PHONY: devcontainer-build
devcontainer-build: $(TOOL_DEVCONTAINER)
devcontainer build \
--workspace-folder . \
--config .devcontainer/builder/devcontainer.json \
--platform $(TARGET_PLATFORM) \
--image-name $(DEVCONTAINER_IMAGE) \
--cache-from $(DEVCONTAINER_IMAGE):cache \
$(DEVCONTAINER_FLAGS)
.PHONY: devcontainer-up
devcontainer-up: $(TOOL_DEVCONTAINER)
devcontainer up --workspace-folder .
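# Example (sketch): build the dev container image for one platform; TARGET_PLATFORM
# and DEVCONTAINER_IMAGE are assumed to be defined earlier in this Makefile:
#   make devcontainer-build TARGET_PLATFORM=linux/amd64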

OWNERS
@@ -0,0 +1,17 @@
owners:
- joibel
- sarabala1979
- terrytangyuan
approvers:
- alexec
- alexmt
- edlee2121
- isubasinghe
- jessesuen
- juliev0
- tczhao
reviewers:
- jswxstw
- shuangkun

README.md
@@ -1,43 +1,165 @@
# Argo - The Workflow Engine for Kubernetes
<!-- markdownlint-disable-next-line MD041 -->
[![Security Status](https://github.com/argoproj/argo-workflows/actions/workflows/snyk.yml/badge.svg?branch=main)](https://github.com/argoproj/argo-workflows/actions/workflows/snyk.yml?query=branch%3Amain)
[![OpenSSF Best Practices](https://bestpractices.coreinfrastructure.org/projects/3830/badge)](https://bestpractices.coreinfrastructure.org/projects/3830)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-workflows/badge)](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-workflows)
[![FOSSA License Status](https://app.fossa.com/api/projects/git%2Bgithub.com%2Fargoproj%2Fargo-workflows.svg?type=shield)](https://app.fossa.com/projects/git%2Bgithub.com%2Fargoproj%2Fargo-workflows?ref=badge_shield)
[![Slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack)
[![X Follow](https://img.shields.io/twitter/follow/argoproj?style=social)](https://x.com/argoproj)
[![LinkedIn](https://img.shields.io/badge/LinkedIn-argoproj-blue.svg?logo=linkedin)](https://www.linkedin.com/company/argoproj/)
[![Release Version](https://img.shields.io/github/v/release/argoproj/argo-workflows?label=argo-workflows)](https://github.com/argoproj/argo-workflows/releases/latest)
[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-workflows)](https://artifacthub.io/packages/helm/argo/argo-workflows)
![Argo Image](argo.png)
## What is Argo Workflows?
## What is Argo?
Argo is an open source container-native workflow engine for developing and running applications on Kubernetes.
* Define workflows where each step in the workflow is a container.
* Run rich CI/CD workflows using Docker-in-Docker, complex testing with built in artifact management, secret management and lifecycle management of dev/test resources.
* Run compute intensive jobs in a fraction of the time using parallelized workflows.
* Build, test and deploy scalable stateful and stateless cloud-native apps and microservices.
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes.
Argo Workflows is implemented as a Kubernetes CRD (Custom Resource Definition).
## Why Argo?
* Argo is designed from the ground up for containers without the baggage and limitations of legacy VM and server-based environments.
* Argo is cloud agnostic. Today we support AWS and GKE (alpha) with additional platforms coming soon.
* Argo with Kubernetes puts a cloud-scale supercomputer at your fingertips.
* With Argo, you don't need to install or learn other tools such as Jenkins, Chef, Cloud Formation... 
* Define workflows where each step is a container.
* Model multi-step workflows as a sequence of tasks or capture the dependencies between tasks using a directed acyclic graph (DAG).
* Easily run compute intensive jobs for machine learning or data processing in a fraction of the time using Argo Workflows on Kubernetes.
## Getting started
Argo is a [Cloud Native Computing Foundation (CNCF)](https://cncf.io/) graduated project.
### Step 1: Download and install Argo
## Use Cases
https://applatix.com/open-source/argo/get-started/installation
* [Machine Learning pipelines](https://argo-workflows.readthedocs.io/en/latest/use-cases/machine-learning/)
* [Data and batch processing](https://argo-workflows.readthedocs.io/en/latest/use-cases/data-processing/)
* [Infrastructure automation](https://argo-workflows.readthedocs.io/en/latest/use-cases/infrastructure-automation/)
* [CI/CD](https://argo-workflows.readthedocs.io/en/latest/use-cases/ci-cd/)
* [Other use cases](https://argo-workflows.readthedocs.io/en/latest/use-cases/other/)
### Step 2: Create and submit jobs
## Why Argo Workflows?
https://blog.argoproj.io/argo-workflow-demo-at-the-kubernetes-community-meeting-c428c3c93f9d
* Argo Workflows is the most popular workflow execution engine for Kubernetes.
* Light-weight, scalable, and easier to use.
* Designed from the ground up for containers without the overhead and limitations of legacy VM and server-based environments.
* Cloud agnostic and can run on any Kubernetes cluster.
## Main Features
* Container-native workflows for Kubernetes.
* Each step in the workflow is a container
* Arbitrarily compose sub-workflows to create larger workflows
* No need to install or learn other tools such as Jenkins, Chef, Cloud Formation
* Configuration as code (YAML for everything)
* Built-in support for artifacts, persistent volumes, and DNS/load-balancers/firewalls.
* DinD (Docker-in-Docker) out of the box. Run docker builds and other containers from within containerized workflows.
* "Cashboard" shows cost of running a workflow. Also, spending per user and application.
* Managed fixtures.
[Read what people said in our latest survey](https://blog.argoproj.io/argo-workflows-events-2023-user-survey-results-82c53bc30543)
## Resources
* Argo website: https://argoproj.github.io/argo-site
* Argo GitHub: https://github.com/argoproj
* Argo forum: https://groups.google.com/forum/#!forum/argoproj
## Try Argo Workflows
You can try Argo Workflows via one of the following:
1. [Interactive Training Material](https://killercoda.com/argoproj/course/argo-workflows/)
1. [Access the demo environment](https://workflows.apps.argoproj.io/workflows/argo)
![Screenshot](docs/assets/screenshot.png)
## Who uses Argo Workflows?
[Over 200 organizations are officially using Argo Workflows](USERS.md)
## Ecosystem
Just some of the projects that use or rely on Argo Workflows (complete list [here](https://github.com/akuity/awesome-argo#ecosystem-projects)):
* [Argo Events](https://github.com/argoproj/argo-events)
* [Couler](https://github.com/couler-proj/couler)
* [Hera](https://github.com/argoproj-labs/hera-workflows)
* [Katib](https://github.com/kubeflow/katib)
* [Kedro](https://kedro.readthedocs.io/en/stable/)
* [Kubeflow Pipelines](https://github.com/kubeflow/pipelines)
* [Netflix Metaflow](https://metaflow.org)
* [Onepanel](https://github.com/onepanelio/onepanel)
* [Orchest](https://github.com/orchest/orchest/)
* [Piper](https://github.com/quickube/piper)
* [Ploomber](https://github.com/ploomber/ploomber)
* [Seldon](https://github.com/SeldonIO/seldon-core)
* [SQLFlow](https://github.com/sql-machine-learning/sqlflow)
## Client Libraries
Check out our [Java, Golang and Python clients](docs/client-libraries.md).
## Quickstart
* [Get started here](https://argo-workflows.readthedocs.io/en/latest/quick-start/)
* [Walk-through examples](https://argo-workflows.readthedocs.io/en/latest/walk-through/)
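A minimal sketch of a first run, assuming the `argo` CLI is installed and the quick-start manifests are applied in the `argo` namespace:

```bash
# submit the hello-world example that ships in this repo and watch it run
argo submit -n argo --watch https://raw.githubusercontent.com/argoproj/argo-workflows/main/examples/hello-world.yaml
argo list -n argo
```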
## Documentation
[View the docs](https://argo-workflows.readthedocs.io/en/latest/)
## Features
An incomplete list of features Argo Workflows provides:
* UI to visualize and manage Workflows
* Artifact support (S3, Artifactory, Alibaba Cloud OSS, Azure Blob Storage, HTTP, Git, GCS, raw)
* Workflow templating to store commonly used Workflows in the cluster
* Archiving Workflows after executing for later access
* Scheduled workflows using cron
* Server interface with REST API (HTTP and GRPC)
* DAG or Steps based declaration of workflows
* Step level input & outputs (artifacts/parameters)
* Loops
* Parameterization
* Conditionals
* Timeouts (step & workflow level)
* Retry (step & workflow level)
* Resubmit (memoized)
* Suspend & Resume
* Cancellation
* K8s resource orchestration
* Exit Hooks (notifications, cleanup)
* Garbage collection of completed workflows
* Scheduling (affinity/tolerations/node selectors)
* Volumes (ephemeral/existing)
* Parallelism limits
* Daemoned steps
* DinD (docker-in-docker)
* Script steps
* Event emission
* Prometheus metrics
* Multiple executors
* Multiple pod and workflow garbage collection strategies
* Automatically calculated resource usage per step
* Java/Golang/Python SDKs
* Pod Disruption Budget support
* Single-sign on (OAuth2/OIDC)
* Webhook triggering
* CLI
* Out-of-the box and custom Prometheus metrics
* Windows container support
* Embedded widgets
* Multiplex log viewer
## Community Meetings
We host monthly community meetings where we and the community showcase demos and discuss the current and future state of the project. Feel free to join us!
For Community Meeting information, minutes and recordings, please [see here](https://bit.ly/argo-wf-cmty-mtng).
Participation in Argo Workflows is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md)
## Community Blogs and Presentations
* [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo)
* [Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts](https://youtu.be/XNXJtxkUKeY)
* [Argo Workflows and Pipelines - CI/CD, Machine Learning, and Other Kubernetes Workflows](https://youtu.be/UMaivwrAyTA)
* [Argo Ansible role: Provisioning Argo Workflows on OpenShift](https://medium.com/@marekermk/provisioning-argo-on-openshift-with-ansible-and-kustomize-340a1fda8b50)
* [Argo Workflows vs Apache Airflow](http://bit.ly/30YNIvT)
* [Beyond Prototypes: Production-Ready ML Systems with Metaflow and Argo](https://github.com/terrytangyuan/public-talks/tree/main/talks/kubecon-na-2023-metaflow-argo)
* [CI/CD with Argo on Kubernetes](https://medium.com/@bouwe.ceunen/ci-cd-with-argo-on-kubernetes-28c1a99616a9)
* [Define Your CI/CD Pipeline with Argo Workflows](https://haque-zubair.medium.com/define-your-ci-cd-pipeline-with-argo-workflows-25aefb02fa63)
* [Distributed Machine Learning Patterns from Manning Publication](https://github.com/terrytangyuan/distributed-ml-patterns)
* [Engineering Cloud Native AI Platform](https://github.com/terrytangyuan/public-talks/tree/main/talks/platform-con-2024-engineering-cloud-native-ai-platform)
* [Managing Thousands of Automatic Machine Learning Experiments with Argo and Katib](https://github.com/terrytangyuan/public-talks/blob/main/talks/argocon-automl-experiments-2022)
* [Revolutionizing Scientific Simulations with Argo Workflows](https://www.youtube.com/watch?v=BYVf7GhfiRg)
* [Running Argo Workflows Across Multiple Kubernetes Clusters](https://admiralty.io/blog/running-argo-workflows-across-multiple-kubernetes-clusters/)
* [Scaling Kubernetes: Best Practices for Managing Large-Scale Batch Jobs with Spark and Argo Workflow](https://www.youtube.com/watch?v=KqEKRPjy4aE)
* [Open Source Model Management Roundup: Polyaxon, Argo, and Seldon](https://www.anaconda.com/blog/developer-blog/open-source-model-management-roundup-polyaxon-argo-and-seldon/)
* [Producing 200 OpenStreetMap extracts in 35 minutes using a scalable data workflow](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/)
* [Production-Ready AI Platform on Kubernetes](https://github.com/terrytangyuan/public-talks/tree/main/talks/kubecon-europe-2024-production-ai-platform-on-k8s)
* [Argo integration review](http://dev.matt.hillsdon.net/2018/03/24/argo-integration-review.html)
* TGI Kubernetes with Joe Beda: [Argo workflow system](https://www.youtube.com/watch?v=M_rxPPLG8pU&start=859)
## Project Resources
* [Argo Project GitHub organization](https://github.com/argoproj)
* [Argo Website](https://argoproj.github.io/)
* [Argo Slack](https://argoproj.github.io/community/join-slack)
## Security
See [SECURITY.md](SECURITY.md).

@@ -1,67 +0,0 @@
# Roadmap
* Make it easier to use and contribute to the project.
* Install Argo on any existing k8s cluster.
* Integrate Argo with k8s RBAC & secrets.
* Support for running (KinK) Kubernetes in Kubernetes.
# History
* M1: Nov 2015
* Complete "hardwired" CI/CD workflow for a simple web application.
* Using Mesos and rabbitmq/celery for the workflow engine.
* M2: Jan 2016
* Persistent volume support using flocker.
* Initial "Cashboard" implementation.
* Automated installer for AWS.
* M3: May 2016
* GUI.
* GUI-based DSL.
* Artifacts for workflows.
* Container log management.
* Cluster autoscaling.
* Many, many volume management bugs.
* M4: Jul 2016
* Nested workflows.
* Time-based job scheduling.
* M5: Oct 2016
* Switched to K8s.
* Spot instances.
* Fixtures.
* Improve artifacts.
* YAML DSL.
* Email notification.
* Non-disruptive upgrades of platform software.
* Make flocker really work on AWS.
* M6: Dec 2016
* Scale AXDB.
* Scale internal event handling.
* Performance.
* Run chaos monkey.
* Hardening.
* M7: Mar 2017
* AppStore.
* Spot instances.
* Artifact management.
* Deployment.
* Improved artifact management.
* Improve non-disruptive upgrade.
* M8: May 2017
* Persistent volumes.
* Notification center.
* Secret management.
* M9: Jun 2017
* Rolling upgrade of deployments.
* Secret management v2.
* Remove rabbitmq.
* Managed ELBs.
* Prometheus.
* Managed fixtures (RDS, VM).
* Initial GCP/GKE support.
* M10: Aug 2017
* Ready to release to the world!
* Remove rabbitmq.
* YAML checker v2.
* Kubernetes 1.6.
* Dev CLI tool.
* Move to kops.

SECURITY.md
@@ -0,0 +1,42 @@
# Security
## Reporting a Vulnerability
If you find a security related bug in Argo Workflows, we kindly ask you for responsible
disclosure and for giving us appropriate time to react, analyze, and develop a
fix to mitigate the reported vulnerability.
Please report vulnerabilities by:
* Opening a draft GitHub Security Advisory: https://github.com/argoproj/argo-workflows/security/advisories/new
* Sending an e-mail to the following address: cncf-argo-security@lists.cncf.io
All vulnerabilities and associated information will be treated with full confidentiality.
## Public Disclosure
Security vulnerabilities will be disclosed via [release notes](CHANGELOG.md) and using the
[GitHub Security Advisories](https://github.com/argoproj/argo-workflows/security/advisories)
feature to keep our community well informed, and will credit you for your findings (unless you prefer to stay anonymous, of course).
## Vulnerability Scanning
See [static code analysis](docs/static-code-analysis.md).
## Internet Bug Bounty collaboration
We're happy to announce that the Argo project is collaborating with the great
folks over at
[Hacker One](https://hackerone.com/) and their
[Internet Bug Bounty program](https://hackerone.com/ibb)
to reward the awesome people who find security vulnerabilities in the four
main Argo projects (CD, Events, Rollouts and Workflows) and then work with
us to fix and disclose them in a responsible manner.
If you report a vulnerability to us as outlined in this security policy, we
will work together with you to find out whether your finding is eligible for
claiming a bounty, and also on how to claim it.
## Securing Argo Workflows
See [docs/security.md](docs/security.md) for information about securing your Argo Workflows instance.

USERS.md
@@ -0,0 +1,230 @@
# Argo Workflows User Community Surveys & Feedback
User community survey results are available: [2023](https://blog.argoproj.io/argo-workflows-events-2023-user-survey-results-82c53bc30543), [2021](https://blog.argoproj.io/argo-workflows-2021-survey-results-d6fa890030ee?gi=857daaa1faa9), and [2020](https://github.com/argoproj/argoproj/blob/main/community/user_surveys/ArgoWorkflows2020SurveySummary.pdf).
## Who uses Argo Workflows?
As the Argo Community grows, we'd like to keep track of our users. Please send a PR with your organization or project name in the following sections.
Argo Workflows is seeking more community involvement and ultimately more [Reviewers and Approvers](https://github.com/argoproj/argoproj/blob/main/community/membership.md) to help keep it viable.
See [Sustainability Effort](https://github.com/argoproj/argo-workflows/blob/main/community/sustainability_effort.md) for more information.
### Organizations Using Argo
Currently, the following organizations are **officially** using Argo Workflows:
1. [23mofang](https://www.23mofang.com/)
1. [4intelligence](https://4intelligence.com.br/)
1. [7shifts](https://www.7shifts.com)
1. [Acquia](https://www.acquia.com/)
1. [Adevinta](https://www.adevinta.com/)
1. [Admiralty](https://admiralty.io/)
1. [Adobe](https://www.adobe.com/)
1. [Adyen](https://www.adyen.com/)
1. [AKRA](https://www.akra.de/)
1. [Akuity](https://akuity.io/)
1. [Alibaba Cloud](https://www.alibabacloud.com/about)
1. [Alibaba Group](https://www.alibabagroup.com/)
1. [Anova](https://www.anova.com/)
1. [Ant Group](https://www.antgroup.com/)
1. [AppDirect](https://www.appdirect.com/)
1. [Arabesque](https://www.arabesque.com/)
1. [Argonaut](https://www.argonaut.dev/)
1. [ArthurAI](https://arthur.ai/)
1. [Astraea](https://astraea.earth/)
1. [Atlan](https://atlan.com/)
1. [BasisAI](https://basis-ai.com/)
1. [BEI.RE](https://www.bei.re/)
1. [bimspot](https://bimspot.io)
1. [BioBox Analytics](https://biobox.io)
1. [BlackRock](https://www.blackrock.com/)
1. [Bloomberg](https://www.bloomberg.com/)
1. [bonprix](https://en.bonprix.de/corporate/our-company/)
1. [Botkeeper](https://www.botkeeper.com/)
1. [ByteDance](https://www.bytedance.com/en/)
1. [Canva](https://www.canva.com/)
1. [Capact](https://capact.io/)
1. [Capital One](https://www.capitalone.com/tech/)
1. [Carrefour](https://www.carrefour.com/)
1. [CarTrack](https://www.cartrack.com/)
1. [Casavo](https://casavo.com/)
1. [CCRi](https://www.ccri.com/)
1. [Cisco](https://www.cisco.com/)
1. [Cloud Scale](https://cloudscaleinc.com/)
1. [CloudGeometry](https://www.cloudgeometry.io/)
1. [CloudSeeds](https://www.cloudseeds.de/)
1. [Codec](https://www.codec.ai/)
1. [Codefresh](https://www.codefresh.io/)
1. [Commodus Tech](https://www.commodus.tech)
1. [Concierge Render](https://www.conciergerender.com)
1. [Cookpad](https://cookpad.com/)
1. [Coralogix](https://coralogix.com)
1. [CoreFiling](https://www.corefiling.com/)
1. [CoreWeave Cloud](https://www.coreweave.com)
1. [Cratejoy](https://www.cratejoy.com/)
1. [Cruise](https://getcruise.com/)
1. [CVision AI](https://www.cvisionai.com)
1. [CyberAgent](https://www.cyberagent.co.jp/en/)
1. [Cyrus Biotechnology](https://cyrusbio.com/)
1. [Data4Risk](https://www.data4risk.com/)
1. [Datable](https://datable.jp/)
1. [Datadog](https://www.datadoghq.com/)
1. [DataRobot](https://www.datarobot.com/)
1. [DataStax](https://www.datastax.com/)
1. [DDEV](https://www.ddev.com/)
1. [Deutsche Telekom AG](https://telekom.com)
1. [DevSamurai](https://www.devsamurai.com/)
1. [Devtron Labs](https://github.com/devtron-labs/devtron)
1. [DLR](https://www.dlr.de/eoc/)
1. [DP Technology](https://www.dp.tech/)
1. [Dyno Therapeutics](https://dynotx.com)
1. [EBSCO Information Services](https://www.ebsco.com/)
1. [Enso Finance](https://enso.finance/)
1. [Equinor](https://www.equinor.com/)
1. [Elastic](https://www.elastic.co/)
1. [Fairwinds](https://fairwinds.com/)
1. [FOLIO](http://corp.folio-sec.com/)
1. [freee](https://corp.freee.co.jp/en/company/)
1. [FreeWheel](https://freewheel.com/)
1. [Fynd Trak](https://trak.fynd.com/)
1. [Galixir](https://www.galixir.com/)
1. [Gardener](https://gardener.cloud/)
1. [Gepardec](https://gepardec.com/)
1. [GitHub](https://github.com/)
1. [Gitpod](https://www.gitpod.io/)
1. [Gladly](https://gladly.com/)
1. [Gllue](https://gllue.com/)
1. [Glovo](https://www.glovoapp.com)
1. [Google](https://www.google.com/intl/en/about/our-company/)
1. [Graviti](https://www.graviti.com)
1. [Greenhouse](https://greenhouse.io)
1. [H2O.ai](https://h2o.ai/)
1. [Habx](https://www.habx.com/)
1. [Helio](https://helio.exchange)
1. [Hemisphere Digital](https://hemisphere.digital)
1. [HOVER](https://hover.to)
1. [HSBC](https://hsbc.com)
1. [Hydrogrid](https://hydrogrid.ai)
1. [IBM](https://ibm.com)
1. [Iflytek](https://www.iflytek.com/)
1. [Inceptio Technology](https://www.inceptio.ai/)
1. [incrmntal](https://incrmntal.com/)
1. [InsideBoard](https://www.insideboard.com)
1. [Interline Technologies](https://www.interline.io/blog/scaling-openstreetmap-data-workflows/)
1. [Intralinks](https://www.intralinks.com/)
1. [Intuit](https://www.intuit.com/)
1. [InVision](https://www.invisionapp.com/)
1. [İşbank](https://www.isbank.com.tr/en)
1. [Jellysmack](https://www.jellysmack.com/)
1. [Jungle](https://www.jungle.ai/)
1. [Karius](https://www.kariusdx.com/)
1. [Karrot](https://www.daangn.com/)
1. [KarrotPay](https://www.daangnpay.com/)
1. [Kasa](https://www.kasa.co.kr/)
1. [KintoHub](https://www.kintohub.com/)
1. [KPMG](https://kpmg.com/uk)
1. [Localytics](https://www.localytics.com/)
1. [Lumin Digital](https://lumindigital.com/)
1. [Maersk](https://www.maersk.com/solutions/digital-solutions)
1. [MariaDB](https://mariadb.com/)
1. [Marmalade](https://www.marmalade.co/)
1. [Max Kelsen](https://maxkelsen.com/)
1. [Maya](https://www.maya.ph/)
1. [Microba](https://www.microba.com/)
1. [Microblink](https://microblink.com/)
1. [Mirantis](https://mirantis.com/)
1. [Mixpanel](https://mixpanel.com)
1. [Motus](https://www.motus.com)
1. [New Relic](https://newrelic.com/)
1. [Nikkei](https://www.nikkei.co.jp/nikkeiinfo/en/)
1. [Norwegian Refugee Council](https://www.nrc.no/)
1. [nrd.io](https://nrd.io/)
1. [NVIDIA](https://www.nvidia.com/)
1. [One Concern](https://oneconcern.com/)
1. [Onepanel](https://docs.onepanel.ai)
1. [OpsMx](https://www.opsmx.io/)
1. [Oracle](https://www.oracle.com/)
1. [Orchest](https://www.orchest.io/)
1. [OVH](https://www.ovh.com/)
1. [PathAI](https://www.pathai.com)
1. [PayIt](https://payitgov.com/)
1. [PDOK](https://www.pdok.nl/)
1. [Peak AI](https://www.peak.ai/)
1. [Phrase](https://phrase.com)
1. [Pipekit](https://pipekit.io)
1. [Pismo](https://pismo.io/)
1. [Polarpoint.io](https://polarpoint.io)
1. [Pollination](https://pollination.cloud)
1. [Preferred Networks](https://www.preferred-networks.jp/en/)
1. [Priceline.com](https://www.priceline.com)
1. [Procore](https://www.procore.com)
1. [Promaton](https://www.promaton.com/)
1. [Prudential](https://www.prudential.com.sg/)
1. [Pvotal Technologies](https://pvotal.tech/)
1. [Quantibio](http://quantibio.com/us/en/)
1. [QuantumBlack](https://quantumblack.com/)
1. [Raccoon Digital Marketing](https://raccoon.ag/)
1. [Ramboll Shair](https://ramboll-shair.com/)
1. [Ravelin](https://www.ravelin.com/)
1. [Reco](https://reco.ai)
1. [Red Hat](https://www.redhat.com/en)
1. [Reserved AI](https://reserved.ai/)
1. [Riskified](https://www.riskified.com)
1. [Robinhood](https://robinhood.com/)
1. [Sage (Sage AI Labs)](https://sage.com/)
1. [SAP Concur](https://www.concur.com/)
1. [SAP Fieldglass](https://www.fieldglass.com/)
1. [SAP Hybris](https://cx.sap.com/)
1. [SAS](https://www.sas.com/)
1. [Salesforce](https://salesforce.com)
1. [Schlumberger](https://slb.com/)
1. [Securitas](https://securitas.com/)
1. [SegmentStream](https://segmentstream.com)
1. [Semgrep](https://semgrep.com)
1. [Sendible](https://sendible.com)
1. [Sidecar Technologies](https://hello.getsidecar.com/)
1. [smallcase](https://smallcase.com/)
1. [Softonic](https://hello.softonic.com/)
1. [Sohu](https://www.sohu.com/)
1. [SternumIOT](https://www.sternumiot.com)
1. [Stillwater Supercomputing, Inc](http://www.stillwater-sc.com/)
1. [StreamNative](https://streamnative.io)
1. [strongDM](https://www.strongdm.com/)
1. [Styra](https://www.styra.com/)
1. [Splunk](https://www.splunk.com/)
1. [Sutpc](http://www.sutpc.com/)
1. [Swissblock Technologies](https://swissblock.net/)
1. [Tessell](https://www.tessell.com/)
1. [Threekit](https://www.threekit.com/)
1. [Tiger Analytics](https://www.tigeranalytics.com/)
1. [Tradeshift](https://tradeshift.com/)
1. [Trendyol](https://trendyol.com)
1. [Tuhu](https://www.tuhu.cn/)
1. [Tulip](https://tulip.com/)
1. [Ubie](https://ubie.life/)
1. [UFirstGroup](https://www.ufirstgroup.com)
1. [Vispera](https://www.vispera.co)
1. [VMware](https://www.vmware.com/)
1. [Voyager](https://investvoyager.com/)
1. [Wavefront](https://www.wavefront.com/)
1. [Wellcome Trust](https://wellcome.ac.uk/)
1. [WooliesX](https://wooliesx.com.au/)
1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
1. [Workiva](https://www.workiva.com/)
1. [Xueqiu](https://www.xueqiu.com/)
1. [Yubo](https://www.yubo.live/)
1. [Zhihu](https://www.zhihu.com/)
### Projects Using Argo
In addition, the following projects are **officially** using Argo Workflows:
1. [Couler](https://github.com/couler-proj/couler)
1. [Hera Workflows](https://github.com/argoproj-labs/hera-workflows)
1. [Kubeflow](https://www.kubeflow.org/)
1. [Metaflow](https://www.metaflow.org)
1. [Onepanel](https://github.com/onepanelio/onepanel)
1. [SQLFlow](https://github.com/sql-machine-learning/sqlflow)
1. [BisQue](https://github.com/UCSB-VRL/bisqueUCSB)
1. [Tator](https://www.tator.io)

api/jsonschema/schema.json (generated, new file; diff suppressed because it is too large)
api/openapi-spec/swagger.json (generated, new file; diff suppressed because it is too large)

@@ -0,0 +1,103 @@
package openapi_spec //nolint:staticcheck
import (
"encoding/json"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
type obj = map[string]interface{}
func TestSwagger(t *testing.T) {
swagger := obj{}
data, err := os.ReadFile("swagger.json")
if err != nil {
t.Fatal(err)
}
err = json.Unmarshal(data, &swagger)
if err != nil {
t.Fatal(err)
}
definitions := swagger["definitions"].(obj)
// one definition from each API
t.Run("io.argoproj.workflow.v1alpha1.CreateCronWorkflowRequest", func(t *testing.T) {
assert.Contains(t, definitions, "io.argoproj.workflow.v1alpha1.CreateCronWorkflowRequest")
})
t.Run("io.argoproj.workflow.v1alpha1.WorkflowCreateRequest", func(t *testing.T) {
assert.Contains(t, definitions, "io.argoproj.workflow.v1alpha1.WorkflowCreateRequest")
})
t.Run("io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateCreateRequest", func(t *testing.T) {
assert.Contains(t, definitions, "io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateCreateRequest")
})
t.Run("io.argoproj.workflow.v1alpha1.WorkflowTemplateCreateRequest", func(t *testing.T) {
assert.Contains(t, definitions, "io.argoproj.workflow.v1alpha1.WorkflowTemplateCreateRequest")
})
t.Run("io.argoproj.workflow.v1alpha1.InfoResponse", func(t *testing.T) {
assert.Contains(t, definitions, "io.argoproj.workflow.v1alpha1.InfoResponse")
})
t.Run("io.argoproj.workflow.v1alpha1.ScriptTemplate", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.ScriptTemplate"].(obj)
assert.NotContains(t, definition["required"], "name")
})
t.Run("io.argoproj.workflow.v1alpha1.CronWorkflow", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.CronWorkflow"].(obj)
assert.NotContains(t, definition["required"], "status")
})
t.Run("io.argoproj.workflow.v1alpha1.Workflow", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.Workflow"].(obj)
assert.NotContains(t, definition["required"], "status")
})
t.Run("io.argoproj.workflow.v1alpha1.Parameter", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.Parameter"].(obj)
properties := definition["properties"].(obj)
assert.Equal(t, "string", properties["default"].(obj)["type"])
assert.Equal(t, "string", properties["value"].(obj)["type"])
})
t.Run("io.argoproj.workflow.v1alpha1.Histogram", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.Histogram"].(obj)
buckets := definition["properties"].(obj)["buckets"].(obj)
assert.Equal(t, "array", buckets["type"])
assert.Equal(t, obj{"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.Amount"}, buckets["items"])
})
t.Run("io.argoproj.workflow.v1alpha1.Amount", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.Amount"].(obj)
assert.Equal(t, "number", definition["type"])
})
t.Run("io.argoproj.workflow.v1alpha1.Item", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.Item"].(obj)
assert.Empty(t, definition["type"])
})
t.Run("io.argoproj.workflow.v1alpha1.ParallelSteps", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.ParallelSteps"].(obj)
assert.Equal(t, "array", definition["type"])
assert.Equal(t, obj{"$ref": "#/definitions/io.argoproj.workflow.v1alpha1.WorkflowStep"}, definition["items"])
})
// this test makes sure we deal with `inline`
t.Run("io.argoproj.workflow.v1alpha1.UserContainer", func(t *testing.T) {
definition := definitions["io.argoproj.workflow.v1alpha1.UserContainer"].(obj)
properties := definition["properties"]
assert.Contains(t, properties, "image")
})
// yes - we actually delete this field
t.Run("io.k8s.api.core.v1.Container", func(t *testing.T) {
definition := definitions["io.k8s.api.core.v1.Container"].(obj)
required := definition["required"]
assert.Contains(t, required, "image")
assert.NotContains(t, required, "name")
})
// this test makes sure we can deal with an instance where we are wrong vs Kubernetes
t.Run("io.k8s.api.core.v1.SecretKeySelector", func(t *testing.T) {
definition := definitions["io.k8s.api.core.v1.SecretKeySelector"].(obj)
properties := definition["properties"]
assert.Contains(t, properties, "name")
})
// this test makes sure we can deal with an instance where we are wrong vs Kubernetes
t.Run("io.k8s.api.core.v1.Volume", func(t *testing.T) {
definition := definitions["io.k8s.api.core.v1.Volume"].(obj)
properties := definition["properties"]
assert.Contains(t, properties, "name")
assert.NotContains(t, properties, "volumeSource")
})
}
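// Example (illustrative): run just this suite from the repository root, assuming
// this file lives alongside api/openapi-spec/swagger.json:
//   go test ./api/openapi-spec -run TestSwagger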

@@ -1,32 +0,0 @@
#!/bin/bash
# Shell script to process a kubeconfig and embed the certificate and token
# for the purposes of an argo install
set -e
kubeconfig=$1
output_config=$2
if [[ -z "${kubeconfig}" || -z "${output_config}" ]] ; then
echo "Usage ${0} ~/.kube/config ~/.kube/processed_config"
exit 1
fi
# read necessary information about current context
current_context=`kubectl --kubeconfig=$1 config current-context`
secret_name=`kubectl --kubeconfig=$1 get serviceaccount default --namespace default -o jsonpath="{.secrets[0].name}"`
token=`kubectl --kubeconfig=$1 get secrets --namespace default ${secret_name} -o custom-columns=:data.token | base64 --decode`
server_ip=`kubectl --kubeconfig=$1 config view -o jsonpath="{.clusters[?(@.name == \"${current_context}\")].cluster.server}"`
temp_crt_file="/tmp/${current_context}_ca.crt"
rm -f ${temp_crt_file}
kubectl --kubeconfig=$1 get secrets --namespace default ${secret_name} -o custom-columns=:data."ca\.crt" | base64 --decode > ${temp_crt_file}
# write the new kubeconfig
kubectl config --kubeconfig=$2 set-cluster ${current_context} --server=${server_ip} --embed-certs=true --certificate-authority=${temp_crt_file}
kubectl config --kubeconfig=$2 --server=${server_ip} set-credentials ${current_context} --token ${token}
kubectl config --kubeconfig=$2 --server=${server_ip} set-context --cluster ${current_context} --user ${current_context} ${current_context}
kubectl config --kubeconfig=$2 --server=${server_ip} use-context ${current_context}
kubectl config --kubeconfig=$2 --server=${server_ip} set-cluster ${current_context}
rm -f ${temp_crt_file}

@@ -1,355 +0,0 @@
#!/usr/bin/env bash
#
# Copyright 2015-2016 Applatix, Inc. All rights reserved.
#
#
# Source this file in your .bash_profile or .bashrc
# For even cooler bash prompts source bash-powerline.sh
#
# Set AX_NAMESPACE and AX_VERSION
# Usage: ksetv image-namespace image-version
ksetv()
{
export AX_NAMESPACE=$1
export AX_VERSION=$2
}
# Unset AX_NAMESPACE and AX_VERSION env, and kmanager will use default values
kunsetv()
{
unset AX_NAMESPACE
unset AX_VERSION
}
kcluster()
{
kmanager bash
}
# Run kubectl against the given cluster and namespace
kctl()
{
CLUSTER=$1
NAMESPACE=$2
shift 2
COMMAND=$@
eval ${KUBECTL} --kubeconfig=${HOME}/.kube/cluster_${CLUSTER}.conf --namespace ${NAMESPACE} ${COMMAND}
}
kdef()
{
if [ -z "$1" ]
then
echo "Usage $0 clustername-prefix [namespace]"
return 1
fi
if [[ -z "$(which kubectl-1.6.7)" ]]
then
echo
echo "You need to install kubectl client 1.6.7."
echo
fi
cluster=`ls -rt $HOME/.kube/cluster_$1* 2>/dev/null | tail -n 1 | sed 's#.*cluster_##g' | sed 's#\.conf##g'`
if [ -z "$cluster" ]
then
echo "Cluster with prefix $1 not found. Assuming $1 is the full name."
cluster=$1
fi
export DEFAULT_KCLUSTER=$cluster
if [ -z "$2" ]
then
export DEFAULT_NAMESPACE=axsys
else
export DEFAULT_NAMESPACE=$2
fi
export KUBECTL=kubectl-1.6.7
}
kundef()
{
unset DEFAULT_NAMESPACE
unset DEFAULT_KCLUSTER
}
k()
{
if [ -z "$DEFAULT_KCLUSTER" -o -z "$DEFAULT_NAMESPACE" ]
then
echo "Usage: Set default cluster using kdef command"
return 1
fi
kctl $DEFAULT_KCLUSTER $DEFAULT_NAMESPACE $@
}
# List pods in the default cluster/namespace
kp()
{
k "get" "pods" $@
}
kdesc()
{
if [ -z "$1" ]
then
echo "Usage: kdesc pod-name-prefix"
return 1
fi
pod=`kp | grep $1 | cut -d " " -f 1`
shift
echo "Using pod: " $pod
k "describe" "pods" $pod $@
}
# Delete a pod
kdp()
{
pod=$1
shift
k "delete" "pod" $pod $@
}
# Delete a deployment
kddp()
{
deployment=$1
shift
k "delete" "deployment" $deployment $@
}
# Delete a daemonset
kdds()
{
daemonset=$1
shift
k "delete" "daemonset" $daemonset $@
}
# List nodes
kn()
{
k "get" "nodes" $@
}
# List services
ks()
{
k "get" "svc" $@
}
# Show logs
kl()
{
k "logs" $@
}
# List jobs
kj()
{
k "get" "jobs" $@
}
kssh()
{
if [ -z "$1" ]
then
echo "Usage: $0 nodename from kn command"
return 1
fi
NODE=$1
if [ "${NODE:0:4}" = "gke-" ] ; then
shift
if [ -z "$*" ] ; then
gcloud compute ssh $NODE
else
gcloud compute ssh $NODE --command "$*"
fi
else
IP=`kn $NODE -o jsonpath="'{.status.addresses[2].address}'"`
shift
ssh -i $HOME/.ssh/kube_id_${CLUSTER} admin@$IP $@
fi
}
# Run a command on the cluster's master node over SSH
km()
{
ssh -o StrictHostKeyChecking=no -i $HOME/.ssh/kube_id_${DEFAULT_KCLUSTER} admin@$(grep "server:" $HOME/.kube/cluster_$DEFAULT_KCLUSTER.conf | sed 's#.*//##g') "$@"
}
# Download a file from the cluster's master node
kmdownload()
{
scp -o StrictHostKeyChecking=no -i $HOME/.ssh/kube_id_${DEFAULT_KCLUSTER} admin@$(grep "server:" $HOME/.kube/cluster_$DEFAULT_KCLUSTER.conf | sed 's#.*//##g'):$1 $2
}
# Upload a file to the cluster's master node
kmupload()
{
temp=`mktemp`
scp -o StrictHostKeyChecking=no -i $HOME/.ssh/kube_id_${DEFAULT_KCLUSTER} $1 admin@$(grep "server:" $HOME/.kube/cluster_$DEFAULT_KCLUSTER.conf | sed 's#.*//##g'):$temp
ssh -o StrictHostKeyChecking=no -i $HOME/.ssh/kube_id_${DEFAULT_KCLUSTER} admin@$(grep "server:" $HOME/.kube/cluster_$DEFAULT_KCLUSTER.conf | sed 's#.*//##g') sudo mv $temp $2
}
kshell()
{
if [ \( "$1" = "" \) -o \( "$2" = "" \) ]; then
echo "Usage: kshell <pod> <shell> [<containername>]"
return 1
fi
CONTAINERSHELL=""
if [ "$3" != "" ]; then
CONTAINERSHELL=" -c $3"
fi
COLUMNS=`tput cols`
LINES=`tput lines`
TERM=xterm
k "exec" "-i" "-t" "$1" "env" "COLUMNS=$COLUMNS" "LINES=$LINES" "TERM=$TERM" "$2" "$CONTAINERSHELL"
}
kns()
{
if [ -z "$1" ]
then
echo "Usage: kns <name-of-namespace>"
echo "Current namespaces:"
k get namespaces
return 1
fi
local all_namespaces=`$KUBECTL --kubeconfig=${HOME}/.kube/cluster_${DEFAULT_KCLUSTER}.conf get namespaces | grep -v NAME | cut -d " " -f 1| tr '\r\n' ' '`
if [[ ! " ${all_namespaces[@]} " =~ " $1 " ]]; then
echo "No namespace named $1"
return 1
fi
unset DEFAULT_NAMESPACE
export DEFAULT_NAMESPACE=$1
}
kpassword()
{
CLUSTER=$1
if [ -z "$1" ]
then
CLUSTER=${DEFAULT_KCLUSTER}
fi
echo "ClusterID: ${CLUSTER}"
echo "Access info in ~/.argo"
}
kui()
{
# Open the Argo cluster UI in the default browser.
elb=`k "get svc axops --namespace=axsys -o wide" | grep elb | cut -d " " -f 9`
python -m webbrowser "https://$elb"
}
kpf()
{
if [ -z "$1" ]
then
echo "Usage: kpf pod-name-prefix"
return 1
fi
pod=`kp | grep $1 | cut -d " " -f 1`
echo "Using pod: " $pod
k port-forward $pod $2
}
# Run a command on every node in the cluster
kall()
{
ips=`kn | cut -d " " -f 1 | grep -v NAME`
for ip in $ips; do
echo "$(tput setaf 3)$ip $(tput setaf 7)"
kssh $ip "$@"
done
}
fdp()
{
if [ -z "$1" ]
then
echo "Usage: fdp pod-name-prefix"
return 1
fi
pod=`kp | grep $1 | cut -d " " -f 1`
kdp $pod
}
fl()
{
if [ -z "$1" ]
then
echo "Usage: fl pod-name-prefix [-f]"
return 1
fi
pod=`kp | grep $1 | cut -d " " -f 1`
kl $pod "$@"
}
export AWS_OUTPUT_FORMAT=table
export ACMD_AWS_PROFILE=default
aformat()
{
export AWS_OUTPUT_FORMAT=$1
echo "Output format for a commands set to $1"
}
aprofile()
{
export ACMD_AWS_PROFILE=$1
echo "Using aws profile $ACMD_AWS_PROFILE"
}
an()
{
if [ -z "$DEFAULT_KCLUSTER" ]; then
echo "Lists all ec2 instances in given k8s cluster. Run kdef first"
return 1
fi
aws --profile $ACMD_AWS_PROFILE ec2 describe-instances --output $AWS_OUTPUT_FORMAT --filters Name=tag:Name,Values="$DEFAULT_KCLUSTER*" --query 'Reservations[].Instances[].[Tags[?Key==`Name`] | [0].Value, InstanceId, State.Name, PublicIpAddress, PrivateDnsName, InstanceLifecycle]'
}
atags()
{
if [ -z "$1" ]; then
echo "Lists all tags of given EC2 instance. Usage $FUNCNAME instance-id"
return 1
fi
aws --profile $ACMD_AWS_PROFILE ec2 describe-tags --filters "Name=resource-id,Values=$1" --output $AWS_OUTPUT_FORMAT
}
avs()
{
if [ -z "$DEFAULT_KCLUSTER" ]; then
echo "Lists all volumes in given k8s cluster. Run kdef first"
return 1
fi
aws --profile $ACMD_AWS_PROFILE ec2 describe-volumes --output $AWS_OUTPUT_FORMAT --filters Name=tag:Name,Values="$DEFAULT_KCLUSTER*" --query 'Volumes[].[Tags[?Key==`Name`] | [0].Value, VolumeId, AvailabilityZone, Size, Attachments[0].InstanceId]'
}
avtags()
{
if [ -z "$1" ]; then
echo "Lists all tags of given EBS volume. Usage $FUNCNAME volume-id"
return 1
fi
aws --profile $ACMD_AWS_PROFILE ec2 describe-tags --filters "Name=resource-type,Values=volume,Name=resource-id,Values=$1" --output $AWS_OUTPUT_FORMAT
}
avpcs()
{
# Lists all VPCs
aws --profile $ACMD_AWS_PROFILE ec2 describe-vpcs --query 'Vpcs[].[Tags[?Key==`KubernetesCluster`] | [0].Value, VpcId, CidrBlock]' --output $AWS_OUTPUT_FORMAT
}
avpc()
{
if [ -z "$DEFAULT_KCLUSTER" ]; then
echo "Lists all resources in given K8S cluster's VPC. Run kdef first"
return 1
fi
aws --profile $ACMD_AWS_PROFILE ec2 describe-tags --filters "Name=resource-type,Values=vpc,Name=value,Values=$DEFAULT_KCLUSTER" --output $AWS_OUTPUT_FORMAT
}
aasgs()
{
if [ -z "$DEFAULT_KCLUSTER" ]; then
echo "Lists all autoscaling groups in given K8S cluster. Run kdef first"
return 1
fi
aws --profile $ACMD_AWS_PROFILE autoscaling describe-tags --filters "Name=value,Values=$DEFAULT_KCLUSTER" --query 'Tags[?Key==`KubernetesCluster`].[ResourceId, Value]' --output $AWS_OUTPUT_FORMAT
}

Some files were not shown because too many files have changed in this diff.