Compare commits

...

506 Commits

Author SHA1 Message Date
Tommy Li 0b89419544
chore(owners): Promote rafalbigaj to approver (#1484)
Signed-off-by: tomcli <tommy.chaoping.li@ibm.com>
2024-04-17 22:13:39 +00:00
Tommy Li 3e7950ffd3
chore(deps): sync kfp deps with the latest commit (#1485)
Signed-off-by: tomcli <tommy.chaoping.li@ibm.com>
2024-04-16 23:32:26 +00:00
Tommy Li bb47bcd892
chore(ci): Add clean up step for tekton ci (#1480) 2024-03-27 17:36:21 +00:00
Tommy Li c0d25310d5
chore(kfp-task): Update driver package to 2.1.0 release (#1478) 2024-03-27 00:11:19 +00:00
Tommy Li b49f959db9
chore(tekton-driver): Update tekton v2 driver to support the latest k8s spec from upstream (#1464)
Signed-off-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2024-03-06 03:38:07 +00:00
Helber Belmiro 16e781dce9
fix(docs): Updated legal info due to migration from CLA to DCO (#1463)
* Updated legal info due to migration from CLA to DCO

Signed-off-by: hbelmiro <helber.belmiro@gmail.com>

* Fixed TOC

Signed-off-by: hbelmiro <helber.belmiro@gmail.com>

---------

Signed-off-by: hbelmiro <helber.belmiro@gmail.com>
2024-03-05 17:18:07 +00:00
Tommy Li 803377e899
chore(sdk): Add sdk 1.9.3 release (#1462)
Signed-off-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2024-02-27 23:12:59 +00:00
Tommy Li db6d85ece6
feat(sdk): add verify_ssl flag to support self-signed certs (#1461)
Signed-off-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2024-02-27 22:52:59 +00:00
Tommy Li 9f568f2a72
feat(ci): Update github actions to also test python 3.12 (#1456)
* Update github actions to also test python 3.12

* Update setup.py

* Update setup.py

* Update kfp-tekton-unittests.yml

* Update kfp-tekton-unittests.yml

* Update kfp-tekton-unittests.yml

* Update kfp-tekton-unittests.yml

* Update README.md
2024-02-13 18:35:05 +00:00
Tommy Li a9d7df96d2
fix(README): Update instructions to only use kustomize (#1455) 2024-02-13 00:07:04 +00:00
Tommy Li b77e6f38d5
Chore(docs): Update kfp_tekton_install V2 instructions for KFP-Tekton 2.0.5 release (#1453) 2024-01-18 17:35:59 +00:00
Tommy Li bb06e5e721
chore(release): add kfp-tekton backend 1.9.2 release (#1451) 2024-01-18 00:21:58 +00:00
Tommy Li 550a827b05
feat(tekton-kfptask): Update kfptask to support pod metadata (#1449)
* update kfptask to support pod metadata

* fix type
2024-01-17 08:53:54 +00:00
Tommy Li d5fc9fd5c9
chore(README): Remove deprecated MLX wording (#1447) 2024-01-10 22:50:11 +00:00
Tommy Li a71ba164ad
feat(pipelineloop): Update pipelineloop v2 to have failed and cancelled status (#1445)
* update pipelineloop v2 to have failed and cancelled status

* fix execution id update bug
2024-01-05 19:25:09 +00:00
Tommy Li ff8bb50dc4
chore(tests): add unit tests for tekton template v1 (#1444)
* add unit tests for tekton template v1

* update license
2024-01-03 23:34:11 +00:00
Tommy Li 08e438099a
feat(pipelineloop): Optimize pipelineloop get performance with Lister (#1443) 2024-01-02 20:32:14 +00:00
Tommy Li 9d36c8c32d
chore(release): Add sdk 1.9.2 release (#1441) 2023-12-26 17:34:09 +00:00
Tommy Li cab410f886
feat(tekton-catalog): Add basic test to kfptask (#1439) 2023-12-26 14:19:08 +00:00
Tommy Li 21a0171bd0
chore(release): Add kfp-tekton 1.9.1 release (#1437) 2023-12-21 21:17:04 +00:00
Tommy Li 09b39c7a9f
fix(tekton-catalog): Refactor kfptask (#1435)
* add driver to pipelineloop reconcile

* update pipelineloop startup args

* update klog error

* update driver logic

* update image build and fix update logic

* update kfptask to be more module friendly

* revert deps

* add const
2023-12-19 22:37:19 +00:00
dependabot[bot] f93c8d2888
chore(deps): bump golang.org/x/crypto from 0.14.0 to 0.17.0 in /tekton-catalog/pipeline-loops (#1434)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.14.0 to 0.17.0.
- [Commits](https://github.com/golang/crypto/compare/v0.14.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-18 22:32:38 +00:00
dependabot[bot] 6d30324b85
chore(deps): bump golang.org/x/crypto from 0.14.0 to 0.17.0 in /tekton-catalog/tekton-kfptask (#1433)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.14.0 to 0.17.0.
- [Commits](https://github.com/golang/crypto/compare/v0.14.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-18 22:31:37 +00:00
dependabot[bot] 747e6640e7
chore(deps): bump golang.org/x/crypto from 0.14.0 to 0.17.0 in /tekton-catalog/tekton-driver (#1432)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.14.0 to 0.17.0.
- [Commits](https://github.com/golang/crypto/compare/v0.14.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-18 22:30:38 +00:00
Tommy Li b6fcff9f0e
feat(pipeline-loop v2): Merge loop driver and publisher into the pipelineloop controller logic (#1428)
* add driver to pipelineloop reconcile

* update pipelineloop startup args

* update klog error

* update driver logic

* update image build and fix update logic
2023-12-18 22:13:37 +00:00
Tommy Li 11964891c2
chore(README.md): Update kfp upstream version for v2 branch (#1430) 2023-12-18 22:00:37 +00:00
Tommy Li 066940a0f1
fix(sdk): Fix datetime import (#1429) 2023-12-18 21:59:37 +00:00
Tommy Li eaa2de687c
feat(tekton-kfptask): Update kfptask to publish completed dag status (#1426)
* update kfptask to update completed dag

* update comment

* update error message
2023-12-07 17:35:56 +00:00
Tommy Li 191ad3907d
chore(README): update v2 info and new diagrams (#1423)
* update v2 info and new diagrams

* Update README.md

* fix grammar
2023-12-04 21:01:06 +00:00
Tommy Li a75d4b3711
chore(release): Add kfp-tekton 1.9.0 release (#1422) 2023-11-27 19:50:01 +00:00
Tommy Li e7d0cd0680
chore(backend): update to tekton 0.53.2 (#1420) 2023-11-22 22:39:57 +00:00
Tommy Li 509f53c809
chore(ci): remove driver build code since it is no longer necessary (#1418) 2023-11-22 01:55:56 +00:00
Tommy Li 1811dc7fb4
chore(docs): Update v2 dev branch version in main readme (#1414)
* Update v2 dev branch version in main readme

* Update blog link to the latest
2023-11-09 17:41:51 +00:00
Tommy Li 8a7b8b3905
chore(docs): Update kfp-tekton v2 version in the docs (#1413)
* Update kfp-tekton v2 version in the docs

* Update guides/kfp_tekton_install.md

Co-authored-by: Yihong Wang <yh.wang@ibm.com>

---------

Co-authored-by: Yihong Wang <yh.wang@ibm.com>
2023-11-08 01:14:08 +00:00
Tommy Li fcc7996028
feat(tekton-catalog): merge driver and kfptask into one controller (#1411)
* merge driver and kfptask into one controller

* update parameters

* update parameters

* update parameters to hyphen
2023-11-02 22:07:51 +00:00
Tommy Li d183b563fa
fix(tekton-catalog): sync pipeline package to 2.0.3 release (#1405) 2023-10-27 17:10:10 +00:00
Tommy Li 0010c60efa
chore(deps): update golang grpc deps to 1.56.3 (#1404) 2023-10-26 05:15:09 +00:00
Tommy Li bb9806f8be
chore(docs): Update kfp-tekton install versions (#1397) 2023-10-23 22:48:07 +00:00
Humair Khan 333c1c9ede
feat(backend): Add CA injection to step-copy-artifacts step. Fixes #1394. (#1395)
Signed-off-by: Humair Khan <HumairAK@users.noreply.github.com>
2023-10-20 18:25:05 +00:00
Tommy Li 6dc3fd9db0
chore(docs): Update main readme entrypoint (#1393) 2023-10-19 22:01:03 +00:00
Tommy Li 01f1d11f98
feat(tekton-catalog): Move kfp-tekton v2 driver to master branch (#1391) 2023-10-19 21:50:03 +00:00
Tommy Li f3e6a8e7f3
fix(tekton-catalog): fix kfptask custom task (#1389) 2023-10-18 21:46:56 +00:00
dependabot[bot] 943f982fc6
chore(deps): bump urllib3 from 1.26.17 to 1.26.18 in /sdk/python (#1388)
Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.17 to 1.26.18.
- [Release notes](https://github.com/urllib3/urllib3/releases)
- [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst)
- [Commits](https://github.com/urllib3/urllib3/compare/1.26.17...1.26.18)

---
updated-dependencies:
- dependency-name: urllib3
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-18 16:07:57 +00:00
Tommy Li 15bffccc5b
chore(docs): update manual release process and minor ci display name change (#1387)
* update manual release process and minor ci display name change

* fix script
2023-10-17 21:55:55 +00:00
Tommy Li 96d5205326
feat(Tekton CI): add v2 ci image publish pipeline tasks (#1386)
* add v2 ci image publish pipeline tasks

* update tekton catalog version
2023-10-17 20:51:56 +00:00
Tommy Li f038e03625
chore(release): Add kfp-tekton 1.8.1 release (#1385)
* add kfp-tekton 1.8.1 release

* update install docs
2023-10-17 18:37:56 +00:00
Tommy Li e624e9f067
fix(deps): Update to Tekton 0.50.2 to fix regression and security bugs (#1382) 2023-10-17 17:17:56 +00:00
Tommy Li 44df2424d1
chore(github): Update github action deps (#1383) 2023-10-17 17:11:56 +00:00
Tommy Li b9607d5ca9
chore(deps): Fix golang.org/x/net vulnerability (#1377)
* fix golang.org/x/net vulnerability

* fix typo

* fix spacing on makefile
2023-10-12 00:23:21 +00:00
Tommy Li 964c6e53f8
feat(tekton-catalog): Add V2 Tekton kfptask to Tekton catalog (#1373)
* add tekton kfptask to tekton catalog

* fix controller path
2023-10-11 22:26:20 +00:00
dependabot[bot] d41081b459
chore(deps): bump certifi from 2022.12.7 to 2023.7.22 in /sdk/python (#1372)
Bumps [certifi](https://github.com/certifi/python-certifi) from 2022.12.7 to 2023.7.22.
- [Commits](https://github.com/certifi/python-certifi/compare/2022.12.07...2023.07.22)

---
updated-dependencies:
- dependency-name: certifi
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-11 17:04:21 +00:00
dependabot[bot] bbd47b9edf
chore(deps): bump urllib3 from 1.26.15 to 1.26.17 in /sdk/python (#1356)
Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.15 to 1.26.17.
- [Release notes](https://github.com/urllib3/urllib3/releases)
- [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst)
- [Commits](https://github.com/urllib3/urllib3/compare/1.26.15...1.26.17)

---
updated-dependencies:
- dependency-name: urllib3
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-11 16:53:20 +00:00
Tommy Li cf4d57974a
chore(cleanup): remove old manifests and tools to reduce security checklist (#1371) 2023-10-11 16:48:20 +00:00
Tommy Li f94859acae
feat(tekton-catalog): Move v2 tekton-exithandler custom task to tekton catalog (#1370)
* move v2 tekton-exithandler custom task to tekton catalog

* rename podman to docker

* add go.sum
2023-10-11 16:47:20 +00:00
Tommy Li a3c899a1b8
chore(docs): Add SECURITY.md to tell supporting kfp-tekton version (#1368)
* Add SECURITY.md to tell supporting kfp-tekton version

* Update SECURITY.md
2023-10-10 16:45:21 +00:00
Tommy Li fd3b3b1466
chore(docs): Update kfp_tekton_install.md with kfp-tekton v2 install instructions (#1367)
* Update kfp_tekton_install.md with kfp-tekton v2 install instructions

* Update kfp_tekton_install.md
2023-10-10 16:29:20 +00:00
Tommy Li c1510f09a8
fix(manifests): Update istio-authorization-config.yaml for multi-user (#1362) 2023-10-04 19:37:15 +00:00
Tommy Li b0a2ac97d9
fix(sdk): Fix nested loop with cel outputs (#1351) 2023-09-28 14:58:42 +00:00
Tommy Li 8b93ce6719
fix(docs): Update Kind install command to use kubectl -k (#1348) 2023-09-21 17:27:38 +00:00
Tommy Li b71d07b02e
chore(docs): Add grpc gateway version to user guides. (#1346) 2023-09-19 23:17:29 +00:00
Humair Khan 79272ef439
fix(backend): add PR name substitution for scheduled runs. (#1344)
Signed-off-by: Humair Khan <HumairAK@users.noreply.github.com>
2023-09-11 18:21:36 +00:00
Tommy Li 84f2a85023
feat(api): upgrade grpc-gateway to v2 with protoname (#1337)
* upgrade grpc-gateway to v2

* fix common client code for grpc gateway v2

* fix license and test case

* regenerate python client

* fix python http client code

* update healthz class to new python openapi client

* update new python client function class name to new openapi standard;

* overwrite upstream old client function convention

* fix python upload class

* fix python upload class

* update api class

* update upload api swagger codegen manually

* revert old fixes

* fix new sdk payload parameter mapping

* update names to protoname

* revert backend changes to use protoname

* update proto spec

* update tests

* update sdk client to handle new json name

* fix typo
2023-09-01 22:22:26 +00:00
Tommy Li 0fe70dac08
feat(sdk): update new image config via env variables (#1336)
* update new image config via env variables

* add env var for condition task image

* update readme
2023-08-31 21:31:34 +00:00
Tommy Li 7576a6ce47
fix(sdk): update pyyaml dependencies (#1340) 2023-08-31 20:46:08 +00:00
Tommy Li 972c8817f1
feat(sdk): add bash script name config (#1334)
* add bash script name config

* add bash script name config
2023-08-25 21:56:33 +00:00
Tommy Li 4ccd1867a0
chore(cleanup): clean up visualization repo to reduce false positive scan (#1333)
* clean up visualization repo to reduce false positive scan

* clean up visualization repo to reduce false positive scan

* clean up visualization repo to reduce false positive scan

* clean up visualization repo to reduce false positive scan
2023-08-23 20:29:46 +00:00
Giulio Frasca a2007f7389
feat(backend): Source ObjStore Creds from Env in Tekton Template (#1259) 2023-08-22 21:38:45 +00:00
Tommy Li 5007b6bab5
chore(release): kfp-tekton 1.8 release patch and tekton 0.50.1 patch (#1331) 2023-08-22 21:01:09 +00:00
Tommy Li 2f99e8bad3
fix(pipelineloop): update pipelineloop v1 bug (#1330) 2023-08-18 18:16:49 +00:00
Tommy Li 73fb66aa93
chore(test): update ci script (#1329)
* update ci script

* update pipelineloop test for ci
2023-08-17 20:29:38 +00:00
Tommy Li f8741801cb
chore(build): Update .readthedocs.yml to build.os format to avoid warning. (#1328) 2023-08-16 18:19:04 +00:00
Tommy Li 746c570e53
feat(sdk): add tekton pipeline config into sdk client function (#1327) 2023-08-16 16:05:05 +00:00
Tommy Li dc413a30b9
chore(docs): Update dead links and v2 information (#1325)
* Update dead links and v2 information

* Update README.md
2023-08-15 20:50:03 +00:00
Tommy Li dc7dcdd2de
feature(backend): update backend to be backward compatible with old status (#1324) 2023-08-15 15:14:03 +00:00
Tommy Li 136e7a9352
feature(pipelineloop): Tekton v1 migration pipelineloop (#1312)
* update backend to tekton 0.50

* update manifests folder

* update cache server and readme

* migrate backend services to tekton v1 api

* add v1beta1 pipelinerun conversion for backward compatibility

* fix new python lint

* add new function to cover basic pipeline conversion

* update prune script to get more resources

* migrate sdk generation code to tekton v1 only

* migrate pipelineloop to tekton v1 pipeline spec
2023-08-03 16:04:20 +00:00
Tommy Li 213a50874d
feature(sdk): Tekton v1 migration sdk (#1311)
* update backend to tekton 0.50

* update manifests folder

* update cache server and readme

* migrate backend services to tekton v1 api

* add v1beta1 pipelinerun conversion for backward compatibility

* fix new python lint

* add new function to cover basic pipeline conversion

* update prune script to get more resources

* migrate sdk generation code to tekton v1 only
2023-08-02 21:54:18 +00:00
Tommy Li fde417c41a
feature(backend): Tekton v1 migration (#1309)
* update backend to tekton 0.50

* update manifests folder

* update cache server and readme

* migrate backend services to tekton v1 api

* add v1beta1 pipelinerun conversion for backward compatibility

* fix new python lint

* add new function to cover basic pipeline conversion

* update prune script to get more resources

* update status to store in annotations
2023-08-02 18:31:40 +00:00
Tommy Li 933f4afe81
backend(chore): update backend to tekton 0.50 (#1297)
* update backend to tekton 0.50

* update manifests folder

* update cache server and readme
2023-08-01 15:17:08 +00:00
Tommy Li 05617f3f86
chore(readme): Add kfp-tekton v2 branch link (#1305)
* add kfp-tekton v2 branch link

* Update README.md
2023-07-27 18:23:51 +00:00
Tommy Li 77e4754987
chore(samples): Add readme for prompt tuning samples (#1304) 2023-07-27 18:12:51 +00:00
Tommy Li 1f6ed5ee76
fix(ci): fix end to end github action test (#1300)
* test-ci

* trigger backend test

* update prune script

* revert test changes

* update comments
2023-07-26 20:24:33 +00:00
Tommy Li 4fe9611d2b
chore(release): add 1.7.1 backend release (#1295) 2023-07-21 23:34:26 +00:00
Greg Sheremeta 7afbf94753
fix(doc): fix typo tekton -> openshift (#1290) 2023-07-18 18:05:22 +00:00
Tommy Li 2b0e3adcd0
chore(docs): Update kfp-tekton openshift instructions for openshift v1.11 (#1278) 2023-07-10 23:34:07 +00:00
Tommy Li c007dced10
chore(sdk): Add sdk 1.7.1 release (#1267) 2023-06-27 21:24:15 +00:00
Tommy Li 7780820ce4
feat(sdk): Parametrize Tekton condition "task" image name (#1265)
* make condition task parametrized

* update docs
2023-06-26 22:41:14 +00:00
Tommy Li 691f225cf4
fix(frontend): Fix Tensorboard.tsx endpoint (#1263) 2023-06-26 15:56:15 +00:00
Tommy Li 66a129c63f
feat(samples): add peft sample with modelmesh (#1258)
* add peft sample with kserve

* lint files
2023-06-20 21:08:40 +00:00
Yihong Wang 4377646b99
samples(fix): update lightweight component sample (#1257)
use `create_component_from_func` instead

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-06-20 20:07:40 +00:00
Tommy Li 7b916ced28
Upload huggingface demo for tutorial (#1256)
* Add files via upload

* Rename samples/prompt-tuning-demo.py to samples/huggingface-prompt-tuning/prompt-tuning-demo.py
2023-06-16 20:20:37 +00:00
Yihong Wang ca7514c33e
fix(test): update docker image for dind (#1254)
use new docker image for dind

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-06-15 18:40:36 +00:00
Tommy Li 64bf7dba36
Fix(backend): Upgrade Tekton 0.47.1 to address timeout bug (#1253)
* upgrade tekton 0.47.1 to address timeout bug

* upgrade tekton 0.47.1 to address timeout bug
2023-06-08 22:35:58 +00:00
Tommy Li 032db82204
chore(docs): Update kfp_tekton_install.md with new compatibility map (#1249) 2023-05-30 21:25:04 +00:00
Tommy Li f8ebbd47b3
feat(backend): Optimize persistence agent to pass yaml directly to db and minio client (#1244)
* optimize persistence agent to pass yaml directly to db and minio client

* add legacy mode for users to revert back to old kfp behavior
2023-05-24 19:59:19 +00:00
Yihong Wang 3ca939f162
feat(backend): Use Informer in persistence agent (#1247)
use SharedInformer to optimize the query performance

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-05-24 19:58:20 +00:00
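The commit above (#1247) swaps per-query API calls for a SharedInformer whose local cache serves reads. A minimal client-go sketch of that pattern, shown here with the core Pod informer rather than the Tekton PipelineRun informer the agent actually uses (the `kubeflow` namespace is an assumption):

```go
package agent

import (
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// listFromCache serves list queries from a shared informer's local cache
// instead of hitting the API server for every lookup.
func listFromCache(client kubernetes.Interface, stopCh <-chan struct{}) error {
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	lister := factory.Core().V1().Pods().Lister()

	factory.Start(stopCh)            // begin watching
	factory.WaitForCacheSync(stopCh) // block until the cache is warm

	// Reads now come from the in-memory cache, not the API server.
	_, err := lister.Pods("kubeflow").List(labels.Everything())
	return err
}
```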
Tommy Li 1188e82d7c
chore(sdk): bump kfp sdk version to 1.8.22 (#1241) 2023-05-17 22:54:06 +00:00
Tommy Li 95e23f723a
fix(manifests): Update manifests to make it work on both k8s and openshift (#1239)
* refactor manifests to separate k8s and openshift deployment

* update install readme

* fix lint
2023-05-15 17:33:31 +00:00
Tommy Li 76181ac287
chore(release): Add KFP-Tekton 1.7.0 release files (#1237)
* add 1.7.0 release files

* fix readme
2023-05-11 21:30:30 +00:00
Tommy Li de76970835
chore(release): Add 1.6.6-backend release (#1236) 2023-05-08 18:02:55 +00:00
Tommy Li 48d983b7d0
fix(sdk): Update wait_for_run_completion function to handle tekton status (#1234)
* update wait_for_run_completion function to handle tekton status

* optimize status check
2023-05-05 19:08:24 +00:00
Tommy Li 63e1e76a1e
fix(sdk): Update kfp version to fix appengine bug (#1235)
* update kfp version to fix appengine bug

* update tests
2023-05-05 18:40:24 +00:00
Tommy Li a6029c73f0
feat(manifests): opt-out sidecar injection feature flag to increase performance (#1230) 2023-05-05 17:43:24 +00:00
Tommy Li 08819b551f
fix(manifests): fix unsaved kustomization.yaml for openshift pipelines (#1229) 2023-05-04 17:49:45 +00:00
Tommy Li e7b5231c9a
fix(backend): Fix global cache flag (#1224)
* fix global cache flag

* update deprecated flag name
2023-05-03 23:03:45 +00:00
Tommy Li 1c1db1d320
feat(backend): Add metadata field to scheduled workflow (#1221)
* add metadata field to scheduled workflow

* add backend changes

* downgrade urllib3 to 1.x
2023-05-03 20:28:27 +00:00
Yihong Wang 2dc9314d8d
fix(backend): add license files (#1222)
Add license/version files to modules under
tekton-catalog directory, including:
- pipeline-loops
- cache
- objectstore

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-05-02 22:30:14 +00:00
Tommy Li 5ce5507434
feat(manifest): Add openshift pipelines kustomize integration (#1220)
* update deployment manifests to work well on the latest openshift pipelines

* fix readme

* add unsaved files
2023-05-02 20:20:44 +00:00
Yihong Wang 030a7c9a12
fix(test): update ibmcloud command (#1217)
remove deprecated options for the ibmcloud CLI
2023-04-27 18:23:39 +00:00
Tommy Li 08044c99fe
chore(docs): Remove inactive community links (#1218) 2023-04-27 18:13:39 +00:00
Tommy Li c72f05d93d
fix(backend): Fix integration test template object bugs (#1216) 2023-04-26 22:34:35 +00:00
Tommy Li 2d7cc156d1
feat(backend):Update backend to support Tekton 0.47 (#1215)
* initial tekton 0.47 patch

* update licenses

* update pipelineloop deps to tekton 0.47
2023-04-25 20:46:45 +00:00
Tommy Li 1bca3555ce
chore(release): Add backend 1.6.5 release (#1214) 2023-04-24 21:55:44 +00:00
Tommy Li 6c5f7f7714
chore(requirements.txt): Remove unused lock files to better scan security vulnerability (#1213) 2023-04-24 18:31:44 +00:00
Tommy Li 98023e4aec
chore(manifests): update mysql log config to align with upstream (#1212) 2023-04-24 16:00:44 +00:00
Yihong Wang 92a09f1fa5
fix(backend): Use childReference instead of taskRuns (#1211)
After Tekton 0.45, status.taskRuns will be removed.
Switch to status.childReferences, since it can be used
to get all taskRuns and their pipelineTaskNames.
2023-04-19 17:18:55 +00:00
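A minimal Go sketch of the lookup #1211 describes, assuming Tekton's v1beta1 types; the real backend logic covers more than this:

```go
package agent

import "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"

// taskRunNames maps each child TaskRun name to its pipelineTaskName,
// reading status.childReferences rather than the removed status.taskRuns.
func taskRunNames(pr *v1beta1.PipelineRun) map[string]string {
	names := map[string]string{}
	for _, ref := range pr.Status.ChildReferences {
		if ref.Kind == "TaskRun" { // kind comes from the embedded TypeMeta
			names[ref.Name] = ref.PipelineTaskName
		}
	}
	return names
}
```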
Tommy Li 4597d98715
chore(deps): Update go deps to fix high cves (#1210)
* update go deps to fix high cves

* update license
2023-04-18 23:13:54 +00:00
Tommy Li 647d6b9003
chore(samples): Update data passing samples to a more meaningful folder (#1209) 2023-04-14 20:56:18 +00:00
Tommy Li b0875fac5a
fix(python): Remove python 3.7 support (#1207)
* remove python 3.7 support

* update broken links
2023-04-12 21:55:42 +00:00
Tommy Li 2fdcd81947
fix(sdk): Update sdk to 1.6.5 with new kfp 1.8.20 to address kubernetes cves (#1206)
* update sdk to 1.6.5 with new kfp 1.8.20 to address kubernetes cves

* fix tests to use the latest k8s version
2023-04-11 19:45:55 +00:00
Tommy Li 57d2847bd5
chore(samples): Remove deprecated samples (#1205) 2023-04-11 18:40:54 +00:00
Tommy Li 446e8d751b
chore(cleanup): Remove deprecated api directory (#1204) 2023-04-11 18:38:55 +00:00
Tommy Li e0aecb9bfc
chore(release): Add backend 1.6.4 release (#1203) 2023-04-10 21:01:54 +00:00
Tommy Li 77ebb59f65
fix(deps): Tekton 0.44.2 patch and dependencies update (#1202)
* tekton 0.44.2 patch and dependencies update

* update license and missing tekton package dependencies
2023-04-08 05:54:51 +00:00
Tommy Li d5eb860720
feat(CI): Create periodic codeql code scan to detect possible static bugs (#1201)
* Create codeql.yml

* Update codeql.yml

add comments
2023-04-05 01:12:02 +00:00
Yihong Wang 4764448e76
fix(test): enhance build scripts (#1200)
* increase retry for many edge task

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* fix(test): update the logic of waiting pods

Don't fail the script if there is no pod after the deployment.
Instead, do the check again after the sleep.

* test(fix): randomize the pipeline name

use randomized pipeline name to support retry

---------

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-04-03 20:21:00 +00:00
Tommy Li 1f24ba5bb9
fix(sdk): Remove print statement for compiler to reduce log size (#1198) 2023-03-30 20:04:05 +00:00
Tommy Li 89ccf0bf39
chore(docs): Update kfp-tekton openshift instruction to also include Tekton SCC (#1197) 2023-03-24 15:59:23 +00:00
Tommy Li 70533aab4b
chore(release): Add 1.6.3 backend and 1.6.4 sdk release (#1196)
* add 1.6.3 backend and 1.6.4 sdk release

* add 1.6.3 release yaml
2023-03-22 19:53:43 +00:00
Yihong Wang fd1f6e53ab
fix(test): update GH action and toolchain task (#1195)
update the GH action script to use default kustomize 5.0.0.
update the toolchain task to use newer image as well.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-03-22 15:37:42 +00:00
Tommy Li 0c91fc1df7
fix(backend): Fix metadata writer dependencies (#1193)
* fix metadata writer dependencies

* fix metadata writer dependencies
2023-03-21 19:15:42 +00:00
Yihong Wang 21eaa6df5d
feat(manifests): use kustomize v5 (#1194)
* use kustomize v5

remove deprecated fields in manifests, including:
- base
- vars
- patchesStrategicMerge
- patchesJson6903

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* update manifests for all envs

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

---------

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-03-21 19:02:43 +00:00
Yihong Wang 819005040e
fix(test): fix typo in the toolchain task (#1190)
fix the typo of one of the params in the deploy task

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-03-17 16:13:08 +00:00
Tommy Li 82793c595a
chore(tekton-catalog): Remove legacy v1alpha1 condition since it is no longer supported (#1188) 2023-03-16 16:33:28 +00:00
Yihong Wang 7414106c1d
fix(test): update toolchain image/script (#1189)
update the toolchain image and update the script to
be able to specify the URL of the public container registry
and scripts

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-03-16 16:17:29 +00:00
Tommy Li 3b006b98b4
fix(sdk): Update aipipeline images to quay (#1186) 2023-03-16 08:37:28 +00:00
Tommy Li d8d6e4a4ad
chore(docs): Update docs on custom task parameters (#1187) 2023-03-15 20:35:11 +00:00
Tommy Li b7a4c03b40
chore(release): Add 1.6.2 backend release, 1.6.3 sdk release (#1184)
* add 1.6.2 backend release, 1.6.3 sdk release

* add release yaml

* update image to quay

* remove tests
2023-03-14 22:52:10 +00:00
Tommy Li c595c32b79
fix(sdk): fix v1 api package typo (#1180) 2023-03-10 22:07:21 +00:00
Tommy Li b766fb4751
fix(sdk): fix v1 api sdk client package bug (#1179) 2023-03-09 17:43:09 +00:00
Tommy Li d196a94d5e
feat(sdk): make metadata component spec gen flag configurable on pipeline level (#1177) 2023-03-06 17:15:37 +00:00
Tommy Li 4e5a49e01f
chore(cleanup): Remove unnecessary cloud build files from google and travis (#1178) 2023-03-02 22:47:00 +00:00
Tommy Li e069b75a37
chore(manifests): upgrade mysql image to kfp v2 standards for cves mitigation (#1175) 2023-03-02 16:47:04 +00:00
Tommy Li ce4ce6f203
Fix(any-sequencer): Make any sequencer able to detect child reference status (#1172)
* Make any sequencer able to detect child reference status

* update test script
2023-02-28 17:47:02 +00:00
Yihong Wang f4298068c5
fix(test): call kfp api instead of kube api (#1171)
Use kfp api to get run details instead of kube api

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-02-27 23:35:40 +00:00
Tommy Li 40a52874b6
chore(manifests): Update embedded status to minimal (#1168) 2023-02-24 17:22:25 +00:00
Yihong Wang 6fe856aa67
fix(backend): properly handle customrun status (#1167)
When the embedded-status feature flag is set to minimal,
a customrun can have either customRef or customSpec, which stores the
kind information. Improve the logic to handle customrun statuses
in these cases.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-02-23 15:58:24 +00:00
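A hedged sketch of the branching #1167 describes, using Tekton's v1beta1.CustomRun types; the upstream fix handles more of the status than this:

```go
package agent

import "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"

// customRunKind returns the custom task kind of a CustomRun, which may be
// referenced (spec.customRef) or embedded inline (spec.customSpec).
func customRunKind(cr *v1beta1.CustomRun) string {
	switch {
	case cr.Spec.CustomRef != nil:
		return string(cr.Spec.CustomRef.Kind)
	case cr.Spec.CustomSpec != nil:
		return cr.Spec.CustomSpec.Kind // from the embedded TypeMeta
	default:
		return ""
	}
}
```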
Tommy Li a04cdae330
chore(sdk): update package dependency source (#1166)
* update package dependencies

* fix lint
2023-02-21 17:42:40 +00:00
Tommy Li 7210555e8f
Chore(release): Add 1.6.1 release patch (#1165)
* add 1.6.1 release patch

* add release yaml
2023-02-20 22:46:39 +00:00
Tommy Li 034d427d38
Chore(backend): update kube and python dependencies for backend (#1162)
* update kube and python dependency for backend

* update kind dep
2023-02-20 18:43:39 +00:00
Yihong Wang ac757b44c0
fix(backend): Add the permissions to access customruns (#1161)
Add the permissions to access customruns for the following
roles/clusterroles:
- ml-pipeline
- ml-pipeline-persistenceagent-role
- ml-pipeline-scheduledworkflow-role

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-02-17 21:56:36 +00:00
Tommy Li 1816fc14db
chore(test): add python 3.10 into github actions (#1160)
* add python 3.10 into github actions

* update param to string to prevent github trimming
2023-02-16 17:56:06 +00:00
Tommy Li dbbcd94d4a
chore(release): Update release version to 1.6.0 (#1157) 2023-02-15 22:29:05 +00:00
Tommy Li 1de489cf2c
fix(sdk): optimize find item search for large amount of params (#1159) 2023-02-15 22:24:05 +00:00
Tommy Li 07f3cf7cfc
feat(sdk): Add task workspace support (#1153)
* add task workspace support

* fix lint and docs

* remove unnecessary workspaces annotations
2023-02-13 23:29:24 +00:00
Prashant Sharma cdfdcdd0f1
Feat(pipelineloop-cli): support customruns validation as well. (#1152)
* Feat(pipelineloop-cli): support customruns validation as well.

* fix seg fault.

* fix seg fault.
2023-02-10 17:58:03 +00:00
Tommy Li 464197b4c9
feat(sdk): Add pipeline workspace config (#1151) 2023-02-10 13:18:01 +00:00
Tommy Li ea5821fc68
chore(manifests): Fix broken github link (#1147) 2023-02-07 19:08:32 +00:00
Tommy Li 850e086469
Chore: Update readme and license headers (#1146)
* update readme and license header

* update readme and license header

* update license headers
2023-02-06 22:49:31 +00:00
Tommy Li e4d837ad9b
feat(sdk): Add env support for podtemplate in sdk (#1145)
* add env support for podtemplate in sdk

* update readme usage

* address comment
2023-02-06 21:59:31 +00:00
Tommy Li 37f45c0dde
chore(backend): Update backend to Tekton 0.44 (#1144)
* update backend to tekton 0.44

* update license
2023-02-06 20:10:01 +00:00
Tommy Li fd17202e7c
fix(pipelineloop): Update security context with group id (#1142)
* Update 500-webhook.yaml

* Update 500-controller.yaml
2023-02-02 22:33:01 +00:00
Prashant Sharma 8897d88a97
Feat(pipelineloop): Migration guide for migrating from v1alpha1.Run t… (#1141)
* Feat(pipelineloop): Migration guide for migrating from v1alpha1.Run to v1beta1.CustomRun.

* Update guides/Custom-run-migration-guide.md

---------

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2023-02-02 17:21:30 +00:00
Tommy Li 1fc5d56d50
fix(tests): update e2e tests with new api commands (#1140) 2023-02-01 21:30:29 +00:00
Prashant Sharma cdbe855444
Custom run migration for examples of PipelineLoop custom task. (#1139)
* wip

* Fixed examples to run with tekton v0.44.0

* Fixed test
2023-02-01 19:32:32 +00:00
Prashant Sharma 4637dcb1e3
Feat: switch to v1 API paths for backend. (#1121)
* switch to v1 API paths for backend.

* python api update.

* updated deployment to query v1

* generated swagger api for frontend and replaced previous Api with V1.

* changed remaining API -> V1 api

* Generated python api client

1. from modified swagger definition and python generator script.
2. Also deleted old python client files.

* Updating the requirements to import the kfp-tekton-server-api package containing the V1 API.

* fixed the lint

* add sdk v0 api patch

---------

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2023-02-01 10:12:30 -08:00
Tommy Li aca2280383
Fix(pipelineloop): Manifest flag patch and api version change for pipelineloop (#1137)
* update custom task default version

* update new rbac for customrun

* update owner reference
2023-01-27 22:52:03 +00:00
Tommy Li 5512f1bfb2
Revert "Revert "Feat(pipelineloop): upgrade to tekton version 0.43.2, tekton v1beta1 customRun ."" (#1136)
This reverts commit b3579dd88c.
2023-01-27 17:52:11 +00:00
Yihong Wang 7a45ca124a
fix(backend): Add format arg to printf command (#1135)
add a format arg to the printf command so the cached string
is not interpreted as a format string.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-01-27 00:17:10 +00:00
Tommy Li 9720b3f534
fix(sdk): add command to resource op spec (#1134) 2023-01-26 21:54:10 +00:00
Tommy Li d37f21d713
chore(release): add 1.5.1 release patch (#1133) 2023-01-26 18:51:10 +00:00
Yihong Wang a7db1c189e
fix(backend): Add securitycontext for k8s 1.25 (#1132)
For k8s 1.25, a securityContext definition is needed for a pod.
Add a proper security context to the pipelineloop controller and webhook

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-01-26 18:11:10 +00:00
Tommy Li b3579dd88c
Revert "Feat(pipelineloop): upgrade to tekton version 0.43.2, tekton v1beta1 customRun ." (#1131)
This reverts commit 3425f03a4e.
2023-01-26 01:47:09 +00:00
Tommy Li 95f0abeada
chore(sdk): Update sdk comments (#1128)
* update sdk comments

* fix lint
2023-01-24 21:35:52 +00:00
Tommy Li 218a608ef8
chore(sdk): Update sdk requirement packages (#1127) 2023-01-24 18:35:25 +00:00
Prashant Sharma 3425f03a4e
Feat(pipelineloop): upgrade to tekton version 0.43.2, tekton v1beta1 customRun . (#1119)
* Tekton v1alpha1.Run to v1beta1.CustomRun

* support both runs and customruns
2023-01-24 17:07:25 +00:00
Tommy Li 33fa86d314
chore(backend): Update Tekton version to 0.43.2 (#1123)
* tekton 0.43 patch

* update licenses

* update licenses

* update readme and tekton install
2023-01-18 08:23:22 +00:00
Prashant Sharma 2aa67806ed
Feat: Add V1 endpoint support. (#1085)
* wip: applying patch1

* generating v1 APIs

* code gen

* delete patch reject files.

* WIP: creating both v1 and v1beta1 endpoints.

* removed v1beta1

* updated-license
2023-01-12 17:13:01 +00:00
Tommy Li 08da8cc771
chore(release): Add 1.5.0 release patch (#1118) 2023-01-10 23:42:15 +00:00
Tommy Li 3a20c43dd1
chore(release): Add 1.4.1 release patch (#1117)
* add 1.4.1 release patch

* update kubectl-wrapper to latest to avoid image version conflicts
2023-01-10 21:45:15 +00:00
Tommy Li 1ec34bfd95
feat(backend): Update db table config with Primary key to improve db read performance (#1116) 2023-01-09 22:48:57 +00:00
Tommy Li 466cde61bf
fix(sdk): Update custom task arg parsing for dash arguments (#1114) 2023-01-09 20:26:57 +00:00
Tommy Li fb32672585
feat(sdk): add pipeline_conf.timeout sdk support (#1108)
* add pipeline_conf.timeout sdk support

* update unit test script to take kfp config object
2022-12-14 20:22:21 +00:00
Yihong Wang 05baded0ef
feat(backend): specify the image for result step (#1104)
Allow users to specify the image for `step-move-all-results-to-tekton-home`
step. Add an env variable for the custom image: `MOVERESULTS_IMAGE`
in the api server.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-12-13 19:08:29 +00:00
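The env variable name comes from the commit above; how the api server falls back when it is unset is an assumption in this sketch, including the default image:

```go
package apiserver

import "os"

// moveResultsImage resolves the image for the result-moving step from the
// MOVERESULTS_IMAGE env variable set on the api server.
func moveResultsImage() string {
	if img := os.Getenv("MOVERESULTS_IMAGE"); img != "" {
		return img
	}
	return "busybox" // assumed fallback, not taken from the commit
}
```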
Tommy Li d39886de10
chore(sdk): update sdk version to 1.4.1 (#1093) (#1106) 2022-12-13 19:04:29 +00:00
Tommy Li e6027dd14c
fix(sdk): fix nested param logic for pl (#1091) 2022-11-29 21:31:15 +00:00
Tommy Li 0ab85f9762
fix(sdk): fix nested param generation issue (#1089)
* fix nested param generation issue

* change logic to only apply to parent counter
2022-11-28 22:17:42 +00:00
Tommy Li a7f2b04cf0
fix(sdk): Fix inline spec for iterateParamPassStyle and itemPassStyle (#1087)
* cherrypick(sdk): Fix counter param cherrypick (#1080) (#1082)

* fix nested loop counter param bug

* Update _tekton_handler.py

* fix inline spec for new features

* Update _tekton_handler.py
2022-11-21 18:50:13 +00:00
Tommy Li 1a70d222f0
chore(v2): cleanup old v2 content from v1 branch (#1083) 2022-11-15 20:11:27 +00:00
Tommy Li 57676731e3
fix(sdk): fix nested loop counter param bug (#1080)
* fix nested loop counter param bug

* address comments
2022-11-14 23:06:27 +00:00
Tommy Li 6117df5a7e
update tekton version to 0.41 (#1070)
* update tekton 0.41

* Update Dockerfile

* update license

* update license

* update tekton 0.41 manifests

* update kind kustomize

* update kind kubectl ver

* Update kfp-tekton-unittests.yml

* Update kfp-tekton-unittests.yml
2022-11-14 21:18:27 +00:00
Prashant Sharma 5c20b80f05
chore(PipelineLoop): upgrade to tekton v0.41.0 (#1072)
* upgrade to tekton v0.40.0

* fix tests
2022-11-11 21:42:23 +00:00
Tommy Li 1ba195e4dd
fix(sdk): add default labels for caching (#1078) 2022-11-11 21:39:23 +00:00
Tommy Li 2ec7b6e988
fix(bug): fix underscore param by checking pipeline param instead of task param (#1077)
* fix underscore param by checking pipeline param instead of task param

* update test cases
2022-11-11 17:15:51 +00:00
Tommy Li b4e322aa66
fix(sdk): fix nested loop underscore param (#1076) 2022-11-09 21:32:59 +00:00
Yihong Wang 880f37e775
update toolchain image (#1071)
update packages, utilities, and commands to newer versions, including:
- base image ubuntu:20.04
- kubectl 1.25.3
- ibmcloud
- node 18.6
- go v1.19.2

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-11-02 23:16:03 +00:00
Tommy Li a517d08c24
chore(CI): Update go version in CI to 1.19.x (#1069) 2022-11-02 21:11:02 +00:00
Tommy Li 93f726f60e
chore(docs): Move loop to custom feature since it's not Tekton native (#1067) 2022-10-28 17:19:47 +00:00
Tommy Li de32d586a6
fix(sdk): update python protobuf package for security update (#1066) 2022-10-28 17:11:46 +00:00
Tommy Li 25d116603d
feat(backend): add new annotation flags to enable artifact tracking at pipeline and task level (#1065)
* add new annotation flags to enable artifact tracking at pipeline and task level

* fix artifact metrics bug for new tekton version

* fix path bug

* fix path bug

* fix path bug

* fix path

* fix feature flag

* add feature doc
2022-10-28 17:10:47 +00:00
Tommy Li f1ed822920
chore(OWNERS): Remove Animesh as reviewer (#1064) 2022-10-26 21:13:20 +00:00
Tommy Li c49a005304
chore(release): add 1.4.0 release changes (#1063) 2022-10-26 21:12:18 +00:00
Tommy Li 96e573ecf6
fix(sdk): update loop dsl to handle same argument in nested loop (#1052)
* update loop dsl to handle same argument in nested loop

* fix lint

* fix sdk dependency
2022-10-24 21:43:12 +00:00
Prashant Sharma 98a2332cc0
fix(pipelineloop): caching should include the params for making caching key. (#1056)
* fix(pipelineloop): caching should include the params for making the caching key.

* Get params from run spec itself.

* Migrated cache for custom task controllers to gorm v2.

* code cleanup.

* Added retry for cache connect until timeout.

* improved tests to be able to detect config maps. Better error reporting.
2022-10-24 21:39:12 +00:00
Tommy Li 9b9b932ae0
chore(sdk): Remove unnecessary code (#1062) 2022-10-20 23:01:10 +00:00
Tommy Li 7e8d5ade7c
feat(sdk): add iterateParamPassStyle and itemPassStyle params to loop (#1059)
* add iterateParamPassStyle and itemPassStyle params to loop

* update new fields to store in extra configs

* update dsl to take extra_fields as new arguments

* make extra field values configurable

* update error message
2022-10-20 16:21:33 +00:00
Tommy Li d0a0706eaa
feat(sdk): add custom task param support in command spec (#1061) 2022-10-19 21:21:31 +00:00
Tommy Li d005b7bad6
fix(manifests): Patch cache config and break ct to default pl deployment (#1057) 2022-10-12 00:15:00 +00:00
Yihong Wang 11ff90e43a
feature(backend): Add QPS and Burst settings (#1055)
Add QPS and Burst settings for the RestClient
in api-server and tekton-pipelines-controllers.
Set both values to 50 for a medium workload.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-10-07 17:24:25 +00:00
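client-go throttles itself to QPS 5 / Burst 10 by default, so a busy controller queues up requests; a minimal sketch of raising both to 50 on the rest config, per the commit above:

```go
package apiserver

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// newTunedClient raises client-go's default rate limits (QPS 5, Burst 10)
// so the controller is not self-throttled under a medium workload.
func newTunedClient() (*kubernetes.Clientset, error) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	cfg.QPS = 50   // sustained requests per second to the API server
	cfg.Burst = 50 // short-term burst allowance
	return kubernetes.NewForConfig(cfg)
}
```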
Tommy Li 1f3093c9d0
Update kfp-sdk to 1.8.14 (#1050) 2022-10-05 20:33:22 +00:00
Yihong Wang 5a96b111c1
test: Add a test case for the validation (#1051)
Add a test to verify the performance of webhook
validation. A huge number of edges in the DAG caused
a performance issue in Tekton earlier; adding
this test case guards against performance regression.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-09-30 20:33:55 +00:00
Prashant Sharma 30eb17811a
feat(pipelineloop): Support last_idx and last_elem (#1044)
* WIP: Support last_idx and last_elem

* WIP: SDK changes to support last-idx variable.

* fix

* fix sdk param

* fix controller to correctly update results

* fix tests

* fix unit tests and lint

* fix test cases to be executable (#5)

* fix unit tests and lint

* fix test cases to be executable

* removed unused code and added test

* fix test

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2022-09-30 16:15:55 +00:00
Tommy Li 69b84e84fb
fix(backend): Update Tekton to 0.38.4 (#1049)
* update tekton to 0.38.4

* update tekton to 0.38.4
2022-09-28 23:30:20 +00:00
Tommy Li 42a433e88e
fix(sdk): Fix nested numeric parameters (#1047)
* fix nested numeric parameters

* fix conflict
2022-09-19 23:54:31 +00:00
Tommy Li 33e3ab2a82
feat(sdk): Remove timeout since we moved to default unlimited timeout (#1043)
* remove timeout since we moved to default unlimited timeout

* remove old global flag

* fix python lint
2022-09-09 19:29:30 +00:00
Yihong Wang bbd72037b0
feat(backend): handle pipelineloop status (#1039)
* handle pipelineloop status

When using `embedded-status: minimal`, the nested pipelineloop status
is missing. Add logic in persistence agent to retrieve runs and taskruns
status for nested pipelineloop.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* Add an arg to specify the kinds

Use an arg to specify the kinds which contain
childReferences information. Persistence agent
uses this list to retrieve taskrun/run status
and embedded them into the final PipelineRun yaml.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-09-01 17:43:56 +00:00
Tommy Li b3c1ec417c
Update tekton default timeout in global config (#1042) 2022-08-30 21:49:55 +00:00
Yihong Wang 37efcf09c9
fix(backend): add check for casting (#1040)
Add a check for the type cast. Make sure the watcher
receives Pod events.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-08-29 20:26:53 +00:00
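The guarded type assertion the commit adds is a standard client-go pattern; a minimal sketch (handler and variable names are illustrative):

```go
package agent

import corev1 "k8s.io/api/core/v1"

// onEvent checks the type assertion: a watcher can deliver objects other
// than *corev1.Pod (e.g. a cache.DeletedFinalStateUnknown tombstone), and
// an unchecked cast would panic on them.
func onEvent(obj interface{}) {
	pod, ok := obj.(*corev1.Pod)
	if !ok {
		return // not a Pod event; ignore instead of crashing
	}
	_ = pod // handle the pod here
}
```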
Tommy Li 51e7623c6b
optimize(sdk): Further moving common caching annotations to pipeline level (#1038) 2022-08-26 16:08:50 +00:00
David Wang 5e6e4cf36e
do not mark failed until all running PRs are done (#1037) 2022-08-25 16:07:09 +00:00
Tommy Li 24afc390b6
Add finally context var test case for #1035 (#1036) 2022-08-23 21:09:05 +00:00
Michalina Kotwica 2900f9e772
fix(sdk) include finally tasks in placeholder indir (#1035) 2022-08-23 16:27:25 +00:00
Tommy Li 8dbce394f4
Update kfp-tekton release to 1.3.1 (#1033) 2022-08-22 22:07:25 +00:00
Tommy Li aa3366f28d
Update pipelinerun timeout format to timeouts (#1031) 2022-08-22 19:24:30 +00:00
Tommy Li 5f2e4a3e12
feat(sdk): Add alpha env flag to opt out artifact tracking annotations (#1032)
* add alpha env flag to opt out artifact tracking annotations

* reduce annotations further
2022-08-19 16:53:54 +00:00
Tommy Li 9df587341e
optimize(sdk): Rewrite data passing script into more optimized code (#1029) 2022-08-16 23:04:50 +00:00
Tommy Li 71f1cc1da6
sdk(feat): add metadata support for loop (#1028) 2022-08-15 22:49:05 +00:00
Tommy Li 364d10fb3f
Remove google and argo specific tests to reduce repo size (#1027)
* remove google and argo specific tests

* update removed links
2022-08-15 17:51:33 +00:00
Yihong Wang b2971bf50c
Add test cases for sequence and cache (#1026)
add two test cases to verify
- taskruns/runs execution sequence
- caching function

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-08-15 16:15:34 +00:00
Tommy Li 68bf69758f
Update tekton to 0.38.3 to resolve pipeline cancel issue with custom task (#1025)
* init commit for tekton-0.38.3 dependency

* update tekton 0.38.3 changes

* update pipelineloop to remove old Tekton alpha resources

* update pipelineloop to remove old Tekton alpha resources

* update pipelineloop to remove old Tekton alpha resources

* update missing licenses due to local network issue

* fix step injection bug for custom task
2022-08-12 19:36:54 +00:00
Tommy Li 5d46f229a9
KFP 1.8.4 Rebase (#1024)
* KFP 1.8.4 Rebase

* KFP 1.8.4 Rebase

* KFP  Rebase

* KFP  Rebase

Co-authored-by: kfp-tekton-bot <65624628+kfp-tekton-bot@users.noreply.github.com>
2022-08-11 16:39:03 +00:00
Tommy Li 1abffb662f
optimize(sdk): Move common task labels to pipeline labels to reduce yaml size (#1020)
* move common task labels to pipeline labels

* move common task labels to pipeline labels

* update new python lint requirements
2022-08-03 16:17:42 +00:00
Tommy Li d0079acbe0
Update s3 csi deployment (#1019) 2022-07-29 21:46:45 +00:00
Tommy Li 42408909ad
fix(backend): fix cache server output (#1018) 2022-07-29 00:11:43 +00:00
Tommy Li ea126bf2fa
fix(sdk): fix unit test merging conflicts (#1017) 2022-07-28 21:02:57 +00:00
Tommy Li 144f9e9881
Add kfp-tekton 1.3.0 release template (#1011) 2022-07-28 20:38:55 +00:00
Tommy Li b219b2ee1e
fix(sdk): Add new logic to cover new tekton runafter condition (#1016)
* add new logic to cover new tekton runafter condition

* fix loop condition error
2022-07-28 20:30:55 +00:00
Tommy Li 909c4f2953
fix(sdk): fix exit handler param with empty exit task (#1015) 2022-07-27 19:18:11 +00:00
Tommy Li eaac53de79
Update result name to be case sensitive (#1013)
* update result name to be case sensitive

* add custom task test case
2022-07-26 21:03:16 +00:00
Tommy Li e2aa560a4c
Add alpha feature to output big data passing file path instead of task run name (#993)
* stage

* regenerate tests

* regenerate tests

* update comments

* resolve conflicts

* fix immediate bug

* parametrize path suffix name
2022-07-19 22:20:20 +00:00
Tommy Li 26b445551b
fix(sdk): Fix big data passing with multiple type of volume templates (#1006)
* fix big data passing with multiple type of volume templates

* fix lint and license

* fix multi input step bug

* refactor update volume code

* add missing tests
2022-07-19 17:53:51 +00:00
Federica Filippini a9f148343b
fix(sdk): Support passing directories in step-copy-results-artifacts and step-copy-artifacts. Fixes #996 (#997)
* Added support to directories in step-copy-results-artifacts script

Check if the provided result (src) is a directory; if so, create a tar.gz archive and compute the ARTIFACT_SIZE on {src}.tar.gz.
Moreover, print to the {dst} file the list of files in {src}.tar.gz (namely, the output of `tar -tzf {src}.tar.gz`) instead of copying {src} directly into {dst}

* Add support to directories in step-copy-artifacts script

Added check [ -d "$2" ] to allow directories to be transmitted as artifacts

* Update unit tests

* Update sdk/python/kfp_tekton/compiler/_data_passing_rewriter.py

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>

* reducing script size

* generated tests

* removed whitespace identified by lint test

* fixed error (missing suffix)

* generated tests

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2022-07-19 17:51:49 +00:00
Tommy Li 8d694c01a2
Remove old build dependencies (#1005) 2022-07-19 15:51:49 +00:00
Tommy Li 7d9f8c9a30
Add alpha preview feature into compiler (#991)
* add alpha preview feature into compiler

* address comments

* fix typo
2022-07-18 17:07:09 +00:00
Ricardo Martinelli de Oliveira 852b9ac2af
chore(apiserver): Object Storage connection abstraction (#977) 2022-07-14 21:10:34 +00:00
Tommy Li 126021b9d0
sdk(feat): Add big data passing with custom volume on kfp-tekton (#995)
* add big data passing with custom volume on kfp-tekton

* fix vscode typos and address errors

* Update compiler.py
2022-07-13 21:15:33 +00:00
Tommy Li 00339aa99f
Update sdk version to 1.2.3 (#1002)
* update sdk version to 1.2.3

* Update README.md
2022-07-12 21:26:02 +00:00
Alan Chin 243c15c87f
`chore(sdk) Bump kfp dependency to 1.8.13` (#999) 2022-07-12 17:03:31 +00:00
Yihong Wang 933945480e
eliminate the use of `taskSpec.script` (#992)
when using script, Tekton generates an init container to process the script.
we'd like to avoid the init container in the pipelinerun, so
remove `script` and use `command` and `args` instead.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-07-08 18:43:52 +00:00
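A sketch of the substitution in Tekton terms: a step defined with command and args, leaving Script empty so no script-processing init container is injected. Field names follow Tekton's v1beta1.Step; the shell wrapper shown is an assumption about how a script body would be carried over:

```go
package compiler

import "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"

// shellStep builds a step that runs a snippet via command+args instead of
// taskSpec.script, avoiding the injected script-handling init container.
func shellStep(name, image, snippet string) v1beta1.Step {
	var s v1beta1.Step
	s.Name = name
	s.Image = image
	s.Command = []string{"sh", "-c"}
	s.Args = []string{snippet}
	return s
}
```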
Maximilien de Bayser bce9814651
fix(sdk): Verify binary data in step copy-results-artifacts. Fixes #984 (#985)
* Add binary character check to copy-results-artifacts

* Update unit tests
2022-07-06 19:46:45 +00:00
Tommy Li 52f06b66bf
feat(sdk): Add labels for artifact outputs when tasks are not referred (#967)
* add labels for artifact outputs when tasks are not referred

* do not generate noninline test

* add option to also do individual artifactoutput

* add option to also do individual artifactoutput

* remove all output to artifact label

* fix sorting for test

* fix upstream conflicts
2022-06-20 16:20:37 +00:00
Michalina Kotwica b899035336
fix(backend): nil ptr exception for empty run status in pipeline-loops, fixes #981 (#982) 2022-06-20 16:09:37 +00:00
Tommy Li 7c0cc3a365
Update sdk dependency and release version to address private python package pull (#979)
* update sdk dependency and releases

* update sdk dependency and releases
2022-06-17 22:32:34 +00:00
Yihong Wang 5fd2b73022
feature(backend): support embedded-status: minimal (#976)
* feature(backend): support embedded-status: minimal

Add logic to support embedded-status feature when using
minimal setting. For now, the TaskRun/Run status is retrieved
and inserted to PipelineRun.Status and stored into ml-pipeline
backend storage.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* Add embedded-status feature flag

Use `full` as the default setting for embedded-status
feature flag.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-06-16 20:15:42 +00:00
Yihong Wang 66c09441a9
fix(sdk): update condition template (#975)
eliminate the use of `script`, replace it with
command and args. this also removes the init container that
prepares the script.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-06-07 22:11:15 +00:00
David Wang f043833b3f
make cache to support multiple containers in task pod (#974)
* support multiple containers in task pod

* support multiple containers in task pod
2022-06-07 16:04:31 +00:00
Tommy Li 0b00328838
fix(docs): Add explicit commands for Tekton versioned deployment to avoid doc con… (#973) 2022-06-07 14:57:30 +00:00
Tommy Li 58e029797c
fix(pipelineloop): Add param with empty space test (#970)
* add param with empty space test

* add param with empty space test
2022-06-06 13:15:29 +00:00
Yihong Wang 80739c0239
upgrade tekton to 0.36.0 (#938)
* upgrade tekton to 0.35.1

Upgrade tekton to 0.35.1, including:
- tekton manifests
- go.mod to use new pkg and update all related pkgs
- update pipeline-loops to also use new tekton pkg

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* update tekton to 0.36

update tekton, license files, and cache server

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* fix compile error on test code

for taskrun's step, it doesn't use container data struct
any more

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* update compiler

add `type` for the taskSpec.results and update all
yaml files accordingly

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-06-03 21:57:26 +00:00
David Wang 7e6cabce39
handle step from task results (#964)
* handle step from task results

* address review comments
2022-06-01 16:33:49 +00:00
Joanna 514da93c52
update install guide (#965)
* update install guide

* update
2022-05-31 20:41:11 +00:00
Rafał Bigaj 65e203c6ea
fix(pipeline-loops): PipelineLoop consumes significant amount of memory (#963)
Storing PipelineSpec and TaskSpec in PipelineRunStatus is a source of significant memory consumption and OOM failures.
PipelineSpec and TaskSpec seem to be redundant in this place.

Issue: https://github.com/kubeflow/kfp-tekton/issues/962
2022-05-27 18:23:13 +00:00
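A heavily hedged sketch of the fix direction only: drop the redundant embedded spec before the status is persisted. It is illustrated on Tekton's PipelineRun status; the actual patch lives in the pipeline-loops controller and may clear different fields:

```go
package pipelineloop

import "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"

// pruneStatusSpecs clears the embedded spec copy before the status is
// stored, keeping each nested run's status small (illustrative only).
func pruneStatusSpecs(pr *v1beta1.PipelineRun) {
	pr.Status.PipelineSpec = nil // redundant full copy of the pipeline spec
}
```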
Tommy Li 566b587a9d
Add pipelinerun pod template support for security context and automou… (#955)
* add pipelinerun pod template support for security context and automount_service_account_token

* update flag to the correct type

* update flag to the correct type

* update flag to the correct type

* update usage docs
2022-05-27 17:34:13 +00:00
David Wang 6b3ef559ec
fix 954, trim whitespace and newline char for range input (#961)
* fix 954, trim whitespace and newline char for range input

* address review comments
2022-05-27 12:14:12 +00:00
Yihong Wang a84363c388
feat(test): add backend integration test (#956)
* [test] tryout kind on github

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* build images

build and use the images inside the kind cluster

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* remove unnecessary step

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* build multiple images in a script

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* check if any change for backend files

check changes for backend files and trigger the integration
testing if any.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-05-26 15:06:05 +00:00
Yihong Wang faa409aa61
fix(backend): refine copy-artifacts step (#942)
* fix(backend): refine copy-artifacts step

to avoid results duplication in the copy-artifacts step, add a step
to move result files to /tekton/home/tep-results.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* disable eof strip if results are moved

when results are moved to /tekton/home/tep-results,
the EOF strip shouldn't be applied. disable it in
this case for now.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-05-24 21:14:33 +00:00
Yihong Wang ca3c417a53
feat(backend): update go version and license files (#949)
update the go version in Dockerfiles, go-license tool,
and license files. these changes come from upstream and
reduce the build time.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-05-24 21:13:33 +00:00
Tommy Li 00eaffc4ea
fix(sdk): Fix param prefix replacement (#952) 2022-05-24 15:36:57 +00:00
Tommy Li 9307b361fc
Update manifests with 1.2.1 release (#948) 2022-05-23 17:50:01 +00:00
Tommy Li 7a0e6bc3e9
Store the uploaded pipeline as original yaml format (#945) 2022-05-23 16:24:01 +00:00
David Xia 16157a021f
fix: typo in frontend message (#946) 2022-05-23 16:05:01 +00:00
David Wang 3ab41efc92
iteration limit support, default limit is 10000 (#944) 2022-05-20 16:11:25 +00:00
Prashant Sharma 27db2e348f
Loop ranges should handle from = to, and generate one iteration. (#941)
* Loop ranges should handle start = 0 and end = 0, and generate one iteration.

* When from = to, generate one iteration, emulating how enumerate works.

* Fixed edge cases.

* if from and to are unset, we should not pick defaults either.
2022-05-18 16:50:26 +00:00
Yihong Wang 8f034ffd9f
[build] update github workflow to use go v1.17 (#939)
the whole repo needs go v1.17 now.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-05-13 16:06:34 +00:00
Yihong Wang 3f1d589892
[build] update toolchain image (#937)
upgrade the golang to 1.17.6 and other
ibmcloud plugins

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-05-13 16:05:35 +00:00
David Wang 425d4be4ee
wait for pipelineloop-break-operation task final status (#936) 2022-05-12 16:11:04 +00:00
Yihong Wang 282353d3f9
feat(sdk): Add helper class AddOnGroup (#920)
* feat(sdk): Add helper class AddOnGroup

add a helper class for users to write a custom
OpsGroup which maps to a custom task in Tekton.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* move finally under pipelineSpec

`finally` shall be under `pipelineSpec`.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* Enhance params of AddOnGroup

make `params` argument of AddOnGroup class support
static and dynamic values.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* Add post_param() api in AddOnGroup

Add `post_param()` api to update the params of
the task

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* Handle intermediate params

Create a dedicated API to create intermediate params
which are only used by downstream Ops/OpsGroup, but not
listed in the spec.params

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* fix incorrect param value

properly handle params that are created
from AddOnGroups in downstream Ops

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* handle nested AddOnGroup case

An AddOnGroup can contain another AddOnGroup
as a downstream OpsGroup. The group name of an AddOnGroup
needs to follow the correct name pattern to allow
the compiler to calculate the nested case properly.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* clean up params inside the taskSpec.pipelineSpec

only keep params that are used by downstream tasks in
taskSpec.pipelineSpec.params

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* support annotations and labels

Add properties in AddOnGroup to store annotations
and labels, then add their key/value pairs to
metadata.annotations and metadata.labels.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* rename the annotation property to annotations

amend the property name to the plural form

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-05-10 17:51:07 +00:00
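Taken together, the commits above sketch the AddOnGroup surface: a `params` argument accepting static and dynamic values, a `post_param()` API, intermediate params, and `annotations`/`labels` properties. A minimal subclass sketch follows; the import path, the `kind` argument, and the attribute names are assumptions drawn from the commit messages, not verified API.

from kfp_tekton.tekton import AddOnGroup  # assumed import path

class ExampleGroup(AddOnGroup):
    # Illustrative custom OpsGroup that maps to a Tekton custom task.
    def __init__(self, threshold: int):
        super().__init__(
            kind='Example',                   # assumed custom-task kind argument
            params={'threshold': threshold},  # static value; dynamic PipelineParams also supported
        )
        self.annotations = {'example.org/team': 'demo'}  # merged into metadata.annotations
        self.labels = {'example.org/stage': 'test'}      # merged into metadata.labels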
Tommy Li 1ef7b03af4
Update sdk release to 1.2.1 (#929) 2022-05-06 15:04:50 +00:00
Tommy Li ea1221a264
Fix multiple nested condition in loops issue (#928)
* fix multiple nested condition in loops issue

* update test cases to work with community cel custom task;
2022-05-04 20:58:33 +00:00
Michalina Kotwica 284891c8b0
fix(sdk) add test for any-seq inside of loop (#927)
* add test for any-seq inside of loop

* fix style: newline at the end of file

* recur when handling tekton pipeline variables

* update test name

* refactor adding a param

* add type to extracted task params inside loop

* fix style: no spaces in single-elem dict
2022-05-04 16:10:32 +00:00
Tommy Li c854c27108
Add necessary env variables for resource op owner reference (#925) 2022-04-29 19:52:00 +00:00
Tommy Li 7dd94985b3
Add resource op owner reference mapping (#922) 2022-04-28 16:27:32 +00:00
Tommy Li 8ba08e28ef
fix(SDK): Update sanitization logic to sync with kfp dsl (#919)
* map sanitization logic to original kfp

* update test data to reproduce sanitization bug

* fix upstream DSL bugs with nondeterministic params on long names
2022-04-22 18:17:41 +00:00
David Wang 892bbaa8e6
Support the comma as default string param separator (#921)
* support the default string param separator comma

* support the default string param separator comma
2022-04-22 15:25:40 +00:00
Prashant Sharma c7dfca07a7
feat: Support loop ranges with negative values. (#911)
* feat: Support loop ranges with negative values.

* Test cases from github issue.
2022-04-15 17:02:40 +00:00
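Reusing the numeric_loop_items sketch above, the negative cases look like this (again a semantic model, with endpoint inclusiveness assumed):

assert numeric_loop_items(5, 1, step=-2) == [5, 3, 1]        # negative step counts down
assert numeric_loop_items(-3, 0, step=1) == [-3, -2, -1, 0]  # negative bounds also work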
Yihong Wang b14f952ceb
fix(sdk): Apply node_selector in PipelineConf (#918)
* fix(sdk): Apply node_selector in PipelineConf

Apply the node_selector in PipelineConf to spec.podTemplate
instead of taskPodTemplate of each task.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* update Makefile under pipelineloop

when running cli, it should do update target first

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-04-13 19:46:53 +00:00
Tommy Li 51c5cdd3a7
fix(sdk): fix nested loop global param bug (#917)
* fix nested loop global param bug

* fix nested loop global param bug

* add license

* fix recursion bug
2022-04-13 15:44:54 +00:00
Yihong Wang eac02f0633
fix(sdk): handle the node selectors (#916)
Handle the node selector info from PipelineConf in
the compiler. Node selector info at the Op level can
override PipelineConf settings.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-04-12 20:43:56 +00:00
David Wang f083d6f0f2
pass down the podTemplate and serviceAccount from run to loopSpec (#909) 2022-04-07 20:11:15 +00:00
Tommy Li 219f2d0904
Update Tekton client and manifests to v0.31.4 (#905)
* update tekton client to 0.31.4

* update tekton manifests

* add missing go mod files
2022-04-07 00:09:14 +00:00
Prashant Sharma e9f61fcdea
IterateNumeric behavior changed to contain the value of the current iteration item. (#904) 2022-04-04 23:32:36 +00:00
Yihong Wang 53fa9e7521
feat(sdk): extension function `Loop.enumerate()` (#901)
* feat(SDK): extension function `Loop.enumerate()`

Add a helper function, `enumerate()`, in Loop to
return indices and items. It can be used to get the
current iteration number and item value.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* add logic to handle nested loop

Add logic to handle nested loop and test cases

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-04-01 22:30:05 +00:00
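In plain-Python terms the helper exposes what enumerate() yields: the iteration number alongside the item. The loop DSL surfaces these as two parameters; the snippet below models only the semantics, not the SDK call shape.

items = ['train', 'validate', 'deploy']
for index, item in enumerate(items):
    # index -> current iteration number, item -> current item value
    print(f'iteration {index}: {item}')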
Tommy Li 9adc788bb4
Parameterize kubelet path for s3-csi manifests (#900) 2022-04-01 22:29:05 +00:00
Prashant Sharma 7b692b6def
feat: New variable iterationNumberParam to track current item. (#898)
* New variable iterationNumberParam to track current item.

* New variable iterationNumberParam to track iteration no.

* New variable iterationNumberParam to track iteration no.

* review comments
2022-04-01 22:22:05 +00:00
Tommy Li 82aea99741
Add missing csi plugin for multi-user manifests (#899) 2022-03-31 04:29:30 +00:00
Tommy Li aa90528bc8
Add missing openshift standalone permission (#897) 2022-03-29 16:47:22 +00:00
Prashant Sharma 06487acc90
fix(controller): Added fields serviceAccountName and PodTemplate to PipelineLoop. (#891)
* wip

* Added fields serviceAccountName and PodTemplate to PipelineLoop.

* review comment

* Update pipelineloop_types.go
2022-03-28 22:28:21 +00:00
Michalina Kotwica b85d4e146f
fix(sdk) add tests for non-literal separator (#896)
* add tests for non-literal separator

* plug-in the tests

* process 'separator' like 'from'-'step'-'to'

* explicitly allow PipelineParam as a separator
2022-03-28 16:21:51 +00:00
Tommy Li f4086039b2
Add yhwang as approver (#893) 2022-03-26 05:22:14 +00:00
David Wang a7f6c0a634
Add current iteration item to the PipelineLoopPipelineRunStatus (#894) 2022-03-25 18:36:12 +00:00
Prashant Sharma 6a9f88fa8e
fix: objectstore refactoring (#887) 2022-03-24 15:19:47 +00:00
Yihong Wang fed74c1bca
[test] Update mnist-e2e sample (#888)
Use KServe for the model serving

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-03-22 21:12:50 +00:00
Prashant Sharma 763d42c1d4
chore: Objectstore package refactoring. (#884) 2022-03-22 01:30:30 +00:00
Rafał Bigaj 41e50fa61a
fix(PipelineLoop): integer divide by zero (#883)
- Use the default `step` value of `1` in case `0` is provided in `PipelineLoop`.

Issue: https://github.com/kubeflow/kfp-tekton/issues/882
2022-03-18 17:53:40 +00:00
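The fix amounts to a one-line guard before the iteration count is divided by the step. A plain-Python sketch (the real change is in the PipelineLoop controller's Go code; inclusive endpoints are assumed as above):

def iteration_count(start, end, step):
    if step == 0:
        step = 1  # fall back to the default step, per the fix for issue 882
    return abs(end - start) // abs(step) + 1

assert iteration_count(1, 10, 0) == 10  # previously an integer divide by zero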
Michalina Kotwica d5e2e43992
fix(sdk) add test for custom-tasks with literals. Fixes #880 (#881)
* add test for custom-tasks with literals

* fix orig_params check for custom tasks

* fix: style: min 2 spaces before inline comment

* fix: style: 2 empty line before function decl
2022-03-17 16:19:10 +00:00
Prashant Sharma 3ab7a40055
feat(objectstore) Metadata object store (#876)
* Objectstore v1

* Objectstore v1

* Objectstore v1
2022-03-15 23:11:36 +00:00
Tommy Li be74f6612d
Add 1.2 release changes (#875)
* add 1.2 release changes

* update install readme
2022-03-10 22:46:54 +00:00
Tommy Li b302354298
Patch kfp 1.8.1 new config (#872) 2022-03-09 20:24:11 +00:00
Prashant Sharma b00e8ccf82
Break task sdk update (#871)
* Update SDK to support break task as custom task.

* Added tests

* updated testdata
2022-03-08 22:24:10 +00:00
Tommy Li 94dbd2a65a
Add caching labels for inline loops and custom tasks (#870) 2022-03-08 00:25:28 +00:00
Tommy Li 119e6acf62
Update kfp sdk dependency to 1.8.10+ (#868)
* update kfp sdk dependency to 1.8.10+

* Remove local env details

* update upper bound kfp dependency to 1.8.11 for more stable releases
2022-03-04 18:06:59 +00:00
Prashant Sharma dc2eeff827
Feat(pipelineloop): Break task as custom task. (#854)
* wip

* break task status update

* failed to force update a run.

* wip

* Own a break task and then update it to succeed.

* removed unused code
2022-03-04 17:26:59 +00:00
Tommy Li 559f8b6df0
Kfp 1.8.0 api and frontend rebase (#862)
Co-authored-by: kfp-tekton-bot <65624628+kfp-tekton-bot@users.noreply.github.com>
2022-03-03 00:58:28 +00:00
Tommy Li 9843872b3a
Fix Tekton conditional dependency on sub-dag (#866)
* Fix Tekton conditional dependency on sub-dag

* fix tekton conditional dependency on group ops
2022-03-02 17:35:04 +00:00
Prashant Sharma 50eb94ae76
feat(pipelineloop): Add caching configuration support through config-map. (#861)
* caching flags

* fix tests

* fix the broken link for kustomize.
2022-03-01 20:02:03 +00:00
Michalina Kotwica 42cc312ad0
fix(sdk) add test for empty loops (#864)
* add test for empty loops

* treat no-tasks and tasks-list-empty the same

* add licenses
2022-02-28 17:07:37 +00:00
Yihong Wang 1997919632
[test] fix typo in pipeline-loop example (#860)
add double quotes to the value of `true`
2022-02-24 02:15:04 +00:00
Prashant Sharma 14c619598c
feat: caching for custom task. (#852)
* WIP: caching for custom task.

* Extracted cache as a standalone package.

* added go.mod

* fix tests

* feedback

* Added environment variable check, tests and refactored code.

* updated readme

* Apply suggestions from code review

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>

* Update pipelineloop_types.go

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2022-02-22 19:26:24 +00:00
Tommy Li acd5d53717
Add KFP-Tekton 1.1.1 release (#847)
* add 1.1.1-release patch

* add 1.1.1-release patch

* update patch for openshift permission
2022-02-10 22:27:40 +00:00
Yihong Wang 293b5ca467
[test] replace the cache-server image (#848)
* [test] replace the cache-server image

Replace the cache-server image while running tests

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* [test] Replace frontend image

Replace the frontend image while running tests

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-02-10 18:04:16 +00:00
Tommy Li c0bf2e970e
Update manifests for multi-users k8s 1.22 support (#846) 2022-02-09 23:28:32 +00:00
Tommy Li 6166c7c2cc
Add loop numeric sdk support (#838)
* add loop numeric sdk support

* address comments

* fix comments

* add pipelineparam as input type for loop range function
2022-02-04 17:52:44 +00:00
Michalina Kotwica eab7b4c606
fix(sdk) separator param. Part of #791 (#842)
* m: typo in a comment

* remove separator param in loop spec

* update test
2022-02-01 17:53:59 +00:00
Tommy Li 0163d4c989
Remove unsynced components (#840)
* remove unsynced components

* add back filesystem components for running unit tests
2022-01-31 18:08:32 +00:00
Tommy Li ca81bf0c75
Add support to parse pipeline param in json string (#841)
* add support to parse pipeline param in json string

* update license
2022-01-27 19:47:11 +00:00
Tommy Li 2923a1e802
Update KFP-Tekton installation to give guidance on GPU worker nodes. (#836) 2022-01-26 23:16:10 +00:00
Michalina Kotwica dc74cb2374
feat(sdk): implement loop DSL ext: from_string. Part of #791 (#837)
* handle DSL for loop item separator

* handle compilation for separator

* add tests for loop with separator

* style: self -> cls in classmethods

* fix: dsl+compile

* update test results

* style: remove unused import

* style: blank lines

* add license to tests

* fix tests: no value passing of str loop src
2022-01-25 17:38:20 +00:00
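A sketch of what the extension enables in pipeline code. The Loop import path and the from_string signature are assumptions based on the commit description, not verified API; echo/echo_op are illustrative components.

import kfp.dsl as dsl
from kfp.components import create_component_from_func
from kfp_tekton.tekton import Loop  # assumed import path

def echo(item: str):
    print(item)

echo_op = create_component_from_func(echo, base_image='python:3.9')

@dsl.pipeline(name='loop-from-string-demo')
def demo_pipeline(items: str = 'a|b|c'):
    # Iterate over the items parsed from a string with a custom separator.
    with Loop.from_string(items, separator='|') as item:  # assumed signature
        echo_op(item)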
Tommy Li 89f4e418bc
Update performance script to include more metrics (#832) 2022-01-22 02:34:46 +00:00
Prashant Sharma 4b532e3041
feat(objectstorelogger): objectstorelogger as a separate module. (#829)
* cos-logger as a separate module.

* temp

* temporary hack: using scrapcodes as repository.

* correction of import in README.md

* found a golang trick to use local packages.

* naming changes

* renaming update to README.md
2022-01-21 18:14:21 +00:00
ted chang 793854829e
Additional param to enable running copies of same pipeline concurrently (#828)
Signed-off-by: ted chang <htchang@us.ibm.com>
2022-01-20 18:50:53 +00:00
Tommy Li afe05fba76
Clean up owners file (#827) 2022-01-19 18:27:28 +00:00
Tommy Li 1f7507120b
Enable pipelineparam for container env (#825) 2022-01-19 18:26:28 +00:00
Prashant Sharma 6d2682dad6
fix(pipelineloop): break loop should cancel all pipeline runs. (#824)
* fix break loop

* fixed the break loop logic.

* run status added back
2022-01-19 18:16:28 +00:00
Tommy Li b1bb32b4e9
kfp-tekton 1.1 release (#814)
* add kfp-tekton 1.1 release

* update manifest with the latest commits

* migrating rest of v1beta1 to v1 api
2022-01-12 19:11:10 +00:00
Prashant Sharma 50c172fcce
feat(pipelineloop): Support logging to Object store. Built as an extension for zap. (#806)
* Object store logging as an extension for zap.

* Do not add object store logger if it is not enabled.
2022-01-11 23:22:09 +00:00
Tommy Li ed7673cd37
Add option to produce pipelinerun taskspec as taskref (#810)
* add option to produce taskspec as taskref

* add rbac for task templates
2022-01-11 01:19:42 +00:00
Tommy Li c8b258cf5c
Move postcss to version 8 and fix Tekton UI dependency chain (#815)
* move postcss to version 8

* move postcss to version 8
2022-01-11 00:58:42 +00:00
Tommy Li 3d09ad20a8
Update pipelineloop and manifests with k8s 1.22 API (#813)
* update pipelineloop and manifests with k8s 1.22 API

* remove preserveUnknownFields since it is already enabled by default
2022-01-10 14:45:17 +00:00
Tommy Li 3bb2767d1b
Chore: regenerate python tests (#812) 2022-01-06 22:01:41 +00:00
Prashant Sharma 4fa2dd97cb
fix(pipelineloop): Nested call stack depth limit (#808)
* wip

* recursion depth cannot be set on the nested pipelineloop resource.

* recursion depth can be set on the nested pipelineloop resource.

* removed an extraneous log statement.

* review feedback - change default stack limit to 30

* fixed tests
2022-01-06 18:58:35 +00:00
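The limit itself is a simple guard; a plain-Python sketch of the idea (the enforcement lives in the pipelineloop controller, and per the review feedback above the default stack limit is 30):

DEFAULT_MAX_STACK_DEPTH = 30  # default per the review feedback above

def check_nested_depth(depth, limit=DEFAULT_MAX_STACK_DEPTH):
    # Refuse to reconcile another nested PipelineLoop past the limit.
    if depth > limit:
        raise RecursionError(f'nested PipelineLoop depth {depth} exceeds limit {limit}')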
Tommy Li 2fac70f1d4
Reduce component spec annotations (#805)
* reduce component spec annotations

* add option to opt out component spec annotations

* fix lint
2022-01-06 18:28:36 +00:00
Tommy Li 77fcaaa34a
update pipelineloop operator to check break tasks with taskrun (#809) 2022-01-06 06:10:36 +00:00
Prashant Sharma 2a65c33b79
feat(pipelineloop): Break operation for pipelineloop. (#807)
* Break operation for pipelineloop.

* Break operation for pipelineloop.
2022-01-04 18:29:53 +00:00
Tommy Li b16bd8863a
Add tekton loop dsl extension skeleton (#799) 2021-12-16 01:55:35 +00:00
Tommy Li c4f52e5f1c
Add taskrun name and uid into big data passing midpath (#771)
* add uid into big data passing midpath and update uid replacement to also replace with Tekton uid variable

* use taskrun name as midpath

* regenerate tests

* fix tests

* fix tests

* fix lint

* fix tests

* update midpath from uid to original pipelinerun

* fix lint

* replace original pipelinerun name in api to make the metadata writer work. Also add big data passing format to pipeline annotations

* update sdk loop logic to pass workspaces to sub-pipelines

* fix tests
2021-12-09 01:32:55 +00:00
Tommy Li e50795f125
Upgrade tekton api client and manifest to 0.30.0 & update default timeout (#798)
* upgrade tekton api client to 0.30.0

* update default timeout to non-zero
2021-12-08 20:41:59 +00:00
Tommy Li 88de0c42a9
Fix performance test script and update testing examples (#795)
* fix performance test script

* fix lint

* add missing license

* revert script changes

* update testing example
2021-12-01 22:45:04 +00:00
Christian Kadner 2c61c8ade5
Load performance test pipelines from files (#796)
* Add perf_test_config.yaml with paths of test pipelines
* Add a 'Status' column to output CSV file
* Update compile_pyfile to remove loaded pipeline module after
  compilation to allow subsequent imports of another module
  with the same name

Resolves #778

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-12-01 21:38:03 +00:00
Yudong Wang 0bb387382d
Update workflow.go (#794)
* Update workflow.go

In verifyParameters, check whether the parameter is on the list of unused parameters, and if so, log a warning instead of raising a hard error.
If there are too many parameters, it is hard to determine which ones are in use and which are unused, so a warning is better for this case.

* Update workflow.go

* Update workflow.go
2021-11-30 07:56:30 +00:00
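verifyParameters itself is Go, but the relaxed rule is easy to state as a Python analog: a parameter on the unused list now produces a warning instead of failing the run.

import logging

def verify_parameters(supplied, unused_parameters):
    # Python analog of the relaxed check (a sketch, not the workflow.go code).
    for name in supplied:
        if name in unused_parameters:
            # Previously a hard error; with many parameters it is hard to tell
            # which are really unused, so a warning is friendlier.
            logging.warning('parameter %r appears to be unused; continuing', name)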
David Wang 6551207729
support separator string for iteration string param (#790)
* support separator string for iteration string param

* address review comments
2021-11-30 02:04:30 +00:00
Tommy Li 61e4f36578
Move loop retry test as a different example (#789) 2021-11-24 22:39:34 +00:00
Yihong Wang 54af3c90ad
[test] Add taskRun and run into the report (#785)
Get taskRun and run details into the output CSV

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-11-24 20:55:47 +00:00
Prashant Sharma e0e60bdc55
feat(pipelineloop): Support workspaces for pipelineLoop. (#787)
* Support workspaces for pipelineLoop.

* fix doc link test
2021-11-24 19:17:48 +00:00
Prashant Sharma 1a861306d8
feat(pipelineloop): Implement retry for pipelineloop controller. (#781)
* update to the latest tekton API i.e. 0.30

* Implement retry for pipelineloop controller.

* fix test after rebase.
2021-11-23 22:23:47 +00:00
Michalina Kotwica e2ea6bf1f1
feat(sdk): factor out the images to use in copying steps (#782) 2021-11-23 16:50:06 +00:00
David Wang a871eba224
expose current iteration item to pipelinerun's annotation (#776)
* expose current iteration item to pipelinerun's annotation

* correct typo
2021-11-22 01:32:36 +00:00
Tommy Li 6dc9d61061
Fix archived logs for custom tasks (#777) 2021-11-19 23:02:34 +00:00
Christian Kadner 121c40f9dc
Add performance test script (#773)
* Add performance test script

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Enable script in single-user mode, unauthenticated

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Use explicit names for duplicate pipeline function names

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Run performance tests in parallel

Pipeline compilation still runs in sequence due to
compiler implementation limitations.

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Improve readability of console output

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Use decorator to track method execution times

This allows accurately recording compile times when running tests
in parallel, despite the restriction that pipeline compilation is
synchronized.

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Assert presence of 'pipeline_name' keyword argument

Instead of raising a ValueError in '@time_it' decorator

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-11-19 23:01:35 +00:00
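The last two bullets describe a small decorator pattern: time the wrapped call and assert that the 'pipeline_name' keyword argument is present. A sketch of the idea (names and output format are illustrative, not the script's exact code):

import functools
import time

def time_it(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        assert 'pipeline_name' in kwargs, 'time_it requires a pipeline_name= kwarg'
        start = time.perf_counter()
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.perf_counter() - start
            print(f"{kwargs['pipeline_name']}: {func.__name__} took {elapsed:.2f}s")
    return wrapper

@time_it
def compile_pipeline(*, pipeline_name):
    time.sleep(0.1)  # stand-in for the synchronized compile step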
Tommy Li 5dfc36bb41
Add UI backward compatibility for dsl.container syntax (#768)
* add ui backward compat for dsl.container syntax

* add ui backward compat for dsl.container syntax
2021-11-09 11:58:05 -08:00
Tommy Li 424c79eee8
Add csi-s3 storage class as big data passing storage (#753)
* add csi-s3 as big data passing storage

* update storageclass to kfp-csi-s3 to avoid conflicts with datashim

* Update README.md
2021-11-02 05:08:00 -07:00
Tommy Li c87e70c638
Fix a typo for the openshift docs (#763) 2021-10-28 13:28:51 -07:00
Andrew Butler 27504c3089
Add tag to frontend code with changes for Tekton (#764)
Signed-off-by: Andrew-Butler <Andrew.Butler@ibm.com>
2021-10-28 10:10:26 -07:00
Michalina Kotwica 3e4f4d8d39
fix(sdk): "when" in some ParallelFor loops (#762)
* fix: "when" in some ParallelFor loops

* refactor: split long line
2021-10-27 09:17:28 -07:00
Tommy Li 99d28bbca1
Update trusted AI pipeline to have unique job names (#760) 2021-10-26 03:57:42 -07:00
Prashant Sharma 1eea71606b
feat(sdk): Add an API to return prepared workflow as python dicts. (#757)
* Add an API to return prepared workflow as python dicts.

* Added guide and improved the prepare workflow signature.
2021-10-22 10:20:03 -07:00
Jenna Ritten fc7b05e107
Update README.md typos. (#756)
* Update README.md typos.

Signed-off-by: Jenna Ritten <jritten@ibm.com>

* Update README.md typos.

Signed-off-by: Jenna Ritten <jritten@ibm.com>

* Update README.md

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>

* Apply suggestions from code review

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2021-10-22 10:17:03 -07:00
Yihong Wang 478df66d43
[test] use emptyDir for mysql and minio (#759)
for testing, no persistent storage is needed.
Use `emptyDir:{}` for minio and mysql

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-10-22 09:08:03 -07:00
Prashant Sharma 3f13201efe
Proposal: Adding ScrapCodes as approver. (#754)
Some of the recent PRs.

#750 #709 #706 #633 #636 #638 #646
2021-10-20 14:57:37 -07:00
Yihong Wang d3ff4acc46
[test] update build-image-dind.sh (#758)
Listing images shouldn't fail the build. Update
the script to ignore the return values of those
commands.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-10-20 10:12:14 -07:00
Prashant Sharma 0ba4831d5d
Remove noninlined files for those tests; they are not required. Added a CI check. (#755)
* Remove noninlined files for those tests; they are not required. Added a CI check.

* added the check to CI

* fixed the build

* fixed the build
2021-10-19 22:02:14 -07:00
Tommy Li f2fb08477c
Add artifact mid path to big data passing (#751)
* add artifact mid path to big data passing

* add artifact mid path to big data passing

* address comments
2021-10-19 04:04:33 -07:00
Tommy Li e2468832da
Update install readme with KF 1.4.0 deployment (#752)
* Update install readme with KF 1.4.0 deployment

* Update kfp_tekton_install.md
2021-10-15 10:42:37 -07:00
Prashant Sharma cb7abf1535
Support data passing pipeline. (#750) 2021-10-14 09:44:08 -07:00
Gang Pu 01ed23392b
Add doc for break feature for loop (#749)
* Add doc for break feature for loop

* Fix typos

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2021-10-14 09:12:08 -07:00
Shrey Shrivastava e9749dc2de
Install links update (#741)
* updated troubleshooting links in Installation readme

* updated install links

* Update guides/kfp_tekton_install.md

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>
2021-10-12 08:27:44 -07:00
Tommy Li eb44737195
Enable strip eof and archive logs as default (#747) 2021-10-08 23:06:43 -07:00
Tommy Li 04ce4b08fa
Relax kfp sdk dependencies (#746)
* relax kfp sdk dependencies

* update readme version reference
2021-10-08 17:49:43 -07:00
Yihong Wang c835d1470b
[test] fix variable overriding (#743)
Rename the variable in the script to avoid using the variable in
the build.properties. Also add more time for waiting for the
kfp-tekton deployment.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-10-04 12:40:17 -07:00
Michalina Kotwica d15acfd472
fix(sdk) orig_params being overwritten in custom-tasks (#742) 2021-10-04 09:33:50 -07:00
huixa b5c907d380
[Only review] Fix #733: the pipeline parameter consumed as a file (#740) 2021-09-29 15:36:10 -07:00
Michalina Kotwica a9684171c1
fix(sdk) sanitize paths in artifact_items. Fixes #738 (#739)
* sanitize paths in artifact_items

* update tests
2021-09-27 01:36:40 -07:00
Tommy Li 418528496b
Optimize kfp-tekton SDK compile time (#736)
* optimize kfp-tekton compile time

* fix lint
2021-09-22 18:42:35 -07:00
Tommy Li 043d734147
Fix(sdk): chain nested conditions to work on non-dependable condition tasks. (#732)
* chain nested conditions

* add missing license

* add dependency check to remove unnecessary when expression

* minor improvement to check for the exact condition rather than just the condition task
2021-09-15 15:08:55 -07:00
Yihong Wang 1e526f9141
[test] update sample for katib (#730)
Update pip packages and remove the usage of
`dsl.ContainerOp`.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-09-13 17:32:31 -07:00
Tommy Li 5daa744199
Remove Argo template UI reference to avoid UX conflicts with v2 compatible mode (#723)
* remove argo template reference

* address the suggestions
2021-09-01 12:17:31 -07:00
Christian Kadner 5d3c041586
Update SDK versions (#724)
* Reference to `kfp` SDK from 1.7.1 to 1.7.2 in sdk/python/README.md
* PyPI distro comment in sdk/python/setup.py

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-08-31 21:10:44 -07:00
Animesh Singh 264eb0240e
Update reference to 1.0 release (#722) 2021-08-31 11:15:44 -07:00
Tommy Li 8caa81e5a4
Update CI pipeline with new dependency flow (#721) 2021-08-30 23:09:03 -07:00
Tommy Li 1fc59e636b
Fix(sdk): fix KFP 1.7.2 SDK dependency (#719)
* skip license check because the Go vanity website is down

* regenerate sdk dependency

* skip copy from generated license

* add back license to verify
2021-08-30 19:06:03 -07:00
Tommy Li e291986106
Update KFP SDK version to 1.7.2, create KFP-TEKTON 1.0.0 release (#718)
* update KFP SDK version to 1.7.2, create KFP-TEKTON 1.0.0 release

* update standalone deployment readme
2021-08-30 17:23:03 -07:00
kfp-tekton-bot 0708a5699b
KFP 1.7.0 Rebase (#713)
* KFP 1.7.0-rc.4 Rebase

* Resolve API and Backend conflicts

* Apply 1.7.0 changes since rc.4

* Revert unwanted SDK changes

* Revert unwanted samples changes

* Revert unwanted components changes

* Fix 1.7.0 backend conflicts

* Revert unwanted SDK changes (2)

* Fix SDK bugs in backend/Dockerfile

* Fix frontend conflicts
2021-08-30 15:02:25 -07:00
Animesh Singh 17ddecab15
name consistency (#717)
Refer to the project as Kubeflow Pipelines on Tekton, plus rearrange a bit.
2021-08-27 16:15:22 -07:00
Christian Kadner 9938d93271
Update KFP Rebase guide (#716)
* Exclude files/folders that should not get rebased
* Add paragraph on finding undesired code changes
* Add reference to previous rebase PRs for guidance

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-08-26 17:14:05 -07:00
Tommy Li 225877d49e
fix(samples): Fix e2e-mnist sample for kfp-tekton (#714)
* fix e2e-mnist sample for kfp-tekton

* address comments

* update comment for adding auth token
2021-08-25 17:27:04 -07:00
Christian Kadner 1e57f41886
Add KFP Rebase Guide (#712)
* Add KFP Rebase Guide

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Remove Troubleshooting section

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-08-25 12:49:46 -07:00
Prashant Sharma cdbb588c52
feat(sdk): Update SDK to support custom task timeout on Tekton 0.27 (#709) 2021-08-23 17:52:39 -07:00
Tommy Li 386ad234a4
Remove containerOp for pipelineparam and raw input samples (#708)
* remove containerOp for pipelineparam and raw input samples

* fix-typo
2021-08-23 14:13:39 -07:00
Tommy Li 9fbccd6276
Fix(sdk): Tekton copy result step sanitization (#711) 2021-08-23 13:51:39 -07:00
Prashant Sharma 3cb7187d8c
feat(pipelineloop): Upgrade Tekton to 0.27 for pipelineloop controller. (#703) (#706)
* Upgrade Tekton to 0.27 for pipelineloop controller. (#703)

* Upgrade Tekton to 0.27 for pipelineloop controller. (#703)

* Update pipelinelooprun.go
2021-08-23 10:43:57 -07:00
Tommy Li b5fe66cd9c
Make results store in multiple steps if needed (#702)
* make results store in multiple steps if needed

* add warning messages on oversized parameter

* regenerate tests

* fix copy-results format

* optimize the bin packing problem

* add docs and warning test cases
2021-08-20 10:55:54 -07:00
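The 'bin packing' bullet refers to distributing oversized results across extra steps. A greedy first-fit sketch of that idea; the per-step budget below is illustrative, not Tekton's actual result-size limit.

import logging

STEP_RESULT_BUDGET = 4096  # illustrative per-step byte budget

def pack_results(results, budget=STEP_RESULT_BUDGET):
    # Pack result values into as few step-sized bins as possible, largest first.
    bins = []  # each bin: {'used': int, 'items': dict}
    for name, value in sorted(results.items(), key=lambda kv: -len(kv[1])):
        if len(value) > budget:
            logging.warning('result %r is oversized even for a dedicated step', name)
        for b in bins:
            if b['used'] + len(value) <= budget:
                b['items'][name] = value
                b['used'] += len(value)
                break
        else:
            bins.append({'used': len(value), 'items': {name: value}})
    return [b['items'] for b in bins]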
David Wang 0f6fd191b0
define cancel patch as global var and initialize it at package init (#707) 2021-08-20 00:36:54 -07:00
Tommy Li 1482b8e2ad
Move kfp sdk dependency to 1.7.1 (#705) 2021-08-19 11:16:54 -07:00
Tommy Li 1c7e70eb87
Upgrade Tekton to 0.27 (#700) 2021-08-16 16:03:41 -07:00
Tommy Li fba9ee2241
Update kfp sdk dependency to 1.7 (#699) 2021-08-13 11:53:04 -07:00
Gang Pu 0ff4a8d08b
Fit unittest for loop (#698) 2021-08-13 09:08:55 -07:00
Tommy Li 87d12ce04e
Fix e2e test case (#696)
* fix e2e test case

* fix e2e test case
2021-08-12 15:10:54 -07:00
Tommy Li 8d8d2b3c6d
Migrate volume and loop unit tests to component.yaml and verify with v2 (#692)
* migrate unit tests to component.yaml and verify with v2

* update readme to remove volumesnapshot

* address comments
2021-08-10 13:21:25 -07:00
Yihong Wang 9d4643bd66
remove ContainerOp from test cases - part 3 (#693)
eliminate the usage of ContainerOp in test cases and
update the test cases accordingly, including filenames starting
with `o` through `u`

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-08-10 10:23:25 -07:00
Yihong Wang 6c7c5d24fe
remove ContainerOp from test cases - part 2 (#691)
eliminate the usage of ContainerOp in test cases and
update the test cases accordingly.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-08-09 17:10:27 -07:00
Tommy Li 37d0021580
Add kfp name and id example (#690)
* add kfp name and id example

* address comments

* fix minor wording
2021-08-06 10:23:43 -07:00
Yihong Wang d463f16450
remove ContainerOp from test cases - part 1 (#685)
eliminate the usage of ContainerOp in test cases and
update the test cases accordingly.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-08-05 11:58:42 -07:00
Tommy Li 58b01f9281
Enhance unit tests with local Tekton pipeline spec validation (#689) 2021-08-04 18:21:18 -07:00
Michalina Kotwica 982ad5565d
fix(sdk): add test for taskRef-vs-taskSpec params (#688)
* add test for taskRef-vs-taskSpec params

* fix: taskSpec accepting params

* add license headers

* m: no spaces around "buzz"

* fix: test file custom_task_spec.yaml

* m: follow links on newlines

* fix: big_data_passing test result yamls

* fix: yamls of other tests

* factor out conditions to process params

* fix big data rewrite for regular tasks
2021-08-03 15:01:41 -07:00
Tommy Li 186a9e301f
Update code to limit loop resource group name (#686)
* update code to limit loop resource group name

* add license
2021-08-02 19:57:11 -07:00
Christian Kadner ba1a9d52dd
Update Argo link (#684)
Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-07-30 12:59:36 -07:00
Tommy Li 008855a3f0
Update custom task to work with v2 compiler (#680)
* update custom task to work with v2 compiler

* fix broken links
2021-07-28 20:22:19 -07:00
Gang Pu 18c9cc8bcc
Change to use component to replace containerOp for any sequencer (#675)
* Change to use component to replace containerOp for any sequencer

* Remove context input and change results path

* 1: Update to use component rather than containerOp
2: Fix lint/license issue
2021-07-28 19:39:19 -07:00
Gang Pu afc8460ef1
Fix broken link (#681) 2021-07-28 08:51:45 -07:00
Andrew Butler 5aa55a5eee
Add check for missing pod in cache (#679) 2021-07-26 16:37:11 -07:00
Tommy Li 2ddcec757e
Convert loop and lightweight example to component yaml (#677)
* convert loop and lightweight example to component yaml

* Update python values to float
2021-07-26 16:36:11 -07:00
Yihong Wang d9496dfc6a
remove ContainerOp in some samples (#676)
update flip-coin, nested-pipeline and trusted-ai

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-07-26 13:04:11 -07:00
Tommy Li df63b7853a
Fix merging issue from #512 (#674) 2021-07-22 12:42:06 -07:00
Tommy Li a90d7a348c
Convert user custom task from containerOp to component yaml (#671) 2021-07-21 18:56:25 -07:00
Tommy Li e1b2d20480
Update sdk and release tags to 0.9.0 (#667)
* update sdk and release tags to 0.9.0

* regenerate tests

* update kubeclient source version

* generate new deployment manifests
2021-07-21 16:33:25 -07:00
Tommy Li d9b8539b0d
feature(backend): Allow KFP apiserver to create recursion pipelineLoop CRs for the users (#512)
* add auto apply custom task CRs before pipelineRun creation

* fix lint

* store loop resource in ordered dict

* Update kfp-admin-guide.md

* sorted pipeline params to avoid randomness

* sorted pipeline params to avoid randomness

* regenerate tests

* convert template string from yaml to json

* regenerate tests

* clean up unnecessary helper function

* resolve merge conflicts

* resolve merge conflicts

* remove unnecessary go mod package

* only opt-in to embed the recursion loop annotations

* only opt-in to embed the recursion loop annotations

* enable opt-in only via package variable

* add example for how to inline recursive loops

* resolve conflicts

* regenerate tests

* make auto-generating and applying the recursive pipelineloop annotations the default

* move parameter sorting to the end

* add parameter sorting for recursion loops

* sort recursive task parameters

* add more details on how to opt out of auto-applying custom resources
2021-07-21 00:26:50 -07:00
Prashant Sharma f1f1a1cae6
fix(pipelineloop): sort params to fix flakiness in tests. (#669) 2021-07-19 09:18:17 -07:00
Tommy Li 1a29e1dedd
Regenerate tests for #665 (#666) 2021-07-14 19:24:50 -07:00
Tommy Li 42ee2fa20f
Update loop instructions to use inline spec (#658) 2021-07-14 19:21:51 -07:00
Gang Pu 592b5d2205
Fix the yaml dump issue of boolean value (#665)
* Fix the yaml dump issue of boolean value

* update conditions_with_global_params.py file
2021-07-12 22:12:17 -07:00
Prashant Sharma 9d79f8438c
feat(sdk): Add pipelineloop validation for testdata yamls. (#660) 2021-07-08 10:53:49 -07:00
Prashant Sharma 0ad68be3b3
fix(pipelineloop): restored pipelineloop conditions test. (#663) 2021-07-08 10:07:49 -07:00
Prashant Sharma bd2f5f669e
fix(sdk): sort params to fix flaky tests. (#662) 2021-07-08 06:35:49 -07:00
Tommy Li d80fbedada
Make default terminate API configurable to support graceful termination (#631)
* make terminate API status configurable

* make terminate API status configurable
2021-07-06 19:44:21 -07:00
Prashant Sharma da32ba6404
Add Prashant(scrapcodes) to reviewers (#655) 2021-07-06 12:34:20 -07:00
Michalina Kotwica 95a961c2d1
fix(sdk): loop argument jsonified, not stringified (#657)
Signed-off-by: Michalina Kotwica <michalina.kotwica.ibm@gmail.com>
2021-07-06 09:32:47 -07:00
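The one-line distinction behind the fix: Python's str() of a list is not valid JSON, so a loop argument has to be serialized with json.dumps for the controller to parse it.

import json

items = [{'a': 1}, {'a': 2}]
print(json.dumps(items))  # '[{"a": 1}, {"a": 2}]' -> valid JSON (the fix)
print(str(items))         # "[{'a': 1}, {'a': 2}]" -> single quotes, not JSON (the bug)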
Jin Chi He 77963c3cea
Enhance Any Sequencer to handle cases where some tasks are skipped (#653) 2021-07-05 01:25:51 -07:00
Tommy Li 54e8aaf59f
Fix argo redirect link in the docs (#654) 2021-07-02 17:32:49 -07:00
Prashant Sharma dbd92cb7ec
feat(sdk): inline user defined custom tasks (#636) 2021-07-02 10:26:11 -07:00
Prashant Sharma 3611c44b81
Fix: go tests for pipelineLoop controller. (#646)
* Fix: go tests for pipelineLoop controller.

* Skip adding timeout, if custom task.
2021-07-02 09:49:10 -07:00
Gang Pu 791fe5304a
Fix the issue when loop/graph inside a graph (#650) 2021-07-01 17:09:10 -07:00
Tommy Li 2cfc78da65
Remove runid labels from pipelineloop's child pipelinerun (#648) 2021-06-30 19:44:23 -07:00
Animesh Singh 3163801e6b
add mlx-reference (#649) 2021-06-30 18:00:42 -07:00
Prashant Sharma 64d4324f2b
feat(sdk): Sdk ParallelFor task spec inline (#633)
* TODO1: PipelineLoop inlined including nested loops and added tests.

* Supported recursive and parallel tasks inlining.

    For recursive tasks, we simply skip them.
    For Parallel tasks, relevant tests were added.

* Run all tests against inlined taskSpec and normal taskRefs

* Added CLI Flag disable/enable spec inlining.
2021-06-29 19:07:36 -07:00
Gang Pu 2fc23fa87d
Add Gang Pu as approver (#645) 2021-06-29 11:46:35 -07:00
Jin Chi He 8bfe60d0f1
Add jinchihe in the OWNER list (#643) 2021-06-29 11:44:36 -07:00
Tommy Li 6d16a95e89
Update manifests and standalone deployment to 0.9.0-rc0 (#635) 2021-06-28 16:20:45 -07:00
Prashant Sharma 12acff8af5
fix: PipelineLoop nested validation logic fixed. (#638)
* PipelineLoop nested validation logic fixed.

It was missing enablement of feature-flag enable-custom-task.
Also added nested task validation.

* added an example.

* add feature flag to reconciler context

* Added test, passing

Co-authored-by: tomcli <tommy.chaoping.li@ibm.com>
2021-06-28 10:12:15 -07:00
Jin Chi He da87237b4d
Support custom image for any Seq (#642) 2021-06-28 02:19:14 -07:00
Jin Chi He ad25492212
Enhance Any Sequencer (#641) 2021-06-27 23:46:14 -07:00
Tommy Li f76951e34f
Update Github Action PR trigger (#632) 2021-06-22 12:28:31 -07:00
Tommy Li aae4671e7b
Fix nested recursion with condition custom task and parameter mapping (#625)
* fix nested recursion with condition custom task and parameter mapping

* fix alpine commands and add licenses
2021-06-18 14:56:14 -07:00
Prashant Sharma 078c7428d2
fix(pipelineLoop): Update to latest tekton release v0.25.0. (#623) 2021-06-18 13:54:14 -07:00
Tommy Li 67c6d43d13
Migrate Travis tests to Github Actions (#619)
* add init github actions

* Remove old comments

* Update test_kfp_samples.sh

* add condition for venv
2021-06-18 12:00:14 -07:00
Tommy Li 15918434c4
Make kustomize patch backward compatible with v3.2 (#629) 2021-06-17 11:06:14 -07:00
Jin Chi He 39ba87c832
Add original PipelineRun name (#628) 2021-06-17 01:51:28 -07:00
Tommy Li f444a31bea
Update tekton client and deployment to 0.25.0 (#626) 2021-06-16 20:22:28 -07:00
Prashant Sharma 87dd3a728d
fix(scripts): deploy-ibm-vpc.sh public gateway detection. (#622)
A VPC may have more than one public gateway, and they may be in
different cluster zones. So now filter on cluster_zone and
pick the first public gateway.
2021-06-16 09:12:10 -07:00
Abiola Asojo 22354b364a
doc: Update README.md typos (#616)
* Update README.md

* Update README.md
2021-06-14 15:33:37 -07:00
Prashant Sharma b507dca6d6
feat(pipelineLoop): Create a validation CLI for commonly used Tekton custom task CRDs (#614) 2021-06-14 10:16:37 -07:00
Christian Kadner 5a3ef4d6fc
Upgrade Python SDK to kfp==1.6.3 (#617)
Resolves #595

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-06-14 10:03:37 -07:00
Yihong Wang be2f5b97e7
[test] Add pipeline-loop flag (#613)
Add a flag to turn on/off the pipeline-loop e2e test case.
By default, the pipeline-loop test case is turned on. Use
`skip-pipeline-loop:1` to turn it off.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-06-08 06:51:46 -07:00
Tommy Li af9f03d000
fix(sdk): Fix nested recursion runafter and param mapping; (#609) 2021-06-03 20:07:37 -07:00
Prashant Sharma 052726a2f0
feat(pipelineloop) : Support latest tektoncd pipeline version. (#602)
* Support latest tektoncd pipeline version.

* update readme for support latest tektoncd pipeline version.
2021-06-02 08:13:02 -07:00
Gang Pu 7c42346afa
Change status for successful from "Succeeded" to "Completed" (#604) 2021-06-01 09:10:04 -07:00
Gang Pu a033f33b30
Fix and verify nested recursive workflow (#600)
* Fix and verify nested recursive workflow

* Fix unit_test error

* Update compiler.py

* Try to fix travis error

* Refresh yaml

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2021-06-01 00:25:04 -07:00
Gang Pu 9a83eb9a43
Support exposing status for any sequencer (backend) (#597) 2021-05-24 19:29:08 -07:00
Tommy Li 5b29a7f1b5
Add Custom Task Docs and Usage (#588)
* add initial custom task docs

* remove regex

* add custom task condition benefits

* add toc

* Apply suggestions from code review

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>

* add hyperlinks

* add descriptions for custom tasks

* Update advanced_user_guide.md

* Update README.md

* Update advanced_user_guide.md

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>
2021-05-24 13:15:08 -07:00
Gang Pu c05fc4bbeb
Support exposing status for any sequencer (DSL side) (#598) 2021-05-24 03:55:32 -07:00
Gang Pu 0ebcd7e1b3
Permit pipeline-runner to operate on runs (#596) 2021-05-22 14:36:30 -07:00
Tommy Li 73d5445482
Add support to generate custom task with custom task cr (#593) 2021-05-22 02:53:30 -07:00
Gang Pu 95557c6d36
Support parallelism feature on DSL side (#592) 2021-05-20 00:51:46 -07:00
Gang Pu 7a6423edbc
Add Parallelism support on backend. (#589)
- Parallelism is defaulted to 1
- if Parallelism is bigger than the total number of iterations, then start all
- normally start the same number of pipelineruns as the Parallelism value
2021-05-18 22:52:42 -07:00
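The three rules above reduce to a small helper; a plain-Python sketch of the backend behavior described in the commit:

def pipelineruns_to_start(parallelism, total_iterations):
    if parallelism <= 0:
        parallelism = 1  # Parallelism is defaulted to 1
    # If Parallelism exceeds the total iteration count, start all iterations;
    # otherwise start exactly Parallelism pipelineruns.
    return min(parallelism, total_iterations)

assert pipelineruns_to_start(0, 5) == 1
assert pipelineruns_to_start(10, 4) == 4
assert pipelineruns_to_start(3, 9) == 3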
Tommy Li 1bf5aa5d13
Fix custom task condition I/O behaviors on recursion and for loop (#587)
* fix custom task I/O behaviors on recursion and while loop

* Fix comment typo
2021-05-15 02:00:29 -07:00
Yihong Wang 4ae035e7f6
[test] Add trusted-ai into e2e testing (#586)
Add trusted-ai into the e2e testing and a flag (extra-test-cases)
to enable it. Also refactor the e2e-test task to support more test cases.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-05-13 10:47:27 -07:00
Jin Chi He 8c22f682dc
Enhance Any Sequencer to support watching run (#583) 2021-05-07 06:31:06 -07:00
Tommy Li 4c09f46de6
Fix /logs api and add tests (#582) 2021-05-06 16:33:06 -07:00
Tommy Li c1b4d84809
Add instructions on upgrade kfp-tekton multi-users and fix new multi-users permission (#581)
* add instructions on upgrading kfp-tekton and fix multi-user permissions

* Apply suggestions from code review

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>

* Fix markdown links after suggestions

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>
2021-05-06 14:06:06 -07:00
Tommy Li 633848134d
Sanitize Tekton result parameters when passing to custom task inputs (#579) 2021-05-06 12:07:06 -07:00
Andrew Butler de00ccadef
Add taskSpec check to account for custom tasks without a taskSpec (#576) 2021-05-04 16:27:05 -07:00
Andrew Butler 8b289ceba4
fix(ui, backend) Add pipelineRun to cache to assist in finding artifact data (#574)
* Add pipelineRun to cache for finding artifact data

* Remove extra container and account for custom tasks in frontend

* Apply suggestions from code review

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2021-05-03 13:01:04 -07:00
Prashant Sharma 26eaee2eb6
feat(script): Deploy kubeflow to IBM Cloud IKS vpc-gen2, using this script. (#500)
* Deploy to IBM Cloud IKS vpc-gen2, using this script.

* added user guide

* Added kf deploy.

* Fixed bugs while testing.

* Based on feedback, fixed the script. Improved documentation to clarify the behaviour.

* Changed the handling of config file to be per cluster/vpc.

* Added an option to choose, whether to deploy kubeflow or not.

* Improved the script to wait for the cluster delete and improved UX.
Updated README.md accordingly.

* Suppressing errors for cluster resources delete.

* Review feedback.

* Review feedback.
2021-04-30 10:01:31 -07:00
Tommy Li 8db274ada2
Update release manifests and openshift docs (#575)
* update release manifests and openshift docs

* update release manifests and openshift docs

* add optional kustomization.yaml
2021-04-29 15:46:30 -07:00
Tommy Li 76e249d4d9
Enhance error messages for resource op and artifact tracking (#572)
* enhance error messages

* update logs

* update logs
2021-04-29 14:53:30 -07:00
Christian Kadner 3179556504
Add badges to SDK Readme (#573) 2021-04-29 14:42:30 -07:00
Christian Kadner 649152cddd
Regenerate E2E test logs for release 0.8.0 (#566)
* Regenerate E2E test logs for release 0.8.0

* Regenerate "golden" log files

* Regenerate "golden" logs after PR #567

* Update list of ignored tests

* Add cond_recur.yaml to list of ignored tests

* Regenerate E2E logs after updating "golden" YAML files of unit tests

* Regenerate E2E logs after enabling auto-strip EOF

* Regenerate "golden" YAML for unit tests

* Rename loop-static CRDs

* Regenerate E2E logs, enable EOF stripping only for E2E test
2021-04-29 13:00:30 -07:00
Yihong Wang 6672261f76
[test] move kubeclient test case (#571)
move kubeclient test case to e2e toolchain pipelines
2021-04-28 12:34:29 -07:00
Yihong Wang bfa869c179
[test] add more test cases to toolchain pipelines (#568)
Add pipelineloop-controller, pipelineloop-webhook and
anysequencer to the toolchain pipelines
2021-04-26 10:20:46 -07:00
Tommy Li a9d49bca81
Clean up and revert cache image env on manifests (#567) 2021-04-23 16:18:44 -07:00
Gang Pu 23e4acc79c
Apply to be reviewer (#565) 2021-04-23 11:49:43 -07:00
Gang Pu 98b356cbb1
Fix the issue when there's a conditional loop (#562) 2021-04-23 09:44:43 -07:00
Andrew Butler 1d5582fae7
(ui) Update any-sequencer dependency parsing (#564)
* Update any-sequencer dependency parsing

* Update formatting

* Add checks for tasks that have not run yet
2021-04-23 09:36:43 -07:00
Tommy Li 8f2d07b5e3
Fix broken Kubeflow doc links to map with the KF 1.3 website (#563) 2021-04-22 12:24:42 -07:00
Gang Pu adc87507ee
Fix multiple any-sequencer issues (#557)
2: Fix the issue when the task list is not provided.
3: Fix the issue when there are multiple results provided
4: Update the document
2021-04-22 10:29:43 -07:00
Tommy Li a68b6c172e
Create release 0.8.0 standalone install (#559) 2021-04-22 02:39:42 -07:00
Yihong Wang cf5910259c
Update katib sample (#560)
Update the notebook to use newer packages and image.
Also add instructions for multi-user environment.
2021-04-21 17:02:42 -07:00
kfp-tekton-bot 1e91649958
KFP 1.5.0 Rebase (#555)
* Merge KFP changes between 1.5.0-rc.2 and 1.5.0

Resolves #554

* Resolve backend and manifest conflicts

* Resolve frontend conflicts
2021-04-21 12:06:42 -07:00
Tommy Li e68e03e6c3
Fix recursive loop bug with no sub_group.ops (#556) 2021-04-21 06:15:41 -07:00
Tommy Li 103ae3b79b
Fix long param compile errors (#549) 2021-04-20 10:13:41 -07:00
Gang Pu 8bbcd26a53
Adjust the any sequencer example to make it more reasonable (#550) 2021-04-20 09:29:40 -07:00
Gang Pu ae56385ab2
Support the simple condition for any sequencer (#539) 2021-04-19 06:32:16 -07:00
Tommy Li 0f1147e55e
Support Tekton PipelineRunStopping status (#545) 2021-04-16 08:30:14 -07:00
Andrew Butler bf46e8edd1
(ui) Update UI to render custom tasks (#544)
* Add parsing for custom tasks

* Account for empty custom task runs and add formatting

* Add check for pipeline entirely made of custom tasks
2021-04-15 15:57:13 -07:00
Tommy Li e3c3013891
Fix viper flags and labels/annotations injections on custom task (#537) 2021-04-15 14:44:13 -07:00
Peng Li 7dd6e00374
Add extra label to sub pr (#543) 2021-04-15 05:53:12 -07:00
Michalina Kotwica b370cbd1bc
fix(sdk): fix artifacts of ops with long names (#541)
* add test for artifacts of ops with long names

* add test on correct artifact_items

* fix: stepTemplate generated

* fix: artifact_items name collision

* refactor: mount_path to a separate variable

* add license to the new test file

* m: two free lines after imports in testcase

* m: other linting
2021-04-14 12:29:12 -07:00
Yihong Wang 24c2e5bcac
[test] Remove tekton installation task (#538)
The kfp-tekton install manifest already includes
Tekton; therefore, remove the Tekton deploy/undeploy tasks.
Also add tasks to build the cache server and frontend.
2021-04-13 16:19:11 -07:00
Tommy Li 6adfcc64f3
feature(sdk): Optimize unnecessary condition task when using User definable CEL Custom task (#478)
* add cel custom task poc

* apply suggestions with minor fixes

* add initial version of pluggable cel template

* fix lint

* update staging code for converting containerOp to custom task

* update staging code for converting containerOp to custom task

* remove duplicate cel conditions

* update flip-coin example to use custom task

* update flip-coin example to use custom task

* remove dsl dependencies;

* add POC comments

* fix lint

* fix self-defined CEL custom task mapping

* move custom task condition example to a separate folder

* fix tests

* make the new custom task spec and CEL condition feature as opt-in

* update instructions and generalize custom task images

* only let custom task result comparison skip the condition container to provide backward compatibility

* only let custom task result comparison skip the condition container to provide backward compatibility

* update example

* update tests

* fix comment
2021-04-13 09:43:11 -07:00
3349 changed files with 577884 additions and 306152 deletions

.cloudbuild.yaml (deleted file)

@@ -1,264 +0,0 @@
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test before submit:
# gcloud builds submit --config=.cloudbuild.yaml --substitutions=COMMIT_SHA="$(git rev-parse HEAD)" --project=ml-pipeline-test
steps:
# Build the Python SDK
- name: 'python:3-alpine'
entrypoint: '/bin/sh'
args: ['-c', 'cd /workspace/sdk/python/; python3 setup.py sdist --format=gztar; cp dist/*.tar.gz /workspace/kfp.tar.gz']
id: 'preparePythonSDK'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp.tar.gz', 'gs://$PROJECT_ID/builds/$COMMIT_SHA/kfp.tar.gz']
id: 'copyPythonSDK'
waitFor: ['preparePythonSDK']
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp.tar.gz', 'gs://$PROJECT_ID/builds/latest/kfp.tar.gz']
id: 'copyPythonSDKToLatest'
waitFor: ['preparePythonSDK']
# Build the Python Component SDK
- name: 'python:2-alpine'
entrypoint: '/bin/sh'
args: ['-c', 'cd /workspace/components/gcp/container/component_sdk/python;python setup.py sdist --format=gztar; cp dist/*.tar.gz /workspace/kfp-component.tar.gz']
id: 'preparePythonComponentSDK'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp-component.tar.gz', 'gs://$PROJECT_ID/builds/$COMMIT_SHA/kfp-component.tar.gz']
id: 'copyPythonComponentSDK'
waitFor: ['preparePythonComponentSDK']
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp-component.tar.gz', 'gs://$PROJECT_ID/builds/latest/kfp-component.tar.gz']
id: 'copyPythonComponentSDKToLatest'
waitFor: ['preparePythonComponentSDK']
# Build the pipeline system images
- name: 'gcr.io/cloud-builders/docker'
entrypoint: /bin/bash
args:
- -ceux
- |
sed -i -e "s/ARG DATE/ENV DATE \"$(date -u)\"/" /workspace/frontend/Dockerfile
docker build -t gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA \
--build-arg COMMIT_HASH=$COMMIT_SHA \
--build-arg TAG_NAME="$(cat /workspace/VERSION)" \
-f /workspace/frontend/Dockerfile \
/workspace
id: 'buildFrontend'
waitFor: ['-']
- name: 'gcr.io/cloud-builders/docker'
entrypoint: /bin/bash
args:
- -ceux
- |
docker build -t gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA \
--build-arg COMMIT_SHA=$COMMIT_SHA \
--build-arg TAG_NAME="$(cat /workspace/VERSION)" \
-f /workspace/backend/Dockerfile /workspace
id: 'buildApiServer'
waitFor: ['copyPythonSDK']
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.scheduledworkflow', '/workspace']
id: 'buildScheduledWorkflow'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.viewercontroller', '/workspace']
id: 'buildViewerCrdController'
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.persistenceagent', '/workspace']
id: 'buildPersistenceAgent'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', '-f',
'/workspace/proxy/Dockerfile', '/workspace/proxy']
id: 'buildInverseProxyAgent'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.visualization', '/workspace']
id: 'buildVisualizationServer'
waitFor: ["-"]
- id: 'buildMetadataWriter'
name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/backend/metadata_writer/Dockerfile', '/workspace']
waitFor: ["-"]
- id: 'buildCacheServer'
name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.cacheserver', '/workspace']
waitFor: ["-"]
- id: 'buildCacheDeployer'
name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/backend/src/cache/deployer/Dockerfile', '/workspace']
waitFor: ["-"]
# Build marketplace deployer
- id: 'buildMarketplaceDeployer'
name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/manifests/gcp_marketplace/deployer/Dockerfile', '/workspace/manifests/gcp_marketplace']
waitFor: ["-"]
# Build the Kubeflow-based pipeline component images
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-deployer:$COMMIT_SHA',
'/workspace/components/kubeflow/deployer']
id: 'buildDeployer'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/kubeflow/launcher && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildLauncher'
waitFor: ["-"]
- id: 'buildCpuTrainer'
name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/kubeflow/dnntrainer && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA -l ml-pipeline-kubeflow-tf-trainer -b 2.3.0']
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/kubeflow/dnntrainer && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA -l ml-pipeline-kubeflow-tf-trainer-gpu -b 2.3.0-gpu']
id: 'buildGpuTrainer'
waitFor: ["-"]
# Build the Generic GCP component image
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/gcp/container/ && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildGcpGenericComponent'
waitFor: ["-"]
# Build the local pipeline component images
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/local/confusion_matrix && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildConfusionMatrix'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/local/roc && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildROC'
waitFor: ["-"]
# Build third_party images
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', '-f',
'/workspace/third_party/metadata_envoy/Dockerfile', '/workspace']
id: 'buildMetadataEnvoy'
# Pull third_party images
# ! Sync to the same MLMD version:
# * backend/metadata_writer/requirements.in and requirements.txt
# * @kubeflow/frontend/src/mlmd/generated
# * .cloudbuild.yaml and .release.cloudbuild.yaml
# * manifests/kustomize/base/metadata/base/metadata-grpc-deployment.yaml
# * test/tag_for_hosted.sh
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/tfx-oss-public/ml_metadata_store_server:0.25.1']
id: 'pullMetadataServer'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance']
id: 'pullMinio'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/mysql:5.7']
id: 'pullMysql'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/cloudsql-docker/gce-proxy:1.14']
id: 'pullCloudsqlProxy'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/argoexec:v2.12.9-license-compliance']
id: 'pullArgoExecutor'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/workflow-controller:v2.12.9-license-compliance']
id: 'pullArgoWorkflowController'
# Tag for Hosted - SemVersion to Major.Minor parsing
- id: "parseMajorMinorVersion"
waitFor: ["-"]
name: gcr.io/cloud-builders/docker
entrypoint: /bin/bash
args:
- -ceux
- |
# Parse major minor version and save to a file for reusing in other steps.
# e.g. 1.0.0-rc.1 and 1.0.1 are parsed as 1.0
cat /workspace/VERSION | sed -e "s#\([0-9]\+[.][0-9]\+\)[.].*#\1#" > /workspace/mm.ver
# Tag for Hosted - Tag to hosted folder with MKP friendly name
- id: 'tagForHosted'
waitFor: ['parseMajorMinorVersion', 'buildFrontend', 'buildApiServer', 'buildScheduledWorkflow',
'buildViewerCrdController', 'buildPersistenceAgent', 'buildInverseProxyAgent', 'buildVisualizationServer',
'buildMetadataWriter', 'buildCacheServer', 'buildCacheDeployer', 'buildMetadataEnvoy',
'buildMarketplaceDeployer', 'pullMetadataServer', 'pullMinio', 'pullMysql', 'pullCloudsqlProxy',
'pullArgoExecutor', 'pullArgoWorkflowController']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
/workspace/test/tag_for_hosted.sh $PROJECT_ID $COMMIT_SHA $(cat /workspace/VERSION) $(cat /workspace/mm.ver)
images:
# Images for the pipeline system itself
- 'gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA'
# Images for Marketplace
- 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA'
# Images for the Kubeflow-based pipeline components
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-deployer:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tf-trainer:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tf-trainer-gpu:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tfjob:$COMMIT_SHA'
# Images for the GCP generic pipeline components
- 'gcr.io/$PROJECT_ID/ml-pipeline-gcp:$COMMIT_SHA'
# Images for the local components
- 'gcr.io/$PROJECT_ID/ml-pipeline-local-confusion-matrix:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-local-roc:$COMMIT_SHA'
# Images for the third_party components
- 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA'
timeout: '3600s'
options:
diskSizeGb: 300
machineType: 'N1_HIGHCPU_8'
tags:
- build-each-commit
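
The parseMajorMinorVersion step above can be sanity-checked outside Cloud Build; a minimal sketch, assuming GNU sed (the version strings are illustrative):

# Feed sample versions through the same sed expression the build step uses.
for v in 1.0.0-rc.1 1.0.1 2.11.3; do
  echo "$v" | sed -e "s#\([0-9]\+[.][0-9]\+\)[.].*#\1#"
done
# Prints 1.0, 1.0 and 2.11: the major.minor prefix consumed by tag_for_hosted.sh.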


@@ -1,4 +1,6 @@
.git
bower_components
dist
node_modules
**/node_modules
backend/build
v2/build
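
To confirm what these patterns exclude from the build context, a throwaway probe image works; a sketch only, assuming a local docker daemon:

# Build with the repository as context and list what survived .dockerignore.
docker build -f - . <<'EOF'
FROM busybox
COPY . /ctx
RUN find /ctx -maxdepth 2
EOF
# Any nested node_modules directory should be absent thanks to **/node_modules.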


@@ -23,9 +23,3 @@ Resolves #
* `test: fix CI failure. Part of #1234`
Use `part of` to indicate that a PR is working on an issue, but shouldn't close the issue when merged.
-->
- [ ] Do you want this pull request (PR) cherry-picked into the current release branch?
[Learn more about cherry-picking updates into the release branch](https://github.com/kubeflow/pipelines/blob/master/RELEASE.md#cherry-picking-pull-requests-to-release-branch).
<!--
**(Recommended.)** Ask the PR approver to add the `cherrypick-approved` label to this PR. The release manager adds this PR to the release branch in a batch update before release.
-->

.github/semantic.yml

@@ -0,0 +1,27 @@
# Configuration for https://github.com/zeke/semantic-pull-requests
#
# Kubeflow Pipelines PR title convention is documented in
# https://github.com/kubeflow/pipelines/blob/master/CONTRIBUTING.md#pull-request-title-convention
# Always validate the PR title, and ignore the commits.
titleOnly: true
# TODO: define a list of valid scopes.
# scopes:
# - scope1
# - scope2
# ...
# By default types specified in commitizen/conventional-commit-types is used.
# See: https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json
types:
- feat
- fix
- docs
- style
- refactor
- perf
- test
- build
- chore
- revert
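
For illustration (hypothetical titles, not taken from the repo): with titleOnly enabled, a PR titled "fix(backend): handle empty run id" passes because "fix" appears in the types list above, while "Handle empty run id" is rejected since it carries no conventional-commit type prefix.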

.github/workflows/codeql.yml

@@ -0,0 +1,72 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
schedule:
# Every Friday at 19:39
- cron: '39 19 * * 5'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go', 'javascript', 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Use only 'java' to analyze code written in Java, Kotlin or both
# Use only 'javascript' to analyze code written in JavaScript, TypeScript or both
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
steps:
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# If the Autobuild fails above, remove it and uncomment the following three lines.
# and modify them (or add more) to build your code. If your project needs custom steps, refer to the EXAMPLE below for guidance.
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
with:
category: "/language:${{matrix.language}}"
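
The same analysis can be approximated locally for one matrix cell; a sketch only, assuming the CodeQL CLI and its standard query packs are installed (database and output names are illustrative):

# Build a CodeQL database for the Python sources, then analyze it.
codeql database create kfp-codeql-db --language=python --source-root=.
codeql database analyze kfp-codeql-db codeql/python-queries \
  --format=sarif-latest --output=codeql-python.sarif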


@@ -0,0 +1,182 @@
name: KFP Tekton Unit Tests
on:
push:
branches: [master]
# Run tests for any PRs.
pull_request:
env:
GITHUB_ACTION: "true"
jobs:
python-unittest:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: python -m pip install -e sdk/python
- name: Unit Tests
run: VENV=$VIRTUAL_ENV make ci_unit_test
validate-testdata:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: 1.19.x
- uses: actions/checkout@v4
- name: Set up Python 3.8
uses: actions/setup-python@v4
with:
python-version: 3.8
- name: Install dependencies
run: python -m pip install -e sdk/python
- name: "Generate testdata yaml files."
run: VENV=$VIRTUAL_ENV make unit_test GENERATE_GOLDEN_YAML=True
- name: "Test only required noninled yaml files are generated."
run: make validate-generated-test-yamls
- name: "Tekton validation for testdata."
run: make validate-testdata
- name: "Validation for examples data."
run: make validate-pipelineloop-examples
progress-report:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.8
uses: actions/setup-python@v4
with:
python-version: 3.8
- name: Install dependencies
run: python -m pip install -e sdk/python
- name: "Progress report on compiling KFP DSL test scripts"
run: VENV=$VIRTUAL_ENV make report
python-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.8
uses: actions/setup-python@v4
with:
python-version: 3.8
- name: "Lint Python code with flake8"
run: VENV=$VIRTUAL_ENV make lint
check-license:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: "Verify source files contain the license header"
run: make check_license
check-mdtoc:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: "Verify Markdown files have current table of contents"
run: make check_mdtoc
check-doc-links:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: "Verify Markdown files have valid links"
run: make check_doc_links
run-go-unittests:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v4
- name: "run go unit tests"
run: make run-go-unittests
- name: "Verify apiserver, agent, and workflow build"
run: make build-backend
run-pipelineloop-unittests:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v4
- name: "run go pipelineLoop unit tests"
run: cd tekton-catalog/pipeline-loops && make test-all
run-v2-custom-controller-image-builds:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: changes
uses: jitterbit/get-changed-files@v1
with:
format: 'json'
- name: backend changes
id: backend-changes
run: |
readarray -t backend_files <<<"$(jq -r '.[]|select(startswith("tekton-catalog"))|select(endswith("README.md")|not)' <<<'${{ steps.changes.outputs.all }}')"
echo "::set-output name=all::"
if [[ ${#backend_files[@]} -gt 0 ]]; then
if [[ -n "${backend_files[0]}" ]]; then
echo "::set-output name=all::yes"
fi
fi
- name: build images
if: ${{ steps.backend-changes.outputs.all }}
run: make build-v2-custom-controller-images
backend-integration:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: changes
uses: jitterbit/get-changed-files@v1
with:
format: 'json'
- name: backend changes
id: backend-changes
run: |
readarray -t backend_files <<<"$(jq -r '.[]|select(startswith("backend") or startswith("go.mod"))|select(endswith("README.md")|not)' <<<'${{ steps.changes.outputs.all }}')"
echo "::set-output name=all::"
if [[ ${#backend_files[@]} -gt 0 ]]; then
if [[ -n "${backend_files[0]}" ]]; then
echo "::set-output name=all::yes"
fi
fi
- name: Create k8s Kind Cluster
if: ${{ steps.backend-changes.outputs.all }}
uses: container-tools/kind-action@v2
with:
cluster_name: kfp-tekton
kubectl_version: v1.26.1
version: v0.17.0
node_image: kindest/node:v1.26.0
- name: build images
if: ${{ steps.backend-changes.outputs.all }}
run: ./scripts/deploy/github/build-images.sh
- name: Set up Python 3.9
if: ${{ steps.backend-changes.outputs.all }}
uses: actions/setup-python@v4
with:
python-version: 3.9
- name: "deploy kfp-tekton"
if: ${{ steps.backend-changes.outputs.all }}
run: ./scripts/deploy/github/deploy-kfp.sh
- name: Install sdk
if: ${{ steps.backend-changes.outputs.all }}
run: python -m pip install -e sdk/python
- name: "flip coin test"
if: ${{ steps.backend-changes.outputs.all }}
run: ./scripts/deploy/github/e2e-test.sh
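
The jq filter used by the backend-changes steps above can be exercised outside CI; a small sketch, assuming bash and jq (the JSON sample stands in for steps.changes.outputs.all):

# Sample changed-files payload as the get-changed-files action would emit it.
changed='["backend/api/main.go","backend/README.md","docs/guide.md","go.mod"]'
# Same filter as the workflow: keep backend/ and go.mod paths, drop README.md files.
readarray -t backend_files <<<"$(jq -r '.[]|select(startswith("backend") or startswith("go.mod"))|select(endswith("README.md")|not)' <<<"$changed")"
printf '%s\n' "${backend_files[@]}"
# Prints backend/api/main.go and go.mod, so the gate would set the output flag.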

.gitignore

@@ -69,7 +69,7 @@ frontend/test/ui/visual-regression/screenshots/screen
# go vendor
vendor
tekton-catalog/**/go.sum
tekton-catalog/pipeline-loops/go.sum
# Go module cache
backend/pkg/mod/cache
@@ -104,3 +104,14 @@ docs/_build
# python sdk package
*.tar.gz
# Copy from kubeflow/frontend
coverage/
# Python cache
__pycache__
.pytest_cache
# Coverage
.coverage
.coverage*

.kfp-rebase-version

@@ -0,0 +1 @@
1.8.4


@@ -2,7 +2,7 @@
version: 2
sphinx:
configuration: docs/conf.py
python:
version: 3.7
install:
- requirements: sdk/python/requirements.txt
build:
os: "ubuntu-22.04"
tools:
python: "3.10"


@@ -1,653 +0,0 @@
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
steps:
# Marketplace Major.Minor parsing
- id: "parseMajorMinorVersion"
name: gcr.io/cloud-builders/docker
entrypoint: /bin/bash
args:
- -ceux
- |
# Parse major minor version and save to a file for reusing in other steps.
# e.g. 1.0.0-rc.1 and 1.0.1 are parsed as 1.0
echo $TAG_NAME | sed -e "s#\([0-9]\+[.][0-9]\+\)[.].*#\1#" > /workspace/mm.ver
# Pull and retag images for pipeline components
- id: 'retagComponentImages'
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
waitFor: ['-']
args:
- -ceux
- |
images=(
"ml-pipeline-kubeflow-deployer"
"ml-pipeline-kubeflow-tf-trainer"
"ml-pipeline-kubeflow-tf-trainer-gpu"
"ml-pipeline-kubeflow-tfjob"
"ml-pipeline-local-confusion-matrix"
"ml-pipeline-local-roc"
"ml-pipeline-gcp"
)
for image in "${images[@]}"
do
from_image="gcr.io/$PROJECT_ID/$image:$COMMIT_SHA"
target_image="gcr.io/ml-pipeline/$image:$TAG_NAME"
docker pull $from_image
docker tag $from_image $target_image
docker push $target_image
done
# Pull and retag the images for the pipeline system
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA']
id: 'pullFrontend'
- id: 'tagFrontendForMarketplaceMajorMinor'
waitFor: ['pullFrontend', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/frontend:$TAG_NAME
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/frontend:$COMMIT_SHA
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/frontend:$TAG_NAME
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/frontend:$TAG_NAME
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/frontend:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/frontend:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/frontend:$TAG_NAME
docker push gcr.io/ml-pipeline/frontend:$COMMIT_SHA
docker push gcr.io/ml-pipeline/google/pipelines/frontend:$TAG_NAME
docker push gcr.io/ml-pipeline/google/pipelines-test/frontend:$TAG_NAME
docker push gcr.io/ml-pipeline/google/pipelines/frontend:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/frontend:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA']
id: 'pullAPIServer'
- id: 'tagAPIServerForMarketplaceMajorMinor'
waitFor: ['pullAPIServer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA 'gcr.io/ml-pipeline/api-server:$TAG_NAME'
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA 'gcr.io/ml-pipeline/api-server:$COMMIT_SHA'
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA 'gcr.io/ml-pipeline/google/pipelines/apiserver:$TAG_NAME'
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA 'gcr.io/ml-pipeline/google/pipelines-test/apiserver:$TAG_NAME'
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/apiserver:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/apiserver:$(cat /workspace/mm.ver)
docker push 'gcr.io/ml-pipeline/api-server:$TAG_NAME'
docker push 'gcr.io/ml-pipeline/api-server:$COMMIT_SHA'
docker push 'gcr.io/ml-pipeline/google/pipelines/apiserver:$TAG_NAME'
docker push 'gcr.io/ml-pipeline/google/pipelines-test/apiserver:$TAG_NAME'
docker push gcr.io/ml-pipeline/google/pipelines/apiserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/apiserver:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA']
id: 'pullScheduledworkflow'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/scheduledworkflow:$TAG_NAME']
id: 'tagScheduledworkflowVersionNumber'
waitFor: ['pullScheduledworkflow']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/scheduledworkflow:$COMMIT_SHA']
id: 'tagScheduledworkflowCommitSHA'
waitFor: ['pullScheduledworkflow']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$TAG_NAME']
id: 'tagScheduledworkflowForMarketplace'
waitFor: ['pullScheduledworkflow']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$TAG_NAME']
id: 'tagScheduledworkflowForMarketplaceTest'
waitFor: ['pullScheduledworkflow']
- id: 'tagScheduledworkflowForMarketplaceMajorMinor'
waitFor: ['pullScheduledworkflow', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA']
id: 'pullViewerCrdController'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/viewer-crd-controller:$TAG_NAME']
id: 'tagViewerCrdControllerVersionNumber'
waitFor: ['pullViewerCrdController']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/viewer-crd-controller:$COMMIT_SHA']
id: 'tagViewerCrdControllerCommitSHA'
waitFor: ['pullViewerCrdController']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/viewercrd:$TAG_NAME']
id: 'tagViewerCrdControllerForMarketplace'
waitFor: ['pullViewerCrdController']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$TAG_NAME']
id: 'tagViewerCrdControllerForMarketplaceTest'
waitFor: ['pullViewerCrdController']
- id: 'tagViewerCrdControllerForMarketplaceMajorMinor'
waitFor: ['pullViewerCrdController', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/viewercrd:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/viewercrd:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA']
id: 'pullPersistenceagent'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/persistenceagent:$TAG_NAME']
id: 'tagPersistenceagentVersionNumber'
waitFor: ['pullPersistenceagent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/persistenceagent:$COMMIT_SHA']
id: 'tagPersistenceagentCommitSHA'
waitFor: ['pullPersistenceagent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/persistenceagent:$TAG_NAME']
id: 'tagPersistenceagentForMarketplace'
waitFor: ['pullPersistenceagent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$TAG_NAME']
id: 'tagPersistenceagentForMarketplaceTest'
waitFor: ['pullPersistenceagent']
- id: 'tagPersistenceagentForMarketplaceMajorMinor'
waitFor: ['pullPersistenceagent', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/persistenceagent:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/persistenceagent:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA']
id: 'pullInverseProxyAgent'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/inverse-proxy-agent:$TAG_NAME']
id: 'tagInverseProxyAgentVersionNumber'
waitFor: ['pullInverseProxyAgent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/inverse-proxy-agent:$COMMIT_SHA']
id: 'tagInverseProxyAgentCommitSHA'
waitFor: ['pullInverseProxyAgent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/proxyagent:$TAG_NAME']
id: 'tagInverseProxyAgentForMarketplace'
waitFor: ['pullInverseProxyAgent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$TAG_NAME']
id: 'tagInverseProxyAgentForMarketplaceTest'
waitFor: ['pullInverseProxyAgent']
- id: 'tagInverseProxyAgentForMarketplaceMajorMinor'
waitFor: ['pullInverseProxyAgent', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/proxyagent:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/proxyagent:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA']
id: 'pullVisualizationServer'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/visualization-server:$TAG_NAME']
id: 'tagVisualizationServerVersionNumber'
waitFor: ['pullVisualizationServer']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/visualization-server:$COMMIT_SHA']
id: 'tagVisualizationServerCommitSHA'
waitFor: ['pullVisualizationServer']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/visualizationserver:$TAG_NAME']
id: 'tagVisualizationServerForMarketplace'
waitFor: ['pullVisualizationServer']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$TAG_NAME']
id: 'tagVisualizationServerForMarketplaceTest'
waitFor: ['pullVisualizationServer']
- id: 'tagVisualizationServerForMarketplaceMajorMinor'
waitFor: ['pullVisualizationServer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/visualizationserver:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/visualizationserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$(cat /workspace/mm.ver)
# ! Sync to the same MLMD version:
# * backend/metadata_writer/requirements.in and requirements.txt
# * @kubeflow/frontend/src/mlmd/generated
# * .cloudbuild.yaml and .release.cloudbuild.yaml
# * manifests/kustomize/base/metadata/base/metadata-grpc-deployment.yaml
# * test/tag_for_hosted.sh
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/tfx-oss-public/ml_metadata_store_server:0.25.1']
id: 'pullMetadataServer'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/tfx-oss-public/ml_metadata_store_server:0.25.1', 'gcr.io/ml-pipeline/google/pipelines/metadataserver:$TAG_NAME']
id: 'tagMetadataServerForMarketplace'
waitFor: ['pullMetadataServer']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/tfx-oss-public/ml_metadata_store_server:0.25.1', 'gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$TAG_NAME']
id: 'tagMetadataServerForMarketplaceTest'
waitFor: ['pullMetadataServer']
- id: 'tagMetadataServerForMarketplaceMajorMinor'
waitFor: ['pullMetadataServer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/tfx-oss-public/ml_metadata_store_server:0.25.1 gcr.io/ml-pipeline/google/pipelines/metadataserver:$(cat /workspace/mm.ver)
docker tag gcr.io/tfx-oss-public/ml_metadata_store_server:0.25.1 gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/metadataserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$(cat /workspace/mm.ver)
- id: 'pullMetadataWriter'
name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA']
waitFor: ['-']
- id: 'tagMetadataWriterVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-writer:$TAG_NAME']
waitFor: ['pullMetadataWriter']
- id: 'tagMetadataWriterCommitSHA'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-writer:$COMMIT_SHA']
waitFor: ['pullMetadataWriter']
- id: 'tagMetadataWriterForMarketplace'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/metadatawriter:$TAG_NAME']
waitFor: ['pullMetadataWriter']
- id: 'tagMetadataWriterForMarketplaceTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$TAG_NAME']
waitFor: ['pullMetadataWriter']
- id: 'tagMetadataWriterForMarketplaceMajorMinor'
waitFor: ['pullMetadataWriter', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/metadatawriter:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/metadatawriter:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$(cat /workspace/mm.ver)
- id: 'pullCacheServer'
name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA']
waitFor: ['-']
- id: 'tagCacheServerVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/cache-server:$TAG_NAME']
waitFor: ['pullCacheServer']
- id: 'tagCacheServerCommitSHA'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/cache-server:$COMMIT_SHA']
waitFor: ['pullCacheServer']
- id: 'tagCacheServerForMarketplace'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/cacheserver:$TAG_NAME']
waitFor: ['pullCacheServer']
- id: 'tagCacheServerForMarketplaceTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/cacheserver:$TAG_NAME']
waitFor: ['pullCacheServer']
- id: 'tagCacheServerForMarketplaceMajorMinor'
waitFor: ['pullCacheServer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/cacheserver:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/cacheserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/cacheserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/cacheserver:$(cat /workspace/mm.ver)
- id: 'pullCacheDeployer'
name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA']
waitFor: ['-']
- id: 'tagCacheDeployerVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/cache-deployer:$TAG_NAME']
waitFor: ['pullCacheDeployer']
- id: 'tagCacheDeployerCommitSHA'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/cache-deployer:$COMMIT_SHA']
waitFor: ['pullCacheDeployer']
- id: 'tagCacheDeployerForMarketplace'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/cachedeployer:$TAG_NAME']
waitFor: ['pullCacheDeployer']
- id: 'tagCacheDeployerForMarketplaceTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/cachedeployer:$TAG_NAME']
waitFor: ['pullCacheDeployer']
- id: 'tagCacheDeployerForMarketplaceMajorMinor'
waitFor: ['pullCacheDeployer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/cachedeployer:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/cachedeployer:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/cachedeployer:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/cachedeployer:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA']
id: 'pullMetadataEnvoy'
- id: 'tagMetadataEnvoyVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-envoy:$TAG_NAME']
waitFor: ['pullMetadataEnvoy']
- id: 'tagMetadataEnvoyCommitSHA'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-envoy:$COMMIT_SHA']
waitFor: ['pullMetadataEnvoy']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$TAG_NAME']
id: 'tagMetadataEnvoyForMarketplace'
waitFor: ['pullMetadataEnvoy']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$TAG_NAME']
id: 'tagMetadataEnvoyForMarketplaceTest'
waitFor: ['pullMetadataEnvoy']
- id: 'tagMetadataEnvoyForMarketplaceMajorMinor'
waitFor: ['pullMetadataEnvoy', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance']
id: 'pullMinio'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance', 'gcr.io/ml-pipeline/google/pipelines/minio:$TAG_NAME']
id: 'tagMinioForMarketplace'
waitFor: ['pullMinio']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance', 'gcr.io/ml-pipeline/google/pipelines-test/minio:$TAG_NAME']
id: 'tagMinioForMarketplaceTest'
waitFor: ['pullMinio']
- id: 'tagMinioForMarketplaceMajorMinor'
waitFor: ['pullMinio', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance gcr.io/ml-pipeline/google/pipelines/minio:$(cat /workspace/mm.ver)
docker tag gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance gcr.io/ml-pipeline/google/pipelines-test/minio:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/minio:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/minio:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/mysql:5.7']
id: 'pullMysql'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/mysql:5.7', 'gcr.io/ml-pipeline/google/pipelines/mysql:$TAG_NAME']
id: 'tagMySqlForMarketplace'
waitFor: ['pullMysql']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/mysql:5.7', 'gcr.io/ml-pipeline/google/pipelines-test/mysql:$TAG_NAME']
id: 'tagMySqlForMarketplaceTest'
waitFor: ['pullMysql']
- id: 'tagMySqlForMarketplaceMajorMinor'
waitFor: ['pullMysql', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/ml-pipeline/mysql:5.7 gcr.io/ml-pipeline/google/pipelines/mysql:$(cat /workspace/mm.ver)
docker tag gcr.io/ml-pipeline/mysql:5.7 gcr.io/ml-pipeline/google/pipelines-test/mysql:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/mysql:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/mysql:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/cloudsql-docker/gce-proxy:1.14']
id: 'pullCloudsqlProxy'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/cloudsql-docker/gce-proxy:1.14', 'gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$TAG_NAME']
id: 'tagCloudSqlProxyForMarketplace'
waitFor: ['pullCloudsqlProxy']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/cloudsql-docker/gce-proxy:1.14', 'gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$TAG_NAME']
id: 'tagCloudSqlProxyForMarketplaceTest'
waitFor: ['pullCloudsqlProxy']
- id: 'tagCloudSqlProxyForMarketplaceMajorMinor'
waitFor: ['pullCloudsqlProxy', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/cloudsql-docker/gce-proxy:1.14 gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$(cat /workspace/mm.ver)
docker tag gcr.io/cloudsql-docker/gce-proxy:1.14 gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/argoexec:v2.12.9-license-compliance']
id: 'pullArgoExecutor'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/argoexec:v2.12.9-license-compliance', 'gcr.io/ml-pipeline/google/pipelines/argoexecutor:$TAG_NAME']
id: 'tagArgoExecutorForMarketplace'
waitFor: ['pullArgoExecutor']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/argoexec:v2.12.9-license-compliance', 'gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$TAG_NAME']
id: 'tagArgoExecutorForMarketplaceTest'
waitFor: ['pullArgoExecutor']
- id: 'tagArgoExecutorForMarketplaceMajorMinor'
waitFor: ['pullArgoExecutor', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/ml-pipeline/argoexec:v2.12.9-license-compliance gcr.io/ml-pipeline/google/pipelines/argoexecutor:$(cat /workspace/mm.ver)
docker tag gcr.io/ml-pipeline/argoexec:v2.12.9-license-compliance gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/argoexecutor:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/workflow-controller:v2.12.9-license-compliance']
id: 'pullArgoWorkflowController'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/workflow-controller:v2.12.9-license-compliance', 'gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$TAG_NAME']
id: 'tagArgoWorkflowControllerForMarketplace'
waitFor: ['pullArgoWorkflowController']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/workflow-controller:v2.12.9-license-compliance', 'gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$TAG_NAME']
id: 'tagArgoWorkflowControllerForMarketplaceTest'
waitFor: ['pullArgoWorkflowController']
- id: 'tagArgoWorkflowControllerForMarketplaceMajorMinor'
waitFor: ['pullArgoWorkflowController', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/ml-pipeline/workflow-controller:v2.12.9-license-compliance gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$(cat /workspace/mm.ver)
docker tag gcr.io/ml-pipeline/workflow-controller:v2.12.9-license-compliance gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$(cat /workspace/mm.ver)
# Marketplace specific deployer and specific primary image
- id: 'pullMarketplaceDeployer'
name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA']
waitFor: ['-']
- id: 'tagMarketplaceDeployerVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/deployer:$TAG_NAME']
waitFor: ['pullMarketplaceDeployer']
- id: 'tagMarketplaceDeployerVersionNumberTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/deployer:$TAG_NAME']
waitFor: ['pullMarketplaceDeployer']
- id: 'tagMarketplaceDeployerVersionNumberMajorMinor'
waitFor: ['pullMarketplaceDeployer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/deployer:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/deployer:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/deployer:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/deployer:$(cat /workspace/mm.ver)
- id: 'tagMarketplacePrimaryVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines:$TAG_NAME']
waitFor: ['pullMarketplaceDeployer']
- id: 'tagMarketplacePrimaryVersionNumberTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test:$TAG_NAME']
waitFor: ['pullMarketplaceDeployer']
- id: 'tagMarketplacePrimaryVersionNumberMajorMinor'
waitFor: ['pullMarketplaceDeployer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test:$(cat /workspace/mm.ver)
# Copy the Python SDK
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', 'gs://$PROJECT_ID/builds/$COMMIT_SHA/kfp.tar.gz', '/workspace/']
id: 'copyPythonSDKLocal'
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp.tar.gz', 'gs://ml-pipeline/release/$TAG_NAME/kfp.tar.gz']
id: 'copyPythonSDK'
waitFor: ['copyPythonSDKLocal']
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp.tar.gz', 'gs://ml-pipeline/release/latest/kfp.tar.gz']
id: 'copyPythonSDKToLatest'
waitFor: ['copyPythonSDKLocal']
# Copy the Python Component SDK
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', 'gs://$PROJECT_ID/builds/$COMMIT_SHA/kfp-component.tar.gz', '/workspace/']
id: 'copyPythonComponentSDKLocal'
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp-component.tar.gz', 'gs://ml-pipeline/release/$TAG_NAME/kfp-component.tar.gz']
id: 'copyPythonComponentSDK'
waitFor: ['copyPythonComponentSDKLocal']
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp-component.tar.gz', 'gs://ml-pipeline/release/latest/kfp-component.tar.gz']
id: 'copyPythonComponentSDKToLatest'
waitFor: ['copyPythonComponentSDKLocal']
images:
- 'gcr.io/ml-pipeline/scheduledworkflow:$TAG_NAME'
- 'gcr.io/ml-pipeline/scheduledworkflow:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/persistenceagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/persistenceagent:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/viewer-crd-controller:$TAG_NAME'
- 'gcr.io/ml-pipeline/viewer-crd-controller:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/inverse-proxy-agent:$TAG_NAME'
- 'gcr.io/ml-pipeline/inverse-proxy-agent:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/visualization-server:$TAG_NAME'
- 'gcr.io/ml-pipeline/visualization-server:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/metadata-envoy:$TAG_NAME'
- 'gcr.io/ml-pipeline/metadata-envoy:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/metadata-writer:$TAG_NAME'
- 'gcr.io/ml-pipeline/metadata-writer:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/cache-server:$TAG_NAME'
- 'gcr.io/ml-pipeline/cache-server:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/cache-deployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/cache-deployer:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/viewercrd:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/persistenceagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/proxyagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/visualizationserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/metadataserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/minio:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/mysql:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/argoexecutor:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/metadatawriter:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/deployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/cacheserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/cachedeployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/minio:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/mysql:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/cacheserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/cachedeployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/deployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test:$TAG_NAME'
timeout: '1200s'
tags:
- release-on-tag
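
The pull/tag/push sequence repeated throughout this file reduces to one helper; a sketch under the assumption that docker is authenticated against both registries (all image values are illustrative):

# Retag a source image under any number of destination names and push each.
retag() {
  local src="$1"; shift
  docker pull "$src"
  for dst in "$@"; do
    docker tag "$src" "$dst"
    docker push "$dst"
  done
}
retag "gcr.io/my-project/frontend:abc123" \
  "gcr.io/ml-pipeline/frontend:1.0.1" \
  "gcr.io/ml-pipeline/google/pipelines/frontend:1.0"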


@@ -1,5 +1,5 @@
[style]
based_on_style = google
dedent_closing_brackets = true
coalesce_brackets = true
split_before_first_argument = true
based_on_style = yapf
indent_width = 4
split_before_named_assigns = true
column_limit = 80
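
To apply the updated style, yapf can be pointed at the file carrying this [style] block; a sketch, assuming the block lives in .style.yapf at the repo root (the target path is illustrative):

# Reformat one SDK module in place using the project style.
yapf --in-place --style=.style.yapf sdk/python/kfp_tekton/compiler/compiler.py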


@@ -30,6 +30,13 @@ spec:
default: iamapikey
- name: docker-password
description: iam api key
- name: public-cr-username
description: username for public container registry
- name: public-cr-password
description: password/token for public container registry
- name: public-cr
description: public container registry
default: docker.io
- name: tekton-version
description: tekton version
- name: tekton-ns
@@ -46,6 +53,33 @@ spec:
- name: slack-channel
description: slack channel
default: ""
- name: publish-to-public-cr
description: publish images to public container registry
default: "0"
- name: extra-test-cases
description: run extra test cases
default: "0"
- name: skip-pipeline-loop
description: skip the pipeline loop test case
default: "0"
- name: image-tag
description: image tag
default: "nightly"
- name: public-cr-namespace
description: namespace for public container registry
default: aipipeline
- name: images
description: an image list for publishing
default: "api-server persistenceagent metadata-writer scheduledworkflow cache-server frontend pipelineloop-controller pipelineloop-webhook kubeclient"
- name: v2-images
description: a v2 image list for publishing
default: "tekton-kfptask-controller tekton-kfptask-webhook tekton-exithandler-controller tekton-exithandler-webhook tekton-driver"
- name: many-edge-duration
description: duration threshold for the many-edge pipeline
default: "7"
- name: v2-image-tag
description: v2 tekton catalog image tag
default: "nightly"
resourcetemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
@@ -86,6 +120,10 @@ spec:
value: $(params.docker-username)
- name: docker-password
value: $(params.docker-password)
- name: public-cr-username
value: $(params.public-cr-username)
- name: public-cr-password
value: $(params.public-cr-password)
- name: resource-group
value: $(params.resource-group)
- name: org
@@ -104,6 +142,26 @@ spec:
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: image-tag
value: $(params.image-tag)
- name: public-cr
value: $(params.public-cr)
- name: public-cr-namespace
value: $(params.public-cr-namespace)
- name: publish-to-public-cr
value: $(params.publish-to-public-cr)
- name: extra-test-cases
value: $(params.extra-test-cases)
- name: skip-pipeline-loop
value: $(params.skip-pipeline-loop)
- name: images
value: $(params.images)
- name: v2-images
value: $(params.v2-images)
- name: many-edge-duration
value: $(params.many-edge-duration)
- name: v2-image-tag
value: $(params.v2-image-tag)
---
apiVersion: tekton.dev/v1beta1
kind: TriggerBinding


@@ -20,6 +20,13 @@ spec:
default: iamapikey
- name: docker-password
description: iam api key
- name: public-cr-username
description: username for public container registry
- name: public-cr-password
description: password/token for public container registry
- name: public-cr
description: public container registry
default: docker.io
- name: resource-group
description: resource group
default: default
@@ -45,6 +52,33 @@ spec:
- name: slack-channel
description: slack channel
default: ""
- name: publish-to-public-cr
description: publish images to public container registry
default: "0"
- name: extra-test-cases
description: execute extra test cases
default: "0"
- name: skip-pipeline-loop
description: skip the pipeline loop test case
default: "0"
- name: image-tag
description: image tag
default: "nightly"
- name: public-cr-namespace
description: namespace for public container registry
default: aipipeline
- name: images
description: an image list for publishing
default: "api-server persistenceagent metadata-writer scheduledworkflow cache-server frontend pipelineloop-controller pipelineloop-webhook kubeclient"
- name: v2-images
description: a v2 image list for publishing
default: "tekton-kfptask-controller tekton-kfptask-webhook tekton-exithandler-controller tekton-exithandler-webhook"
- name: many-edge-duration
description: duration threshold for the many-edge pipeline
default: "7"
- name: v2-image-tag
description: v2 tekton catalog image tag
default: "nightly"
workspaces:
- name: pipeline-pvc
tasks:
@@ -77,7 +111,7 @@ spec:
workspace: pipeline-pvc
- name: build-images-artifact
taskRef:
name: build-images
name: build-images-dnd
runAfter:
- test
params:
@@ -105,7 +139,7 @@ spec:
- name: build-images-api-server
retries: 1
taskRef:
name: build-images
name: build-images-dnd
runAfter:
- test
params:
@@ -133,7 +167,7 @@ spec:
- name: build-images-persistenceagent
retries: 1
taskRef:
name: build-images
name: build-images-dnd
runAfter:
- test
params:
@@ -161,7 +195,7 @@ spec:
- name: build-images-metadata-writer
retries: 1
taskRef:
name: build-images
name: build-images-dnd
runAfter:
- test
params:
@@ -189,7 +223,7 @@ spec:
- name: build-images-scheduledworkflow
retries: 1
taskRef:
name: build-images
name: build-images-dnd
runAfter:
- test
params:
@@ -214,6 +248,344 @@ spec:
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-cacheserver
retries: 1
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: cache-server
- name: docker-root
value: .
- name: docker-file
value: backend/Dockerfile.cacheserver
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-frontend
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: frontend
- name: docker-root
value: .
- name: docker-file
value: frontend/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
- name: build-args
value: "COMMIT_HASH=$(git rev-parse HEAD) DATE=\"$(date -u)\""
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-pipeline-loops-binaries
taskRef:
name: build-binaries
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
params:
- name: build-dir
value: tekton-catalog/pipeline-loops
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-any-sequencer-binaries
taskRef:
name: build-binaries
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
params:
- name: build-dir
value: tekton-catalog/any-sequencer
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-kubectl-wrapper-binaries
taskRef:
name: build-binaries
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
params:
- name: build-dir
value: tekton-catalog/kubectl-wrapper
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-pipelineloop-controller
runAfter:
- build-pipeline-loops-binaries
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: pipelineloop-controller
- name: docker-root
value: tekton-catalog/pipeline-loops
- name: docker-file
value: tekton-catalog/pipeline-loops/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: build-args
value: "bin_name=pipelineloop-controller"
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-tekton-kfptask-controller
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: tekton-kfptask-controller
- name: docker-root
value: tekton-catalog/tekton-kfptask
- name: docker-file
value: tekton-catalog/tekton-kfptask/Dockerfile.tekton-kfptask.controller
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-tekton-kfptask-webhook
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: tekton-kfptask-webhook
- name: docker-root
value: tekton-catalog/tekton-kfptask
- name: docker-file
value: tekton-catalog/tekton-kfptask/Dockerfile.tekton-kfptask.webhook
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-tekton-exithandler-controller
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: tekton-exithandler-controller
- name: docker-root
value: tekton-catalog/tekton-exithandler
- name: docker-file
value: tekton-catalog/tekton-exithandler/Dockerfile.tekton-exithandler.controller
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-tekton-exithandler-webhook
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: tekton-exithandler-webhook
- name: docker-root
value: tekton-catalog/tekton-exithandler
- name: docker-file
value: tekton-catalog/tekton-exithandler/Dockerfile.tekton-exithandler.webhook
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-pipelineloop-webhook
runAfter:
- build-pipeline-loops-binaries
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: pipelineloop-webhook
- name: docker-root
value: tekton-catalog/pipeline-loops
- name: docker-file
value: tekton-catalog/pipeline-loops/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: build-args
value: "bin_name=pipelineloop-webhook"
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-any-sequencer
runAfter:
- build-any-sequencer-binaries
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: any-sequencer
- name: docker-root
value: tekton-catalog/any-sequencer
- name: docker-file
value: tekton-catalog/any-sequencer/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-kubectl-wrapper
runAfter:
- build-kubectl-wrapper-binaries
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: kubeclient
- name: docker-root
value: tekton-catalog/kubectl-wrapper
- name: docker-file
value: tekton-catalog/kubectl-wrapper/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: deploy
taskRef:
name: deploy
@ -222,6 +594,8 @@ spec:
- build-images-persistenceagent
- build-images-metadata-writer
- build-images-scheduledworkflow
- build-images-cacheserver
- build-images-frontend
params:
- name: apikey
value: $(params.apikey)
@ -233,18 +607,128 @@ spec:
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: tekton-version
value: $(params.tekton-version)
- name: tekton-ns
value: $(params.tekton-ns)
- name: public-cr
value: $(params.public-cr)
# - name: tekton-version
# value: $(params.tekton-version)
# - name: tekton-ns
# value: $(params.tekton-ns)
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test
- name: setup-pipeline-loops-deploy
taskRef:
name: setup-pipeline-loops-deploy
runAfter:
- containerize-pipelineloop-controller
- containerize-pipelineloop-webhook
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: setup-kubectl-wrapper-deploy
taskRef:
name: setup-kubectl-wrapper-deploy
runAfter:
- containerize-kubectl-wrapper
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: deploy-pipeline-loops-e2e
retries: 1
taskRef:
name: iks-test
runAfter:
- setup-pipeline-loops-deploy
- deploy
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
- name: test-commands
value: |
export SKIP_PIPELINE_LOOP=$(params.skip-pipeline-loop);
source scripts/deploy/iks/tekton-catalog/deploy-pipeline-loops-e2e.sh;
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: deploy-any-sequencer-e2e
retries: 1
taskRef:
name: iks-test
runAfter:
- deploy
- containerize-any-sequencer
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
- name: test-commands
value: |
export NEW_IMAGE_URL="${REGISTRY_URL}/${REGISTRY_NAMESPACE}/any-sequencer"
export NEW_IMAGE_TAG="${IMAGE_TAG}"
kubectl create clusterrolebinding pipeline-runner-extend --clusterrole=cluster-admin --serviceaccount=default:default || true
source scripts/deploy/iks/tekton-catalog/deploy-any-sequencer-e2e.sh
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: deploy-kubectl-wrapper-e2e
retries: 1
taskRef:
name: iks-test
runAfter:
- deploy
- setup-kubectl-wrapper-deploy
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
- name: test-commands
value: |
export MANIFEST="resourceop_basic.yaml"
source scripts/deploy/iks/tekton-catalog/deploy-kubectl-wrapper-e2e.sh
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-flip-coin
retries: 1
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
params:
- name: apikey
value: $(params.apikey)
@ -256,14 +740,191 @@ spec:
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-flip-coin.sh"
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-cond-dep
retries: 1
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-condition-depend.sh"
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-cache
retries: 1
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-cache.sh"
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-many-edges
retries: 2
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-many-edges.sh"
- name: many-edge-duration
value: $(params.many-edge-duration)
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-trusted-ai
retries: 1
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
when:
- input: $(params.extra-test-cases)
operator: in
values:
- '1'
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-trusted-ai.sh"
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: publish-images
taskRef:
name: publish-images
runAfter:
- e2e-test-flip-coin
- deploy-any-sequencer-e2e
- deploy-kubectl-wrapper-e2e
when:
- input: $(params.publish-to-public-cr)
operator: in
values:
- '1'
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: $(params.docker-username)
- name: docker-password
value: $(params.docker-password)
- name: public-cr-username
value: $(params.public-cr-username)
- name: public-cr-password
value: $(params.public-cr-password)
- name: image-tag
value: $(params.image-tag)
- name: public-cr-namespace
value: $(params.public-cr-namespace)
- name: public-cr
value: $(params.public-cr)
- name: images
value: $(params.images)
- name: v2-images
value: $(params.v2-images)
- name: v2-image-tag
value: $(params.v2-image-tag)
workspaces:
- name: task-pvc
workspace: pipeline-pvc
finally:
- name: cleanup-pipeline-loops
taskRef:
name: iks-test
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
- name: test-commands
value: |
kubectl delete -f tekton-catalog/pipeline-loops/config || true
kubectl delete -f tekton-catalog/pipeline-loops/examples/loop-example-basic.yaml || true
kubectl delete -f sdk/python/tests/compiler/testdata/any_sequencer.yaml || true
kubectl delete -f sdk/python/tests/compiler/testdata/resourceop_basic.yaml || true
kubectl delete job --all -n default || true
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: undeploy
taskRef:
name: undeploy
params:
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: apikey
value: $(params.apikey)
- name: kubeflow-ns

View File

@ -35,7 +35,7 @@ spec:
mountPath: /artifacts
steps:
- name: run-go-unittests
image: docker.io/aipipeline/pipeline-base-image:1.0
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
@ -66,6 +66,7 @@ spec:
command: ["/bin/bash", "-c"]
args:
- set -ex;
rm -r /artifacts/*;
cd /artifacts && git clone -q -b $GIT_BRANCH $GIT_URL .;
GIT_COMMIT=$(git rev-parse HEAD);
source ./scripts/deploy/iks/run-test.sh;
@ -104,7 +105,7 @@ spec:
mountPath: /artifacts
steps:
- name: build-image
image: docker.io/aipipeline/pipeline-base-image:1.0
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
@ -138,6 +139,189 @@ spec:
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-images-args
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: image-name
description: image name
- name: docker-root
description: root directory for docker
default: .
- name: docker-file
description: dockerfile
- name: registry-url
description: container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: run-task
description: "execution task: artifact or image"
- name: archive-dir
description: archive directory
default: "."
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-image
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: IMAGE_NAME
value: $(params.image-name)
- name: DOCKER_ROOT
value: $(params.docker-root)
- name: DOCKER_FILE
value: $(params.docker-file)
- name: REGISTRY_URL
value: $(params.registry-url)
- name: REGISTRY_NAMESPACE
value: $(params.registry-namespace)
- name: RUN_TASK
value: $(params.run-task)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
export BUILD_ARG_LIST="COMMIT_HASH=$(git rev-parse HEAD) DATE=\"$(date -u)\"";
source ./scripts/deploy/iks/build-image.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-images-dnd
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: image-name
description: image name
- name: docker-root
description: root directory for docker
default: .
- name: docker-file
description: dockerfile
- name: registry-url
description: container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: run-task
description: "execution task: artifact or image"
- name: archive-dir
description: archive directory
default: "."
- name: build-args
description: docker build args
default: ""
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-image
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: IMAGE_NAME
value: $(params.image-name)
- name: DOCKER_ROOT
value: $(params.docker-root)
- name: DOCKER_FILE
value: $(params.docker-file)
- name: REGISTRY_URL
value: $(params.registry-url)
- name: REGISTRY_NAMESPACE
value: $(params.registry-namespace)
- name: RUN_TASK
value: $(params.run-task)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
- name: DOCKER_HOST
value: "tcp://localhost:2376"
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: "1"
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
export BUILD_ARG_LIST="$(params.build-args)";
source scripts/deploy/iks/build-image-dind.sh;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
sidecars:
- name: server
image: icr.io/continuous-delivery/pipeline/docker:20.10.22-dind
securityContext:
privileged: true
command: ["sh", "-c"]
env:
# Write generated certs to the path shared with the client.
- name: DOCKER_TLS_CERTDIR
value: /certs
args:
# Set the MTU to a value that fits within the ibmcloud Calico MTU value
# References:
# - https://liejuntao001.medium.com/fix-docker-in-docker-network-issue-in-kubernetes-cc18c229d9e5
# - https://cloud.ibm.com/docs/containers?topic=containers-kernel#calico-mtu
#
# Use a workaround to bypass virtio-fs for Continuous Delivery shared workers
- if [[ $(df -PT /var/lib/docker | awk 'NR==2 {print $2}') == virtiofs ]]; then
apk add e2fsprogs &&
truncate -s 20G /tmp/disk.img &&
mkfs.ext4 /tmp/disk.img &&
mount /tmp/disk.img /var/lib/docker; fi &&
dockerd-entrypoint.sh --mtu=1400;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
# Wait for the dind daemon to generate the certs it will share with the client.
readinessProbe:
initialDelaySeconds: 2
periodSeconds: 1
exec:
command: ["ls", "/certs/client/ca.pem"]
volumes:
- name: dind-certs
emptyDir: {}
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: deploy
spec:
@ -149,16 +333,19 @@ spec:
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: tekton-version
description: tekton version
- name: tekton-ns
description: tekton namespace
default: tekton-pipeline
# - name: tekton-version
# description: tekton version
# - name: tekton-ns
# description: tekton namespace
# default: tekton-pipeline
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: public-cr
description: public container registry
default: quay.io
- name: archive-dir
description: archive directory
default: "."
@ -166,30 +353,8 @@ spec:
- name: task-pvc
mountPath: /artifacts
steps:
- name: deploy-tekton
image: docker.io/aipipeline/pipeline-base-image:1.0
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: TEKTON_VERSION
value: $(params.tekton-version)
- name: TEKTON_NS
value: $(params.tekton-ns)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
source ./scripts/deploy/iks/deploy-tekton.sh;
- name: deploy-kfp-tekton
image: docker.io/aipipeline/pipeline-base-image:1.0
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
@ -197,6 +362,8 @@ spec:
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: PUBLIC_CR
value: $(params.public-cr)
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: ARCHIVE_DIR
@ -231,12 +398,18 @@ spec:
- name: slack-channel
description: slack channel
default: ""
- name: test-script
description: a shell script to run the test case
default: ""
- name: many-edge-duration
description: duration threshold for pipeline
default: "5"
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: flip-coin
image: docker.io/aipipeline/pipeline-base-image:1.0
- name: run-test
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
@ -258,6 +431,10 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
- name: TEST_SCRIPT
value: $(params.test-script)
- name: MANY_EDGE_DURATION
value: $(params.many-edge-duration)
command: ["/bin/bash", "-c"]
args:
- set -ex;
@ -266,6 +443,147 @@ spec:
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: publish-images
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: kubernetes-cluster
description: cluster name
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: registry-url
description: container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: ibm container registry username
- name: docker-password
description: ibm container registry password
- name: public-cr-username
description: username for public container registry
- name: public-cr-password
description: password/token for public container registry
- name: public-cr
description: public container registry uri
default: docker.io
- name: images
description: image list to publish
default: "api-server persistenceagent metadata-writer scheduledworkflow cache-server frontend"
- name: image-tag
description: image tag
default: "nightly"
- name: v2-images
description: v2 image list to publish
default: "tekton-kfptask-controller tekton-kfptask-webhook tekton-exithandler-controller tekton-exithandler-webhook"
- name: v2-image-tag
description: v2 image tag
default: "nightly"
- name: dind-ns
description: dind namespace
default: docker-build
- name: public-cr-namespace
description: namespace for public container registry
default: aipipeline
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: publish-images-to-cr
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: KUBEFLOW_NS
value: $(params.kubeflow-ns)
- name: REGISTRY_URL
value: $(params.registry-url)
- name: REGISTRY_NAMESPACE
value: $(params.registry-namespace)
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: PUBLIC_CR_USERNAME
value: $(params.public-cr-username)
- name: PUBLIC_CR_TOKEN
value: $(params.public-cr-password)
- name: PUBLIC_CR_NAMESPACE
value: $(params.public-cr-namespace)
- name: PUBLIC_CR
value: $(params.public-cr)
- name: IMAGES
value: $(params.images)
- name: PUBLISH_TAG
value: $(params.image-tag)
- name: V2_IMAGES
value: $(params.v2-images)
- name: V2_PUBLISH_TAG
value: $(params.v2-image-tag)
- name: DIND_NS
value: $(params.dind-ns)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
- name: DOCKER_HOST
value: "tcp://localhost:2376"
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: "1"
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
source scripts/deploy/iks/publish-image.sh;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
sidecars:
- image: icr.io/continuous-delivery/pipeline/docker:19.03.15-dind
name: server
securityContext:
privileged: true
command: ["sh", "-c"]
env:
# Write generated certs to the path shared with the client.
- name: DOCKER_TLS_CERTDIR
value: /certs
args:
# Set the MTU to a value that fits within the ibmcloud Calico MTU value
# References:
# - https://liejuntao001.medium.com/fix-docker-in-docker-network-issue-in-kubernetes-cc18c229d9e5
# - https://cloud.ibm.com/docs/containers?topic=containers-kernel#calico-mtu
#
# Use a workaround to bypass virtio-fs for Continuous Delivery shared workers
- if [[ $(df -PT /var/lib/docker | awk 'NR==2 {print $2}') == virtiofs ]]; then
apk add e2fsprogs &&
truncate -s 20G /tmp/disk.img &&
mkfs.ext4 /tmp/disk.img &&
mount /tmp/disk.img /var/lib/docker; fi &&
dockerd-entrypoint.sh --mtu=1400;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
# Wait for the dind daemon to generate the certs it will share with the client.
readinessProbe:
initialDelaySeconds: 2
periodSeconds: 1
exec:
command: ["ls", "/certs/client/ca.pem"]
volumes:
- name: dind-certs
emptyDir: {}
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: undeploy
spec:
@ -278,13 +596,17 @@ spec:
- name: archive-dir
description: archive directory
default: "."
- name: kubernetes-cluster
description: cluster name
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: undeploy-kfp-tekton
image: docker.io/aipipeline/pipeline-base-image:1.0
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: ARCHIVE_DIR
@ -296,15 +618,157 @@ spec:
- set -ex;
cd /artifacts && source build.properties;
source ./scripts/deploy/iks/undeploy-kfp.sh;
- name: undeploy-tekton
image: docker.io/aipipeline/pipeline-base-image:1.0
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-binaries
spec:
params:
- name: build-dir
description: directory with makefile
default: tekton-catalog/feature
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-binaries
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
command: ["/bin/bash", "-c"]
- name: BUILD_DIR
value: $(params.build-dir)
command:
- /bin/bash
- '-c'
args:
- set -ex;
cd /artifacts && source build.properties;
source ./scripts/deploy/iks/undeploy-tekton.sh;
source scripts/deploy/iks/tekton-catalog/build-binaries.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-pipeline-loops-deploy
spec:
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: setup-deploy
image: quay.io/aipipeline/pipeline-base-image:1.6
command:
- /bin/bash
- '-c'
args:
- |
set -ex;
cd /artifacts && source build.properties;
export CONTROLLER_IMAGE_URL=${REGISTRY_URL}/${REGISTRY_NAMESPACE}/pipelineloop-controller;
export WEBHOOK_IMAGE_URL=${REGISTRY_URL}/${REGISTRY_NAMESPACE}/pipelineloop-webhook;
source scripts/deploy/iks/tekton-catalog/setup-pipeline-loops-deploy.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-kubectl-wrapper-deploy
spec:
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: setup-deploy
image: quay.io/aipipeline/pipeline-base-image:1.6
command:
- /bin/bash
- '-c'
args:
- >
cd /artifacts && source build.properties;
export KUBECTL_WRAPPER_IMAGE_URL=${REGISTRY_URL}/${REGISTRY_NAMESPACE}/kubeclient;
source scripts/deploy/iks/tekton-catalog/setup-kubectl-wrapper-deploy.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: iks-test
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: kubernetes-cluster
description: cluster name
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: resource-group
description: resource group
default: default
- name: org
description: organization
- name: region
description: region
- name: space
description: space
- name: archive-dir
description: archive directory
default: "."
- name: test-commands
description: testing commands
default: ""
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: iks-script
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: KUBEFLOW_NS
value: $(params.kubeflow-ns)
- name: REGION
value: $(params.region)
- name: ORG
value: $(params.org)
- name: SPACE
value: $(params.space)
- name: RESOURCE_GROUP
value: $(params.resource-group)
- name: PIPELINE_URL
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/pipeline-run-url']
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
command: ["/bin/bash", "-c"]
args:
- |
set -ex;
cd /artifacts && source build.properties
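# retry MAX INTERVAL CMD...: rerun CMD until it succeeds; give up after MAX attempts, sleeping INTERVAL seconds between tries.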
retry() {
local max=$1; shift
local interval=$1; shift
until "$@"; do
echo "trying.."
max=$((max-1))
if [[ "$max" -eq 0 ]]; then
return 1
fi
sleep "$interval"
done
}
# Set up kubernetes config
retry 3 3 ibmcloud login --apikey "${IBM_CLOUD_API_KEY}" --no-region
retry 3 3 ibmcloud target -r "$REGION" -g "$RESOURCE_GROUP"
retry 3 3 ibmcloud ks cluster config -c "$PIPELINE_KUBERNETES_CLUSTER_NAME"
$(params.test-commands)

View File

@ -18,7 +18,7 @@ spec:
mountPath: /artifacts
steps:
- name: build-binaries
image: docker.io/aipipeline/pipeline-base-image:1.0
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: BUILD_DIR
value: $(params.build-dir)
@ -95,7 +95,7 @@ spec:
mountPath: /artifacts
steps:
- name: create-docker-registry-secret
image: docker.io/aipipeline/pipeline-base-image:1.0
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: API_KEY
valueFrom:
@ -133,7 +133,7 @@ spec:
- mountPath: /steps
name: steps-volume
- name: run-docker-commands
image: docker.io/aipipeline/pipeline-base-image:1.0
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_HOST
value: "tcp://localhost:2376"
@ -243,7 +243,7 @@ spec:
mountPath: /artifacts
steps:
- name: cleanup
image: docker.io/aipipeline/pipeline-base-image:1.0
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: API_KEY
valueFrom:
@ -293,7 +293,7 @@ spec:
mountPath: /artifacts
steps:
- name: setup-deploy
image: docker.io/aipipeline/pipeline-base-image:1.0
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: CONTROLLER_IMAGE_URL
value: $(params.controller-image-url)
@ -336,7 +336,7 @@ spec:
mountPath: /artifacts
steps:
- name: setup-deploy
image: docker.io/aipipeline/pipeline-base-image:1.0
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: KUBECTL_WRAPPER_IMAGE_URL
value: $(params.kubectl-wrapper-image-url)

View File

@ -1,193 +0,0 @@
# Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
matrix:
include:
- name: "Unit tests, Python 3.6"
language: python
python: "3.6"
env: TOXENV=py36
install: &0
- python3 -m pip install -e sdk/python
script: &1
- VENV=$VIRTUAL_ENV make unit_test
- name: "Unit tests, Python 3.7"
language: python
python: "3.7"
env: TOXENV=py37
install: *0
script: *1
- name: "Unit tests, Python 3.8"
language: python
python: "3.8"
env: TOXENV=py38
install: *0
script: *1
- name: "Progress report on compiling KFP DSL test scripts"
language: python
python: "3.7"
install: *0
script:
- VENV=$VIRTUAL_ENV make report
- name: "Lint Python code with flake8"
language: python
python: "3.7"
script:
- VENV=$VIRTUAL_ENV make lint
- name: "Verify source files contain the license header"
language: bash
script:
- make check_license
- name: "Verify Markdown files have current table of contents"
language: bash
script:
- make check_mdtoc
- name: "Verify Markdown files have valid links"
language: python
python: "3.7"
install:
- python3 -m pip install requests
script:
- make check_doc_links
- name: "Verify apiserver, agent, and workflow build"
language: go
go: 1.15.x
script:
- make build-backend
- name: "run go unit tests"
language: go
go: 1.15.x
script:
- make run-go-unittests
- name: "Verify api-server, persistenceagent, metadata-writer, and scheduledworkflow docker build"
services: docker
env:
- DOCKER_REGISTRY=fake_registry
- DIFF_DETECTED_ERR_CODE=169
before_script:
# No diff detected: terminate job with success i.e. travis_terminate 0
# Script throws unexpected error: fail job with error code
# Diff detected: continue/run this job
- git remote add upstream https://github.com/kubeflow/kfp-tekton.git
- git fetch upstream
- ./scripts/check_diff.sh ; EXIT_CODE=$?
- |
if [ $EXIT_CODE -eq 0 ]; then
echo "No changes detected - skipping job."
travis_terminate 0
elif [ $EXIT_CODE -ne $DIFF_DETECTED_ERR_CODE ]; then
echo "Unexpected error in check_diff.sh - failing job."
travis_terminate $EXIT_CODE
fi
- echo "Changes detected - continue running job"
script:
- make build-backend-images
# =================================
# TODO: merge with config from KFP
# =================================
#matrix:
# include:
# - language: python
# python: "3.5"
# env: TOXENV=py35
# before_install:
# - export PYTHONPATH=$PYTHONPATH:/home/travis/.local/lib/python3.5/site-packages/
# install: &0
# - python3 -m pip install --upgrade pip
# - python3 -m pip install -r $TRAVIS_BUILD_DIR/sdk/python/requirements.txt
# # Additional dependencies
# - pip3 install coverage==4.5.4 coveralls==1.9.2 six>=1.13.0
# # Sample test infra dependencies
# - pip3 install minio
# - pip3 install junit_xml
# # Visualization test dependencies
# - cd $TRAVIS_BUILD_DIR/backend/src/apiserver/visualization
# - pip3 install -r requirements-test.txt
# # Using Argo to lint all compiled workflows
# - export LOCAL_BIN="${HOME}/.local/bin"
# - mkdir -p "$LOCAL_BIN"
# - export PATH="${PATH}:$LOCAL_BIN" # Unnecessary - Travis already has it in PATH
# - wget --quiet -O "${LOCAL_BIN}/argo" https://github.com/argoproj/argo/releases/download/v2.4.3/argo-linux-amd64 && chmod +x "${LOCAL_BIN}/argo"
# script: &1 # DSL tests
# - cd $TRAVIS_BUILD_DIR/sdk/python
# - python3 -m pip install -e .
# - cd $TRAVIS_BUILD_DIR # Changing the current directory to the repo root for correct coverall paths
# - coverage run --source=kfp --append -m unittest discover --verbose --start-dir sdk/python/tests --top-level-directory=sdk/python
# #- coveralls
#
# # Test against TFX
# # Compile and setup protobuf
# - PROTOC_ZIP=protoc-3.7.1-linux-x86_64.zip
# - curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/$PROTOC_ZIP
# - sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc
# - sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*'
# - rm -f $PROTOC_ZIP
# # Install TFX from head
# - cd $TRAVIS_BUILD_DIR
# - git clone https://github.com/tensorflow/tfx.git
# - cd $TRAVIS_BUILD_DIR/tfx
# - pip3 install --upgrade pip
# - pip3 install --upgrade 'numpy>=1.16,<1.17'
# # Specify transitive dependency to get around: https://github.com/pypa/pip/issues/8583
# - pip3 install --upgrade 'google-auth>=1.18.0'
# - set -x
# - set -e
# - python3 setup.py bdist_wheel
# - WHEEL_PATH=$(find dist -name "tfx-*.whl")
# - python3 -m pip install "${WHEEL_PATH}" --upgrade
# - set +e
# - set +x
# # Three KFP-related unittests
# - cd $TRAVIS_BUILD_DIR/tfx/tfx/orchestration/kubeflow
# - python3 kubeflow_dag_runner_test.py
# - cd $TRAVIS_BUILD_DIR/tfx/tfx/examples/chicago_taxi_pipeline
# - python3 taxi_pipeline_kubeflow_gcp_test.py
# - python3 taxi_pipeline_kubeflow_local_test.py
#
# # Visualization test
# - cd $TRAVIS_BUILD_DIR/backend/src/apiserver/visualization
# - python3 test_exporter.py
# - python3 test_server.py
#
# # Test loading all component.yaml definitions
# - $TRAVIS_BUILD_DIR/components/test_load_all_components.sh
#
# # Component SDK tests
# - cd $TRAVIS_BUILD_DIR/components/gcp/container/component_sdk/python
# - ./run_test.sh
#
# # Sample test unittests.
# - cd $TRAVIS_BUILD_DIR/test/sample-test/unittests
# - python3 -m unittest utils_tests.py
# - language: python
# python: "3.6"
# env: TOXENV=py36
# before_install:
# - export PYTHONPATH=$PYTHONPATH:/home/travis/.local/lib/python3.6/site-packages/
# install: *0
# script: *1
# - language: python
# python: "3.7"
# env: TOXENV=py37
# before_install:
# - export PYTHONPATH=$PYTHONPATH:/home/travis/.local/lib/python3.7/site-packages/
# install: *0
# script: *1
# - name: "Lint Python code with flake8"
# language: python
# python: "3.7"
# install: pip install flake8
# script: flake8 . --count --exclude=backend/src/apiserver/visualization/types/*.py --select=E9,F63,F7,F82 --show-source --statistics || true

View File

@ -9,7 +9,7 @@ just a few small guidelines you need to follow.
<!-- START of ToC generated by running ./tools/mdtoc.sh CONTRIBUTING.md -->
- [Project Structure](#project-structure)
- [Contributor License Agreement](#contributor-license-agreement)
- [Legal](#legal)
- [Coding Style](#coding-style)
- [Unit Testing Best Practices](#unit-testing-best-practices)
- [Golang](#golang)
@ -35,17 +35,11 @@ To get started, see the development guides:
* [Backend development guide](./backend/README.md)
* [SDK development guide](./sdk/python/README.md)
## Contributor License Agreement
## Legal
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
Kubeflow uses Developer Certificate of Origin ([DCO](https://github.com/apps/dco/)).
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
Please see https://github.com/kubeflow/community/tree/master/dco-signoff-hook#signing-off-commits to learn how to sign off your commits.
## Coding Style

View File

@ -16,10 +16,11 @@
# - The help target was derived from https://stackoverflow.com/a/35730328/5601796
VENV ?= .venv
KFP_TEKTON_RELEASE ?= v0.7.0
KFP_TEKTON_RELEASE ?= v1.9.2
export VIRTUAL_ENV := $(abspath ${VENV})
export PATH := ${VIRTUAL_ENV}/bin:${PATH}
DOCKER_REGISTRY ?= aipipeline
GITHUB_ACTION ?= false
.PHONY: help
help: ## Display the Make targets
@ -35,12 +36,38 @@ $(VENV)/bin/activate: sdk/python/setup.py
@echo "VENV=$(VENV)"
@test -d $(VENV) || python3 -m venv $(VENV)
@$(VENV)/bin/pip show kfp-tekton >/dev/null 2>&1 || $(VENV)/bin/pip install -e sdk/python
@touch $(VENV)/bin/activate
@if [ "$(GITHUB_ACTION)" = "false" ]; then touch $(VENV)/bin/activate; fi
.PHONY: install
install: venv ## Install the kfp_tekton compiler in a virtual environment
@echo "Run 'source $(VENV)/bin/activate' to activate the virtual environment."
.PHONY: validate-generated-test-yamls
validate-generated-test-yamls:
@echo "=================================================================="
@echo "Reporting files with same non-inlined and inlined generated yamls in testdata:"
@find sdk/python/tests/compiler/testdata \
\( -type f -name "*yaml" -name "*noninlined.yaml" \) | sort -z >/tmp/validate-generated-test-yamls_total
@find sdk/python/tests/compiler/testdata \
\( -type f -name "*yaml" -name "*noninlined.yaml" \) | \
sed -n -e 's/\(.*\)_noninlined.yaml/\1/p' | \
xargs -n1 -I '{}' diff -q '{}.yaml' '{}_noninlined.yaml' | cut -f4 -d' ' | \
sort -z >/tmp/validate-generated-test-yamls_valid
@echo "=================================================================="
@echo "Noninlined and inlined testdata yamls, having same content."
@diff -a /tmp/validate-generated-test-yamls_total /tmp/validate-generated-test-yamls_valid
@echo "$@: OK"
.PHONY: validate-testdata
validate-testdata:
@cd tekton-catalog/pipeline-loops/ && make validate-testdata-python-sdk
@echo "$@: OK"
.PHONY: validate-pipelineloop-examples
validate-pipelineloop-examples:
@cd tekton-catalog/pipeline-loops/ && make validate-examples
@echo "$@: OK"
.PHONY: unit_test
unit_test: venv ## Run compiler unit tests
@echo "=================================================================="
@ -51,6 +78,9 @@ unit_test: venv ## Run compiler unit tests
@sdk/python/tests/run_tests.sh
@echo "$@: OK"
.PHONY: ci_unit_test
ci_unit_test: unit_test
.PHONY: e2e_test
e2e_test: venv ## Run compiler end-to-end tests (requires kubectl and tkn CLI)
@echo "=================================================================="
@ -78,7 +108,7 @@ lint: venv ## Check Python code style compliance
@which flake8 > /dev/null || pip install flake8
@flake8 sdk/python --show-source --statistics \
--select=E9,E2,E3,E5,F63,F7,F82,F4,F841,W291,W292 \
--per-file-ignores sdk/python/tests/compiler/testdata/*:F841 \
--per-file-ignores sdk/python/tests/compiler/testdata/*:F841,F821 \
--max-line-length=140
@echo "$@: OK"
@ -167,6 +197,15 @@ build-backend-images: \
## Build backend docker images
@echo "$@: OK"
.PHONY: build-v2-custom-controller-images
build-v2-custom-controller-images: \
build-tekton-exithandler-controller-image \
build-tekton-exithandler-webhook-image \
build-tekton-kfptask-controller-image \
build-tekton-kfptask-webhook-image \
## Build V2 Tekton custom task controller images
@echo "$@: OK"
.PHONY: build-api-server-image
build-api-server-image: ## Build api-server docker image
docker build -t ${DOCKER_REGISTRY}/api-server -f backend/Dockerfile .
@ -187,6 +226,26 @@ build-scheduledworkflow-image: ## Build scheduledworkflow docker image
build-cacheserver-image: ## Build cacheserver docker image
docker build -t ${DOCKER_REGISTRY}/cache-server -f backend/Dockerfile.cacheserver .
.PHONY: build-tekton-exithandler-controller-image
build-tekton-exithandler-controller-image: ## Build tekton-exithandler-controller docker image
@cd tekton-catalog/tekton-exithandler/ && docker build -t ${DOCKER_REGISTRY}/tekton-exithandler-controller -f Dockerfile.tekton-exithandler.controller .
.PHONY: build-tekton-exithandler-webhook-image
build-tekton-exithandler-webhook-image: ## Build tekton-exithandler-webhook docker image
@cd tekton-catalog/tekton-exithandler/ && docker build -t ${DOCKER_REGISTRY}/tekton-exithandler-webhook -f Dockerfile.tekton-exithandler.webhook .
.PHONY: build-tekton-kfptask-controller-image
build-tekton-kfptask-controller-image: ## Build tekton-kfptask-controller docker image
@cd tekton-catalog/tekton-kfptask/ && docker build -t ${DOCKER_REGISTRY}/tekton-kfptask-controller -f Dockerfile.tekton-kfptask.controller .
.PHONY: build-tekton-kfptask-webhook-image
build-tekton-kfptask-webhook-image: ## Build tekton-kfptask-webhook docker image
@cd tekton-catalog/tekton-kfptask/ && docker build -t ${DOCKER_REGISTRY}/tekton-kfptask-webhook -f Dockerfile.tekton-kfptask.webhook .
.PHONY: build-tekton-driver-image
build-tekton-driver-image: ## Build tekton-driver docker image
@cd tekton-catalog/tekton-driver/ && docker build -t ${DOCKER_REGISTRY}/tekton-driver -f Dockerfile.tektondriver .
.PHONY: run-go-unittests
run-go-unittests: \
run-apiserver-unittests \
@ -194,6 +253,8 @@ run-go-unittests: \
run-crd-unittests \
run-persistenceagent-unittests \
run-cacheserver-unittests \
run-tekton-exithandler-unittests \
run-tekton-kfptask-unittests \
## Verify go backend unit tests
@echo "$@: OK"
@ -211,3 +272,12 @@ run-persistenceagent-unittests: # persistence agent golang unit tests
run-cacheserver-unittests: # cache golang unit tests
go test -v -cover ./backend/src/cache/...
run-tekton-exithandler-unittests: # tekton-exithandler golang unit tests
@cd tekton-catalog/tekton-exithandler/ && go test -v -cover ./...
run-tekton-kfptask-unittests: # tekton-kfptask golang unit tests
@cd tekton-catalog/tekton-kfptask/ && go test -v -cover ./...
run-tekton-driver-unittests: # tekton-driver golang unit tests
@cd tekton-catalog/tekton-driver/ && go test -v -cover ./...

OWNERS
View File

@ -3,11 +3,15 @@ approvers:
- ckadner
- Tomcli
- fenglixa
- drewbutlerbb4
- pugangxa
- scrapcodes
- yhwang
- rafalbigaj
reviewers:
- animeshsingh
- ckadner
- Tomcli
- fenglixa
- drewbutlerbb4
- pugangxa
- jinchihe
- scrapcodes
- yhwang

View File

@ -1,27 +1,35 @@
# Kubeflow Pipelines with Tekton
# Kubeflow Pipelines on Tekton
Project bringing Kubeflow Pipelines and Tekton together. The project is driven according to this [design doc](http://bit.ly/kfp-tekton). The current code allows you to run Kubeflow Pipelines with a Tekton backend end to end.
* Create your Pipeline using Kubeflow Pipelines DSL, and compile it to Tekton YAML (see the sketch below).
* Upload the compiled Tekton YAML to the KFP engine (API and UI), and run end to end with logging and artifact tracking enabled.
* In KFP-Tekton V2, the SDK compiler generates the same intermediate representation as the main Kubeflow Pipelines SDK. All the Tekton-related implementations are embedded into the V2 backend API service.
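For illustration, here is a minimal sketch of the V1 SDK flow, assuming the `kfp-tekton` package is installed (`pip install kfp-tekton`); the pipeline and output file names are made up for the example:

```python
# Minimal sketch (kfp-tekton V1 SDK; names are illustrative).
import kfp.dsl as dsl
from kfp_tekton.compiler import TektonCompiler


@dsl.pipeline(name="echo", description="Trivial one-step pipeline")
def echo_pipeline(message: str = "hello"):
    # A single step that echoes the pipeline parameter.
    dsl.ContainerOp(name="echo", image="busybox", command=["echo", message])


if __name__ == "__main__":
    # Compiles the DSL pipeline into Tekton YAML.
    TektonCompiler().compile(echo_pipeline, "echo_pipeline.yaml")
```

The resulting `echo_pipeline.yaml` is a Tekton resource that can then be uploaded through the KFP UI or API, as described above.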
For more details about the project please follow this detailed [blog post](https://developer.ibm.com/blogs/kubeflow-pipelines-with-tekton-and-watson/). Additionally look at these [slides](https://www.slideshare.net/AnimeshSingh/kubeflow-pipelines-with-tekton-236769976) and the [deep dive presentation](https://www.youtube.com/watch?v=AYIeNtXLT_k) for demos.
For more details about the project, please read this detailed [blog post](https://developer.ibm.com/blogs/awb-tekton-optimizations-for-kubeflow-pipelines-2-0). For the latest KFP-Tekton V2 implementation and [supported offerings](https://developer.ibm.com/articles/advance-machine-learning-workflows-with-ibm-watson-pipelines/), please follow our latest [KubeCon talk](https://www.youtube.com/watch?v=ecx-yp4g7YU) and [slides](https://docs.google.com/presentation/d/1Su42ApXzZvVwhNSYRAk3bd0heHOtrdEX/edit?usp=sharing&ouid=103716780892927252554&rtpof=true&sd=true). For information on the KFP-Tekton V1 implementation, see these [slides](https://www.slideshare.net/AnimeshSingh/kubeflow-pipelines-with-tekton-236769976) and this [deep dive presentation](https://www.youtube.com/watch?v=AYIeNtXLT_k) for demos.
## Tekton
## Architecture
The Tekton Pipelines project provides Kubernetes-style resources for declaring CI/CD-style pipelines. Tekton introduces
several new [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) (CRDs) including Task, Pipeline, TaskRun, and PipelineRun. A PipelineRun represents a single running instance
of a Pipeline and is responsible for creating a Pod for each of its Tasks and as many containers within each Pod as it
has Steps. Please look for more details in [Tekton repo](https://github.com/tektoncd/pipeline).
We are currently using [Kubeflow Pipelines 1.8.4](https://github.com/kubeflow/pipelines/releases/tag/1.8.4) and
[Tekton >= 0.53.2](https://github.com/tektoncd/pipeline/releases/tag/v0.53.2)
in the master branch for this project.
## Kubeflow Pipeline with Tekton Backend
We are currently using [Kubeflow Pipelines 1.5.0-rc.2](https://github.com/kubeflow/pipelines/releases/tag/1.5.0-rc.2) and
[Tekton >= 0.21.0](https://github.com/tektoncd/pipeline/releases/tag/v0.21.0) for this project.
For [Kubeflow Pipelines 2.0.5](https://github.com/kubeflow/pipelines/releases/tag/2.0.5) and
[Tekton >= 0.53.2](https://github.com/tektoncd/pipeline/releases/tag/v0.53.2)
integration, please check out the [kfp-tekton v2-integration](https://github.com/kubeflow/kfp-tekton/tree/v2-integration) branch and [KFP-Tekton V2 deployment](/guides/kfp_tekton_install.md#standalone-kubeflow-pipelines-v2-with-tekton-backend-deployment) instead.
![kfp-tekton](images/kfp-tekton.png)
### Get Started using Kubeflow Pipelines with Tekton
Kubeflow Pipelines is a platform for building and deploying portable, scalable machine learning (ML) workflows. More architectural details about the Kubeflow Pipelines can be found on the [Kubeflow website](https://www.kubeflow.org/docs/components/pipelines/overview/).
The Tekton Pipelines project provides Kubernetes-style resources for declaring
CI/CD-style pipelines. Tekton introduces several [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) (CRDs) including Task, Pipeline, TaskRun, and PipelineRun. A PipelineRun represents a single running instance of a Pipeline and is responsible for creating a Pod for each of its Tasks and as many containers within each Pod as it has Steps. Please look for more details in the [Tekton repo](https://github.com/tektoncd/pipeline).
### Get Started using Kubeflow Pipelines on Tekton
[Install Kubeflow Pipelines with Tekton backend](/guides/kfp_tekton_install.md)
@ -33,6 +41,8 @@ We are currently using [Kubeflow Pipelines 1.5.0-rc.2](https://github.com/kubefl
[Available KFP DSL Features](/sdk/FEATURES.md)
[Tekton Specific Features](/guides/advanced_user_guide.md)
### Development Guides
[Backend Developer Guide](/guides/developer_guide.md)
@ -51,10 +61,6 @@ We are currently using [Kubeflow Pipelines 1.5.0-rc.2](https://github.com/kubefl
[Kubeflow Slack](https://join.slack.com/t/kubeflow/shared_invite/zt-cpr020z4-PfcAue_2nw67~iIDy7maAQ)
[CD Foundation MLOps Sig](https://cd.foundation/blog/2020/02/11/announcing-the-cd-foundation-mlops-sig/).
[Instructions to join](https://github.com/cdfoundation/sig-mlops)
### References
[Kubeflow and TFX Pipelines](/samples/kfp-tfx)

SECURITY.md (new file)
View File

@ -0,0 +1,17 @@
# Security Policy
## Supported Versions
Below is the list of KFP-Tekton versions supported with security and bug fixes.
| Version | Supported |
| ------- | ------------------ |
| 2.0.x | :white_check_mark: |
| 1.9.x | :white_check_mark: |
| 1.8.x | :white_check_mark: |
| 1.7.x | :white_check_mark: |
| < 1.7 | :x: |
## Reporting a Vulnerability
Please submit [an issue](https://github.com/kubeflow/kfp-tekton/issues) in the KFP-Tekton repo for any vulnerability you find in this project. If the vulnerability should not be disclosed publicly, please report it in the Kubeflow Slack channel `#external-kfp-tekton` or via a direct message to `Tommy Li`.

View File

@ -1 +1 @@
0.8.0-rc.0
1.9.2

View File

@ -1,7 +0,0 @@
PROTO_PATH=/usr/local/include/
gen_proto: v2alpha1/pipeline_spec.proto
cd v2alpha1 && protoc -I=$(PROTO_PATH) -I=. \
--go_out=go \
--go_opt=paths=source_relative \
pipeline_spec.proto

View File

@ -1,19 +0,0 @@
# Pipeline Spec
## Generate golang proto code
Documentation: <https://developers.google.com/protocol-buffers/docs/reference/go-generated>
Download `protoc` compiler binary from: <https://github.com/protocolbuffers/protobuf/releases/tag/v3.14.0>.
Install proto code generator:
```bash
go install google.golang.org/protobuf/cmd/protoc-gen-go
```
Generate golang proto code:
```bash
make gen_proto
```

File diff suppressed because it is too large.

View File

@ -1,838 +0,0 @@
syntax = "proto3";
package ml_pipelines;
import "google/protobuf/any.proto";
import "google/protobuf/struct.proto";
option go_package = "github.com/kubeflow/pipelines/api/v2alpha1/go";
// The spec of a pipeline job.
message PipelineJob {
string name = 1; // Name of the job.
// User friendly display name
string display_name = 2;
reserved 3, 4, 5, 6;
// Definition of the pipeline that is being executed.
google.protobuf.Struct pipeline_spec = 7;
reserved 8, 9, 10;
// The labels with user-defined metadata to organize PipelineJob.
map<string, string> labels = 11;
// The runtime config of a PipelineJob.
message RuntimeConfig {
// The runtime parameters of the PipelineJob. The parameters will be
// passed into [PipelineJob.pipeline_spec][] to replace the placeholders
// at runtime.
map<string, Value> parameters = 1;
// A path in a Cloud Storage bucket which will be treated as the root
// output directory of the pipeline. It is used by the system to
// generate the paths of output artifacts.
// This is a GCP-specific optimization.
string gcs_output_directory = 2;
}
// Runtime config of the pipeline.
RuntimeConfig runtime_config = 12;
}
// The spec of a pipeline.
message PipelineSpec {
// The metadata of the pipeline.
PipelineInfo pipeline_info = 1;
// A list of pipeline tasks, which form a DAG.
// Deprecated, use [PipelineSpec.root][] instead.
repeated PipelineTaskSpec tasks = 2 [deprecated = true];
// The deployment config of the pipeline.
// The deployment config can be extended to provide platform specific configs.
// The supported config is [PipelineDeploymentConfig]().
// Deprecated in favor of deployment_spec.
google.protobuf.Any deployment_config = 3 [deprecated = true];
// The deployment config of the pipeline.
// The deployment config can be extended to provide platform specific configs.
google.protobuf.Struct deployment_spec = 7;
// The version of the sdk, which compiles the spec.
string sdk_version = 4;
// The version of the schema.
string schema_version = 5;
// The definition of the runtime parameter.
message RuntimeParameter {
// Required field. The type of the runtime parameter.
PrimitiveType.PrimitiveTypeEnum type = 1;
// Optional field. Default value of the runtime parameter. If not set and
// the runtime parameter value is not provided during runtime, an error will
// be raised.
Value default_value = 2;
}
// The runtime parameters of the pipeline. Keyed by parameter name.
// Deprecated, instead of defining the runtime_parameters, user can define
// [ComponentSpec.input_definitions][] field of the [PipelineSpec.root][] to
// define the pipeline parameters.
map<string, RuntimeParameter> runtime_parameters = 6 [deprecated = true];
// The map of name to definition of all components used in this pipeline.
map<string, ComponentSpec> components = 8;
// The definition of the main pipeline. Execution of the pipeline is
// completed upon the completion of this component.
ComponentSpec root = 9;
}
// Definition of a component.
message ComponentSpec {
// Definition of the input parameters and artifacts of the component.
ComponentInputsSpec input_definitions = 1;
// Definition of the output parameters and artifacts of the component.
ComponentOutputsSpec output_definitions = 2;
// Either a DAG or a single execution.
oneof implementation {
DagSpec dag = 3;
string executor_label = 4;
}
}
// A DAG contains multiple tasks.
message DagSpec {
// The tasks inside the dag.
map<string, PipelineTaskSpec> tasks = 1;
// Defines how the outputs of the dag are linked to the sub tasks.
DagOutputsSpec outputs = 2;
}
// Definition of the output artifacts and parameters of the DAG component.
message DagOutputsSpec {
// Selects a defined output artifact from a sub task of the DAG.
message ArtifactSelectorSpec {
// The name of the sub task which produces the output that matches with
// the `output_artifact_key`.
string producer_subtask = 1;
// The key of [ComponentOutputsSpec.artifacts][] map of the producer task.
string output_artifact_key = 2;
}
// Selects a list of output artifacts that will be aggregated to the single
// output artifact channel of the DAG.
message DagOutputArtifactSpec {
// The selected artifacts will be aggregated as output as a single
// output channel of the DAG.
repeated ArtifactSelectorSpec artifact_selectors = 1;
}
// Name to the output artifact channel of the DAG.
map<string, DagOutputArtifactSpec> artifacts = 1;
// Selects a defined output parameter from a sub task of the DAG.
message ParameterSelectorSpec {
// The name of the sub task which produces the output that matches with
// the `output_parameter_key`.
string producer_subtask = 1;
// The key of [ComponentOutputsSpec.parameters][] map of the producer task.
string output_parameter_key = 2;
}
// Aggregate output parameters from sub tasks into a list object.
message ParameterSelectorsSpec {
repeated ParameterSelectorSpec parameter_selectors = 1;
}
// Aggregates output parameters from sub tasks into a map object.
message MapParameterSelectorsSpec {
map<string, ParameterSelectorSpec> mapped_parameters = 2;
}
// We support four ways to fan-in output parameters from sub tasks to the DAG
// parent task.
// 1. Directly expose a single output parameter from a sub task,
// 2. (Conditional flow) Expose a list of output from multiple tasks
// (some might be skipped) but allows only one of the output being generated.
// 3. Expose a list of outputs from multiple tasks (e.g. iterator flow).
// 4. Expose the aggregation of output parameters as a name-value map.
message DagOutputParameterSpec {
oneof kind {
// Returns the sub-task parameter as a DAG parameter. The selected
// parameter must have the same type as the DAG parameter type.
ParameterSelectorSpec value_from_parameter = 1;
// Returns one of the sub-task parameters as a DAG parameter. If
// multiple values are available to select, the DAG will fail. All the
// selected parameters must have the same type as the DAG parameter type.
ParameterSelectorsSpec value_from_oneof = 2;
}
}
// The name to the output parameter.
map<string, DagOutputParameterSpec> parameters = 2;
}
// Definition specification of the component input parameters and artifacts.
message ComponentInputsSpec {
// Definition of an artifact input.
message ArtifactSpec {
ArtifactTypeSchema artifact_type = 1;
}
// Definition of a parameter input.
message ParameterSpec {
PrimitiveType.PrimitiveTypeEnum type = 1;
}
// Name to artifact input.
map<string, ArtifactSpec> artifacts = 1;
// Name to parameter input.
map<string, ParameterSpec> parameters = 2;
}
// Definition specification of the component output parameters and artifacts.
message ComponentOutputsSpec {
// Definition of an artifact output.
message ArtifactSpec {
ArtifactTypeSchema artifact_type = 1;
// Deprecated. Use [ArtifactSpec.metadata][] instead.
map<string, ValueOrRuntimeParameter> properties = 2 [deprecated = true];
// Deprecated. Use [ArtifactSpec.metadata][] instead.
map<string, ValueOrRuntimeParameter> custom_properties = 3
[deprecated = true];
// Properties of the Artifact.
google.protobuf.Struct metadata = 4;
}
// Definition of a parameter output.
message ParameterSpec {
PrimitiveType.PrimitiveTypeEnum type = 1;
}
// Name to artifact output.
map<string, ArtifactSpec> artifacts = 1;
// Name to parameter output.
map<string, ParameterSpec> parameters = 2;
}
// The spec of task inputs.
message TaskInputsSpec {
// The specification of a task input artifact.
message InputArtifactSpec {
// The name of the upstream task which produces the output that matches with
// the `output_artifact_key`.
// Deprecated, use
// [TaskInputSpec.InputArtifactSpec.TaskOutputArtifactSpec][] instead.
string producer_task = 1 [deprecated = true];
// The key of [TaskOutputsSpec.artifacts][] map of the producer task.
// Deprecated, use
// [TaskInputSpec.InputArtifactSpec.TaskOutputArtifactSpec][] instead.
string output_artifact_key = 2 [deprecated = true];
message TaskOutputArtifactSpec {
// The name of the upstream task which produces the output that matches
// with the `output_artifact_key`.
string producer_task = 1;
// The key of [TaskOutputsSpec.artifacts][] map of the producer task.
string output_artifact_key = 2;
}
oneof kind {
// Pass the input artifact from another task within the same parent
// component.
TaskOutputArtifactSpec task_output_artifact = 3;
// Pass the input artifact from parent component input artifact.
string component_input_artifact = 4;
}
reserved 5;
}
// Represents an input parameter. The value can be taken from an upstream
// task's output parameter (if specifying `producer_task` and
// `output_parameter_key`), or it can be a runtime value, which can either be
// determined at compile-time, or from a pipeline parameter.
message InputParameterSpec {
// Represents an upstream task's output parameter.
message TaskOutputParameterSpec {
// The name of the upstream task which produces the output parameter that
// matches with the `output_parameter_key`.
string producer_task = 1;
// The key of [TaskOutputsSpec.parameters][] map of the producer task.
string output_parameter_key = 2;
}
// Represents an upstream task's final status. The field can only be set if
// the schema version is `2.0.0`. The resolved input parameter will be a
// json payload in string type.
message TaskFinalStatus {
// The name of the upstream task where the final status is coming from.
string producer_task = 1;
}
oneof kind {
// Output parameter from an upstream task.
TaskOutputParameterSpec task_output_parameter = 1;
// A constant value or runtime parameter.
ValueOrRuntimeParameter runtime_value = 2;
// Pass the input parameter from parent component input parameter.
string component_input_parameter = 3;
// The final status of an upstream task.
TaskFinalStatus task_final_status = 5;
}
// Selector expression of Common Expression Language (CEL)
// that applies to the parameter found from above kind.
//
// The expression is applied to the Value type
// [Value][]. For example,
// 'size(string_value)' will return the size of the Value.string_value.
//
// After applying the selection, the parameter will be returned as a
// [Value][]. The type of the Value is either inferred from the input
// definition in the corresponding
// [ComponentSpec.input_definitions.parameters][], or, if not found,
// automatically inferred as either a string value or a double value.
//
// In addition to the builtin functions in CEL, the Value.string_value can
// be treated as a JSON string and parsed to the [google.protobuf.Value][]
// proto message. Then, the CEL expression provided in this field will be
// used to get the requested field. For example:
// - if Value.string_value is a JSON array of "[1.1, 2.2, 3.3]",
// 'parseJson(string_value)[i]' will pass the ith parameter from the list
// to the current task, or
// - if Value.string_value is a JSON map of "{"a": 1.1, "b": 2.2,
// "c": 3.3}", 'parseJson(string_value)[key]' will pass the map value from
// the struct map to the current task.
//
// If unset, the value will be passed directly to the current task.
string parameter_expression_selector = 4;
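//
// A minimal illustrative sketch (textproto) of this message, assuming a
// hypothetical upstream task `stats` with a STRING output parameter
// `metrics` holding a JSON map:
//   task_output_parameter {
//     producer_task: "stats"
//     output_parameter_key: "metrics"
//   }
//   parameter_expression_selector: "parseJson(string_value)['accuracy']"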
}
// A map of input parameters which are small values, stored by the system
// and can be queried.
map<string, InputParameterSpec> parameters = 1;
// A map of input artifacts.
map<string, InputArtifactSpec> artifacts = 2;
}
// The spec of task outputs.
message TaskOutputsSpec {
// The specification of a task output artifact.
message OutputArtifactSpec {
// The type of the artifact.
ArtifactTypeSchema artifact_type = 1;
// The properties of the artifact, which are determined either at
// compile-time, or at pipeline submission time through runtime parameters
map<string, ValueOrRuntimeParameter> properties = 2;
// The custom properties of the artifact, which are determined either at
// compile-time, or at pipeline submission time through runtime parameters
map<string, ValueOrRuntimeParameter> custom_properties = 3;
}
// Specification for output parameters produced by the task.
message OutputParameterSpec {
// Required field. The type of the output parameter.
PrimitiveType.PrimitiveTypeEnum type = 1;
}
// A map of output parameters which are small values, stored by the system
// and can be queried. The output key is used
// by [TaskInputsSpec.InputParameterSpec][] of the downstream task to specify
// the data dependency. The same key will also be used by
// [ExecutorInput.Inputs][] to reference the output parameter.
map<string, OutputParameterSpec> parameters = 1;
// A map of output artifacts. Keyed by output key. The output key is used
// by [TaskInputsSpec.InputArtifactSpec][] of the downstream task to specify
// the data dependency. The same key will also be used by
// [ExecutorInput.Inputs][] to reference the output artifact.
map<string, OutputArtifactSpec> artifacts = 2;
}
// Represents primitive types. The wrapper is needed to give the enum values
// a namespace so we don't need to add a `PRIMITIVE_TYPE_` prefix to each
// enum value.
message PrimitiveType {
// The primitive types.
enum PrimitiveTypeEnum {
PRIMITIVE_TYPE_UNSPECIFIED = 0;
INT = 1;
DOUBLE = 2;
STRING = 3;
}
}
// The spec of a pipeline task.
message PipelineTaskSpec {
// Basic info of a pipeline task.
PipelineTaskInfo task_info = 1;
// Specification for task inputs which contains parameters and artifacts.
TaskInputsSpec inputs = 2;
// Specification for task outputs.
// Deprecated, the output definition is moved to [ComponentSpec.outputs][].
TaskOutputsSpec outputs = 3 [deprecated = true];
// Label for the executor of the task.
// The specification will be specified in the deployment config.
// For example:
// ```
// tasks:
// - task_info:
// name: trainer
// executor_label: trainer
// deployment_config:
// @type: cloud.ml.pipelines.v1alpha3.proto.PipelineDeploymentConfig
// executors:
// trainer:
// container:
// image: gcr.io/tfx:latest
// args: []
// ```
// Deprecated, the executor_label is moved to
// [ComponentSpec.executor_label][].
string executor_label = 4 [deprecated = true];
// A list of names of upstream tasks that do not provide input
// artifacts for this task, but nonetheless whose completion this task depends
// on.
repeated string dependent_tasks = 5;
message CachingOptions {
// Whether or not to enable cache for this task. Defaults to false.
bool enable_cache = 1;
}
CachingOptions caching_options = 6;
// Reference to a component. Use this field to define either a DAG or an
// executor.
ComponentRef component_ref = 7;
// Trigger policy defines how the task gets triggered. If a task is not
// triggered, it will end up in the NOT_TRIGGERED state.
message TriggerPolicy {
// An expression which will be evaluated into a boolean value. True to
// trigger the task to run. The expression follows the language of
// [CEL Spec][https://github.com/google/cel-spec]. It can access the data
// from [ExecutorInput][] message of the task.
// For example:
// - `inputs.artifacts['model'][0].properties['accuracy']*100 > 90`
// - `inputs.parameters['type'] == 'foo' && inputs.parameters['num'] == 1`
string condition = 1;
// An enum defines the trigger strategy of when the task will be ready to be
// triggered.
// ALL_UPSTREAM_TASKS_SUCCEEDED - all upstream tasks in succeeded state.
// ALL_UPSTREAM_TASKS_COMPLETED - all upstream tasks in any final state.
// (Note that CANCELLED is also a final state, but the job will not trigger
// new tasks while it is in the CANCELLING state, so a task with the
// ALL_UPSTREAM_TASKS_COMPLETED trigger policy will not start while job
// cancellation is in progress.)
enum TriggerStrategy {
// Unspecified. Behave the same as ALL_UPSTREAM_TASKS_SUCCEEDED.
TRIGGER_STRATEGY_UNSPECIFIED = 0;
// Specifies that all upstream tasks are in succeeded state.
ALL_UPSTREAM_TASKS_SUCCEEDED = 1;
// Specifies that all upstream tasks are in any final state.
ALL_UPSTREAM_TASKS_COMPLETED = 2;
}
// The trigger strategy of this task. The `strategy` and `condition` are
// combined with logical "AND": a task will only be tested for the
// `condition` when the `strategy` is met.
// Unset or set to the default value of TRIGGER_STRATEGY_UNSPECIFIED behaves
// the same as ALL_UPSTREAM_TASKS_SUCCEEDED.
TriggerStrategy strategy = 2;
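//
// A minimal illustrative sketch (textproto), assuming a hypothetical
// input parameter `deploy` and tolerating upstream failures:
//   trigger_policy {
//     condition: "inputs.parameters['deploy'] == 'true'"
//     strategy: ALL_UPSTREAM_TASKS_COMPLETED
//   }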
}
// Trigger policy of the task.
TriggerPolicy trigger_policy = 8;
// Iterator supports fanning out the task into multiple sub-tasks based on the
// values of an input artifact or parameter. The current task will become the
// parent of all the fan-out tasks. The output of the current task follows
// these conventions:
// * Output artifacts with the same name of each iteration will be merged
// into one output artifact channel of the parent iterator task.
// * Output parameters with the same name of each iteration will be merged
// into a string output parameter with the same name with content being a
// JSON array.
//
// For example, if an iterator starts two sub-tasks (t1 and t2) with the
// following outputs.
//
// t1.outputs.parameters = { 'p': 'v1' }
// t1.outputs.artifacts = { 'a': [a1] }
// t2.outputs.parameters = { 'p': 'v2' }
// t2.outputs.artifacts = { 'a': [a2] }
// parent_task.outputs.parameters = { 'p': '["v1", "v2"]' }
// parent_task.outputs.artifacts = { 'a': [a1, a2] }
oneof iterator {
// Iterator to iterate over an artifact input.
ArtifactIteratorSpec artifact_iterator = 9;
// Iterator to iterate over a parameter input.
ParameterIteratorSpec parameter_iterator = 10;
}
}
// The spec of an artifact iterator. It supports fanning out a workflow from
// a list of artifacts.
message ArtifactIteratorSpec {
// Specifies the name of the artifact channel which contains the collection of
// items to iterate. The iterator will create a sub-task for each item of
// the collection and pass the item as a new input artifact channel as
// specified by [item_input][].
message ItemsSpec {
// The name of the input artifact.
string input_artifact = 1;
}
// The items to iterate.
ItemsSpec items = 1;
// The name of the input artifact channel which has the artifact item from the
// [items][] collection.
string item_input = 2;
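//
// A minimal illustrative sketch (textproto), using hypothetical channel
// names: fan out over input artifact channel `examples`, exposing each
// item as channel `example`:
//   items { input_artifact: "examples" }
//   item_input: "example"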
}
// The spec of a parameter iterator. It supports fanning out a workflow from
// a string parameter which contains a JSON array.
message ParameterIteratorSpec {
// Specifies the spec to describe the parameter items to iterate.
message ItemsSpec {
// Specifies where to get the collection of items to iterate. The iterator
// will create a sub-task for each item of the collection and pass the item
// as a new input parameter as specified by [item_input][].
oneof kind {
// The raw JSON array.
string raw = 1;
// The name of the input parameter whose value has the items collection.
// The parameter must be of STRING type and its content must be parsable
// as a JSON array.
string input_parameter = 2;
}
}
// The items to iterate.
ItemsSpec items = 1;
// The name of the input parameter which has the item value from the
// [items][] collection.
string item_input = 2;
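//
// A minimal illustrative sketch (textproto): iterate over a raw JSON
// array, exposing each element as the (hypothetical) input parameter
// `item`:
//   items { raw: "[1, 2, 3]" }
//   item_input: "item"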
}
message ComponentRef {
// The name of a component. Refer to the key of the
// [PipelineSpec.components][] map.
string name = 1;
}
// Basic info of a pipeline.
message PipelineInfo {
// Required field. The name of the pipeline.
// The name will be used to create or find pipeline context in MLMD.
string name = 1;
}
// The definition of an artifact type in MLMD.
message ArtifactTypeSchema {
oneof kind {
// The name of the type. The format of the title must be:
// `<namespace>.<title>.<version>`.
// Examples:
// - `aiplatform.Model.v1`
// - `acme.CustomModel.v2`
// When this field is set, the type must be pre-registered in the MLMD
// store.
string schema_title = 1;
// Points to a YAML file stored on Google Cloud Storage describing the
// format.
string schema_uri = 2;
// Contains a raw YAML string, describing the format of
// the properties of the type.
string instance_schema = 3;
}
}
// The basic info of a task.
message PipelineTaskInfo {
// The unique name of the task within the pipeline definition. This name
// will be used in downstream tasks to indicate task and data dependencies.
string name = 1;
}
// Definition for a value or reference to a runtime parameter. A
// ValueOrRuntimeParameter instance can be either a field value that is
// determined at compile time, or a runtime parameter which will be
// determined at runtime.
message ValueOrRuntimeParameter {
oneof value {
// Constant value which is determined at compile time.
Value constant_value = 1;
// Name of the runtime parameter.
string runtime_parameter = 2;
}
}
// The definition of the deployment config of the pipeline. It contains
// the platform-specific executor configs for KFP OSS.
message PipelineDeploymentConfig {
// The specification on a container invocation.
// The string fields of the message support the string-based placeholder
// contract defined in [ExecutorInput](). The output of the container follows
// the contract of [ExecutorOutput]().
message PipelineContainerSpec {
// The image uri of the container.
string image = 1;
// The main entrypoint commands of the container to run. If not provided,
// fall back to the entrypoint command defined in the container image.
repeated string command = 2;
// The arguments to pass into the main entrypoint of the container.
repeated string args = 3;
// The lifecycle hooks of the container.
// Each hook follows the same I/O contract as the main container entrypoint.
// See [ExecutorInput]() and [ExecutorOutput]() for details.
// (-- TODO(b/165323565): add more documentation on caching and lifecycle
// hooks. --)
message Lifecycle {
// The command and args to execute a program.
message Exec {
// The command of the exec program.
repeated string command = 2;
// The args of the exec program.
repeated string args = 3;
}
// This hook is invoked before caching check. It can change the properties
// of the execution and output artifacts before they are used to compute
// the cache key. The updated metadata will be passed into the main
// container entrypoint.
Exec pre_cache_check = 1;
}
// The lifecycle hooks of the container executor.
Lifecycle lifecycle = 4;
// The specification on the resource requirements of a container execution.
// This can include specification of vCPU, memory requirements, as well as
// accelerator types and counts.
message ResourceSpec {
// The limit of the number of vCPU cores. This container execution needs
// at most cpu_limit vCPU to run.
double cpu_limit = 1;
// The memory limit in GB. This container execution needs at most
// memory_limit GB of RAM to run.
double memory_limit = 2;
// The specification on the accelerators being attached to this container.
message AcceleratorConfig {
// The type of accelerators.
string type = 1;
// The number of accelerators.
int64 count = 2;
}
AcceleratorConfig accelerator = 3;
}
ResourceSpec resources = 5;
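//
// A minimal illustrative sketch (textproto): cap the step at 2 vCPUs and
// 4 GB of RAM with one accelerator; the accelerator type string is an
// assumption, not a defined constant:
//   resources {
//     cpu_limit: 2.0
//     memory_limit: 4.0
//     accelerator { type: "NVIDIA_TESLA_T4" count: 1 }
//   }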
}
// The specification to import or reimport a new artifact to the pipeline.
message ImporterSpec {
// The URI of the artifact.
ValueOrRuntimeParameter artifact_uri = 1;
// The type of the artifact.
ArtifactTypeSchema type_schema = 2;
// The properties of the artifact.
// Deprecated. Use [ImporterSpec.metadata][] instead.
map<string, ValueOrRuntimeParameter> properties = 3 [deprecated = true];
// The custom properties of the artifact.
// Deprecated. Use [ImporterSpec.metadata][] instead.
map<string, ValueOrRuntimeParameter> custom_properties = 4
[deprecated = true];
// Properties of the Artifact.
google.protobuf.Struct metadata = 6;
// Whether or not to import an artifact, regardless of whether it has been
// imported before.
bool reimport = 5;
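//
// A minimal illustrative sketch (textproto): import an existing dataset
// once; the URI and schema title are assumptions:
//   artifact_uri { constant_value { string_value: "gs://my-bucket/data.csv" } }
//   type_schema { schema_title: "system.Dataset" }
//   reimport: false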
}
// ResolverSpec resolves artifacts from historical metadata and returns them
// to the pipeline as output artifacts of the resolver task. The downstream
// tasks can consume them as their input artifacts.
message ResolverSpec {
// The query to fetch artifacts.
message ArtifactQuerySpec {
// The filter of the artifact query. The supported syntaxes are:
// - `contexts.name='<context name>'`
// - `artifact_type='<artifact type name>'`
// - `uri='<uri>'`
// - `state=<state>`
// - `properties['key']='value'`
// - `custom_properties['key']='value'`
// - `name='value'`
// - `and` to combine two conditions; it matches only when both are true.
// If no `contexts.name` filter is set, the query will be scoped to
// the current pipeline context.
string filter = 1;
// The maximum number of artifacts to be returned by the
// query. If not defined, the default limit is `1`.
int32 limit = 2;
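//
// A minimal illustrative sketch (textproto): fetch the single latest
// artifact of a hypothetical type named "Model" in the current pipeline
// context:
//   filter: "artifact_type='Model'"
//   limit: 1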
}
// A map of resolver output definitions. The
// keys of the map must exactly match
// the keys in the [TaskOutputsSpec.artifacts][] map.
// At least one output must be defined.
map<string, ArtifactQuerySpec> output_artifact_queries = 1;
}
message AIPlatformCustomJobSpec {
// API Specification for invoking a Google Cloud AI Platform CustomJob.
// The fields must match the field names and structures of CustomJob
// defined in
// https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/projects.locations.customJobs.
// The field types must be either the same, or be a string containing the
// string based placeholder contract defined in [ExecutorInput](). The
// placeholders will be replaced with the actual value during the runtime
// before the job is launched.
google.protobuf.Struct custom_job = 1;
}
// The specification of the executor.
message ExecutorSpec {
oneof spec {
// Starts a container.
PipelineContainerSpec container = 1;
// Import an artifact.
ImporterSpec importer = 2;
// Resolves an existing artifact.
ResolverSpec resolver = 3;
// Starts a Google Cloud AI Platform CustomJob.
AIPlatformCustomJobSpec custom_job = 4 [deprecated = true];
}
}
// Map from executor label to executor spec.
map<string, ExecutorSpec> executors = 1;
}
// Value is the value of the field.
message Value {
oneof value {
// An integer value
int64 int_value = 1;
// A double value
double double_value = 2;
// A string value
string string_value = 3;
}
}
// The definition of a runtime artifact.
message RuntimeArtifact {
// The name of an artifact.
string name = 1;
// The type of the artifact.
ArtifactTypeSchema type = 2;
// The URI of the artifact.
string uri = 3;
// The properties of the artifact.
// Deprecated. Use [RuntimeArtifact.metadata][] instead.
map<string, Value> properties = 4 [deprecated = true];
// The custom properties of the artifact.
// Deprecated. Use [RuntimeArtifact.metadata][] instead.
map<string, Value> custom_properties = 5 [deprecated = true];
// Properties of the Artifact.
google.protobuf.Struct metadata = 6;
}
// Message that represents a list of artifacts.
message ArtifactList {
// A list of artifacts.
repeated RuntimeArtifact artifacts = 1;
}
// The input of an executor, which includes all the data that
// can be passed into the executor spec by a string based placeholder.
//
// The string based placeholder uses a JSON path to reference the data
// in the [ExecutionInput]().
//
// `{{$}}`: prints the full [ExecutorInput]() as a JSON string.
// `{{$.inputs.artifacts['<name>'].uri}}`: prints the URI of an input
// artifact.
// `{{$.inputs.artifacts['<name>'].properties['<property name>']}}`: prints
// the property of an input artifact.
// `{{$.inputs.parameters['<name>']}}`: prints the value of an input
// parameter.
// `{{$.outputs.artifacts['<name>'].uri}}`: prints the URI of an output
// artifact.
// `{{$.outputs.artifacts['<name>'].properties['<property name>']}}`: prints
// the property of an output artifact.
// `{{$.outputs.parameters['<name>'].output_file}}`: prints a file path which
// points to a file the container can write to in order to return the value
// of the parameter.
// `{{$.outputs.output_file}}`: prints the file path of the output metadata
// file, which is used to send output metadata from the executor to the
// orchestrator. The contract of the output metadata is [ExecutorOutput]().
// When both a parameter output file and the executor output metadata file
// are set by the container, the output metadata file takes precedence when
// setting output parameters.
message ExecutorInput {
// The runtime inputs data of the execution.
message Inputs {
// Input parameters of the execution.
map<string, Value> parameters = 1;
// Input artifacts of the execution.
map<string, ArtifactList> artifacts = 2;
}
// The runtime input artifacts of the task invocation.
Inputs inputs = 1;
// The runtime output parameter.
message OutputParameter {
// The file path which is used by the executor to pass the parameter value
// to the system.
string output_file = 1;
}
// The runtime outputs data of the execution.
message Outputs {
// The runtime output parameters.
map<string, OutputParameter> parameters = 1;
// The runtime output artifacts.
map<string, ArtifactList> artifacts = 2;
// The file path of the full output metadata JSON. The schema of the output
// file is [ExecutorOutput][].
//
// When the full output metadata file is set by the container, the output
// parameter files will be ignored.
string output_file = 3;
}
// The runtime output artifacts of the task invocation.
Outputs outputs = 2;
}
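// A minimal illustrative sketch (textproto): a PipelineContainerSpec wired
// with the placeholders above, assuming an input parameter `lr`, an output
// artifact `model`, and a hypothetical image name:
//   container {
//     image: "gcr.io/my-project/trainer:latest"
//     command: ["python3", "train.py"]
//     args: ["--lr", "{{$.inputs.parameters['lr']}}",
//            "--model_dir", "{{$.outputs.artifacts['model'].uri}}",
//            "--metadata_out", "{{$.outputs.output_file}}"]
//   }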
// The schema of the output metadata of an execution. It will be used to parse
// the output metadata file.
message ExecutorOutput {
// The values for output parameters.
map<string, Value> parameters = 1;
// The updated metadata for output artifact.
map<string, ArtifactList> artifacts = 2;
}
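// A minimal illustrative sketch: an executor may report results by writing
// JSON that matches ExecutorOutput (proto3 JSON mapping) to the path given
// by `{{$.outputs.output_file}}`; all keys and values here are hypothetical:
//   {
//     "parameters": { "accuracy": { "doubleValue": 0.95 } },
//     "artifacts": { "model": { "artifacts": [ { "uri": "gs://my-bucket/model" } ] } }
//   }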

View File

@ -1,13 +0,0 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -1,90 +0,0 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import setuptools
try:
  from distutils.spawn import find_executable
except ImportError:
  from shutil import which as find_executable
NAME = "kfp-pipeline-spec"
VERSION = "0.1.7"
PROTO_DIR = os.path.realpath(
    os.path.join(os.path.dirname(__file__), os.pardir))
PKG_DIR = os.path.realpath(
    os.path.join(os.path.dirname(__file__), "kfp", "pipeline_spec"))
# Find the Protocol Compiler. (Taken from protobuf/python/setup.py)
if "PROTOC" in os.environ and os.path.exists(os.environ["PROTOC"]):
  PROTOC = os.environ["PROTOC"]
else:
  PROTOC = find_executable("protoc")
def GenerateProto(source):
  """Generate a _pb2.py from a .proto file.

  Invokes the Protocol Compiler to generate a _pb2.py from the given
  .proto file. Does nothing if the output already exists and is newer than
  the input.

  Args:
    source: The source proto file that needs to be compiled.
  """
  output = source.replace(".proto", "_pb2.py")
  if not os.path.exists(output) or (
      os.path.exists(source) and
      os.path.getmtime(source) > os.path.getmtime(output)):
    print("Generating %s..." % output)
    if not os.path.exists(source):
      sys.stderr.write("Can't find required file: %s\n" % source)
      sys.exit(-1)
    if PROTOC is None:
      sys.stderr.write("protoc is not found. Please compile it "
                       "or install the binary package.\n")
      sys.exit(-1)
    protoc_command = [
        PROTOC, "-I%s" % PROTO_DIR,
        "--python_out=%s" % PKG_DIR, source
    ]
    if subprocess.call(protoc_command) != 0:
      sys.exit(-1)
# Generate the protobuf files that we depend on.
GenerateProto(os.path.join(PROTO_DIR, "pipeline_spec.proto"))
setuptools.setup(
    name=NAME,
    version=VERSION,
    description="Kubeflow Pipelines pipeline spec",
    author="google",
    author_email="kubeflow-pipelines@google.com",
    url="https://github.com/kubeflow/pipelines",
    packages=setuptools.find_namespace_packages(include=['kfp.*']),
    python_requires=">=3.6.1",
    install_requires=["protobuf>=3.13.0,<4"],
    include_package_data=True,
    license="Apache 2.0",
)

View File

@ -1,31 +1,41 @@
### Updated base image to golang in order to build with go modules
### Bazel build cannot work with the Tekton library because the current
### KFP Bazel does not support go.mod "replace" on key dependencies.
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# 1. Build api server application
# Use golang:1.13.1-stretch to keep GLIBC at 2.24 https://github.com/gotify/server/issues/225
FROM golang:1.13.1-stretch as builder
FROM golang:1.19.3-buster as builder
RUN apt-get update && apt-get install -y cmake clang musl-dev openssl
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
RUN go mod vendor
RUN GO111MODULE=on go build -o /bin/apiserver backend/src/apiserver/*.go
RUN mkdir -p /go/src/github.com/kubeflow/pipelines/bazel-bin/backend/src/apiserver/
RUN go build -o /go/src/github.com/kubeflow/pipelines/bazel-bin/backend/src/apiserver/apiserver ./backend/src/apiserver
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/apiserver
RUN go-licenses csv ./backend/src/apiserver > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/apiserver.csv && \
go-licenses save ./backend/src/apiserver --save_path /tmp/NOTICES
# 2. Compile preloaded pipeline samples
FROM python:3.7 as compiler
FROM python:3.8 as compiler
RUN apt-get update -y && apt-get install --no-install-recommends -y -q default-jdk python3-setuptools python3-dev jq
RUN wget https://bootstrap.pypa.io/get-pip.py && python3 get-pip.py
COPY sdk/python/requirements.txt .
RUN python3 -m pip install -r requirements.txt --no-cache-dir
# Download the Argo CLI so that the samples are validated; it isn't needed for Tekton.
# ADD https://github.com/argoproj/argo/releases/download/v2.7.5/argo-linux-amd64 /usr/local/bin/argo
# RUN chmod +x /usr/local/bin/argo
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY sdk sdk
WORKDIR /go/src/github.com/kubeflow/pipelines/sdk/python
@ -40,13 +50,16 @@ COPY backend/src/apiserver/config/sample_config.json /samples/
RUN set -e; \
< /samples/sample_config.json jq .[].file --raw-output | while read pipeline_yaml; do \
pipeline_py="${pipeline_yaml%.yaml}.py"; \
pipeline_py="${pipeline_yaml%.yaml}.py"; \
mode=`< /samples/sample_config.json jq ".[] | select(.file == \"${pipeline_yaml}\") | (if .mode == null then \"V1\" else .mode end)" --raw-output`; \
mv "$pipeline_py" "${pipeline_py}.tmp"; \
echo 'import kfp; kfp.components.default_base_image_or_builder="gcr.io/google-appengine/python:2020-03-31-141326"' | cat - "${pipeline_py}.tmp" > "$pipeline_py"; \
dsl-compile-tekton --py "$pipeline_py" --output "$pipeline_yaml" || python3 "$pipeline_py"; \
done
# 3. Start api web server
FROM golang:1.13.0
FROM debian:stable
ARG COMMIT_SHA=unknown
ENV COMMIT_SHA=${COMMIT_SHA}
@ -55,10 +68,12 @@ ENV TAG_NAME=${TAG_NAME}
WORKDIR /bin
COPY third_party/license.txt /bin/license.txt
COPY backend/src/apiserver/config/ /config
COPY --from=builder /bin/apiserver /bin/apiserver
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
COPY --from=compiler /samples/ /samples/
RUN chmod +x /bin/apiserver

View File

@ -1,5 +1,19 @@
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Dockerfile for building the source code of cache_server
FROM golang:1.13.15-alpine3.12 as builder
FROM golang:1.19.3-alpine3.15 as builder
RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev
@ -8,13 +22,21 @@ WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
RUN GO111MODULE=on go build -o /bin/cache_server backend/src/cache/*.go
RUN git clone https://github.com/hashicorp/golang-lru.git /kfp/cache/golang-lru/
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/cache
RUN go-licenses csv ./backend/src/cache > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/cache_server.csv && \
go-licenses save ./backend/src/cache --save_path /tmp/NOTICES
FROM alpine:3.8
WORKDIR /bin
COPY --from=builder /bin/cache_server /bin/cache_server
COPY --from=builder /go/src/github.com/kubeflow/pipelines/third_party/license.txt /bin/license.txt
COPY --from=builder /kfp/cache/golang-lru/* /bin/golang-lru/
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
ENTRYPOINT [ "/bin/cache_server" ]
ENTRYPOINT [ "/bin/cache_server" ]

View File

@ -1,7 +1,20 @@
# Updated golang image to 1.13 since Tekton 0.13 onwards
# requires golang 1.13 in order to build certain packages.
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.13.15-alpine3.12 as builder
FROM golang:1.19.3-alpine3.15 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
@ -11,12 +24,22 @@ RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev
RUN GO111MODULE=on go build -o /bin/persistence_agent backend/src/agent/persistence/*.go
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/agent/persistence
RUN go-licenses csv ./backend/src/agent/persistence > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/persistence_agent.csv && \
go-licenses save ./backend/src/agent/persistence --save_path /tmp/NOTICES
FROM alpine:3.11
WORKDIR /bin
COPY backend/src/apiserver/config/ /config
COPY --from=builder /bin/persistence_agent /bin/persistence_agent
COPY --from=builder /go/src/github.com/kubeflow/pipelines/third_party/license.txt /bin/license.txt
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
ENV NAMESPACE ""
@ -26,4 +49,10 @@ ENV TTL_SECONDS_AFTER_WORKFLOW_FINISH 86400
# NUM_WORKERS indicates how many worker goroutines
ENV NUM_WORKERS 2
CMD persistence_agent --logtostderr=true --namespace=${NAMESPACE} --ttlSecondsAfterWorkflowFinish=${TTL_SECONDS_AFTER_WORKFLOW_FINISH} --numWorker ${NUM_WORKERS}
# CHILDREFERENCES_KINDS: kinds of runs to search for in the childReferences
ENV CHILDREFERENCES_KINDS ""
# LEGACY_STATUS_UPDATE: legacy status update method that passes updates via the apiserver
ENV LEGACY_STATUS_UPDATE "false"
CMD persistence_agent --logtostderr=true --namespace=${NAMESPACE} --ttlSecondsAfterWorkflowFinish=${TTL_SECONDS_AFTER_WORKFLOW_FINISH} --numWorker=${NUM_WORKERS} --childReferencesKinds=${CHILDREFERENCES_KINDS} --legacyStatusUpdate=${LEGACY_STATUS_UPDATE} --config=/config

View File

@ -1,4 +1,18 @@
FROM golang:1.13.15-alpine3.12 as builder
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.19.3-alpine3.15 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
@ -8,13 +22,23 @@ RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev
RUN GO111MODULE=on go build -o /bin/controller backend/src/crd/controller/scheduledworkflow/*.go
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/crd/controller/scheduledworkflow
RUN go-licenses csv ./backend/src/crd/controller/scheduledworkflow > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/swf.csv && \
go-licenses save ./backend/src/crd/controller/scheduledworkflow --save_path /tmp/NOTICES
FROM alpine:3.11
WORKDIR /bin
COPY --from=builder /bin/controller /bin/controller
COPY --from=builder /go/src/github.com/kubeflow/pipelines/third_party/license.txt /bin/license.txt
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
RUN chmod +x /bin/controller
RUN apk --no-cache add tzdata
ENV NAMESPACE ""

View File

@ -1,4 +1,18 @@
FROM golang:alpine as builder
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.17.6-alpine3.15 as builder
RUN apk update && apk upgrade
RUN apk add --no-cache git gcc musl-dev
@ -6,19 +20,25 @@ RUN apk add --no-cache git gcc musl-dev
WORKDIR /src/github.com/kubeflow/pipelines
COPY . .
RUN go mod vendor
RUN go build -o /bin/controller backend/src/crd/controller/viewer/*.go
RUN GO111MODULE=on go build -o /bin/controller backend/src/crd/controller/viewer/*.go
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/crd/controller/viewer
RUN go-licenses csv ./backend/src/crd/controller/viewer > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/viewer.csv && \
go-licenses save ./backend/src/crd/controller/viewer --save_path /tmp/NOTICES
FROM alpine
WORKDIR /src
COPY --from=builder /src/github.com/kubeflow/pipelines/vendor vendor
WORKDIR /bin
COPY --from=builder /bin/controller /bin/controller
COPY --from=builder /src/github.com/kubeflow/pipelines/third_party/license.txt /bin/license.txt
RUN chmod +x /bin/controller
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
ENV MAX_NUM_VIEWERS "50"
ENV NAMESPACE "kubeflow"

View File

@ -1,40 +0,0 @@
# This docker file starts server.py (located at src/apiserver/visualization)
# which accepts a post request that resolves to html that depicts a specified
# visualization. More details about this process can be found in the server.py
# and exporter.py files in the directory specified above.
# Copyright 2019-2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This image should be in sync with image in backend/src/apiserver/visualization/update_requirements.sh.
FROM tensorflow/tensorflow:2.4.0
RUN apt-get update \
&& apt-get install -y wget curl tar openssl
RUN curl https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.tar.gz > /tmp/google-cloud-sdk.tar.gz
RUN mkdir -p /usr/local/gcloud
RUN tar -C /usr/local/gcloud -xf /tmp/google-cloud-sdk.tar.gz
RUN /usr/local/gcloud/google-cloud-sdk/install.sh
ENV PATH $PATH:/usr/local/gcloud/google-cloud-sdk/bin
WORKDIR /src
COPY backend/src/apiserver/visualization/requirements.txt /src
RUN python3 -m pip install -r requirements.txt --no-cache-dir
COPY backend/src/apiserver/visualization /src
ENTRYPOINT [ "python3", "server.py" ]

backend/Makefile Normal file
View File

@ -0,0 +1,60 @@
BUILD=build
MOD_ROOT=..
CSV_PATH=backend/third_party_licenses
# Whenever build command for any of the binaries change, we should update them both here and in backend/Dockerfiles.
.PHONY: all
all: license_apiserver license_persistence_agent license_cache_server license_swf license_viewer
.PHONY: clean
clean:
	rm -rf $(BUILD)
$(BUILD)/apiserver:
	GO111MODULE=on go build -o $(BUILD)/apiserver github.com/kubeflow/pipelines/backend/src/apiserver
$(BUILD)/persistence_agent:
	GO111MODULE=on go build -o $(BUILD)/persistence_agent github.com/kubeflow/pipelines/backend/src/agent/persistence
$(BUILD)/cache_server:
	GO111MODULE=on go build -o $(BUILD)/cache_server github.com/kubeflow/pipelines/backend/src/cache
$(BUILD)/swf:
	GO111MODULE=on go build -o $(BUILD)/swf github.com/kubeflow/pipelines/backend/src/crd/controller/scheduledworkflow
$(BUILD)/viewer:
	GO111MODULE=on go build -o $(BUILD)/viewer github.com/kubeflow/pipelines/backend/src/crd/controller/viewer
# Update licenses info after dependencies changed.
# See README.md#updating-licenses-info section for more details.
.PHONY: license_apiserver
license_apiserver: $(BUILD)/apiserver
	cd $(MOD_ROOT) && go-licenses csv ./backend/src/apiserver > $(CSV_PATH)/apiserver.csv
.PHONY: license_persistence_agent
license_persistence_agent: $(BUILD)/persistence_agent
	cd $(MOD_ROOT) && go-licenses csv ./backend/src/agent/persistence > $(CSV_PATH)/persistence_agent.csv
.PHONY: license_cache_server
license_cache_server: $(BUILD)/cache_server
	cd $(MOD_ROOT) && go-licenses csv ./backend/src/cache > $(CSV_PATH)/cache_server.csv
.PHONY: license_swf
license_swf: $(BUILD)/swf
	cd $(MOD_ROOT) && go-licenses csv ./backend/src/crd/controller/scheduledworkflow > $(CSV_PATH)/swf.csv
.PHONY: license_viewer
license_viewer: $(BUILD)/viewer
	cd $(MOD_ROOT) && go-licenses csv ./backend/src/crd/controller/viewer > $(CSV_PATH)/viewer.csv
.PHONY: image_all
image_all: image_apiserver image_persistence_agent image_cache image_swf image_viewer image_visualization
.PHONY: image_apiserver
image_apiserver:
	cd $(MOD_ROOT) && docker build -t apiserver -f backend/Dockerfile .
.PHONY: image_persistence_agent
image_persistence_agent:
	cd $(MOD_ROOT) && docker build -t persistence-agent -f backend/Dockerfile.persistenceagent .
.PHONY: image_cache
image_cache:
	cd $(MOD_ROOT) && docker build -t cache-server -f backend/Dockerfile.cacheserver .
.PHONY: image_swf
image_swf:
	cd $(MOD_ROOT) && docker build -t scheduledworkflow -f backend/Dockerfile.scheduledworkflow .
.PHONY: image_viewer
image_viewer:
	cd $(MOD_ROOT) && docker build -t viewercontroller -f backend/Dockerfile.viewercontroller .

View File

@ -45,9 +45,24 @@ docker build -f backend/Dockerfile . --tag $API_SERVER_IMAGE
After making changes to proto files, the Go client libraries, Python client libraries and swagger files
need to be regenerated and checked-in. Refer to [backend/api](./api/README.md) for details.
## Updating licenses info
1. Install go-licenses tool and refer to [its documentation](https://github.com/google/go-licenses) for how to use it.
```bash
go install github.com/google/go-licenses@d483853
```
2. Run the tool to update all licenses:
```bash
make all
```
## Updating python dependencies
[pip-tools](https://github.com/jazzband/pip-tools) is used to manage python
dependencies. To update dependencies, edit [requirements.in](requirements.in)
and run `./update_requirements.sh <requirements.in >requirements.txt` to update
and run `./update_requirements.sh` to update and pin the transitive
and pin the transitive dependencies.

View File

@ -1,10 +1,24 @@
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate client code (go & json) from API protocol buffers
FROM golang:1.13.1-stretch as generator
ENV GRPC_GATEWAY_VERSION v1.9.0
ENV GO_SWAGGER_VERSION v0.18.0
ENV GOLANG_PROTOBUF_VERSION v1.3.2
ENV GRPC_VERSION v1.23.0
ENV PROTOC_VERSION 3.13.0
FROM golang:1.15.10 as generator
ENV GRPC_GATEWAY_VERSION v2.11.3
ENV GO_SWAGGER_VERSION v0.30.4
ENV GOLANG_PROTOBUF_VERSION v1.5.2
ENV GRPC_VERSION v1.48.0
ENV PROTOC_VERSION 3.17.3
ENV GOBIN=/go/bin
# Install protoc.
@ -24,9 +38,10 @@ RUN mkdir grpc && git clone --depth 1 --branch $GRPC_VERSION https://github.com/
# Install protoc-gen-rpc-gateway && protoc-gen-swagger.
RUN cd grpc-ecosystem/grpc-gateway && GO111MODULE=on go mod vendor
RUN go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
RUN go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
RUN go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-openapiv2
# Download go-swagger binary.
# swagger doesn't exist for openapiv2 yet
RUN curl -LO "https://github.com/go-swagger/go-swagger/releases/download/${GO_SWAGGER_VERSION}/swagger_linux_amd64"
RUN chmod +x swagger_linux_amd64 && mv swagger_linux_amd64 /usr/bin/swagger

View File

@ -1,22 +1,42 @@
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Makefile to generate KFP api clients from proto.
IMAGE_TAG=kfp-api-generator
# Contact one of Bobgy, capri-xiyue or zijianjoy if this remote image needs an update.
# Contact one of Bobgy, or zijianjoy if this remote image needs an update.
REMOTE_IMAGE=gcr.io/ml-pipeline-test/api-generator
PREBUILT_REMOTE_IMAGE=gcr.io/ml-pipeline-test/api-generator@sha256:2bca5a3e4c1a6c8f4677ef8433ec373894599e35febdc84c4563c2c9bb3f8de7
PREBUILT_REMOTE_IMAGE=aipipeline/api-generator:openapiv2.11.3
# PREBUILT_REMOTE_IMAGE=aipipeline/api-generator:test
API_VERSION=v1
# Generate clients using a pre-built api-generator image.
.PHONY: generate
generate: hack/generator.sh *.proto
generate: hack/generator.sh $(API_VERSION)/*.proto
docker run --interactive --rm \
-e API_VERSION=$(API_VERSION) \
--user $$(id -u):$$(id -g) \
--mount type=bind,source="$$(pwd)/../..",target=/go/src/github.com/kubeflow/pipelines \
$(PREBUILT_REMOTE_IMAGE) /go/src/github.com/kubeflow/pipelines/backend/api/hack/generator.sh
# Generate clients starting by building api-generator image locally.
# Note, this should only be used for local development purposes. Once any change is made to the Dockerfile,
# we should push the new image remotely to ensure everyone is using the same tools.
.PHONY: generate-from-scratch
generate-from-scratch: .image-built hack/generator.sh *.proto
docker run --interactive --rm \
generate-from-scratch: .image-built hack/generator.sh $(API_VERSION)/*.proto
docker run --interactive --rm \
-e API_VERSION=$(API_VERSION) \
--user $$(id -u):$$(id -g) \
--mount type=bind,source="$$(pwd)/../..",target=/go/src/github.com/kubeflow/pipelines \
$(IMAGE_TAG) /go/src/github.com/kubeflow/pipelines/backend/api/hack/generator.sh

View File

@ -1,35 +1,55 @@
# Kubeflow Pipelines API
## Before You Start
Tools needed:
* Docker
* Make
## Auto-generation of Go client and swagger definitions
Use the `make generate` command to generate clients using a pre-built api-generator image:
```bash
make
make generate
```
Code will be generated in:
* `./go_client`
* `./go_http_client`
* `./swagger`
* `./${API_VERSION}/go_client`
* `./${API_VERSION}/go_http_client`
* `./${API_VERSION}/swagger`
## Auto-generation of Python client
This will generate the Python client for the API version specified in the `API_VERSION` environment variable.
```bash
./build_kfp_server_api_python_package.sh
```
Code will be generated in `./python_http_client`.
Code will be generated in `./${API_VERSION}/python_http_client`.
## Auto-generation of API reference documentation
This directory contains API definitions. They are used to generate [the API reference on kubeflow.org](https://www.kubeflow.org/docs/pipelines/reference/api/kubeflow-pipeline-api-spec/).
- Use the tools [bootprint-openapi](https://github.com/bootprint/bootprint-monorepo/tree/master/packages/bootprint-openapi) and [html-inline](https://github.com/substack/html-inline) to generate the API reference from [kfp_api_single_file.swagger.json](https://github.com/kubeflow/pipelines/blob/master/backend/api/swagger/kfp_api_single_file.swagger.json). These [instructions](https://github.com/bootprint/bootprint-monorepo/tree/master/packages/bootprint-openapi#bootprint-openapi) have shown how to generate *a single self-contained html file* which is the API reference, from a json file.
* Use the tools [bootprint-openapi](https://github.com/bootprint/bootprint-monorepo/tree/master/packages/bootprint-openapi) and [html-inline](https://github.com/substack/html-inline) to generate the API reference from [kfp_api_single_file.swagger.json](https://github.com/kubeflow/pipelines/blob/master/backend/api/${API_VERSION}/swagger/kfp_api_single_file.swagger.json). These [instructions](https://github.com/bootprint/bootprint-monorepo/tree/master/packages/bootprint-openapi#bootprint-openapi) have shown how to generate *a single self-contained html file* which is the API reference, from a json file.
- Use the above generated html to replace the html section, which is below the title section, in the file [kubeflow-pipeline-api-spec.html](https://github.com/kubeflow/website/blob/master/content/en/docs/pipelines/reference/api/kubeflow-pipeline-api-spec.html)
* Use the above generated html to replace the html section, which is below the title section, in the file [kubeflow-pipeline-api-spec.html](https://github.com/kubeflow/website/blob/master/content/en/docs/pipelines/reference/api/kubeflow-pipeline-api-spec.html)
Note: whenever the API definition changes (i.e., the file [kfp_api_single_file.swagger.json](https://github.com/kubeflow/pipelines/blob/master/backend/api/swagger/kfp_api_single_file.swagger.json) changes), the API reference needs to be updated.
Note: whenever the API definition changes (i.e., the file [kfp_api_single_file.swagger.json](https://github.com/kubeflow/pipelines/blob/master/backend/api/${API_VERSION}/swagger/kfp_api_single_file.swagger.json) changes), the API reference needs to be updated.
## Auto-generation of api generator image
```bash
make push
```
When you update the [Dockerfile](./Dockerfile), do the following to make sure others are using the same image as you:
1. push a new version of the api generator image to gcr.io/ml-pipeline-test/api-generator:latest.
2. update the PREBUILT_REMOTE_IMAGE var in Makefile to point to your new image.
3. push a new version of the release tools image to gcr.io/ml-pipeline-test/release:latest, run `make push` in [test/release/Makefile](../../test/release/Makefile).

View File

@ -1,6 +1,6 @@
#!/bin/bash -e
#
# Copyright 2018-2021 Google LLC
# Copyright 2018-2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -34,6 +34,8 @@ if [ -z "$VERSION" ]; then
echo "ERROR: $REPO_ROOT/VERSION is empty"
exit 1
fi
API_VERSION=v1
codegen_file=/tmp/openapi-generator-cli.jar
# Browse all versions in: https://repo1.maven.org/maven2/org/openapitools/openapi-generator-cli/
@ -45,17 +47,17 @@ fi
pushd "$(dirname "$0")"
CURRENT_DIR="$(pwd)"
DIR="$CURRENT_DIR/python_http_client"
swagger_file="$CURRENT_DIR/swagger/kfp_api_single_file.swagger.json"
DIR="$CURRENT_DIR/$API_VERSION/python_http_client"
swagger_file="$CURRENT_DIR/$API_VERSION/swagger/kfp_api_single_file.swagger.json"
echo "Removing old content in DIR first."
rm -rf "$DIR"
echo "Generating python code from swagger json in $DIR."
java -jar "$codegen_file" generate -g python -t "$CURRENT_DIR/python_http_client_template" -i "$swagger_file" -o "$DIR" -c <(echo '{
"packageName": "kfp_server_api",
java -jar "$codegen_file" generate -g python -t "$CURRENT_DIR/$API_VERSION/python_http_client_template" -i "$swagger_file" -o "$DIR" -c <(echo '{
"packageName": "'"kfp_tekton_server_api"'",
"packageVersion": "'"$VERSION"'",
"packageUrl": "https://github.com/kubeflow/pipelines"
"packageUrl": "https://github.com/kubeflow/kfp-tekton"
}')
echo "Copying LICENSE to $DIR"

View File

@ -1,265 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/auth.proto
package go_client
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
empty "github.com/golang/protobuf/ptypes/empty"
_ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Type of resources in pipelines system.
type AuthorizeRequest_Resources int32
const (
AuthorizeRequest_UNASSIGNED_RESOURCES AuthorizeRequest_Resources = 0
AuthorizeRequest_VIEWERS AuthorizeRequest_Resources = 1
)
var AuthorizeRequest_Resources_name = map[int32]string{
0: "UNASSIGNED_RESOURCES",
1: "VIEWERS",
}
var AuthorizeRequest_Resources_value = map[string]int32{
"UNASSIGNED_RESOURCES": 0,
"VIEWERS": 1,
}
func (x AuthorizeRequest_Resources) String() string {
return proto.EnumName(AuthorizeRequest_Resources_name, int32(x))
}
func (AuthorizeRequest_Resources) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_cc78a6d242bd08ff, []int{0, 0}
}
// Type of verbs that act on the resources.
type AuthorizeRequest_Verb int32
const (
AuthorizeRequest_UNASSIGNED_VERB AuthorizeRequest_Verb = 0
AuthorizeRequest_CREATE AuthorizeRequest_Verb = 1
AuthorizeRequest_GET AuthorizeRequest_Verb = 2
AuthorizeRequest_DELETE AuthorizeRequest_Verb = 3
)
var AuthorizeRequest_Verb_name = map[int32]string{
0: "UNASSIGNED_VERB",
1: "CREATE",
2: "GET",
3: "DELETE",
}
var AuthorizeRequest_Verb_value = map[string]int32{
"UNASSIGNED_VERB": 0,
"CREATE": 1,
"GET": 2,
"DELETE": 3,
}
func (x AuthorizeRequest_Verb) String() string {
return proto.EnumName(AuthorizeRequest_Verb_name, int32(x))
}
func (AuthorizeRequest_Verb) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_cc78a6d242bd08ff, []int{0, 1}
}
// Ask for authorization of an access by providing resource's namespace, type
// and verb. User identity is not part of the message, because it is expected
// to be parsed from request headers. Caller should proxy user request's headers.
type AuthorizeRequest struct {
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
Resources AuthorizeRequest_Resources `protobuf:"varint,2,opt,name=resources,proto3,enum=api.AuthorizeRequest_Resources" json:"resources,omitempty"`
Verb AuthorizeRequest_Verb `protobuf:"varint,3,opt,name=verb,proto3,enum=api.AuthorizeRequest_Verb" json:"verb,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AuthorizeRequest) Reset() { *m = AuthorizeRequest{} }
func (m *AuthorizeRequest) String() string { return proto.CompactTextString(m) }
func (*AuthorizeRequest) ProtoMessage() {}
func (*AuthorizeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cc78a6d242bd08ff, []int{0}
}
func (m *AuthorizeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AuthorizeRequest.Unmarshal(m, b)
}
func (m *AuthorizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AuthorizeRequest.Marshal(b, m, deterministic)
}
func (m *AuthorizeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_AuthorizeRequest.Merge(m, src)
}
func (m *AuthorizeRequest) XXX_Size() int {
return xxx_messageInfo_AuthorizeRequest.Size(m)
}
func (m *AuthorizeRequest) XXX_DiscardUnknown() {
xxx_messageInfo_AuthorizeRequest.DiscardUnknown(m)
}
var xxx_messageInfo_AuthorizeRequest proto.InternalMessageInfo
func (m *AuthorizeRequest) GetNamespace() string {
if m != nil {
return m.Namespace
}
return ""
}
func (m *AuthorizeRequest) GetResources() AuthorizeRequest_Resources {
if m != nil {
return m.Resources
}
return AuthorizeRequest_UNASSIGNED_RESOURCES
}
func (m *AuthorizeRequest) GetVerb() AuthorizeRequest_Verb {
if m != nil {
return m.Verb
}
return AuthorizeRequest_UNASSIGNED_VERB
}
func init() {
proto.RegisterEnum("api.AuthorizeRequest_Resources", AuthorizeRequest_Resources_name, AuthorizeRequest_Resources_value)
proto.RegisterEnum("api.AuthorizeRequest_Verb", AuthorizeRequest_Verb_name, AuthorizeRequest_Verb_value)
proto.RegisterType((*AuthorizeRequest)(nil), "api.AuthorizeRequest")
}
func init() { proto.RegisterFile("backend/api/auth.proto", fileDescriptor_cc78a6d242bd08ff) }
var fileDescriptor_cc78a6d242bd08ff = []byte{
// 453 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xc1, 0x4e, 0xdb, 0x40,
0x10, 0x86, 0x63, 0x07, 0x25, 0xcd, 0xa4, 0x14, 0x77, 0xa1, 0x28, 0x72, 0x53, 0x11, 0xe5, 0xc4,
0xa1, 0xd8, 0x22, 0x5c, 0xdb, 0x43, 0x02, 0x2b, 0x84, 0xd4, 0x52, 0x69, 0x1d, 0x52, 0x89, 0x0b,
0x5a, 0x9b, 0x89, 0xb3, 0xc2, 0xf1, 0xba, 0xeb, 0xdd, 0xa0, 0xf6, 0x58, 0xa9, 0x0f, 0xd0, 0xf6,
0xd1, 0xfa, 0x0a, 0x7d, 0x90, 0xca, 0x9b, 0x10, 0x50, 0xe1, 0x64, 0x79, 0xe6, 0xff, 0xff, 0xf9,
0xa5, 0x6f, 0x61, 0x37, 0xe6, 0xc9, 0x0d, 0xe6, 0xd7, 0x21, 0x2f, 0x44, 0xc8, 0x8d, 0x9e, 0x05,
0x85, 0x92, 0x5a, 0x92, 0x3a, 0x2f, 0x84, 0xdf, 0x4d, 0xa5, 0x4c, 0x33, 0x5c, 0xee, 0xf2, 0x5c,
0x6a, 0xae, 0x85, 0xcc, 0xcb, 0xa5, 0xc4, 0x7f, 0xbd, 0xda, 0xda, 0xbf, 0xd8, 0x4c, 0x43, 0x9c,
0x17, 0xfa, 0xeb, 0x6a, 0xf9, 0xd6, 0x7e, 0x92, 0x83, 0x14, 0xf3, 0x83, 0xf2, 0x96, 0xa7, 0x29,
0xaa, 0x50, 0x16, 0xd6, 0xfe, 0x38, 0xaa, 0xff, 0xd3, 0x05, 0x6f, 0x68, 0xf4, 0x4c, 0x2a, 0xf1,
0x0d, 0x19, 0x7e, 0x31, 0x58, 0x6a, 0xd2, 0x85, 0x56, 0xce, 0xe7, 0x58, 0x16, 0x3c, 0xc1, 0x8e,
0xd3, 0x73, 0xf6, 0x5b, 0xec, 0x7e, 0x40, 0xde, 0x43, 0x4b, 0x61, 0x29, 0x8d, 0x4a, 0xb0, 0xec,
0xb8, 0x3d, 0x67, 0xff, 0xc5, 0x60, 0x2f, 0xe0, 0x85, 0x08, 0xfe, 0xcf, 0x09, 0xd8, 0x9d, 0x8c,
0xdd, 0x3b, 0x48, 0x00, 0x1b, 0x0b, 0x54, 0x71, 0xa7, 0x6e, 0x9d, 0xfe, 0xd3, 0xce, 0x09, 0xaa,
0x98, 0x59, 0x5d, 0x7f, 0x00, 0xad, 0x75, 0x0e, 0xe9, 0xc0, 0xce, 0xc5, 0xf9, 0x30, 0x8a, 0xce,
0x4e, 0xcf, 0xe9, 0xc9, 0x15, 0xa3, 0xd1, 0xa7, 0x0b, 0x76, 0x4c, 0x23, 0xaf, 0x46, 0xda, 0xd0,
0x9c, 0x9c, 0xd1, 0xcf, 0x94, 0x45, 0x9e, 0xd3, 0x7f, 0x07, 0x1b, 0x55, 0x02, 0xd9, 0x86, 0xad,
0x07, 0xf2, 0x09, 0x65, 0x23, 0xaf, 0x46, 0x00, 0x1a, 0xc7, 0x8c, 0x0e, 0xc7, 0xd4, 0x73, 0x48,
0x13, 0xea, 0xa7, 0x74, 0xec, 0xb9, 0xd5, 0xf0, 0x84, 0x7e, 0xa0, 0x63, 0xea, 0xd5, 0x07, 0x08,
0xed, 0xaa, 0x50, 0x84, 0x6a, 0x21, 0x12, 0x24, 0x13, 0x68, 0xad, 0xfb, 0x91, 0x57, 0x4f, 0xf6,
0xf5, 0x77, 0x83, 0x25, 0x92, 0xe0, 0x0e, 0x49, 0x40, 0x2b, 0x24, 0x7d, 0xff, 0xfb, 0x9f, 0xbf,
0xbf, 0xdd, 0x1d, 0x42, 0x2a, 0x92, 0x65, 0xb8, 0x38, 0x8c, 0x51, 0xf3, 0x43, 0x8b, 0x7b, 0xf4,
0xc3, 0xf9, 0x35, 0xfc, 0xc8, 0xba, 0xd0, 0xbc, 0xc6, 0x29, 0x37, 0x99, 0x26, 0x2f, 0xc9, 0x16,
0x6c, 0xfa, 0x6d, 0x7b, 0x21, 0xd2, 0x5c, 0x9b, 0xf2, 0x72, 0x0f, 0xde, 0x40, 0x63, 0x84, 0x5c,
0xa1, 0x22, 0xdb, 0xcf, 0x5c, 0x7f, 0x93, 0xaf, 0x2e, 0x5b, 0x88, 0x3d, 0x37, 0x7e, 0x0e, 0xb0,
0x16, 0xd4, 0x2e, 0x8f, 0x52, 0xa1, 0x67, 0x26, 0x0e, 0x12, 0x39, 0x0f, 0x6f, 0x4c, 0x8c, 0xd3,
0x4c, 0xde, 0x86, 0x85, 0x28, 0x30, 0x13, 0x39, 0x96, 0xe1, 0xc3, 0x17, 0x97, 0xca, 0xab, 0x24,
0x13, 0x98, 0xeb, 0xb8, 0x61, 0x3b, 0x1f, 0xfd, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x98, 0xcf, 0xb4,
0xb0, 0x91, 0x02, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// AuthServiceClient is the client API for AuthService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type AuthServiceClient interface {
Authorize(ctx context.Context, in *AuthorizeRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}
type authServiceClient struct {
cc *grpc.ClientConn
}
func NewAuthServiceClient(cc *grpc.ClientConn) AuthServiceClient {
return &authServiceClient{cc}
}
func (c *authServiceClient) Authorize(ctx context.Context, in *AuthorizeRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/api.AuthService/Authorize", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// AuthServiceServer is the server API for AuthService service.
type AuthServiceServer interface {
Authorize(context.Context, *AuthorizeRequest) (*empty.Empty, error)
}
// UnimplementedAuthServiceServer can be embedded to have forward compatible implementations.
type UnimplementedAuthServiceServer struct {
}
func (*UnimplementedAuthServiceServer) Authorize(ctx context.Context, req *AuthorizeRequest) (*empty.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method Authorize not implemented")
}
func RegisterAuthServiceServer(s *grpc.Server, srv AuthServiceServer) {
s.RegisterService(&_AuthService_serviceDesc, srv)
}
func _AuthService_Authorize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AuthorizeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AuthServiceServer).Authorize(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.AuthService/Authorize",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AuthServiceServer).Authorize(ctx, req.(*AuthorizeRequest))
}
return interceptor(ctx, in, info, handler)
}
var _AuthService_serviceDesc = grpc.ServiceDesc{
ServiceName: "api.AuthService",
HandlerType: (*AuthServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Authorize",
Handler: _AuthService_Authorize_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/auth.proto",
}
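A minimal usage sketch, not part of the generated file: dialing the API server and calling the Authorize RPC through the generated client. The address, the insecure dial option, and the function name are illustrative assumptions; the sketch assumes it sits alongside the generated code in package go_client.
package go_client

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// exampleAuthorize is a hypothetical helper; the endpoint and dial options
// below are assumptions for illustration only.
func exampleAuthorize() {
	conn, err := grpc.Dial("localhost:8887", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("failed to dial API server: %v", err)
	}
	defer conn.Close()

	client := NewAuthServiceClient(conn)
	// Populate the request fields (including the AuthorizeRequest_Verb enum
	// registered above) as defined in backend/api/auth.proto.
	req := &AuthorizeRequest{}
	if _, err := client.Authorize(context.Background(), req); err != nil {
		log.Printf("authorization failed: %v", err)
	}
}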


@@ -1,115 +0,0 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/auth.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var (
filter_AuthService_Authorize_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_AuthService_Authorize_0(ctx context.Context, marshaler runtime.Marshaler, client AuthServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq AuthorizeRequest
var metadata runtime.ServerMetadata
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_AuthService_Authorize_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Authorize(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterAuthServiceHandlerFromEndpoint is the same as RegisterAuthServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterAuthServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterAuthServiceHandler(ctx, mux, conn)
}
// RegisterAuthServiceHandler registers the http handlers for service AuthService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterAuthServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterAuthServiceHandlerClient(ctx, mux, NewAuthServiceClient(conn))
}
// RegisterAuthServiceHandlerClient registers the http handlers for service AuthService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AuthServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "AuthServiceClient" to call the correct interceptors.
func RegisterAuthServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AuthServiceClient) error {
mux.Handle("GET", pattern_AuthService_Authorize_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_AuthService_Authorize_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_AuthService_Authorize_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_AuthService_Authorize_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "auth"}, ""))
)
var (
forward_AuthService_Authorize_0 = runtime.ForwardResponseMessage
)
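For orientation, a minimal sketch of mounting this gateway, assuming it sits in package go_client next to the generated handler; the addresses, dial options, and function name are illustrative assumptions, not part of the generated file.
package go_client

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"
)

// serveAuthGateway is a hypothetical helper wiring the REST gateway to the
// gRPC backend; both addresses are assumptions for illustration.
func serveAuthGateway() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := RegisterAuthServiceHandlerFromEndpoint(ctx, mux, "localhost:8887", opts); err != nil {
		log.Fatalf("failed to register gateway: %v", err)
	}
	// GET /apis/v1beta1/auth (see pattern_AuthService_Authorize_0 above)
	// is now proxied to the gRPC Authorize method.
	log.Fatal(http.ListenAndServe(":8080", mux))
}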


@@ -1,150 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/error.proto
package go_client
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
any "github.com/golang/protobuf/ptypes/any"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Error struct {
ErrorMessage string `protobuf:"bytes,1,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
ErrorDetails string `protobuf:"bytes,2,opt,name=error_details,json=errorDetails,proto3" json:"error_details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Error) Reset() { *m = Error{} }
func (m *Error) String() string { return proto.CompactTextString(m) }
func (*Error) ProtoMessage() {}
func (*Error) Descriptor() ([]byte, []int) {
return fileDescriptor_6c5901d52662fdde, []int{0}
}
func (m *Error) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Error.Unmarshal(m, b)
}
func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Error.Marshal(b, m, deterministic)
}
func (m *Error) XXX_Merge(src proto.Message) {
xxx_messageInfo_Error.Merge(m, src)
}
func (m *Error) XXX_Size() int {
return xxx_messageInfo_Error.Size(m)
}
func (m *Error) XXX_DiscardUnknown() {
xxx_messageInfo_Error.DiscardUnknown(m)
}
var xxx_messageInfo_Error proto.InternalMessageInfo
func (m *Error) GetErrorMessage() string {
if m != nil {
return m.ErrorMessage
}
return ""
}
func (m *Error) GetErrorDetails() string {
if m != nil {
return m.ErrorDetails
}
return ""
}
type Status struct {
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Status) Reset() { *m = Status{} }
func (m *Status) String() string { return proto.CompactTextString(m) }
func (*Status) ProtoMessage() {}
func (*Status) Descriptor() ([]byte, []int) {
return fileDescriptor_6c5901d52662fdde, []int{1}
}
func (m *Status) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Status.Unmarshal(m, b)
}
func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Status.Marshal(b, m, deterministic)
}
func (m *Status) XXX_Merge(src proto.Message) {
xxx_messageInfo_Status.Merge(m, src)
}
func (m *Status) XXX_Size() int {
return xxx_messageInfo_Status.Size(m)
}
func (m *Status) XXX_DiscardUnknown() {
xxx_messageInfo_Status.DiscardUnknown(m)
}
var xxx_messageInfo_Status proto.InternalMessageInfo
func (m *Status) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func (m *Status) GetCode() int32 {
if m != nil {
return m.Code
}
return 0
}
func (m *Status) GetDetails() []*any.Any {
if m != nil {
return m.Details
}
return nil
}
func init() {
proto.RegisterType((*Error)(nil), "api.Error")
proto.RegisterType((*Status)(nil), "api.Status")
}
func init() { proto.RegisterFile("backend/api/error.proto", fileDescriptor_6c5901d52662fdde) }
var fileDescriptor_6c5901d52662fdde = []byte{
// 228 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0x4d, 0x4b, 0x03, 0x31,
0x10, 0x86, 0xa9, 0xeb, 0x56, 0x8c, 0x7a, 0x09, 0x05, 0xab, 0xa7, 0x52, 0x2f, 0x3d, 0x25, 0x60,
0xf1, 0x07, 0x28, 0x7a, 0xf4, 0xe0, 0x7a, 0xf3, 0x52, 0x92, 0xec, 0x34, 0x86, 0xa6, 0x99, 0x90,
0x0f, 0x64, 0xff, 0xbd, 0x98, 0xb8, 0x74, 0x6f, 0xc9, 0x33, 0xcf, 0xbc, 0xbc, 0x0c, 0xb9, 0x95,
0x42, 0x1d, 0xc0, 0xf5, 0x5c, 0x78, 0xc3, 0x21, 0x04, 0x0c, 0xcc, 0x07, 0x4c, 0x48, 0x1b, 0xe1,
0xcd, 0xfd, 0x9d, 0x46, 0xd4, 0x16, 0x78, 0x41, 0x32, 0xef, 0xb9, 0x70, 0x43, 0x9d, 0xaf, 0x3f,
0x48, 0xfb, 0xf6, 0xa7, 0xd3, 0x07, 0x72, 0x53, 0xf6, 0x76, 0x47, 0x88, 0x51, 0x68, 0x58, 0xce,
0x56, 0xb3, 0xcd, 0x65, 0x77, 0x5d, 0xe0, 0x7b, 0x65, 0x27, 0xa9, 0x87, 0x24, 0x8c, 0x8d, 0xcb,
0xb3, 0x89, 0xf4, 0x5a, 0xd9, 0x5a, 0x92, 0xf9, 0x67, 0x12, 0x29, 0x47, 0xba, 0x20, 0x6d, 0x99,
0xfc, 0x67, 0xd5, 0x0f, 0xa5, 0xe4, 0x5c, 0x61, 0x0f, 0x65, 0xb7, 0xed, 0xca, 0x9b, 0x32, 0x72,
0x31, 0x46, 0x36, 0xab, 0x66, 0x73, 0xf5, 0xb8, 0x60, 0xb5, 0x33, 0x1b, 0x3b, 0xb3, 0x67, 0x37,
0x74, 0xa3, 0xf4, 0xf2, 0xf4, 0xb5, 0xd5, 0x26, 0x7d, 0x67, 0xc9, 0x14, 0x1e, 0xf9, 0x21, 0x4b,
0xd8, 0x5b, 0xfc, 0xe1, 0xde, 0x78, 0xb0, 0xc6, 0x41, 0xe4, 0xd3, 0x7b, 0x68, 0xdc, 0x29, 0x6b,
0xc0, 0x25, 0x39, 0x2f, 0x69, 0xdb, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, 0x2e, 0x20, 0xa9,
0x2f, 0x01, 0x00, 0x00,
}
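A brief sketch of consuming these generated types; the helper name is hypothetical. The nil-checked Get* accessors make it safe to call even on a nil *Status.
package go_client

import "fmt"

// describeStatus is a hypothetical helper that summarizes an api.Status
// using the generated nil-safe getters.
func describeStatus(s *Status) string {
	return fmt.Sprintf("code=%d error=%q details=%d", s.GetCode(), s.GetError(), len(s.GetDetails()))
}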


@@ -1,838 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/experiment.proto
package go_client
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
empty "github.com/golang/protobuf/ptypes/empty"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
_ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Experiment_StorageState int32
const (
Experiment_STORAGESTATE_UNSPECIFIED Experiment_StorageState = 0
Experiment_STORAGESTATE_AVAILABLE Experiment_StorageState = 1
Experiment_STORAGESTATE_ARCHIVED Experiment_StorageState = 2
)
var Experiment_StorageState_name = map[int32]string{
0: "STORAGESTATE_UNSPECIFIED",
1: "STORAGESTATE_AVAILABLE",
2: "STORAGESTATE_ARCHIVED",
}
var Experiment_StorageState_value = map[string]int32{
"STORAGESTATE_UNSPECIFIED": 0,
"STORAGESTATE_AVAILABLE": 1,
"STORAGESTATE_ARCHIVED": 2,
}
func (x Experiment_StorageState) String() string {
return proto.EnumName(Experiment_StorageState_name, int32(x))
}
func (Experiment_StorageState) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_2acb5110e2ac785b, []int{5, 0}
}
type CreateExperimentRequest struct {
// The experiment to be created.
Experiment *Experiment `protobuf:"bytes,1,opt,name=experiment,proto3" json:"experiment,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateExperimentRequest) Reset() { *m = CreateExperimentRequest{} }
func (m *CreateExperimentRequest) String() string { return proto.CompactTextString(m) }
func (*CreateExperimentRequest) ProtoMessage() {}
func (*CreateExperimentRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_2acb5110e2ac785b, []int{0}
}
func (m *CreateExperimentRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateExperimentRequest.Unmarshal(m, b)
}
func (m *CreateExperimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateExperimentRequest.Marshal(b, m, deterministic)
}
func (m *CreateExperimentRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateExperimentRequest.Merge(m, src)
}
func (m *CreateExperimentRequest) XXX_Size() int {
return xxx_messageInfo_CreateExperimentRequest.Size(m)
}
func (m *CreateExperimentRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreateExperimentRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CreateExperimentRequest proto.InternalMessageInfo
func (m *CreateExperimentRequest) GetExperiment() *Experiment {
if m != nil {
return m.Experiment
}
return nil
}
type GetExperimentRequest struct {
// The ID of the experiment to be retrieved.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetExperimentRequest) Reset() { *m = GetExperimentRequest{} }
func (m *GetExperimentRequest) String() string { return proto.CompactTextString(m) }
func (*GetExperimentRequest) ProtoMessage() {}
func (*GetExperimentRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_2acb5110e2ac785b, []int{1}
}
func (m *GetExperimentRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetExperimentRequest.Unmarshal(m, b)
}
func (m *GetExperimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetExperimentRequest.Marshal(b, m, deterministic)
}
func (m *GetExperimentRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetExperimentRequest.Merge(m, src)
}
func (m *GetExperimentRequest) XXX_Size() int {
return xxx_messageInfo_GetExperimentRequest.Size(m)
}
func (m *GetExperimentRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetExperimentRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetExperimentRequest proto.InternalMessageInfo
func (m *GetExperimentRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
type ListExperimentsRequest struct {
// A page token to request the next page of results. The token is acquired
// from the nextPageToken field of the response from the previous
// ListExperiment call or can be omitted when fetching the first page.
PageToken string `protobuf:"bytes,1,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// The number of experiments to be listed per page. If there are more
// experiments than this number, the response message will contain a
// nextPageToken field you can use to fetch the next page.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// Can be in the form "field_name", "field_name asc" or "field_name desc".
// Ascending by default.
SortBy string `protobuf:"bytes,3,opt,name=sort_by,json=sortBy,proto3" json:"sort_by,omitempty"`
// A url-encoded, JSON-serialized Filter protocol buffer (see
// [filter.proto](https://github.com/kubeflow/pipelines/
// blob/master/backend/api/filter.proto)).
Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
// What resource reference to filter on.
// For Experiment, the only valid resource type is Namespace. A sample query string could be
// resource_reference_key.type=NAMESPACE&resource_reference_key.id=ns1
ResourceReferenceKey *ResourceKey `protobuf:"bytes,5,opt,name=resource_reference_key,json=resourceReferenceKey,proto3" json:"resource_reference_key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListExperimentsRequest) Reset() { *m = ListExperimentsRequest{} }
func (m *ListExperimentsRequest) String() string { return proto.CompactTextString(m) }
func (*ListExperimentsRequest) ProtoMessage() {}
func (*ListExperimentsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_2acb5110e2ac785b, []int{2}
}
func (m *ListExperimentsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListExperimentsRequest.Unmarshal(m, b)
}
func (m *ListExperimentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListExperimentsRequest.Marshal(b, m, deterministic)
}
func (m *ListExperimentsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListExperimentsRequest.Merge(m, src)
}
func (m *ListExperimentsRequest) XXX_Size() int {
return xxx_messageInfo_ListExperimentsRequest.Size(m)
}
func (m *ListExperimentsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListExperimentsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ListExperimentsRequest proto.InternalMessageInfo
func (m *ListExperimentsRequest) GetPageToken() string {
if m != nil {
return m.PageToken
}
return ""
}
func (m *ListExperimentsRequest) GetPageSize() int32 {
if m != nil {
return m.PageSize
}
return 0
}
func (m *ListExperimentsRequest) GetSortBy() string {
if m != nil {
return m.SortBy
}
return ""
}
func (m *ListExperimentsRequest) GetFilter() string {
if m != nil {
return m.Filter
}
return ""
}
func (m *ListExperimentsRequest) GetResourceReferenceKey() *ResourceKey {
if m != nil {
return m.ResourceReferenceKey
}
return nil
}
type ListExperimentsResponse struct {
// A list of experiments returned.
Experiments []*Experiment `protobuf:"bytes,1,rep,name=experiments,proto3" json:"experiments,omitempty"`
// The total number of experiments for the given query.
TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
// The token to list the next page of experiments.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListExperimentsResponse) Reset() { *m = ListExperimentsResponse{} }
func (m *ListExperimentsResponse) String() string { return proto.CompactTextString(m) }
func (*ListExperimentsResponse) ProtoMessage() {}
func (*ListExperimentsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_2acb5110e2ac785b, []int{3}
}
func (m *ListExperimentsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListExperimentsResponse.Unmarshal(m, b)
}
func (m *ListExperimentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListExperimentsResponse.Marshal(b, m, deterministic)
}
func (m *ListExperimentsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListExperimentsResponse.Merge(m, src)
}
func (m *ListExperimentsResponse) XXX_Size() int {
return xxx_messageInfo_ListExperimentsResponse.Size(m)
}
func (m *ListExperimentsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListExperimentsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ListExperimentsResponse proto.InternalMessageInfo
func (m *ListExperimentsResponse) GetExperiments() []*Experiment {
if m != nil {
return m.Experiments
}
return nil
}
func (m *ListExperimentsResponse) GetTotalSize() int32 {
if m != nil {
return m.TotalSize
}
return 0
}
func (m *ListExperimentsResponse) GetNextPageToken() string {
if m != nil {
return m.NextPageToken
}
return ""
}
type DeleteExperimentRequest struct {
// The ID of the experiment to be deleted.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DeleteExperimentRequest) Reset() { *m = DeleteExperimentRequest{} }
func (m *DeleteExperimentRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteExperimentRequest) ProtoMessage() {}
func (*DeleteExperimentRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_2acb5110e2ac785b, []int{4}
}
func (m *DeleteExperimentRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeleteExperimentRequest.Unmarshal(m, b)
}
func (m *DeleteExperimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DeleteExperimentRequest.Marshal(b, m, deterministic)
}
func (m *DeleteExperimentRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeleteExperimentRequest.Merge(m, src)
}
func (m *DeleteExperimentRequest) XXX_Size() int {
return xxx_messageInfo_DeleteExperimentRequest.Size(m)
}
func (m *DeleteExperimentRequest) XXX_DiscardUnknown() {
xxx_messageInfo_DeleteExperimentRequest.DiscardUnknown(m)
}
var xxx_messageInfo_DeleteExperimentRequest proto.InternalMessageInfo
func (m *DeleteExperimentRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
type Experiment struct {
// Output. Unique experiment ID. Generated by API server.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// Required input field. Unique experiment name provided by user.
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
// Optional input field. Describes the purpose of the experiment.
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
// Output. The time that the experiment was created.
CreatedAt *timestamp.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
// Optional input field. Specify which resource this run belongs to.
// For Experiment, the only valid resource reference is a single Namespace.
ResourceReferences []*ResourceReference `protobuf:"bytes,5,rep,name=resource_references,json=resourceReferences,proto3" json:"resource_references,omitempty"`
// Output. Specifies whether this experiment is in an archived or available state.
StorageState Experiment_StorageState `protobuf:"varint,6,opt,name=storage_state,json=storageState,proto3,enum=api.Experiment_StorageState" json:"storage_state,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Experiment) Reset() { *m = Experiment{} }
func (m *Experiment) String() string { return proto.CompactTextString(m) }
func (*Experiment) ProtoMessage() {}
func (*Experiment) Descriptor() ([]byte, []int) {
return fileDescriptor_2acb5110e2ac785b, []int{5}
}
func (m *Experiment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Experiment.Unmarshal(m, b)
}
func (m *Experiment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Experiment.Marshal(b, m, deterministic)
}
func (m *Experiment) XXX_Merge(src proto.Message) {
xxx_messageInfo_Experiment.Merge(m, src)
}
func (m *Experiment) XXX_Size() int {
return xxx_messageInfo_Experiment.Size(m)
}
func (m *Experiment) XXX_DiscardUnknown() {
xxx_messageInfo_Experiment.DiscardUnknown(m)
}
var xxx_messageInfo_Experiment proto.InternalMessageInfo
func (m *Experiment) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Experiment) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Experiment) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *Experiment) GetCreatedAt() *timestamp.Timestamp {
if m != nil {
return m.CreatedAt
}
return nil
}
func (m *Experiment) GetResourceReferences() []*ResourceReference {
if m != nil {
return m.ResourceReferences
}
return nil
}
func (m *Experiment) GetStorageState() Experiment_StorageState {
if m != nil {
return m.StorageState
}
return Experiment_STORAGESTATE_UNSPECIFIED
}
type ArchiveExperimentRequest struct {
// The ID of the experiment to be archived.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArchiveExperimentRequest) Reset() { *m = ArchiveExperimentRequest{} }
func (m *ArchiveExperimentRequest) String() string { return proto.CompactTextString(m) }
func (*ArchiveExperimentRequest) ProtoMessage() {}
func (*ArchiveExperimentRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_2acb5110e2ac785b, []int{6}
}
func (m *ArchiveExperimentRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArchiveExperimentRequest.Unmarshal(m, b)
}
func (m *ArchiveExperimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArchiveExperimentRequest.Marshal(b, m, deterministic)
}
func (m *ArchiveExperimentRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArchiveExperimentRequest.Merge(m, src)
}
func (m *ArchiveExperimentRequest) XXX_Size() int {
return xxx_messageInfo_ArchiveExperimentRequest.Size(m)
}
func (m *ArchiveExperimentRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ArchiveExperimentRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ArchiveExperimentRequest proto.InternalMessageInfo
func (m *ArchiveExperimentRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
type UnarchiveExperimentRequest struct {
// The ID of the experiment to be restored.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *UnarchiveExperimentRequest) Reset() { *m = UnarchiveExperimentRequest{} }
func (m *UnarchiveExperimentRequest) String() string { return proto.CompactTextString(m) }
func (*UnarchiveExperimentRequest) ProtoMessage() {}
func (*UnarchiveExperimentRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_2acb5110e2ac785b, []int{7}
}
func (m *UnarchiveExperimentRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UnarchiveExperimentRequest.Unmarshal(m, b)
}
func (m *UnarchiveExperimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UnarchiveExperimentRequest.Marshal(b, m, deterministic)
}
func (m *UnarchiveExperimentRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_UnarchiveExperimentRequest.Merge(m, src)
}
func (m *UnarchiveExperimentRequest) XXX_Size() int {
return xxx_messageInfo_UnarchiveExperimentRequest.Size(m)
}
func (m *UnarchiveExperimentRequest) XXX_DiscardUnknown() {
xxx_messageInfo_UnarchiveExperimentRequest.DiscardUnknown(m)
}
var xxx_messageInfo_UnarchiveExperimentRequest proto.InternalMessageInfo
func (m *UnarchiveExperimentRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func init() {
proto.RegisterEnum("api.Experiment_StorageState", Experiment_StorageState_name, Experiment_StorageState_value)
proto.RegisterType((*CreateExperimentRequest)(nil), "api.CreateExperimentRequest")
proto.RegisterType((*GetExperimentRequest)(nil), "api.GetExperimentRequest")
proto.RegisterType((*ListExperimentsRequest)(nil), "api.ListExperimentsRequest")
proto.RegisterType((*ListExperimentsResponse)(nil), "api.ListExperimentsResponse")
proto.RegisterType((*DeleteExperimentRequest)(nil), "api.DeleteExperimentRequest")
proto.RegisterType((*Experiment)(nil), "api.Experiment")
proto.RegisterType((*ArchiveExperimentRequest)(nil), "api.ArchiveExperimentRequest")
proto.RegisterType((*UnarchiveExperimentRequest)(nil), "api.UnarchiveExperimentRequest")
}
func init() { proto.RegisterFile("backend/api/experiment.proto", fileDescriptor_2acb5110e2ac785b) }
var fileDescriptor_2acb5110e2ac785b = []byte{
// 889 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x51, 0x53, 0xdb, 0x46,
0x10, 0x8e, 0x4c, 0x70, 0xc2, 0x1a, 0x83, 0x39, 0x52, 0x5b, 0x08, 0x53, 0x5c, 0x4d, 0x87, 0xba,
0x4c, 0xb0, 0x0a, 0x79, 0x6a, 0xde, 0x0c, 0x38, 0x94, 0x86, 0xb6, 0x19, 0xd9, 0xc9, 0x43, 0x5e,
0x3c, 0x67, 0x79, 0x6d, 0x6e, 0xb0, 0x75, 0xea, 0xdd, 0x89, 0xc4, 0x74, 0x3a, 0xd3, 0xe9, 0x4c,
0xff, 0x40, 0xf3, 0xb3, 0x3a, 0x7d, 0xea, 0x5f, 0xe8, 0xef, 0xe8, 0x74, 0x74, 0x96, 0x41, 0xb6,
0xec, 0x84, 0x27, 0xb8, 0xdd, 0xcf, 0xb7, 0xf7, 0x7d, 0xfb, 0xed, 0x0a, 0xca, 0x1d, 0xea, 0x5d,
0xa1, 0xdf, 0x75, 0x68, 0xc0, 0x1c, 0x7c, 0x1f, 0xa0, 0x60, 0x43, 0xf4, 0x55, 0x2d, 0x10, 0x5c,
0x71, 0xb2, 0x44, 0x03, 0x66, 0x95, 0xa6, 0x20, 0x42, 0x70, 0x31, 0xce, 0x5a, 0x5f, 0x26, 0x13,
0x02, 0x25, 0x0f, 0x85, 0x87, 0x6d, 0x81, 0x3d, 0x14, 0xe8, 0x7b, 0x18, 0xa3, 0xca, 0x7d, 0xce,
0xfb, 0x03, 0xd4, 0x20, 0xea, 0xfb, 0x5c, 0x51, 0xc5, 0xb8, 0x2f, 0xe3, 0xec, 0x76, 0x9c, 0xd5,
0xa7, 0x4e, 0xd8, 0x73, 0x70, 0x18, 0xa8, 0x51, 0x9c, 0xdc, 0x9d, 0x4d, 0x2a, 0x36, 0x44, 0xa9,
0xe8, 0x30, 0x88, 0x01, 0x4f, 0xf5, 0x1f, 0xef, 0xa0, 0x8f, 0xfe, 0x81, 0x7c, 0x47, 0xfb, 0x7d,
0x14, 0x0e, 0x0f, 0xf4, 0xfd, 0xe9, 0x5a, 0xf6, 0xf7, 0x50, 0x3a, 0x11, 0x48, 0x15, 0x36, 0x6e,
0x79, 0xba, 0xf8, 0x73, 0x88, 0x52, 0x11, 0x07, 0xe0, 0x8e, 0xbc, 0x69, 0x54, 0x8c, 0x6a, 0xee,
0x68, 0xbd, 0x46, 0x03, 0x56, 0x4b, 0x60, 0x13, 0x10, 0x7b, 0x0f, 0x9e, 0x9c, 0xa1, 0x4a, 0x5f,
0xb4, 0x06, 0x19, 0xd6, 0xd5, 0x17, 0xac, 0xb8, 0x19, 0xd6, 0xb5, 0xff, 0x36, 0xa0, 0x78, 0xc1,
0x64, 0x02, 0x29, 0x27, 0xd0, 0x1d, 0x80, 0x80, 0xf6, 0xb1, 0xad, 0xf8, 0x15, 0xfa, 0xf1, 0x4f,
0x56, 0xa2, 0x48, 0x2b, 0x0a, 0x90, 0x6d, 0xd0, 0x87, 0xb6, 0x64, 0x37, 0x68, 0x66, 0x2a, 0x46,
0x75, 0xd9, 0x7d, 0x1c, 0x05, 0x9a, 0xec, 0x06, 0x49, 0x09, 0x1e, 0x49, 0x2e, 0x54, 0xbb, 0x33,
0x32, 0x97, 0xf4, 0x0f, 0xb3, 0xd1, 0xf1, 0x78, 0x44, 0x8a, 0x90, 0xed, 0xb1, 0x81, 0x42, 0x61,
0x3e, 0x1c, 0xc7, 0xc7, 0x27, 0xf2, 0x02, 0x8a, 0xe9, 0x0e, 0xb5, 0xaf, 0x70, 0x64, 0x2e, 0x6b,
0xb2, 0x05, 0x4d, 0xd6, 0x8d, 0x21, 0x2f, 0x71, 0xe4, 0x3e, 0x99, 0xe0, 0xdd, 0x09, 0xfc, 0x25,
0x8e, 0xec, 0x0f, 0x06, 0x94, 0x52, 0x7c, 0x64, 0xc0, 0x7d, 0x89, 0xe4, 0x10, 0x72, 0x77, 0x0a,
0x49, 0xd3, 0xa8, 0x2c, 0xcd, 0x53, 0x31, 0x89, 0x89, 0x34, 0x50, 0x5c, 0xd1, 0xc1, 0x98, 0xe5,
0x92, 0x66, 0xb9, 0xa2, 0x23, 0x9a, 0xe6, 0x1e, 0xac, 0xfb, 0xf8, 0x5e, 0xb5, 0x13, 0x3a, 0x65,
0x34, 0xad, 0x7c, 0x14, 0x7e, 0x35, 0xd1, 0xca, 0xfe, 0x1a, 0x4a, 0xa7, 0x38, 0xc0, 0x79, 0x9d,
0x9d, 0x6d, 0xc8, 0x7f, 0x19, 0x80, 0x3b, 0xd4, 0x6c, 0x9a, 0x10, 0x78, 0xe8, 0xd3, 0x21, 0xc6,
0x65, 0xf4, 0xff, 0xa4, 0x02, 0xb9, 0x2e, 0x4a, 0x4f, 0x30, 0xed, 0xac, 0x58, 0xf0, 0x64, 0x88,
0x7c, 0x0b, 0xe0, 0x69, 0x67, 0x75, 0xdb, 0x54, 0x69, 0xe5, 0x73, 0x47, 0x56, 0x6d, 0xec, 0xde,
0xda, 0xc4, 0xbd, 0xb5, 0xd6, 0xc4, 0xbd, 0xee, 0x4a, 0x8c, 0xae, 0x2b, 0x72, 0x06, 0x9b, 0xe9,
0xc6, 0x48, 0x73, 0x59, 0x8b, 0x57, 0x9c, 0xea, 0xca, 0x6d, 0x23, 0x5c, 0x92, 0xea, 0x8d, 0x24,
0x75, 0xc8, 0x4b, 0xc5, 0x85, 0xb6, 0x8c, 0xa2, 0x0a, 0xcd, 0x6c, 0xc5, 0xa8, 0xae, 0x1d, 0x95,
0x67, 0xf4, 0xaf, 0x35, 0xc7, 0xa0, 0x66, 0x84, 0x71, 0x57, 0x65, 0xe2, 0x64, 0x7b, 0xb0, 0x9a,
0xcc, 0x92, 0x32, 0x98, 0xcd, 0xd6, 0x4f, 0x6e, 0xfd, 0xac, 0xd1, 0x6c, 0xd5, 0x5b, 0x8d, 0xf6,
0xeb, 0x1f, 0x9b, 0xaf, 0x1a, 0x27, 0xe7, 0x2f, 0xce, 0x1b, 0xa7, 0x85, 0x07, 0xc4, 0x82, 0xe2,
0x54, 0xb6, 0xfe, 0xa6, 0x7e, 0x7e, 0x51, 0x3f, 0xbe, 0x68, 0x14, 0x0c, 0xb2, 0x05, 0x9f, 0x4d,
0xe7, 0xdc, 0x93, 0xef, 0xce, 0xdf, 0x34, 0x4e, 0x0b, 0x19, 0x7b, 0x1f, 0xcc, 0xba, 0xf0, 0x2e,
0xd9, 0xf5, 0x3d, 0x9a, 0xf5, 0x14, 0xac, 0xd7, 0x3e, 0xbd, 0x27, 0xfa, 0xe8, 0xaf, 0x65, 0xd8,
0xb8, 0x43, 0x35, 0x51, 0x5c, 0x33, 0x0f, 0x49, 0x00, 0x85, 0xd9, 0xa9, 0x27, 0x63, 0x51, 0x16,
0x2c, 0x03, 0x6b, 0xd6, 0xb2, 0xf6, 0xc1, 0xef, 0xff, 0xfc, 0xfb, 0x21, 0xf3, 0x95, 0xbd, 0x15,
0x2d, 0x31, 0xe9, 0x5c, 0x1f, 0x76, 0x50, 0xd1, 0xc3, 0xc4, 0xba, 0x94, 0xcf, 0x13, 0xbb, 0x81,
0x78, 0x90, 0x9f, 0xda, 0x0d, 0x64, 0x4b, 0x5f, 0x38, 0x6f, 0x5f, 0xa4, 0x6b, 0xed, 0xe9, 0x5a,
0x15, 0xf2, 0xf9, 0xc2, 0x5a, 0xce, 0x2f, 0xac, 0xfb, 0x2b, 0xf1, 0x61, 0x6d, 0x7a, 0x0e, 0xc9,
0xb6, 0xbe, 0x6a, 0xfe, 0xb2, 0xb1, 0xca, 0xf3, 0x93, 0xe3, 0xc9, 0xb5, 0xbf, 0xd0, 0x45, 0xb7,
0xc9, 0x62, 0x82, 0x91, 0x8c, 0xb3, 0x23, 0x16, 0xcb, 0xb8, 0x60, 0xf2, 0xac, 0x62, 0x6a, 0x00,
0x1a, 0xd1, 0x6e, 0x9f, 0x30, 0xdc, 0xff, 0x14, 0xc3, 0x1b, 0xd8, 0x48, 0x19, 0x85, 0xec, 0xe8,
0x92, 0x8b, 0x0c, 0xb4, 0xb0, 0x66, 0x4d, 0xd7, 0xac, 0xda, 0x7b, 0x1f, 0xaf, 0xf9, 0x3c, 0xf6,
0x1a, 0xf9, 0xcd, 0x80, 0xcd, 0x39, 0xce, 0x23, 0xbb, 0xba, 0xfc, 0x62, 0x4f, 0x2e, 0x7c, 0xc0,
0x37, 0xfa, 0x01, 0xfb, 0x76, 0xf5, 0x13, 0x0f, 0x08, 0x27, 0x57, 0x1f, 0xff, 0x61, 0xfc, 0x59,
0xff, 0xc1, 0x2d, 0xc3, 0xa3, 0x2e, 0xf6, 0x68, 0x38, 0x50, 0x64, 0x83, 0xac, 0x43, 0xde, 0xca,
0xe9, 0x17, 0x44, 0xf3, 0x19, 0xca, 0xb7, 0xbb, 0xb0, 0x03, 0xd9, 0x63, 0xa4, 0x02, 0x05, 0xd9,
0x7c, 0x9c, 0xb1, 0xf2, 0x34, 0x54, 0x97, 0x5c, 0xb0, 0x1b, 0xfd, 0xdd, 0xab, 0x64, 0x3a, 0xab,
0x00, 0xb7, 0x80, 0x07, 0x6f, 0x9f, 0xf5, 0x99, 0xba, 0x0c, 0x3b, 0x35, 0x8f, 0x0f, 0x9d, 0xab,
0xb0, 0x83, 0xbd, 0x01, 0x7f, 0xe7, 0x04, 0x2c, 0xc0, 0x01, 0xf3, 0x51, 0x3a, 0xc9, 0xcf, 0x79,
0x9f, 0xb7, 0xbd, 0x01, 0x43, 0x5f, 0x75, 0xb2, 0x9a, 0xc9, 0xb3, 0xff, 0x03, 0x00, 0x00, 0xff,
0xff, 0x0b, 0x8f, 0xfa, 0xad, 0x2a, 0x08, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// ExperimentServiceClient is the client API for ExperimentService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ExperimentServiceClient interface {
// Creates a new experiment.
CreateExperiment(ctx context.Context, in *CreateExperimentRequest, opts ...grpc.CallOption) (*Experiment, error)
// Finds a specific experiment by ID.
GetExperiment(ctx context.Context, in *GetExperimentRequest, opts ...grpc.CallOption) (*Experiment, error)
// Finds all experiments. Supports pagination and sorting on certain fields.
ListExperiment(ctx context.Context, in *ListExperimentsRequest, opts ...grpc.CallOption) (*ListExperimentsResponse, error)
// Deletes an experiment without deleting the experiment's runs and jobs. To
// avoid unexpected behaviors, delete an experiment's runs and jobs before
// deleting the experiment.
DeleteExperiment(ctx context.Context, in *DeleteExperimentRequest, opts ...grpc.CallOption) (*empty.Empty, error)
// Archives an experiment and the experiment's runs and jobs.
ArchiveExperiment(ctx context.Context, in *ArchiveExperimentRequest, opts ...grpc.CallOption) (*empty.Empty, error)
// Restores an archived experiment. The experiment's archived runs and jobs
// will stay archived.
UnarchiveExperiment(ctx context.Context, in *UnarchiveExperimentRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}
type experimentServiceClient struct {
cc *grpc.ClientConn
}
func NewExperimentServiceClient(cc *grpc.ClientConn) ExperimentServiceClient {
return &experimentServiceClient{cc}
}
func (c *experimentServiceClient) CreateExperiment(ctx context.Context, in *CreateExperimentRequest, opts ...grpc.CallOption) (*Experiment, error) {
out := new(Experiment)
err := c.cc.Invoke(ctx, "/api.ExperimentService/CreateExperiment", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *experimentServiceClient) GetExperiment(ctx context.Context, in *GetExperimentRequest, opts ...grpc.CallOption) (*Experiment, error) {
out := new(Experiment)
err := c.cc.Invoke(ctx, "/api.ExperimentService/GetExperiment", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *experimentServiceClient) ListExperiment(ctx context.Context, in *ListExperimentsRequest, opts ...grpc.CallOption) (*ListExperimentsResponse, error) {
out := new(ListExperimentsResponse)
err := c.cc.Invoke(ctx, "/api.ExperimentService/ListExperiment", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *experimentServiceClient) DeleteExperiment(ctx context.Context, in *DeleteExperimentRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/api.ExperimentService/DeleteExperiment", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *experimentServiceClient) ArchiveExperiment(ctx context.Context, in *ArchiveExperimentRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/api.ExperimentService/ArchiveExperiment", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *experimentServiceClient) UnarchiveExperiment(ctx context.Context, in *UnarchiveExperimentRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/api.ExperimentService/UnarchiveExperiment", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ExperimentServiceServer is the server API for ExperimentService service.
type ExperimentServiceServer interface {
// Creates a new experiment.
CreateExperiment(context.Context, *CreateExperimentRequest) (*Experiment, error)
// Finds a specific experiment by ID.
GetExperiment(context.Context, *GetExperimentRequest) (*Experiment, error)
// Finds all experiments. Supports pagination and sorting on certain fields.
ListExperiment(context.Context, *ListExperimentsRequest) (*ListExperimentsResponse, error)
// Deletes an experiment without deleting the experiment's runs and jobs. To
// avoid unexpected behaviors, delete an experiment's runs and jobs before
// deleting the experiment.
DeleteExperiment(context.Context, *DeleteExperimentRequest) (*empty.Empty, error)
// Archives an experiment and the experiment's runs and jobs.
ArchiveExperiment(context.Context, *ArchiveExperimentRequest) (*empty.Empty, error)
// Restores an archived experiment. The experiment's archived runs and jobs
// will stay archived.
UnarchiveExperiment(context.Context, *UnarchiveExperimentRequest) (*empty.Empty, error)
}
// UnimplementedExperimentServiceServer can be embedded to have forward compatible implementations.
type UnimplementedExperimentServiceServer struct {
}
func (*UnimplementedExperimentServiceServer) CreateExperiment(ctx context.Context, req *CreateExperimentRequest) (*Experiment, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateExperiment not implemented")
}
func (*UnimplementedExperimentServiceServer) GetExperiment(ctx context.Context, req *GetExperimentRequest) (*Experiment, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetExperiment not implemented")
}
func (*UnimplementedExperimentServiceServer) ListExperiment(ctx context.Context, req *ListExperimentsRequest) (*ListExperimentsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ListExperiment not implemented")
}
func (*UnimplementedExperimentServiceServer) DeleteExperiment(ctx context.Context, req *DeleteExperimentRequest) (*empty.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteExperiment not implemented")
}
func (*UnimplementedExperimentServiceServer) ArchiveExperiment(ctx context.Context, req *ArchiveExperimentRequest) (*empty.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method ArchiveExperiment not implemented")
}
func (*UnimplementedExperimentServiceServer) UnarchiveExperiment(ctx context.Context, req *UnarchiveExperimentRequest) (*empty.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method UnarchiveExperiment not implemented")
}
func RegisterExperimentServiceServer(s *grpc.Server, srv ExperimentServiceServer) {
s.RegisterService(&_ExperimentService_serviceDesc, srv)
}
func _ExperimentService_CreateExperiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateExperimentRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExperimentServiceServer).CreateExperiment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ExperimentService/CreateExperiment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExperimentServiceServer).CreateExperiment(ctx, req.(*CreateExperimentRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ExperimentService_GetExperiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetExperimentRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExperimentServiceServer).GetExperiment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ExperimentService/GetExperiment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExperimentServiceServer).GetExperiment(ctx, req.(*GetExperimentRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ExperimentService_ListExperiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListExperimentsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExperimentServiceServer).ListExperiment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ExperimentService/ListExperiment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExperimentServiceServer).ListExperiment(ctx, req.(*ListExperimentsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ExperimentService_DeleteExperiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteExperimentRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExperimentServiceServer).DeleteExperiment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ExperimentService/DeleteExperiment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExperimentServiceServer).DeleteExperiment(ctx, req.(*DeleteExperimentRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ExperimentService_ArchiveExperiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ArchiveExperimentRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExperimentServiceServer).ArchiveExperiment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ExperimentService/ArchiveExperiment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExperimentServiceServer).ArchiveExperiment(ctx, req.(*ArchiveExperimentRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ExperimentService_UnarchiveExperiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UnarchiveExperimentRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExperimentServiceServer).UnarchiveExperiment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ExperimentService/UnarchiveExperiment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExperimentServiceServer).UnarchiveExperiment(ctx, req.(*UnarchiveExperimentRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ExperimentService_serviceDesc = grpc.ServiceDesc{
ServiceName: "api.ExperimentService",
HandlerType: (*ExperimentServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateExperiment",
Handler: _ExperimentService_CreateExperiment_Handler,
},
{
MethodName: "GetExperiment",
Handler: _ExperimentService_GetExperiment_Handler,
},
{
MethodName: "ListExperiment",
Handler: _ExperimentService_ListExperiment_Handler,
},
{
MethodName: "DeleteExperiment",
Handler: _ExperimentService_DeleteExperiment_Handler,
},
{
MethodName: "ArchiveExperiment",
Handler: _ExperimentService_ArchiveExperiment_Handler,
},
{
MethodName: "UnarchiveExperiment",
Handler: _ExperimentService_UnarchiveExperiment_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/experiment.proto",
}
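A minimal sketch of walking the paginated ListExperiment RPC using the page_token / next_page_token contract documented on ListExperimentsRequest above; the endpoint, dial options, and helper name are illustrative assumptions.
package go_client

import (
	"context"

	"google.golang.org/grpc"
)

// listAllExperiments is a hypothetical helper that follows next_page_token
// until the server signals the last page with an empty token.
func listAllExperiments() ([]*Experiment, error) {
	conn, err := grpc.Dial("localhost:8887", grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	client := NewExperimentServiceClient(conn)
	var all []*Experiment
	token := "" // an empty token fetches the first page
	for {
		resp, err := client.ListExperiment(context.Background(), &ListExperimentsRequest{
			PageToken: token,
			PageSize:  50,
		})
		if err != nil {
			return nil, err
		}
		all = append(all, resp.GetExperiments()...)
		token = resp.GetNextPageToken()
		if token == "" {
			break
		}
	}
	return all, nil
}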


@@ -1,360 +0,0 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/experiment.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_ExperimentService_CreateExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreateExperimentRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Experiment); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreateExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ExperimentService_GetExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.GetExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_ExperimentService_ListExperiment_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_ExperimentService_ListExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListExperimentsRequest
var metadata runtime.ServerMetadata
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ExperimentService_ListExperiment_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ExperimentService_DeleteExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DeleteExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ExperimentService_ArchiveExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ArchiveExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.ArchiveExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ExperimentService_UnarchiveExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq UnarchiveExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.UnarchiveExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterExperimentServiceHandlerFromEndpoint is the same as RegisterExperimentServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterExperimentServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterExperimentServiceHandler(ctx, mux, conn)
}
// RegisterExperimentServiceHandler registers the http handlers for service ExperimentService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterExperimentServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterExperimentServiceHandlerClient(ctx, mux, NewExperimentServiceClient(conn))
}
// RegisterExperimentServiceHandlerClient registers the http handlers for service ExperimentService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ExperimentServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ExperimentServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ExperimentServiceClient" to call the correct interceptors.
func RegisterExperimentServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ExperimentServiceClient) error {
mux.Handle("POST", pattern_ExperimentService_CreateExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_CreateExperiment_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_CreateExperiment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_ExperimentService_GetExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_GetExperiment_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_GetExperiment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_ExperimentService_ListExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_ListExperiment_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_ListExperiment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_ExperimentService_DeleteExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_DeleteExperiment_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_DeleteExperiment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_ExperimentService_ArchiveExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_ArchiveExperiment_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_ArchiveExperiment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_ExperimentService_UnarchiveExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_UnarchiveExperiment_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_UnarchiveExperiment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
	// POST /apis/v1beta1/experiments
	pattern_ExperimentService_CreateExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "experiments"}, ""))
	// GET /apis/v1beta1/experiments/{id}
	pattern_ExperimentService_GetExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "experiments", "id"}, ""))
	// GET /apis/v1beta1/experiments
	pattern_ExperimentService_ListExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "experiments"}, ""))
	// DELETE /apis/v1beta1/experiments/{id}
	pattern_ExperimentService_DeleteExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "experiments", "id"}, ""))
	// POST /apis/v1beta1/experiments/{id}:archive
	pattern_ExperimentService_ArchiveExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "experiments", "id"}, "archive"))
	// POST /apis/v1beta1/experiments/{id}:unarchive
	pattern_ExperimentService_UnarchiveExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "experiments", "id"}, "unarchive"))
)
var (
forward_ExperimentService_CreateExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_GetExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_ListExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_DeleteExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_ArchiveExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_UnarchiveExperiment_0 = runtime.ForwardResponseMessage
)


@@ -1,580 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/filter.proto
package go_client
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Op is the operation to apply.
type Predicate_Op int32
const (
Predicate_UNKNOWN Predicate_Op = 0
// Operators on scalar values. Only applies to one of |int_value|,
// |long_value|, |string_value| or |timestamp_value|.
Predicate_EQUALS Predicate_Op = 1
Predicate_NOT_EQUALS Predicate_Op = 2
Predicate_GREATER_THAN Predicate_Op = 3
Predicate_GREATER_THAN_EQUALS Predicate_Op = 5
Predicate_LESS_THAN Predicate_Op = 6
Predicate_LESS_THAN_EQUALS Predicate_Op = 7
// Checks if the value is a member of a given array, which should be one of
// |int_values|, |long_values| or |string_values|.
Predicate_IN Predicate_Op = 8
// Checks if the value contains |string_value| as a substring match. Only
// applies to |string_value|.
Predicate_IS_SUBSTRING Predicate_Op = 9
)
var Predicate_Op_name = map[int32]string{
0: "UNKNOWN",
1: "EQUALS",
2: "NOT_EQUALS",
3: "GREATER_THAN",
5: "GREATER_THAN_EQUALS",
6: "LESS_THAN",
7: "LESS_THAN_EQUALS",
8: "IN",
9: "IS_SUBSTRING",
}
var Predicate_Op_value = map[string]int32{
"UNKNOWN": 0,
"EQUALS": 1,
"NOT_EQUALS": 2,
"GREATER_THAN": 3,
"GREATER_THAN_EQUALS": 5,
"LESS_THAN": 6,
"LESS_THAN_EQUALS": 7,
"IN": 8,
"IS_SUBSTRING": 9,
}
func (x Predicate_Op) String() string {
return proto.EnumName(Predicate_Op_name, int32(x))
}
func (Predicate_Op) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_aab96529e99c2762, []int{0, 0}
}
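// Illustrative sketch, not part of the generated file: building an IN
// predicate that matches either of two labels, per the Predicate_IN comment
// above. It uses only types declared later in this file.
func examplePredicateIn() *Predicate {
	return &Predicate{
		Op:  Predicate_IN,
		Key: "label",
		Value: &Predicate_StringValues{
			StringValues: &StringValues{Values: []string{"label_1", "label_2"}},
		},
	}
}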
// Predicate captures individual conditions that must be true for a resource
// being filtered.
type Predicate struct {
Op Predicate_Op `protobuf:"varint,1,opt,name=op,proto3,enum=api.Predicate_Op" json:"op,omitempty"`
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
// Types that are valid to be assigned to Value:
// *Predicate_IntValue
// *Predicate_LongValue
// *Predicate_StringValue
// *Predicate_TimestampValue
// *Predicate_IntValues
// *Predicate_LongValues
// *Predicate_StringValues
Value isPredicate_Value `protobuf_oneof:"value"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Predicate) Reset() { *m = Predicate{} }
func (m *Predicate) String() string { return proto.CompactTextString(m) }
func (*Predicate) ProtoMessage() {}
func (*Predicate) Descriptor() ([]byte, []int) {
return fileDescriptor_aab96529e99c2762, []int{0}
}
func (m *Predicate) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Predicate.Unmarshal(m, b)
}
func (m *Predicate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Predicate.Marshal(b, m, deterministic)
}
func (m *Predicate) XXX_Merge(src proto.Message) {
xxx_messageInfo_Predicate.Merge(m, src)
}
func (m *Predicate) XXX_Size() int {
return xxx_messageInfo_Predicate.Size(m)
}
func (m *Predicate) XXX_DiscardUnknown() {
xxx_messageInfo_Predicate.DiscardUnknown(m)
}
var xxx_messageInfo_Predicate proto.InternalMessageInfo
func (m *Predicate) GetOp() Predicate_Op {
if m != nil {
return m.Op
}
return Predicate_UNKNOWN
}
func (m *Predicate) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
type isPredicate_Value interface {
isPredicate_Value()
}
type Predicate_IntValue struct {
IntValue int32 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"`
}
type Predicate_LongValue struct {
LongValue int64 `protobuf:"varint,4,opt,name=long_value,json=longValue,proto3,oneof"`
}
type Predicate_StringValue struct {
StringValue string `protobuf:"bytes,5,opt,name=string_value,json=stringValue,proto3,oneof"`
}
type Predicate_TimestampValue struct {
TimestampValue *timestamp.Timestamp `protobuf:"bytes,6,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
}
type Predicate_IntValues struct {
IntValues *IntValues `protobuf:"bytes,7,opt,name=int_values,json=intValues,proto3,oneof"`
}
type Predicate_LongValues struct {
LongValues *LongValues `protobuf:"bytes,8,opt,name=long_values,json=longValues,proto3,oneof"`
}
type Predicate_StringValues struct {
StringValues *StringValues `protobuf:"bytes,9,opt,name=string_values,json=stringValues,proto3,oneof"`
}
func (*Predicate_IntValue) isPredicate_Value() {}
func (*Predicate_LongValue) isPredicate_Value() {}
func (*Predicate_StringValue) isPredicate_Value() {}
func (*Predicate_TimestampValue) isPredicate_Value() {}
func (*Predicate_IntValues) isPredicate_Value() {}
func (*Predicate_LongValues) isPredicate_Value() {}
func (*Predicate_StringValues) isPredicate_Value() {}
func (m *Predicate) GetValue() isPredicate_Value {
if m != nil {
return m.Value
}
return nil
}
func (m *Predicate) GetIntValue() int32 {
if x, ok := m.GetValue().(*Predicate_IntValue); ok {
return x.IntValue
}
return 0
}
func (m *Predicate) GetLongValue() int64 {
if x, ok := m.GetValue().(*Predicate_LongValue); ok {
return x.LongValue
}
return 0
}
func (m *Predicate) GetStringValue() string {
if x, ok := m.GetValue().(*Predicate_StringValue); ok {
return x.StringValue
}
return ""
}
func (m *Predicate) GetTimestampValue() *timestamp.Timestamp {
if x, ok := m.GetValue().(*Predicate_TimestampValue); ok {
return x.TimestampValue
}
return nil
}
func (m *Predicate) GetIntValues() *IntValues {
if x, ok := m.GetValue().(*Predicate_IntValues); ok {
return x.IntValues
}
return nil
}
func (m *Predicate) GetLongValues() *LongValues {
if x, ok := m.GetValue().(*Predicate_LongValues); ok {
return x.LongValues
}
return nil
}
func (m *Predicate) GetStringValues() *StringValues {
if x, ok := m.GetValue().(*Predicate_StringValues); ok {
return x.StringValues
}
return nil
}
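// Illustrative sketch, not generated: reading the oneof value with a type
// switch over the wrapper types declared above.
func describePredicateValue(p *Predicate) string {
	switch v := p.GetValue().(type) {
	case *Predicate_IntValue:
		return fmt.Sprintf("int %d", v.IntValue)
	case *Predicate_StringValue:
		return fmt.Sprintf("string %q", v.StringValue)
	default:
		return "other or unset"
	}
}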
// XXX_OneofWrappers is for the internal use of the proto package.
func (*Predicate) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*Predicate_IntValue)(nil),
(*Predicate_LongValue)(nil),
(*Predicate_StringValue)(nil),
(*Predicate_TimestampValue)(nil),
(*Predicate_IntValues)(nil),
(*Predicate_LongValues)(nil),
(*Predicate_StringValues)(nil),
}
}
type IntValues struct {
Values []int32 `protobuf:"varint,1,rep,packed,name=values,proto3" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *IntValues) Reset() { *m = IntValues{} }
func (m *IntValues) String() string { return proto.CompactTextString(m) }
func (*IntValues) ProtoMessage() {}
func (*IntValues) Descriptor() ([]byte, []int) {
return fileDescriptor_aab96529e99c2762, []int{1}
}
func (m *IntValues) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IntValues.Unmarshal(m, b)
}
func (m *IntValues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IntValues.Marshal(b, m, deterministic)
}
func (m *IntValues) XXX_Merge(src proto.Message) {
xxx_messageInfo_IntValues.Merge(m, src)
}
func (m *IntValues) XXX_Size() int {
return xxx_messageInfo_IntValues.Size(m)
}
func (m *IntValues) XXX_DiscardUnknown() {
xxx_messageInfo_IntValues.DiscardUnknown(m)
}
var xxx_messageInfo_IntValues proto.InternalMessageInfo
func (m *IntValues) GetValues() []int32 {
if m != nil {
return m.Values
}
return nil
}
type StringValues struct {
Values []string `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StringValues) Reset() { *m = StringValues{} }
func (m *StringValues) String() string { return proto.CompactTextString(m) }
func (*StringValues) ProtoMessage() {}
func (*StringValues) Descriptor() ([]byte, []int) {
return fileDescriptor_aab96529e99c2762, []int{2}
}
func (m *StringValues) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StringValues.Unmarshal(m, b)
}
func (m *StringValues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StringValues.Marshal(b, m, deterministic)
}
func (m *StringValues) XXX_Merge(src proto.Message) {
xxx_messageInfo_StringValues.Merge(m, src)
}
func (m *StringValues) XXX_Size() int {
return xxx_messageInfo_StringValues.Size(m)
}
func (m *StringValues) XXX_DiscardUnknown() {
xxx_messageInfo_StringValues.DiscardUnknown(m)
}
var xxx_messageInfo_StringValues proto.InternalMessageInfo
func (m *StringValues) GetValues() []string {
if m != nil {
return m.Values
}
return nil
}
type LongValues struct {
Values []int64 `protobuf:"varint,3,rep,packed,name=values,proto3" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LongValues) Reset() { *m = LongValues{} }
func (m *LongValues) String() string { return proto.CompactTextString(m) }
func (*LongValues) ProtoMessage() {}
func (*LongValues) Descriptor() ([]byte, []int) {
return fileDescriptor_aab96529e99c2762, []int{3}
}
func (m *LongValues) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LongValues.Unmarshal(m, b)
}
func (m *LongValues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LongValues.Marshal(b, m, deterministic)
}
func (m *LongValues) XXX_Merge(src proto.Message) {
xxx_messageInfo_LongValues.Merge(m, src)
}
func (m *LongValues) XXX_Size() int {
return xxx_messageInfo_LongValues.Size(m)
}
func (m *LongValues) XXX_DiscardUnknown() {
xxx_messageInfo_LongValues.DiscardUnknown(m)
}
var xxx_messageInfo_LongValues proto.InternalMessageInfo
func (m *LongValues) GetValues() []int64 {
if m != nil {
return m.Values
}
return nil
}
// Filter is used to filter resources returned from a ListXXX request.
//
// Example filters:
// 1) Filter runs with status = 'Running'
// filter {
//   predicates {
//     key: "status"
//     op: EQUALS
//     string_value: "Running"
//   }
// }
//
// 2) Filter runs that succeeded since Dec 1, 2018
// filter {
//   predicates {
//     key: "status"
//     op: EQUALS
//     string_value: "Succeeded"
//   }
//   predicates {
//     key: "created_at"
//     op: GREATER_THAN
//     timestamp_value {
//       seconds: 1543651200
//     }
//   }
// }
//
// 3) Filter runs with one of labels 'label_1' or 'label_2'
// filter {
//   predicates {
//     key: "label"
//     op: IN
//     string_values {
//       values: 'label_1'
//       values: 'label_2'
//     }
//   }
// }
type Filter struct {
// All predicates are AND-ed when this filter is applied.
Predicates []*Predicate `protobuf:"bytes,1,rep,name=predicates,proto3" json:"predicates,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Filter) Reset() { *m = Filter{} }
func (m *Filter) String() string { return proto.CompactTextString(m) }
func (*Filter) ProtoMessage() {}
func (*Filter) Descriptor() ([]byte, []int) {
return fileDescriptor_aab96529e99c2762, []int{4}
}
func (m *Filter) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Filter.Unmarshal(m, b)
}
func (m *Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Filter.Marshal(b, m, deterministic)
}
func (m *Filter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Filter.Merge(m, src)
}
func (m *Filter) XXX_Size() int {
return xxx_messageInfo_Filter.Size(m)
}
func (m *Filter) XXX_DiscardUnknown() {
xxx_messageInfo_Filter.DiscardUnknown(m)
}
var xxx_messageInfo_Filter proto.InternalMessageInfo
func (m *Filter) GetPredicates() []*Predicate {
if m != nil {
return m.Predicates
}
return nil
}
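// Illustrative sketch, not generated: the "status = 'Running'" filter from
// the doc comment on Filter, built with these generated types.
func exampleRunningFilter() *Filter {
	return &Filter{
		Predicates: []*Predicate{{
			Op:    Predicate_EQUALS,
			Key:   "status",
			Value: &Predicate_StringValue{StringValue: "Running"},
		}},
	}
}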
func init() {
proto.RegisterEnum("api.Predicate_Op", Predicate_Op_name, Predicate_Op_value)
proto.RegisterType((*Predicate)(nil), "api.Predicate")
proto.RegisterType((*IntValues)(nil), "api.IntValues")
proto.RegisterType((*StringValues)(nil), "api.StringValues")
proto.RegisterType((*LongValues)(nil), "api.LongValues")
proto.RegisterType((*Filter)(nil), "api.Filter")
}
func init() { proto.RegisterFile("backend/api/filter.proto", fileDescriptor_aab96529e99c2762) }
var fileDescriptor_aab96529e99c2762 = []byte{
// 553 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x93, 0xdf, 0x8f, 0xd2, 0x40,
0x10, 0xc7, 0xfb, 0xe3, 0x28, 0xd7, 0x81, 0xe3, 0xea, 0x6a, 0xb4, 0x21, 0x9a, 0xab, 0x9c, 0xd1,
0x3e, 0xb5, 0x09, 0x17, 0x93, 0x7b, 0xf1, 0x01, 0x22, 0x02, 0x91, 0x14, 0x6d, 0x41, 0x13, 0x5f,
0x48, 0xe1, 0x16, 0xdc, 0x50, 0xba, 0x1b, 0xba, 0x9c, 0xb9, 0xbf, 0xc4, 0xff, 0xc2, 0xbf, 0xd1,
0xb4, 0xdb, 0x2e, 0x7d, 0xeb, 0xcc, 0x7c, 0xbe, 0xd3, 0xf9, 0x4e, 0xa7, 0x60, 0xaf, 0xe3, 0xcd,
0x1e, 0xa7, 0x0f, 0x7e, 0xcc, 0x88, 0xbf, 0x25, 0x09, 0xc7, 0x47, 0x8f, 0x1d, 0x29, 0xa7, 0x48,
0x8f, 0x19, 0xe9, 0xbe, 0xde, 0x51, 0xba, 0x4b, 0x70, 0x51, 0x8d, 0xd3, 0x94, 0xf2, 0x98, 0x13,
0x9a, 0x66, 0x02, 0xe9, 0xde, 0x94, 0xd5, 0x22, 0x5a, 0x9f, 0xb6, 0x3e, 0x27, 0x07, 0x9c, 0xf1,
0xf8, 0xc0, 0x04, 0xd0, 0xfb, 0x77, 0x01, 0xe6, 0xb7, 0x23, 0x7e, 0x20, 0x9b, 0x98, 0x63, 0xf4,
0x16, 0x34, 0xca, 0x6c, 0xd5, 0x51, 0xdd, 0x4e, 0xff, 0x99, 0x17, 0x33, 0xe2, 0xc9, 0x9a, 0x37,
0x67, 0xa1, 0x46, 0x19, 0xb2, 0x40, 0xdf, 0xe3, 0x27, 0x5b, 0x73, 0x54, 0xd7, 0x0c, 0xf3, 0x47,
0xf4, 0x06, 0x4c, 0x92, 0xf2, 0xd5, 0x63, 0x9c, 0x9c, 0xb0, 0xad, 0x3b, 0xaa, 0xdb, 0x98, 0x28,
0xe1, 0x25, 0x49, 0xf9, 0x8f, 0x3c, 0x83, 0x6e, 0x00, 0x12, 0x9a, 0xee, 0xca, 0xfa, 0x85, 0xa3,
0xba, 0xfa, 0x44, 0x09, 0xcd, 0x3c, 0x27, 0x80, 0x5b, 0x68, 0x67, 0xfc, 0x48, 0x24, 0xd2, 0xc8,
0x5b, 0x4f, 0x94, 0xb0, 0x25, 0xb2, 0x02, 0x1a, 0xc1, 0xb5, 0x1c, 0xbd, 0xe4, 0x0c, 0x47, 0x75,
0x5b, 0xfd, 0xae, 0x27, 0x2c, 0x7a, 0x95, 0x45, 0x6f, 0x51, 0x71, 0x13, 0x25, 0xec, 0x48, 0x91,
0x68, 0xe3, 0x03, 0xc8, 0x59, 0x33, 0xbb, 0x59, 0x74, 0xe8, 0x14, 0x46, 0xa7, 0xe5, 0xbc, 0x59,
0x3e, 0x5c, 0x35, 0x7c, 0x86, 0xfa, 0xd0, 0x3a, 0x4f, 0x9f, 0xd9, 0x97, 0x85, 0xe2, 0xba, 0x50,
0xcc, 0x2a, 0x07, 0xb9, 0x04, 0xa4, 0x9f, 0x0c, 0xdd, 0xc3, 0x55, 0xdd, 0x50, 0x66, 0x9b, 0x85,
0x4a, 0x2c, 0x34, 0x3a, 0x9b, 0xca, 0x75, 0xed, 0x9a, 0xc9, 0xac, 0xf7, 0x57, 0x05, 0x6d, 0xce,
0x50, 0x0b, 0x9a, 0xcb, 0xe0, 0x6b, 0x30, 0xff, 0x19, 0x58, 0x0a, 0x02, 0x30, 0x46, 0xdf, 0x97,
0x83, 0x59, 0x64, 0xa9, 0xa8, 0x03, 0x10, 0xcc, 0x17, 0xab, 0x32, 0xd6, 0x90, 0x05, 0xed, 0x71,
0x38, 0x1a, 0x2c, 0x46, 0xe1, 0x6a, 0x31, 0x19, 0x04, 0x96, 0x8e, 0x5e, 0xc1, 0xf3, 0x7a, 0xa6,
0x42, 0x1b, 0xe8, 0x0a, 0xcc, 0xd9, 0x28, 0x8a, 0x04, 0x67, 0xa0, 0x17, 0x60, 0xc9, 0xb0, 0x82,
0x9a, 0xc8, 0x00, 0x6d, 0x1a, 0x58, 0x97, 0x79, 0xdf, 0x69, 0xb4, 0x8a, 0x96, 0xc3, 0x68, 0x11,
0x4e, 0x83, 0xb1, 0x65, 0x0e, 0x9b, 0xd0, 0x28, 0xcc, 0xf4, 0x6e, 0xc1, 0x94, 0xab, 0x42, 0x2f,
0xc1, 0x28, 0x2d, 0xaa, 0x8e, 0xee, 0x36, 0xc2, 0x32, 0xea, 0xbd, 0x87, 0x76, 0xdd, 0x67, 0x8d,
0xd3, 0x1c, 0xdd, 0x35, 0x25, 0xf7, 0x0e, 0xe0, 0xbc, 0xc5, 0x1a, 0xa5, 0x3b, 0xba, 0xab, 0x4b,
0xea, 0x1e, 0x8c, 0x2f, 0xc5, 0xdd, 0x23, 0x0f, 0x80, 0x55, 0x07, 0x29, 0xde, 0x59, 0x7d, 0x3e,
0x79, 0xa7, 0x61, 0x8d, 0xe8, 0x7f, 0x02, 0xf4, 0xf9, 0x74, 0x38, 0x3c, 0x09, 0x79, 0x84, 0x8f,
0x8f, 0x64, 0x83, 0xd1, 0x07, 0x30, 0xc7, 0x98, 0x97, 0x2d, 0x5b, 0x85, 0x5c, 0x04, 0xdd, 0x7a,
0xd0, 0x53, 0x86, 0x1f, 0x7f, 0xdd, 0xed, 0x08, 0xff, 0x7d, 0x5a, 0x7b, 0x1b, 0x7a, 0xf0, 0xf7,
0xa7, 0x35, 0xde, 0x26, 0xf4, 0x8f, 0xcf, 0x08, 0xc3, 0x09, 0x49, 0x71, 0xe6, 0xd7, 0x7f, 0xcd,
0x1d, 0x5d, 0x6d, 0x12, 0x82, 0x53, 0xbe, 0x36, 0x8a, 0x53, 0xbc, 0xfb, 0x1f, 0x00, 0x00, 0xff,
0xff, 0xf8, 0x38, 0x00, 0xb0, 0xba, 0x03, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// DummyFilterServiceClient is the client API for DummyFilterService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type DummyFilterServiceClient interface {
GetFilter(ctx context.Context, in *Filter, opts ...grpc.CallOption) (*Filter, error)
}
type dummyFilterServiceClient struct {
cc *grpc.ClientConn
}
func NewDummyFilterServiceClient(cc *grpc.ClientConn) DummyFilterServiceClient {
return &dummyFilterServiceClient{cc}
}
func (c *dummyFilterServiceClient) GetFilter(ctx context.Context, in *Filter, opts ...grpc.CallOption) (*Filter, error) {
out := new(Filter)
err := c.cc.Invoke(ctx, "/api.DummyFilterService/GetFilter", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// DummyFilterServiceServer is the server API for DummyFilterService service.
type DummyFilterServiceServer interface {
GetFilter(context.Context, *Filter) (*Filter, error)
}
// UnimplementedDummyFilterServiceServer can be embedded to have forward compatible implementations.
type UnimplementedDummyFilterServiceServer struct {
}
func (*UnimplementedDummyFilterServiceServer) GetFilter(ctx context.Context, req *Filter) (*Filter, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetFilter not implemented")
}
func RegisterDummyFilterServiceServer(s *grpc.Server, srv DummyFilterServiceServer) {
s.RegisterService(&_DummyFilterService_serviceDesc, srv)
}
func _DummyFilterService_GetFilter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Filter)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DummyFilterServiceServer).GetFilter(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.DummyFilterService/GetFilter",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DummyFilterServiceServer).GetFilter(ctx, req.(*Filter))
}
return interceptor(ctx, in, info, handler)
}
var _DummyFilterService_serviceDesc = grpc.ServiceDesc{
ServiceName: "api.DummyFilterService",
HandlerType: (*DummyFilterServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetFilter",
Handler: _DummyFilterService_GetFilter_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/filter.proto",
}
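// Illustrative sketch, not generated: a trivial DummyFilterServiceServer that
// echoes the filter back. It would be registered on a *grpc.Server in the
// usual way: RegisterDummyFilterServiceServer(s, echoFilterServer{}).
type echoFilterServer struct {
	UnimplementedDummyFilterServiceServer
}

func (echoFilterServer) GetFilter(ctx context.Context, f *Filter) (*Filter, error) {
	return f, nil
}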


@@ -1,182 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/healthz.proto
package go_client
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
empty "github.com/golang/protobuf/ptypes/empty"
_ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type GetHealthzResponse struct {
// Returns whether KFP is running in multi-user mode
MultiUser bool `protobuf:"varint,3,opt,name=multi_user,json=multiUser,proto3" json:"multi_user,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetHealthzResponse) Reset() { *m = GetHealthzResponse{} }
func (m *GetHealthzResponse) String() string { return proto.CompactTextString(m) }
func (*GetHealthzResponse) ProtoMessage() {}
func (*GetHealthzResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_499453e9dc64832b, []int{0}
}
func (m *GetHealthzResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetHealthzResponse.Unmarshal(m, b)
}
func (m *GetHealthzResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetHealthzResponse.Marshal(b, m, deterministic)
}
func (m *GetHealthzResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetHealthzResponse.Merge(m, src)
}
func (m *GetHealthzResponse) XXX_Size() int {
return xxx_messageInfo_GetHealthzResponse.Size(m)
}
func (m *GetHealthzResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetHealthzResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetHealthzResponse proto.InternalMessageInfo
func (m *GetHealthzResponse) GetMultiUser() bool {
if m != nil {
return m.MultiUser
}
return false
}
func init() {
proto.RegisterType((*GetHealthzResponse)(nil), "api.GetHealthzResponse")
}
func init() { proto.RegisterFile("backend/api/healthz.proto", fileDescriptor_499453e9dc64832b) }
var fileDescriptor_499453e9dc64832b = []byte{
// 337 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4e, 0x32, 0x31,
0x14, 0x85, 0x7f, 0x20, 0xe1, 0xc7, 0x2a, 0x1a, 0x6b, 0x14, 0x1d, 0x21, 0x12, 0x56, 0x2e, 0x64,
0x1a, 0xe4, 0x09, 0x24, 0x31, 0xba, 0x71, 0x03, 0x71, 0x43, 0x4c, 0x48, 0x67, 0xb8, 0xcc, 0x34,
0x0c, 0x6d, 0xd3, 0xde, 0x42, 0x64, 0x69, 0xe2, 0x0b, 0xe8, 0xa3, 0xf9, 0x0a, 0x3e, 0x88, 0xa1,
0x03, 0x4a, 0xa2, 0xab, 0xa6, 0x3d, 0xf7, 0x9c, 0x7e, 0xf7, 0x90, 0xb3, 0x88, 0xc7, 0x53, 0x90,
0x63, 0xc6, 0xb5, 0x60, 0x29, 0xf0, 0x0c, 0xd3, 0x65, 0xa8, 0x8d, 0x42, 0x45, 0x4b, 0x5c, 0x8b,
0xa0, 0x9e, 0x28, 0x95, 0x64, 0xe0, 0x65, 0x2e, 0xa5, 0x42, 0x8e, 0x42, 0x49, 0x9b, 0x8f, 0x04,
0xe7, 0x6b, 0xd5, 0xdf, 0x22, 0x37, 0x61, 0x30, 0xd3, 0xf8, 0xbc, 0x16, 0xaf, 0xfc, 0x11, 0xb7,
0x13, 0x90, 0x6d, 0xbb, 0xe0, 0x49, 0x02, 0x86, 0x29, 0xed, 0xed, 0x7f, 0x44, 0xd5, 0xb6, 0x41,
0xc0, 0x18, 0x65, 0x72, 0xa1, 0xd5, 0x25, 0xf4, 0x0e, 0xf0, 0x3e, 0x47, 0xeb, 0x83, 0xd5, 0x4a,
0x5a, 0xa0, 0x0d, 0x42, 0x66, 0x2e, 0x43, 0x31, 0x72, 0x16, 0xcc, 0x69, 0xa9, 0x59, 0xb8, 0xac,
0xf4, 0x77, 0xfc, 0xcb, 0xa3, 0x05, 0x73, 0x2d, 0xc9, 0xfe, 0xda, 0x31, 0x00, 0x33, 0x17, 0x31,
0xd0, 0x27, 0x42, 0x7e, 0x62, 0xe8, 0x49, 0x98, 0x93, 0x87, 0x1b, 0xf2, 0xf0, 0x76, 0x45, 0x1e,
0xd4, 0x42, 0xae, 0x45, 0xf8, 0xfb, 0xbf, 0x56, 0xe3, 0xe5, 0xe3, 0xf3, 0xbd, 0x58, 0xa3, 0xc7,
0x2b, 0x3e, 0xcb, 0xe6, 0x9d, 0x08, 0x90, 0x77, 0x36, 0x8d, 0xf5, 0x5e, 0x0b, 0x6f, 0x37, 0x0f,
0xfd, 0x3a, 0xf9, 0x3f, 0x86, 0x09, 0x77, 0x19, 0xd2, 0x43, 0x7a, 0x40, 0xaa, 0xc1, 0xae, 0x8f,
0x1b, 0x20, 0x47, 0x67, 0x87, 0x17, 0xa4, 0x41, 0xca, 0x3d, 0xe0, 0x06, 0x0c, 0x3d, 0xaa, 0x14,
0x83, 0x2a, 0x77, 0x98, 0x2a, 0x23, 0x96, 0xbe, 0x87, 0x66, 0x31, 0xda, 0x23, 0xe4, 0x7b, 0xe0,
0xdf, 0xb0, 0x9b, 0x08, 0x4c, 0x5d, 0x14, 0xc6, 0x6a, 0xc6, 0xa6, 0x2e, 0x82, 0x49, 0xa6, 0x16,
0x4c, 0x0b, 0x0d, 0x99, 0x90, 0x60, 0xd9, 0x76, 0x5d, 0x89, 0x1a, 0xc5, 0x99, 0x00, 0x89, 0x51,
0xd9, 0xef, 0xd3, 0xfd, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x04, 0x2d, 0x10, 0xd7, 0x01, 0x00,
0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// HealthzServiceClient is the client API for HealthzService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type HealthzServiceClient interface {
// Get healthz data.
GetHealthz(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetHealthzResponse, error)
}
type healthzServiceClient struct {
cc *grpc.ClientConn
}
func NewHealthzServiceClient(cc *grpc.ClientConn) HealthzServiceClient {
return &healthzServiceClient{cc}
}
func (c *healthzServiceClient) GetHealthz(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*GetHealthzResponse, error) {
out := new(GetHealthzResponse)
err := c.cc.Invoke(ctx, "/api.HealthzService/GetHealthz", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
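// Illustrative sketch, not generated: querying the healthz endpoint over gRPC
// with the client above. The caller supplies an already-dialed connection.
func checkMultiUser(ctx context.Context, conn *grpc.ClientConn) (bool, error) {
	resp, err := NewHealthzServiceClient(conn).GetHealthz(ctx, &empty.Empty{})
	if err != nil {
		return false, err
	}
	return resp.GetMultiUser(), nil
}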
// HealthzServiceServer is the server API for HealthzService service.
type HealthzServiceServer interface {
// Get healthz data.
GetHealthz(context.Context, *empty.Empty) (*GetHealthzResponse, error)
}
// UnimplementedHealthzServiceServer can be embedded to have forward compatible implementations.
type UnimplementedHealthzServiceServer struct {
}
func (*UnimplementedHealthzServiceServer) GetHealthz(ctx context.Context, req *empty.Empty) (*GetHealthzResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetHealthz not implemented")
}
func RegisterHealthzServiceServer(s *grpc.Server, srv HealthzServiceServer) {
s.RegisterService(&_HealthzService_serviceDesc, srv)
}
func _HealthzService_GetHealthz_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(empty.Empty)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HealthzServiceServer).GetHealthz(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.HealthzService/GetHealthz",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HealthzServiceServer).GetHealthz(ctx, req.(*empty.Empty))
}
return interceptor(ctx, in, info, handler)
}
var _HealthzService_serviceDesc = grpc.ServiceDesc{
ServiceName: "api.HealthzService",
HandlerType: (*HealthzServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetHealthz",
Handler: _HealthzService_GetHealthz_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/healthz.proto",
}


@@ -1,108 +0,0 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/healthz.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/empty"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_HealthzService_GetHealthz_0(ctx context.Context, marshaler runtime.Marshaler, client HealthzServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq empty.Empty
var metadata runtime.ServerMetadata
msg, err := client.GetHealthz(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterHealthzServiceHandlerFromEndpoint is the same as RegisterHealthzServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterHealthzServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterHealthzServiceHandler(ctx, mux, conn)
}
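// Illustrative sketch, not generated: wiring the gateway into a plain HTTP
// server. The endpoint address, listen port, and insecure dial option are
// assumptions for the example (grpc.WithInsecure matches the gRPC version
// this file was generated against).
func serveHealthzGateway(ctx context.Context) error {
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := RegisterHealthzServiceHandlerFromEndpoint(ctx, mux, "localhost:8887", opts); err != nil {
		return err
	}
	return http.ListenAndServe(":8888", mux)
}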
// RegisterHealthzServiceHandler registers the http handlers for service HealthzService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterHealthzServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterHealthzServiceHandlerClient(ctx, mux, NewHealthzServiceClient(conn))
}
// RegisterHealthzServiceHandlerClient registers the http handlers for service HealthzService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "HealthzServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "HealthzServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "HealthzServiceClient" to call the correct interceptors.
func RegisterHealthzServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client HealthzServiceClient) error {
mux.Handle("GET", pattern_HealthzService_GetHealthz_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_HealthzService_GetHealthz_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_HealthzService_GetHealthz_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
	// GET /apis/v1beta1/healthz
	pattern_HealthzService_GetHealthz_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "healthz"}, ""))
)
var (
forward_HealthzService_GetHealthz_0 = runtime.ForwardResponseMessage
)

File diff suppressed because it is too large.


@@ -1,360 +0,0 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/job.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_JobService_CreateJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreateJobRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Job); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreateJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_JobService_GetJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.GetJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_JobService_ListJobs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_JobService_ListJobs_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListJobsRequest
var metadata runtime.ServerMetadata
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_JobService_ListJobs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListJobs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_JobService_EnableJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq EnableJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.EnableJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_JobService_DisableJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DisableJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DisableJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_JobService_DeleteJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DeleteJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterJobServiceHandlerFromEndpoint is the same as RegisterJobServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterJobServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterJobServiceHandler(ctx, mux, conn)
}
// RegisterJobServiceHandler registers the http handlers for service JobService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterJobServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterJobServiceHandlerClient(ctx, mux, NewJobServiceClient(conn))
}
// RegisterJobServiceHandlerClient registers the http handlers for service JobService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "JobServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "JobServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "JobServiceClient" to call the correct interceptors.
func RegisterJobServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client JobServiceClient) error {
mux.Handle("POST", pattern_JobService_CreateJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_CreateJob_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_CreateJob_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_JobService_GetJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_GetJob_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_GetJob_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_JobService_ListJobs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_ListJobs_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_ListJobs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_JobService_EnableJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_EnableJob_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_EnableJob_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_JobService_DisableJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_DisableJob_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_DisableJob_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_JobService_DeleteJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_DeleteJob_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_DeleteJob_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
	// POST /apis/v1beta1/jobs
	pattern_JobService_CreateJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "jobs"}, ""))
	// GET /apis/v1beta1/jobs/{id}
	pattern_JobService_GetJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "jobs", "id"}, ""))
	// GET /apis/v1beta1/jobs
	pattern_JobService_ListJobs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "jobs"}, ""))
	// POST /apis/v1beta1/jobs/{id}/enable
	pattern_JobService_EnableJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1beta1", "jobs", "id", "enable"}, ""))
	// POST /apis/v1beta1/jobs/{id}/disable
	pattern_JobService_DisableJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1beta1", "jobs", "id", "disable"}, ""))
	// DELETE /apis/v1beta1/jobs/{id}
	pattern_JobService_DeleteJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "jobs", "id"}, ""))
)
var (
forward_JobService_CreateJob_0 = runtime.ForwardResponseMessage
forward_JobService_GetJob_0 = runtime.ForwardResponseMessage
forward_JobService_ListJobs_0 = runtime.ForwardResponseMessage
forward_JobService_EnableJob_0 = runtime.ForwardResponseMessage
forward_JobService_DisableJob_0 = runtime.ForwardResponseMessage
forward_JobService_DeleteJob_0 = runtime.ForwardResponseMessage
)


@@ -1,88 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/parameter.proto
package go_client
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type Parameter struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Parameter) Reset() { *m = Parameter{} }
func (m *Parameter) String() string { return proto.CompactTextString(m) }
func (*Parameter) ProtoMessage() {}
func (*Parameter) Descriptor() ([]byte, []int) {
return fileDescriptor_6c6f9f24a0c0798d, []int{0}
}
func (m *Parameter) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Parameter.Unmarshal(m, b)
}
func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Parameter.Marshal(b, m, deterministic)
}
func (m *Parameter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Parameter.Merge(m, src)
}
func (m *Parameter) XXX_Size() int {
return xxx_messageInfo_Parameter.Size(m)
}
func (m *Parameter) XXX_DiscardUnknown() {
xxx_messageInfo_Parameter.DiscardUnknown(m)
}
var xxx_messageInfo_Parameter proto.InternalMessageInfo
func (m *Parameter) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Parameter) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
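// Illustrative sketch, not generated: a pipeline parameter as it appears in
// KFP v1beta1 request payloads; the name and value here are made up for the
// example.
var exampleParameter = &Parameter{Name: "learning_rate", Value: "0.01"}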
func init() {
proto.RegisterType((*Parameter)(nil), "api.Parameter")
}
func init() { proto.RegisterFile("backend/api/parameter.proto", fileDescriptor_6c6f9f24a0c0798d) }
var fileDescriptor_6c6f9f24a0c0798d = []byte{
// 147 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x4a, 0x4c, 0xce,
0x4e, 0xcd, 0x4b, 0xd1, 0x4f, 0x2c, 0xc8, 0xd4, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x4d, 0x2d, 0x49,
0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4e, 0x2c, 0xc8, 0x54, 0x32, 0xe5, 0xe2,
0x0c, 0x80, 0x89, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a,
0x70, 0x06, 0x81, 0xd9, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, 0x4c, 0x60,
0x41, 0x08, 0xc7, 0xc9, 0x34, 0xca, 0x38, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f,
0x57, 0x3f, 0xbb, 0x34, 0x29, 0x35, 0x2d, 0x27, 0xbf, 0x5c, 0xbf, 0x20, 0xb3, 0x20, 0x35, 0x27,
0x33, 0x2f, 0xb5, 0x58, 0x1f, 0xd9, 0xe2, 0xf4, 0xfc, 0xf8, 0xe4, 0x9c, 0xcc, 0xd4, 0xbc, 0x92,
0x24, 0x36, 0xb0, 0xcd, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x47, 0x98, 0xfd, 0x98,
0x00, 0x00, 0x00,
}

File diff suppressed because it is too large.


@@ -1,606 +0,0 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/pipeline.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_PipelineService_CreatePipeline_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreatePipelineRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Pipeline); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreatePipeline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_GetPipeline_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetPipelineRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.GetPipeline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_PipelineService_ListPipelines_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_PipelineService_ListPipelines_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListPipelinesRequest
var metadata runtime.ServerMetadata
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_PipelineService_ListPipelines_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListPipelines(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_DeletePipeline_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeletePipelineRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DeletePipeline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_GetTemplate_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetTemplateRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.GetTemplate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_CreatePipelineVersion_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreatePipelineVersionRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Version); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreatePipelineVersion(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_GetPipelineVersion_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetPipelineVersionRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["version_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "version_id")
}
protoReq.VersionId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "version_id", err)
}
msg, err := client.GetPipelineVersion(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_PipelineService_ListPipelineVersions_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_PipelineService_ListPipelineVersions_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListPipelineVersionsRequest
var metadata runtime.ServerMetadata
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_PipelineService_ListPipelineVersions_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListPipelineVersions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_DeletePipelineVersion_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeletePipelineVersionRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["version_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "version_id")
}
protoReq.VersionId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "version_id", err)
}
msg, err := client.DeletePipelineVersion(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_GetPipelineVersionTemplate_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetPipelineVersionTemplateRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["version_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "version_id")
}
protoReq.VersionId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "version_id", err)
}
msg, err := client.GetPipelineVersionTemplate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_UpdatePipelineDefaultVersion_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq UpdatePipelineDefaultVersionRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["pipeline_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pipeline_id")
}
protoReq.PipelineId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pipeline_id", err)
}
val, ok = pathParams["version_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "version_id")
}
protoReq.VersionId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "version_id", err)
}
msg, err := client.UpdatePipelineDefaultVersion(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterPipelineServiceHandlerFromEndpoint is the same as RegisterPipelineServiceHandler but
// automatically dials "endpoint" and closes the connection when "ctx" is done.
func RegisterPipelineServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterPipelineServiceHandler(ctx, mux, conn)
}
// RegisterPipelineServiceHandler registers the http handlers for service PipelineService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterPipelineServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterPipelineServiceHandlerClient(ctx, mux, NewPipelineServiceClient(conn))
}
// RegisterPipelineServiceHandlerClient registers the http handlers for service PipelineService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "PipelineServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "PipelineServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "PipelineServiceClient" to call the correct interceptors.
func RegisterPipelineServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client PipelineServiceClient) error {
mux.Handle("POST", pattern_PipelineService_CreatePipeline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_CreatePipeline_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_CreatePipeline_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_PipelineService_GetPipeline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_GetPipeline_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_GetPipeline_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_PipelineService_ListPipelines_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_ListPipelines_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_ListPipelines_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_PipelineService_DeletePipeline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_DeletePipeline_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_DeletePipeline_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_PipelineService_GetTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_GetTemplate_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_GetTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_PipelineService_CreatePipelineVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_CreatePipelineVersion_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_CreatePipelineVersion_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_PipelineService_GetPipelineVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_GetPipelineVersion_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_GetPipelineVersion_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_PipelineService_ListPipelineVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_ListPipelineVersions_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_ListPipelineVersions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_PipelineService_DeletePipelineVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_DeletePipelineVersion_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_DeletePipelineVersion_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_PipelineService_GetPipelineVersionTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_GetPipelineVersionTemplate_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_GetPipelineVersionTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_PipelineService_UpdatePipelineDefaultVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_UpdatePipelineDefaultVersion_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_UpdatePipelineDefaultVersion_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_PipelineService_CreatePipeline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "pipelines"}, ""))
pattern_PipelineService_GetPipeline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "pipelines", "id"}, ""))
pattern_PipelineService_ListPipelines_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "pipelines"}, ""))
pattern_PipelineService_DeletePipeline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "pipelines", "id"}, ""))
pattern_PipelineService_GetTemplate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1beta1", "pipelines", "id", "templates"}, ""))
pattern_PipelineService_CreatePipelineVersion_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "pipeline_versions"}, ""))
pattern_PipelineService_GetPipelineVersion_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "pipeline_versions", "version_id"}, ""))
pattern_PipelineService_ListPipelineVersions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "pipeline_versions"}, ""))
pattern_PipelineService_DeletePipelineVersion_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "pipeline_versions", "version_id"}, ""))
pattern_PipelineService_GetPipelineVersionTemplate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1beta1", "pipeline_versions", "version_id", "templates"}, ""))
pattern_PipelineService_UpdatePipelineDefaultVersion_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"apis", "v1beta1", "pipelines", "pipeline_id", "default_version", "version_id"}, ""))
)
var (
forward_PipelineService_CreatePipeline_0 = runtime.ForwardResponseMessage
forward_PipelineService_GetPipeline_0 = runtime.ForwardResponseMessage
forward_PipelineService_ListPipelines_0 = runtime.ForwardResponseMessage
forward_PipelineService_DeletePipeline_0 = runtime.ForwardResponseMessage
forward_PipelineService_GetTemplate_0 = runtime.ForwardResponseMessage
forward_PipelineService_CreatePipelineVersion_0 = runtime.ForwardResponseMessage
forward_PipelineService_GetPipelineVersion_0 = runtime.ForwardResponseMessage
forward_PipelineService_ListPipelineVersions_0 = runtime.ForwardResponseMessage
forward_PipelineService_DeletePipelineVersion_0 = runtime.ForwardResponseMessage
forward_PipelineService_GetPipelineVersionTemplate_0 = runtime.ForwardResponseMessage
forward_PipelineService_UpdatePipelineDefaultVersion_0 = runtime.ForwardResponseMessage
)
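Taken together, the handlers above proxy REST calls to the gRPC PipelineService. As a minimal usage sketch (not part of this diff): assuming the generated package is importable under the alias gc and a gRPC server is reachable at a placeholder address, the gateway can be wired up as follows. The import path, addresses, and ports are illustrative assumptions only.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	gc "github.com/kubeflow/kfp-tekton/backend/api/go_client" // assumed import path
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The ServeMux matches the pattern_* routes (e.g. GET /apis/v1beta1/pipelines)
	// and forwards each request through the request_* helpers shown above.
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()} // plaintext, for the sketch only
	// "localhost:8887" is a placeholder for the real gRPC endpoint.
	if err := gc.RegisterPipelineServiceHandlerFromEndpoint(ctx, mux, "localhost:8887", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8888", mux))
}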


@@ -1,126 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/pipeline_spec.proto
package go_client
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type PipelineSpec struct {
// Optional input field. The ID of the pipeline that the user uploaded before.
PipelineId string `protobuf:"bytes,1,opt,name=pipeline_id,json=pipelineId,proto3" json:"pipeline_id,omitempty"`
// Optional output field. The name of the pipeline.
// Not empty if the pipeline id is not empty.
PipelineName string `protobuf:"bytes,5,opt,name=pipeline_name,json=pipelineName,proto3" json:"pipeline_name,omitempty"`
// Optional input field. The marshalled raw argo JSON workflow.
// This will be deprecated when pipeline_manifest is in use.
WorkflowManifest string `protobuf:"bytes,2,opt,name=workflow_manifest,json=workflowManifest,proto3" json:"workflow_manifest,omitempty"`
// Optional input field. The raw pipeline JSON spec.
PipelineManifest string `protobuf:"bytes,3,opt,name=pipeline_manifest,json=pipelineManifest,proto3" json:"pipeline_manifest,omitempty"`
// The parameters the user provides to inject into the pipeline JSON.
// If a default value for a parameter exists in the JSON,
// the value provided here will replace it.
Parameters []*Parameter `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PipelineSpec) Reset() { *m = PipelineSpec{} }
func (m *PipelineSpec) String() string { return proto.CompactTextString(m) }
func (*PipelineSpec) ProtoMessage() {}
func (*PipelineSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_d2c55e79247db888, []int{0}
}
func (m *PipelineSpec) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PipelineSpec.Unmarshal(m, b)
}
func (m *PipelineSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PipelineSpec.Marshal(b, m, deterministic)
}
func (m *PipelineSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_PipelineSpec.Merge(m, src)
}
func (m *PipelineSpec) XXX_Size() int {
return xxx_messageInfo_PipelineSpec.Size(m)
}
func (m *PipelineSpec) XXX_DiscardUnknown() {
xxx_messageInfo_PipelineSpec.DiscardUnknown(m)
}
var xxx_messageInfo_PipelineSpec proto.InternalMessageInfo
func (m *PipelineSpec) GetPipelineId() string {
if m != nil {
return m.PipelineId
}
return ""
}
func (m *PipelineSpec) GetPipelineName() string {
if m != nil {
return m.PipelineName
}
return ""
}
func (m *PipelineSpec) GetWorkflowManifest() string {
if m != nil {
return m.WorkflowManifest
}
return ""
}
func (m *PipelineSpec) GetPipelineManifest() string {
if m != nil {
return m.PipelineManifest
}
return ""
}
func (m *PipelineSpec) GetParameters() []*Parameter {
if m != nil {
return m.Parameters
}
return nil
}
func init() {
proto.RegisterType((*PipelineSpec)(nil), "api.PipelineSpec")
}
func init() { proto.RegisterFile("backend/api/pipeline_spec.proto", fileDescriptor_d2c55e79247db888) }
var fileDescriptor_d2c55e79247db888 = []byte{
// 236 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0xd0, 0xc1, 0x4a, 0xc3, 0x40,
0x10, 0x06, 0x60, 0x62, 0x54, 0x70, 0x5a, 0x45, 0x73, 0x0a, 0x7a, 0x68, 0xd1, 0x4b, 0x41, 0xd8,
0x80, 0xc5, 0x17, 0xf0, 0xe6, 0x41, 0x29, 0xf5, 0xe6, 0x25, 0x4c, 0x36, 0xd3, 0x3a, 0x24, 0xbb,
0x3b, 0x24, 0x5b, 0xfa, 0xb6, 0x3e, 0x8b, 0x34, 0xed, 0x2e, 0xf1, 0xfa, 0xcf, 0xb7, 0xb3, 0xcc,
0x0f, 0xb3, 0x0a, 0x75, 0x43, 0xb6, 0x2e, 0x50, 0xb8, 0x10, 0x16, 0x6a, 0xd9, 0x52, 0xd9, 0x0b,
0x69, 0x25, 0x9d, 0xf3, 0x2e, 0x4b, 0x51, 0xf8, 0xfe, 0xe1, 0x9f, 0xc2, 0x0e, 0x0d, 0x79, 0xea,
0x8e, 0xe2, 0xf1, 0x37, 0x81, 0xe9, 0xea, 0xf4, 0xf2, 0x4b, 0x48, 0x67, 0x33, 0x98, 0xc4, 0x4d,
0x5c, 0xe7, 0xc9, 0x3c, 0x59, 0x5c, 0xad, 0x21, 0x44, 0xef, 0x75, 0xf6, 0x04, 0xd7, 0x11, 0x58,
0x34, 0x94, 0x5f, 0x0c, 0x64, 0x1a, 0xc2, 0x4f, 0x34, 0x94, 0x3d, 0xc3, 0xdd, 0xde, 0x75, 0xcd,
0xa6, 0x75, 0xfb, 0xd2, 0xa0, 0xe5, 0x0d, 0xf5, 0x3e, 0x3f, 0x1b, 0xe0, 0x6d, 0x18, 0x7c, 0x9c,
0xf2, 0x03, 0x8e, 0x1b, 0x23, 0x4e, 0x8f, 0x38, 0x0c, 0x22, 0x56, 0x00, 0xf1, 0x86, 0x3e, 0x3f,
0x9f, 0xa7, 0x8b, 0xc9, 0xcb, 0x8d, 0x42, 0x61, 0xb5, 0x0a, 0xf1, 0x7a, 0x24, 0xde, 0x5e, 0xbf,
0x97, 0x5b, 0xf6, 0x3f, 0xbb, 0x4a, 0x69, 0x67, 0x8a, 0x66, 0x57, 0xd1, 0xe1, 0xef, 0xd8, 0x56,
0x5f, 0x8c, 0xdb, 0xd9, 0xba, 0x52, 0xb7, 0x4c, 0xd6, 0x57, 0x97, 0x43, 0x3d, 0xcb, 0xbf, 0x00,
0x00, 0x00, 0xff, 0xff, 0xeb, 0x6c, 0x41, 0xaf, 0x63, 0x01, 0x00, 0x00,
}
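For reference, a small sketch of this message in use, marshalling a PipelineSpec to the wire format and exercising the nil-safe getters. It assumes the generated package imports under the alias gc and that Parameter (from parameter.proto, not shown here) carries name/value string fields, so treat the field set and IDs as illustrative.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	gc "github.com/kubeflow/kfp-tekton/backend/api/go_client" // assumed import path
)

func main() {
	spec := &gc.PipelineSpec{
		PipelineId: "pipeline-123", // placeholder ID
		Parameters: []*gc.Parameter{{Name: "lr", Value: "0.01"}},
	}
	// Marshal to the protobuf wire format using the field numbers tagged above.
	b, err := proto.Marshal(spec)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes, id=%q\n", len(b), spec.GetPipelineId())

	// The generated getters are nil-safe, which is why each one checks m != nil.
	var missing *gc.PipelineSpec
	fmt.Println(missing.GetPipelineId() == "") // prints: true
}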


@@ -1,254 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/report.proto
package go_client
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
empty "github.com/golang/protobuf/ptypes/empty"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type ReportWorkflowRequest struct {
// Workflow is a workflow custom resource marshalled into a JSON string.
Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReportWorkflowRequest) Reset() { *m = ReportWorkflowRequest{} }
func (m *ReportWorkflowRequest) String() string { return proto.CompactTextString(m) }
func (*ReportWorkflowRequest) ProtoMessage() {}
func (*ReportWorkflowRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cf464d903a9c793e, []int{0}
}
func (m *ReportWorkflowRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ReportWorkflowRequest.Unmarshal(m, b)
}
func (m *ReportWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ReportWorkflowRequest.Marshal(b, m, deterministic)
}
func (m *ReportWorkflowRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReportWorkflowRequest.Merge(m, src)
}
func (m *ReportWorkflowRequest) XXX_Size() int {
return xxx_messageInfo_ReportWorkflowRequest.Size(m)
}
func (m *ReportWorkflowRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ReportWorkflowRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ReportWorkflowRequest proto.InternalMessageInfo
func (m *ReportWorkflowRequest) GetWorkflow() string {
if m != nil {
return m.Workflow
}
return ""
}
type ReportScheduledWorkflowRequest struct {
// ScheduledWorkflow is a ScheduledWorkflow resource marshalled into a JSON string.
ScheduledWorkflow string `protobuf:"bytes,1,opt,name=scheduled_workflow,json=scheduledWorkflow,proto3" json:"scheduled_workflow,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReportScheduledWorkflowRequest) Reset() { *m = ReportScheduledWorkflowRequest{} }
func (m *ReportScheduledWorkflowRequest) String() string { return proto.CompactTextString(m) }
func (*ReportScheduledWorkflowRequest) ProtoMessage() {}
func (*ReportScheduledWorkflowRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cf464d903a9c793e, []int{1}
}
func (m *ReportScheduledWorkflowRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ReportScheduledWorkflowRequest.Unmarshal(m, b)
}
func (m *ReportScheduledWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ReportScheduledWorkflowRequest.Marshal(b, m, deterministic)
}
func (m *ReportScheduledWorkflowRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReportScheduledWorkflowRequest.Merge(m, src)
}
func (m *ReportScheduledWorkflowRequest) XXX_Size() int {
return xxx_messageInfo_ReportScheduledWorkflowRequest.Size(m)
}
func (m *ReportScheduledWorkflowRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ReportScheduledWorkflowRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ReportScheduledWorkflowRequest proto.InternalMessageInfo
func (m *ReportScheduledWorkflowRequest) GetScheduledWorkflow() string {
if m != nil {
return m.ScheduledWorkflow
}
return ""
}
func init() {
proto.RegisterType((*ReportWorkflowRequest)(nil), "api.ReportWorkflowRequest")
proto.RegisterType((*ReportScheduledWorkflowRequest)(nil), "api.ReportScheduledWorkflowRequest")
}
func init() { proto.RegisterFile("backend/api/report.proto", fileDescriptor_cf464d903a9c793e) }
var fileDescriptor_cf464d903a9c793e = []byte{
// 310 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x4a, 0xc3, 0x40,
0x10, 0x87, 0x69, 0x05, 0xd1, 0x05, 0x05, 0x17, 0xb4, 0x25, 0x8a, 0x94, 0xf4, 0xa2, 0x07, 0x77,
0xa9, 0x45, 0x0f, 0xe2, 0x49, 0xf0, 0x2c, 0xd4, 0x83, 0xe0, 0xa5, 0xec, 0xa6, 0xd3, 0x74, 0x69,
0xba, 0xb3, 0x66, 0x37, 0x2d, 0x5e, 0x7d, 0x05, 0x05, 0x1f, 0xcc, 0x57, 0xf0, 0x41, 0x24, 0x7f,
0x6d, 0x43, 0x73, 0x9c, 0xf9, 0x65, 0x26, 0xdf, 0x37, 0x2c, 0xe9, 0x4a, 0x11, 0xcc, 0x41, 0x4f,
0xb8, 0x30, 0x8a, 0xc7, 0x60, 0x30, 0x76, 0xcc, 0xc4, 0xe8, 0x90, 0xee, 0x08, 0xa3, 0xbc, 0xb3,
0x10, 0x31, 0x8c, 0x20, 0x4b, 0x85, 0xd6, 0xe8, 0x84, 0x53, 0xa8, 0x6d, 0xfe, 0x89, 0x77, 0x5a,
0xa4, 0x59, 0x25, 0x93, 0x29, 0x87, 0x85, 0x71, 0xef, 0x79, 0xe8, 0x0f, 0xc9, 0xf1, 0x28, 0xdb,
0xf7, 0x82, 0xf1, 0x7c, 0x1a, 0xe1, 0x6a, 0x04, 0x6f, 0x09, 0x58, 0x47, 0x3d, 0xb2, 0xb7, 0x2a,
0x5a, 0xdd, 0x56, 0xaf, 0x75, 0xb1, 0x3f, 0xaa, 0x6a, 0xff, 0x89, 0x9c, 0xe7, 0x43, 0xcf, 0xc1,
0x0c, 0x26, 0x49, 0x04, 0x93, 0xfa, 0xf4, 0x15, 0xa1, 0xb6, 0xcc, 0xc6, 0xb5, 0x3d, 0x47, 0xb6,
0x3e, 0x75, 0xfd, 0xdd, 0x26, 0x07, 0xc5, 0x46, 0x88, 0x97, 0x2a, 0x00, 0x8a, 0xe4, 0x70, 0x93,
0x8b, 0x7a, 0x4c, 0x18, 0xc5, 0xb6, 0xc2, 0x7a, 0x27, 0x2c, 0x77, 0x64, 0xa5, 0x23, 0x7b, 0x4c,
0x1d, 0xfd, 0xcb, 0x8f, 0x9f, 0xdf, 0xcf, 0x76, 0xdf, 0xef, 0xa4, 0xa7, 0xb1, 0x7c, 0x39, 0x90,
0xe0, 0xc4, 0x80, 0x97, 0x40, 0xf6, 0xae, 0x72, 0xa2, 0x5f, 0x2d, 0xd2, 0x69, 0x90, 0xa2, 0xfd,
0xb5, 0x5f, 0x37, 0x29, 0x37, 0x32, 0xdc, 0x67, 0x0c, 0xb7, 0x7e, 0x6f, 0x93, 0xa1, 0x3a, 0xc2,
0x3f, 0xcc, 0x96, 0x93, 0x3d, 0xdc, 0xbc, 0x0e, 0x43, 0xe5, 0x66, 0x89, 0x64, 0x01, 0x2e, 0xf8,
0x3c, 0x91, 0x90, 0xb6, 0xb9, 0x51, 0x06, 0x22, 0xa5, 0xc1, 0xf2, 0xf5, 0x97, 0x11, 0xe2, 0x38,
0x88, 0x14, 0x68, 0x27, 0x77, 0x33, 0x88, 0xe1, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, 0xe5,
0x5d, 0x89, 0x39, 0x02, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// ReportServiceClient is the client API for ReportService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ReportServiceClient interface {
ReportWorkflow(ctx context.Context, in *ReportWorkflowRequest, opts ...grpc.CallOption) (*empty.Empty, error)
ReportScheduledWorkflow(ctx context.Context, in *ReportScheduledWorkflowRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}
type reportServiceClient struct {
cc *grpc.ClientConn
}
func NewReportServiceClient(cc *grpc.ClientConn) ReportServiceClient {
return &reportServiceClient{cc}
}
func (c *reportServiceClient) ReportWorkflow(ctx context.Context, in *ReportWorkflowRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/api.ReportService/ReportWorkflow", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *reportServiceClient) ReportScheduledWorkflow(ctx context.Context, in *ReportScheduledWorkflowRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/api.ReportService/ReportScheduledWorkflow", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ReportServiceServer is the server API for ReportService service.
type ReportServiceServer interface {
ReportWorkflow(context.Context, *ReportWorkflowRequest) (*empty.Empty, error)
ReportScheduledWorkflow(context.Context, *ReportScheduledWorkflowRequest) (*empty.Empty, error)
}
// UnimplementedReportServiceServer can be embedded to have forward compatible implementations.
type UnimplementedReportServiceServer struct {
}
func (*UnimplementedReportServiceServer) ReportWorkflow(ctx context.Context, req *ReportWorkflowRequest) (*empty.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method ReportWorkflow not implemented")
}
func (*UnimplementedReportServiceServer) ReportScheduledWorkflow(ctx context.Context, req *ReportScheduledWorkflowRequest) (*empty.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method ReportScheduledWorkflow not implemented")
}
func RegisterReportServiceServer(s *grpc.Server, srv ReportServiceServer) {
s.RegisterService(&_ReportService_serviceDesc, srv)
}
func _ReportService_ReportWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportWorkflowRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportServiceServer).ReportWorkflow(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ReportService/ReportWorkflow",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportServiceServer).ReportWorkflow(ctx, req.(*ReportWorkflowRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ReportService_ReportScheduledWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportScheduledWorkflowRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportServiceServer).ReportScheduledWorkflow(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ReportService/ReportScheduledWorkflow",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportServiceServer).ReportScheduledWorkflow(ctx, req.(*ReportScheduledWorkflowRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ReportService_serviceDesc = grpc.ServiceDesc{
ServiceName: "api.ReportService",
HandlerType: (*ReportServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ReportWorkflow",
Handler: _ReportService_ReportWorkflow_Handler,
},
{
MethodName: "ReportScheduledWorkflow",
Handler: _ReportService_ReportScheduledWorkflow_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/report.proto",
}
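A hedged sketch of the server side: implementing ReportServiceServer and registering it on a grpc.Server. Embedding UnimplementedReportServiceServer supplies the forward-compatible stub for any method not overridden (here, ReportScheduledWorkflow). The import path and port are placeholder assumptions.

package main

import (
	"context"
	"log"
	"net"

	"github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/grpc"

	gc "github.com/kubeflow/kfp-tekton/backend/api/go_client" // assumed import path
)

type reportServer struct {
	// Embedding keeps the type forward compatible if methods are added.
	gc.UnimplementedReportServiceServer
}

func (s *reportServer) ReportWorkflow(ctx context.Context, req *gc.ReportWorkflowRequest) (*empty.Empty, error) {
	log.Printf("got workflow JSON (%d bytes)", len(req.GetWorkflow()))
	return &empty.Empty{}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":8887") // placeholder port
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	gc.RegisterReportServiceServer(srv, &reportServer{})
	log.Fatal(srv.Serve(lis))
}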


@@ -1,156 +0,0 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/report.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_ReportService_ReportWorkflow_0(ctx context.Context, marshaler runtime.Marshaler, client ReportServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReportWorkflowRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ReportWorkflow(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ReportService_ReportScheduledWorkflow_0(ctx context.Context, marshaler runtime.Marshaler, client ReportServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReportScheduledWorkflowRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.ScheduledWorkflow); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ReportScheduledWorkflow(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterReportServiceHandlerFromEndpoint is the same as RegisterReportServiceHandler but
// automatically dials "endpoint" and closes the connection when "ctx" is done.
func RegisterReportServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterReportServiceHandler(ctx, mux, conn)
}
// RegisterReportServiceHandler registers the http handlers for service ReportService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterReportServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterReportServiceHandlerClient(ctx, mux, NewReportServiceClient(conn))
}
// RegisterReportServiceHandlerClient registers the http handlers for service ReportService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ReportServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ReportServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ReportServiceClient" to call the correct interceptors.
func RegisterReportServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ReportServiceClient) error {
mux.Handle("POST", pattern_ReportService_ReportWorkflow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ReportService_ReportWorkflow_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ReportService_ReportWorkflow_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_ReportService_ReportScheduledWorkflow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ReportService_ReportScheduledWorkflow_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ReportService_ReportScheduledWorkflow_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_ReportService_ReportWorkflow_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "workflows"}, ""))
pattern_ReportService_ReportScheduledWorkflow_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "scheduledworkflows"}, ""))
)
var (
forward_ReportService_ReportWorkflow_0 = runtime.ForwardResponseMessage
forward_ReportService_ReportScheduledWorkflow_0 = runtime.ForwardResponseMessage
)
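On the wire, pattern_ReportService_ReportWorkflow_0 maps POST /apis/v1beta1/workflows, and the request helper above decodes the body directly into the workflow string field, i.e. the body is a JSON-encoded string rather than a JSON object. A rough client-side sketch under that reading; the gateway address and the workflow resource are placeholders.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder workflow custom resource, marshalled into a JSON string.
	wf := `{"kind":"Workflow","metadata":{"name":"demo"}}`
	body, _ := json.Marshal(wf) // yields a quoted, escaped JSON string literal

	// localhost:8888 is a placeholder for wherever the gateway mux is served.
	resp, err := http.Post("http://localhost:8888/apis/v1beta1/workflows",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}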


@@ -1,230 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/resource_reference.proto
package go_client
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type ResourceType int32
const (
ResourceType_UNKNOWN_RESOURCE_TYPE ResourceType = 0
ResourceType_EXPERIMENT ResourceType = 1
ResourceType_JOB ResourceType = 2
ResourceType_PIPELINE ResourceType = 3
ResourceType_PIPELINE_VERSION ResourceType = 4
ResourceType_NAMESPACE ResourceType = 5
)
var ResourceType_name = map[int32]string{
0: "UNKNOWN_RESOURCE_TYPE",
1: "EXPERIMENT",
2: "JOB",
3: "PIPELINE",
4: "PIPELINE_VERSION",
5: "NAMESPACE",
}
var ResourceType_value = map[string]int32{
"UNKNOWN_RESOURCE_TYPE": 0,
"EXPERIMENT": 1,
"JOB": 2,
"PIPELINE": 3,
"PIPELINE_VERSION": 4,
"NAMESPACE": 5,
}
func (x ResourceType) String() string {
return proto.EnumName(ResourceType_name, int32(x))
}
func (ResourceType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_465b210f45a041b3, []int{0}
}
type Relationship int32
const (
Relationship_UNKNOWN_RELATIONSHIP Relationship = 0
Relationship_OWNER Relationship = 1
Relationship_CREATOR Relationship = 2
)
var Relationship_name = map[int32]string{
0: "UNKNOWN_RELATIONSHIP",
1: "OWNER",
2: "CREATOR",
}
var Relationship_value = map[string]int32{
"UNKNOWN_RELATIONSHIP": 0,
"OWNER": 1,
"CREATOR": 2,
}
func (x Relationship) String() string {
return proto.EnumName(Relationship_name, int32(x))
}
func (Relationship) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_465b210f45a041b3, []int{1}
}
type ResourceKey struct {
// The type of the resource being referred to.
Type ResourceType `protobuf:"varint,1,opt,name=type,proto3,enum=api.ResourceType" json:"type,omitempty"`
// The ID of the resource being referred to.
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ResourceKey) Reset() { *m = ResourceKey{} }
func (m *ResourceKey) String() string { return proto.CompactTextString(m) }
func (*ResourceKey) ProtoMessage() {}
func (*ResourceKey) Descriptor() ([]byte, []int) {
return fileDescriptor_465b210f45a041b3, []int{0}
}
func (m *ResourceKey) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ResourceKey.Unmarshal(m, b)
}
func (m *ResourceKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ResourceKey.Marshal(b, m, deterministic)
}
func (m *ResourceKey) XXX_Merge(src proto.Message) {
xxx_messageInfo_ResourceKey.Merge(m, src)
}
func (m *ResourceKey) XXX_Size() int {
return xxx_messageInfo_ResourceKey.Size(m)
}
func (m *ResourceKey) XXX_DiscardUnknown() {
xxx_messageInfo_ResourceKey.DiscardUnknown(m)
}
var xxx_messageInfo_ResourceKey proto.InternalMessageInfo
func (m *ResourceKey) GetType() ResourceType {
if m != nil {
return m.Type
}
return ResourceType_UNKNOWN_RESOURCE_TYPE
}
func (m *ResourceKey) GetId() string {
if m != nil {
return m.Id
}
return ""
}
type ResourceReference struct {
Key *ResourceKey `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
// The name of the resource being referred to.
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
// Required field. The relationship from the referred resource to the object.
Relationship Relationship `protobuf:"varint,2,opt,name=relationship,proto3,enum=api.Relationship" json:"relationship,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ResourceReference) Reset() { *m = ResourceReference{} }
func (m *ResourceReference) String() string { return proto.CompactTextString(m) }
func (*ResourceReference) ProtoMessage() {}
func (*ResourceReference) Descriptor() ([]byte, []int) {
return fileDescriptor_465b210f45a041b3, []int{1}
}
func (m *ResourceReference) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ResourceReference.Unmarshal(m, b)
}
func (m *ResourceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ResourceReference.Marshal(b, m, deterministic)
}
func (m *ResourceReference) XXX_Merge(src proto.Message) {
xxx_messageInfo_ResourceReference.Merge(m, src)
}
func (m *ResourceReference) XXX_Size() int {
return xxx_messageInfo_ResourceReference.Size(m)
}
func (m *ResourceReference) XXX_DiscardUnknown() {
xxx_messageInfo_ResourceReference.DiscardUnknown(m)
}
var xxx_messageInfo_ResourceReference proto.InternalMessageInfo
func (m *ResourceReference) GetKey() *ResourceKey {
if m != nil {
return m.Key
}
return nil
}
func (m *ResourceReference) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ResourceReference) GetRelationship() Relationship {
if m != nil {
return m.Relationship
}
return Relationship_UNKNOWN_RELATIONSHIP
}
func init() {
proto.RegisterEnum("api.ResourceType", ResourceType_name, ResourceType_value)
proto.RegisterEnum("api.Relationship", Relationship_name, Relationship_value)
proto.RegisterType((*ResourceKey)(nil), "api.ResourceKey")
proto.RegisterType((*ResourceReference)(nil), "api.ResourceReference")
}
func init() {
proto.RegisterFile("backend/api/resource_reference.proto", fileDescriptor_465b210f45a041b3)
}
var fileDescriptor_465b210f45a041b3 = []byte{
// 366 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0xc1, 0x6b, 0x9c, 0x40,
0x14, 0xc6, 0xa3, 0x6e, 0x9a, 0xee, 0xdb, 0xed, 0x32, 0x79, 0xa4, 0x60, 0x6f, 0x61, 0x69, 0x21,
0xe4, 0xa0, 0x90, 0x90, 0x7b, 0xcd, 0x76, 0xa0, 0x76, 0x93, 0x51, 0x46, 0xd3, 0xb4, 0xbd, 0x88,
0xba, 0x2f, 0xc9, 0xb0, 0x46, 0x07, 0x57, 0x29, 0x5e, 0xfb, 0x97, 0x97, 0x48, 0xc4, 0xec, 0x6d,
0x86, 0xdf, 0xc7, 0xf7, 0xfb, 0xe0, 0xc1, 0xe7, 0x2c, 0xcd, 0xb7, 0x54, 0x6e, 0xdc, 0x54, 0x2b,
0xb7, 0xa6, 0x5d, 0xd5, 0xd6, 0x39, 0x25, 0x35, 0x3d, 0x50, 0x4d, 0x65, 0x4e, 0x8e, 0xae, 0xab,
0xa6, 0x42, 0x2b, 0xd5, 0x6a, 0xf9, 0x0d, 0x66, 0xf2, 0x35, 0xb0, 0xa6, 0x0e, 0xbf, 0xc0, 0xa4,
0xe9, 0x34, 0xd9, 0xc6, 0xa9, 0x71, 0xb6, 0xb8, 0x38, 0x76, 0x52, 0xad, 0x9c, 0x81, 0xc7, 0x9d,
0x26, 0xd9, 0x63, 0x5c, 0x80, 0xa9, 0x36, 0xb6, 0x79, 0x6a, 0x9c, 0x4d, 0xa5, 0xa9, 0x36, 0xcb,
0x7f, 0x06, 0x1c, 0x0f, 0x31, 0x39, 0x68, 0x70, 0x09, 0xd6, 0x96, 0xba, 0xbe, 0x6b, 0x76, 0xc1,
0xf6, 0xba, 0xd6, 0xd4, 0xc9, 0x17, 0x88, 0x08, 0x93, 0x32, 0x7d, 0x26, 0xdb, 0xea, 0xbb, 0xfa,
0x37, 0x5e, 0xc1, 0xbc, 0xa6, 0x22, 0x6d, 0x54, 0x55, 0xee, 0x9e, 0x94, 0xee, 0x3d, 0xe3, 0x98,
0x11, 0xc8, 0xbd, 0xd8, 0x79, 0x0b, 0xf3, 0xb7, 0x53, 0xf1, 0x13, 0x7c, 0xbc, 0x13, 0x6b, 0x11,
0xdc, 0x8b, 0x44, 0xf2, 0x28, 0xb8, 0x93, 0x2b, 0x9e, 0xc4, 0xbf, 0x43, 0xce, 0x0e, 0x70, 0x01,
0xc0, 0x7f, 0x85, 0x5c, 0xfa, 0xb7, 0x5c, 0xc4, 0xcc, 0xc0, 0x23, 0xb0, 0x7e, 0x04, 0xd7, 0xcc,
0xc4, 0x39, 0xbc, 0x0f, 0xfd, 0x90, 0xdf, 0xf8, 0x82, 0x33, 0x0b, 0x4f, 0x80, 0x0d, 0xbf, 0xe4,
0x27, 0x97, 0x91, 0x1f, 0x08, 0x36, 0xc1, 0x0f, 0x30, 0x15, 0xde, 0x2d, 0x8f, 0x42, 0x6f, 0xc5,
0xd9, 0xe1, 0xf9, 0xd7, 0x17, 0xed, 0x38, 0x03, 0x6d, 0x38, 0x19, 0xb5, 0x37, 0x5e, 0xec, 0x07,
0x22, 0xfa, 0xee, 0x87, 0xec, 0x00, 0xa7, 0x70, 0x18, 0xdc, 0x0b, 0x2e, 0x99, 0x81, 0x33, 0x38,
0x5a, 0x49, 0xee, 0xc5, 0x81, 0x64, 0xe6, 0xf5, 0xd5, 0x9f, 0xcb, 0x47, 0xd5, 0x3c, 0xb5, 0x99,
0x93, 0x57, 0xcf, 0xee, 0xb6, 0xcd, 0xe8, 0xa1, 0xa8, 0xfe, 0xba, 0x5a, 0x69, 0x2a, 0x54, 0x49,
0x3b, 0xf7, 0xed, 0x39, 0x1f, 0xab, 0x24, 0x2f, 0x14, 0x95, 0x4d, 0xf6, 0xae, 0x3f, 0xe3, 0xe5,
0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x58, 0x92, 0x1b, 0xee, 0x01, 0x00, 0x00,
}
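A brief sketch of these types in use, for example marking an experiment as the owner of some resource, as a run's resource references would. The import path and ID are placeholder assumptions.

package main

import (
	"fmt"

	gc "github.com/kubeflow/kfp-tekton/backend/api/go_client" // assumed import path
)

func main() {
	ref := &gc.ResourceReference{
		Key: &gc.ResourceKey{
			Type: gc.ResourceType_EXPERIMENT,
			Id:   "exp-123", // placeholder ID
		},
		Relationship: gc.Relationship_OWNER,
	}
	// The enums print through the generated *_name maps via String().
	fmt.Println(ref.GetKey().GetType(), ref.GetRelationship()) // EXPERIMENT OWNER
}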

File diff suppressed because it is too large


@@ -1,594 +0,0 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/run.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_RunService_CreateRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreateRunRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Run); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreateRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_GetRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetRunRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["run_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "run_id")
}
protoReq.RunId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "run_id", err)
}
msg, err := client.GetRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_RunService_ListRuns_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_RunService_ListRuns_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListRunsRequest
var metadata runtime.ServerMetadata
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_RunService_ListRuns_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListRuns(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_ArchiveRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ArchiveRunRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.ArchiveRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_UnarchiveRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq UnarchiveRunRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.UnarchiveRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_DeleteRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteRunRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DeleteRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_ReportRunMetrics_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReportRunMetricsRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["run_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "run_id")
}
protoReq.RunId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "run_id", err)
}
msg, err := client.ReportRunMetrics(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_ReadArtifact_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReadArtifactRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["run_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "run_id")
}
protoReq.RunId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "run_id", err)
}
val, ok = pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
val, ok = pathParams["artifact_name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "artifact_name")
}
protoReq.ArtifactName, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "artifact_name", err)
}
msg, err := client.ReadArtifact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_TerminateRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq TerminateRunRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["run_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "run_id")
}
protoReq.RunId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "run_id", err)
}
msg, err := client.TerminateRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_RetryRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq RetryRunRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["run_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "run_id")
}
protoReq.RunId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "run_id", err)
}
msg, err := client.RetryRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterRunServiceHandlerFromEndpoint is the same as RegisterRunServiceHandler but
// automatically dials "endpoint" and closes the connection when "ctx" is done.
func RegisterRunServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterRunServiceHandler(ctx, mux, conn)
}
// RegisterRunServiceHandler registers the http handlers for service RunService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterRunServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterRunServiceHandlerClient(ctx, mux, NewRunServiceClient(conn))
}
// RegisterRunServiceHandlerClient registers the http handlers for service RunService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "RunServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "RunServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "RunServiceClient" to call the correct interceptors.
func RegisterRunServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client RunServiceClient) error {
mux.Handle("POST", pattern_RunService_CreateRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_CreateRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_CreateRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_RunService_GetRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_GetRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_GetRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_RunService_ListRuns_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_ListRuns_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_ListRuns_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_RunService_ArchiveRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_ArchiveRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_ArchiveRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_RunService_UnarchiveRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_UnarchiveRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_UnarchiveRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_RunService_DeleteRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_DeleteRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_DeleteRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_RunService_ReportRunMetrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_ReportRunMetrics_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_ReportRunMetrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_RunService_ReadArtifact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_ReadArtifact_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_ReadArtifact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_RunService_TerminateRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_TerminateRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_TerminateRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_RunService_RetryRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_RetryRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_RetryRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_RunService_CreateRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "runs"}, ""))
pattern_RunService_GetRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "runs", "run_id"}, ""))
pattern_RunService_ListRuns_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "runs"}, ""))
pattern_RunService_ArchiveRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "runs", "id"}, "archive"))
pattern_RunService_UnarchiveRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "runs", "id"}, "unarchive"))
pattern_RunService_DeleteRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "runs", "id"}, ""))
pattern_RunService_ReportRunMetrics_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "runs", "run_id"}, "reportMetrics"))
pattern_RunService_ReadArtifact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7}, []string{"apis", "v1beta1", "runs", "run_id", "nodes", "node_id", "artifacts", "artifact_name"}, "read"))
pattern_RunService_TerminateRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1beta1", "runs", "run_id", "terminate"}, ""))
pattern_RunService_RetryRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1beta1", "runs", "run_id", "retry"}, ""))
)
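// For reference, the op-code patterns above, combined with the HTTP verbs
// registered in RegisterRunServiceHandlerClient, compile to the following
// v1beta1 REST routes (reconstructed from the path segments; trailing verbs
// render as ":verb" suffixes):
//
//	POST   /apis/v1beta1/runs                                                          (CreateRun)
//	GET    /apis/v1beta1/runs                                                          (ListRuns)
//	GET    /apis/v1beta1/runs/{run_id}                                                 (GetRun)
//	POST   /apis/v1beta1/runs/{id}:archive                                             (ArchiveRun)
//	POST   /apis/v1beta1/runs/{id}:unarchive                                           (UnarchiveRun)
//	DELETE /apis/v1beta1/runs/{id}                                                     (DeleteRun)
//	POST   /apis/v1beta1/runs/{run_id}:reportMetrics                                   (ReportRunMetrics)
//	GET    /apis/v1beta1/runs/{run_id}/nodes/{node_id}/artifacts/{artifact_name}:read  (ReadArtifact)
//	POST   /apis/v1beta1/runs/{run_id}/terminate                                       (TerminateRun)
//	POST   /apis/v1beta1/runs/{run_id}/retry                                           (RetryRun)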
var (
forward_RunService_CreateRun_0 = runtime.ForwardResponseMessage
forward_RunService_GetRun_0 = runtime.ForwardResponseMessage
forward_RunService_ListRuns_0 = runtime.ForwardResponseMessage
forward_RunService_ArchiveRun_0 = runtime.ForwardResponseMessage
forward_RunService_UnarchiveRun_0 = runtime.ForwardResponseMessage
forward_RunService_DeleteRun_0 = runtime.ForwardResponseMessage
forward_RunService_ReportRunMetrics_0 = runtime.ForwardResponseMessage
forward_RunService_ReadArtifact_0 = runtime.ForwardResponseMessage
forward_RunService_TerminateRun_0 = runtime.ForwardResponseMessage
forward_RunService_RetryRun_0 = runtime.ForwardResponseMessage
)
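A minimal usage sketch of these handlers, assuming an import path for the generated package and a placeholder API server address (both marked in the comments):

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	gw "github.com/kubeflow/pipelines/backend/api/go_client" // assumed import path for this generated package
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	// Dials the gRPC API server (placeholder address) and mounts every
	// RunService REST route on mux; per the generated code above, the
	// connection is closed once ctx is done.
	if err := gw.RegisterRunServiceHandlerFromEndpoint(ctx, mux, "localhost:8887", opts); err != nil {
		log.Fatalf("failed to register RunService handlers: %v", err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}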

@@ -1,319 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/visualization.proto
package go_client
import (
context "context"
fmt "fmt"
proto "github.com/golang/protobuf/proto"
_ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Type of visualization to be generated.
// This is required when creating the visualization through the CreateVisualization
// API.
type Visualization_Type int32
const (
Visualization_ROC_CURVE Visualization_Type = 0
Visualization_TFDV Visualization_Type = 1
Visualization_TFMA Visualization_Type = 2
Visualization_TABLE Visualization_Type = 3
Visualization_CUSTOM Visualization_Type = 4
)
var Visualization_Type_name = map[int32]string{
0: "ROC_CURVE",
1: "TFDV",
2: "TFMA",
3: "TABLE",
4: "CUSTOM",
}
var Visualization_Type_value = map[string]int32{
"ROC_CURVE": 0,
"TFDV": 1,
"TFMA": 2,
"TABLE": 3,
"CUSTOM": 4,
}
func (x Visualization_Type) String() string {
return proto.EnumName(Visualization_Type_name, int32(x))
}
func (Visualization_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_e22487dd6aa91c3b, []int{1, 0}
}
// Create visualization by providing the type of visualization that is desired
// and input data paths. Input data paths are assumed to be unique and are used
// for determining the output path.
type CreateVisualizationRequest struct {
Visualization *Visualization `protobuf:"bytes,1,opt,name=visualization,proto3" json:"visualization,omitempty"`
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateVisualizationRequest) Reset() { *m = CreateVisualizationRequest{} }
func (m *CreateVisualizationRequest) String() string { return proto.CompactTextString(m) }
func (*CreateVisualizationRequest) ProtoMessage() {}
func (*CreateVisualizationRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_e22487dd6aa91c3b, []int{0}
}
func (m *CreateVisualizationRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateVisualizationRequest.Unmarshal(m, b)
}
func (m *CreateVisualizationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateVisualizationRequest.Marshal(b, m, deterministic)
}
func (m *CreateVisualizationRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateVisualizationRequest.Merge(m, src)
}
func (m *CreateVisualizationRequest) XXX_Size() int {
return xxx_messageInfo_CreateVisualizationRequest.Size(m)
}
func (m *CreateVisualizationRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreateVisualizationRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CreateVisualizationRequest proto.InternalMessageInfo
func (m *CreateVisualizationRequest) GetVisualization() *Visualization {
if m != nil {
return m.Visualization
}
return nil
}
func (m *CreateVisualizationRequest) GetNamespace() string {
if m != nil {
return m.Namespace
}
return ""
}
type Visualization struct {
Type Visualization_Type `protobuf:"varint,1,opt,name=type,proto3,enum=api.Visualization_Type" json:"type,omitempty"`
// Path pattern of input data to be used during generation of visualizations.
// This is required when creating the visualization through the CreateVisualization
// API.
Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
// Variables to be used during generation of a visualization.
// This should be provided as a JSON string.
// This is required when creating the visualization through the CreateVisualization
// API.
Arguments string `protobuf:"bytes,3,opt,name=arguments,proto3" json:"arguments,omitempty"`
// Output. Generated visualization html.
Html string `protobuf:"bytes,4,opt,name=html,proto3" json:"html,omitempty"`
// In case any error happens when generating visualizations, only
// visualization ID and the error message are returned. Client has the
// flexibility of choosing how to handle the error.
Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Visualization) Reset() { *m = Visualization{} }
func (m *Visualization) String() string { return proto.CompactTextString(m) }
func (*Visualization) ProtoMessage() {}
func (*Visualization) Descriptor() ([]byte, []int) {
return fileDescriptor_e22487dd6aa91c3b, []int{1}
}
func (m *Visualization) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Visualization.Unmarshal(m, b)
}
func (m *Visualization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Visualization.Marshal(b, m, deterministic)
}
func (m *Visualization) XXX_Merge(src proto.Message) {
xxx_messageInfo_Visualization.Merge(m, src)
}
func (m *Visualization) XXX_Size() int {
return xxx_messageInfo_Visualization.Size(m)
}
func (m *Visualization) XXX_DiscardUnknown() {
xxx_messageInfo_Visualization.DiscardUnknown(m)
}
var xxx_messageInfo_Visualization proto.InternalMessageInfo
func (m *Visualization) GetType() Visualization_Type {
if m != nil {
return m.Type
}
return Visualization_ROC_CURVE
}
func (m *Visualization) GetSource() string {
if m != nil {
return m.Source
}
return ""
}
func (m *Visualization) GetArguments() string {
if m != nil {
return m.Arguments
}
return ""
}
func (m *Visualization) GetHtml() string {
if m != nil {
return m.Html
}
return ""
}
func (m *Visualization) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func init() {
proto.RegisterEnum("api.Visualization_Type", Visualization_Type_name, Visualization_Type_value)
proto.RegisterType((*CreateVisualizationRequest)(nil), "api.CreateVisualizationRequest")
proto.RegisterType((*Visualization)(nil), "api.Visualization")
}
func init() { proto.RegisterFile("backend/api/visualization.proto", fileDescriptor_e22487dd6aa91c3b) }
var fileDescriptor_e22487dd6aa91c3b = []byte{
// 482 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0xc1, 0x6e, 0xd3, 0x40,
0x10, 0xad, 0x13, 0x27, 0x34, 0x53, 0x02, 0x61, 0x5b, 0x51, 0xcb, 0x0a, 0x4a, 0xe4, 0x53, 0x24,
0xa8, 0xad, 0xa6, 0x17, 0xe0, 0x02, 0x49, 0x28, 0x27, 0xa2, 0x4a, 0x4e, 0x9a, 0x43, 0x2f, 0xd5,
0xda, 0x9d, 0x3a, 0xab, 0x3a, 0xde, 0x65, 0x77, 0x9d, 0xaa, 0x20, 0x2e, 0x48, 0x9c, 0xb8, 0xc1,
0x2f, 0xf0, 0x47, 0xdc, 0x39, 0xf1, 0x21, 0x28, 0x9b, 0x28, 0xc4, 0x6a, 0x7b, 0xf2, 0xee, 0xec,
0x9b, 0xf7, 0xc6, 0x6f, 0x1e, 0xb4, 0x22, 0x1a, 0x5f, 0x61, 0x76, 0x11, 0x50, 0xc1, 0x82, 0x39,
0x53, 0x39, 0x4d, 0xd9, 0x27, 0xaa, 0x19, 0xcf, 0x7c, 0x21, 0xb9, 0xe6, 0xa4, 0x4c, 0x05, 0x73,
0x9b, 0x09, 0xe7, 0x49, 0x8a, 0x06, 0x44, 0xb3, 0x8c, 0x6b, 0x83, 0x50, 0x4b, 0x88, 0xbb, 0xbf,
0xc9, 0x81, 0x52, 0x72, 0xb9, 0x7a, 0x78, 0x61, 0x3e, 0xf1, 0x41, 0x82, 0xd9, 0x81, 0xba, 0xa6,
0x49, 0x82, 0x32, 0xe0, 0xc2, 0xb4, 0xde, 0xa6, 0xf1, 0x34, 0xb8, 0x03, 0x89, 0x54, 0xe3, 0x64,
0x73, 0x8c, 0x10, 0x3f, 0xe6, 0xa8, 0x34, 0x79, 0x09, 0xf5, 0xc2, 0x78, 0x8e, 0xd5, 0xb6, 0x3a,
0x3b, 0x5d, 0xe2, 0x53, 0xc1, 0xfc, 0x62, 0x47, 0x11, 0x48, 0x9a, 0x50, 0xcb, 0xe8, 0x0c, 0x95,
0xa0, 0x31, 0x3a, 0xa5, 0xb6, 0xd5, 0xa9, 0x85, 0xff, 0x0b, 0xde, 0x1f, 0x0b, 0xea, 0x85, 0x76,
0xf2, 0x1c, 0x6c, 0x7d, 0x23, 0xd0, 0x08, 0x3c, 0xea, 0xee, 0xdf, 0x16, 0xf0, 0xc7, 0x37, 0x02,
0x43, 0x03, 0x22, 0x4f, 0xa1, 0xaa, 0x78, 0x2e, 0xd7, 0xcc, 0xab, 0xdb, 0x42, 0x94, 0xca, 0x24,
0x9f, 0x61, 0xa6, 0x95, 0x53, 0x5e, 0x8a, 0xae, 0x0b, 0x84, 0x80, 0x3d, 0xd5, 0xb3, 0xd4, 0xb1,
0xcd, 0x83, 0x39, 0x93, 0x3d, 0xa8, 0x18, 0xef, 0x9c, 0x8a, 0x29, 0x2e, 0x2f, 0xde, 0x5b, 0xb0,
0x17, 0x6a, 0xa4, 0x0e, 0xb5, 0xf0, 0x64, 0x70, 0x3e, 0x38, 0x0d, 0x27, 0xc7, 0x8d, 0x2d, 0xb2,
0x0d, 0xf6, 0xf8, 0xfd, 0xbb, 0x49, 0xc3, 0x5a, 0x9e, 0x86, 0xbd, 0x46, 0x89, 0xd4, 0xa0, 0x32,
0xee, 0xf5, 0x3f, 0x1c, 0x37, 0xca, 0x04, 0xa0, 0x3a, 0x38, 0x1d, 0x8d, 0x4f, 0x86, 0x0d, 0xbb,
0xfb, 0xcb, 0x82, 0xbd, 0xc2, 0xf8, 0x23, 0x94, 0x73, 0x16, 0x23, 0xf9, 0x6e, 0xc1, 0xee, 0x1d,
0x86, 0x93, 0x96, 0xf9, 0xe3, 0xfb, 0x57, 0xe1, 0xde, 0xe1, 0xb9, 0xf7, 0xe6, 0xeb, 0xef, 0xbf,
0x3f, 0x4b, 0xaf, 0xbc, 0xce, 0x22, 0x04, 0x2a, 0x98, 0x1f, 0x46, 0xa8, 0xe9, 0x61, 0x31, 0x51,
0x2a, 0xf8, 0xbc, 0xf6, 0xfd, 0xcb, 0xeb, 0xe2, 0x96, 0xfa, 0xdf, 0xac, 0x1f, 0xbd, 0x61, 0xd8,
0x84, 0x07, 0x17, 0x78, 0x49, 0xf3, 0x54, 0x93, 0x27, 0xe4, 0x31, 0xd4, 0xdd, 0x1d, 0xa3, 0x35,
0xd2, 0x54, 0xe7, 0xea, 0xac, 0x05, 0xcf, 0xa0, 0xda, 0x47, 0x2a, 0x51, 0x92, 0xdd, 0xed, 0x92,
0x5b, 0xa7, 0xb9, 0x9e, 0x72, 0xb9, 0xa2, 0x68, 0x97, 0xa2, 0x87, 0x00, 0x6b, 0xc0, 0xd6, 0xd9,
0x51, 0xc2, 0xf4, 0x34, 0x8f, 0xfc, 0x98, 0xcf, 0x82, 0xab, 0x3c, 0xc2, 0xcb, 0x94, 0x5f, 0x07,
0x82, 0x09, 0x4c, 0x59, 0x86, 0x2a, 0xd8, 0x4c, 0x6d, 0xc2, 0xcf, 0xe3, 0x94, 0x61, 0xa6, 0xa3,
0xaa, 0x09, 0xe3, 0xd1, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x50, 0x9c, 0xbc, 0x19, 0x03,
0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// VisualizationServiceClient is the client API for VisualizationService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type VisualizationServiceClient interface {
CreateVisualization(ctx context.Context, in *CreateVisualizationRequest, opts ...grpc.CallOption) (*Visualization, error)
}
type visualizationServiceClient struct {
cc *grpc.ClientConn
}
func NewVisualizationServiceClient(cc *grpc.ClientConn) VisualizationServiceClient {
return &visualizationServiceClient{cc}
}
func (c *visualizationServiceClient) CreateVisualization(ctx context.Context, in *CreateVisualizationRequest, opts ...grpc.CallOption) (*Visualization, error) {
out := new(Visualization)
err := c.cc.Invoke(ctx, "/api.VisualizationService/CreateVisualization", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// VisualizationServiceServer is the server API for VisualizationService service.
type VisualizationServiceServer interface {
CreateVisualization(context.Context, *CreateVisualizationRequest) (*Visualization, error)
}
// UnimplementedVisualizationServiceServer can be embedded to have forward compatible implementations.
type UnimplementedVisualizationServiceServer struct {
}
func (*UnimplementedVisualizationServiceServer) CreateVisualization(ctx context.Context, req *CreateVisualizationRequest) (*Visualization, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateVisualization not implemented")
}
func RegisterVisualizationServiceServer(s *grpc.Server, srv VisualizationServiceServer) {
s.RegisterService(&_VisualizationService_serviceDesc, srv)
}
func _VisualizationService_CreateVisualization_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateVisualizationRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(VisualizationServiceServer).CreateVisualization(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.VisualizationService/CreateVisualization",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(VisualizationServiceServer).CreateVisualization(ctx, req.(*CreateVisualizationRequest))
}
return interceptor(ctx, in, info, handler)
}
var _VisualizationService_serviceDesc = grpc.ServiceDesc{
ServiceName: "api.VisualizationService",
HandlerType: (*VisualizationServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateVisualization",
Handler: _VisualizationService_CreateVisualization_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/visualization.proto",
}
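A short sketch of calling this service directly over gRPC; the server address, source path, and namespace are placeholders (through the HTTP gateway registered below, the equivalent call is POST /apis/v1beta1/visualizations/{namespace}):

func createROCVisualization(ctx context.Context) (*Visualization, error) {
	conn, err := grpc.Dial("localhost:8887", grpc.WithInsecure()) // placeholder address
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	client := NewVisualizationServiceClient(conn)
	// Type and Source follow the field comments above; the source path and
	// namespace here are placeholders.
	return client.CreateVisualization(ctx, &CreateVisualizationRequest{
		Visualization: &Visualization{
			Type:   Visualization_ROC_CURVE,
			Source: "gs://my-bucket/roc-data.csv",
		},
		Namespace: "kubeflow",
	})
}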

@@ -1,133 +0,0 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/visualization.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_VisualizationService_CreateVisualization_0(ctx context.Context, marshaler runtime.Marshaler, client VisualizationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreateVisualizationRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Visualization); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["namespace"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace")
}
protoReq.Namespace, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err)
}
msg, err := client.CreateVisualization(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterVisualizationServiceHandlerFromEndpoint is the same as RegisterVisualizationServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterVisualizationServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterVisualizationServiceHandler(ctx, mux, conn)
}
// RegisterVisualizationServiceHandler registers the http handlers for service VisualizationService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterVisualizationServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterVisualizationServiceHandlerClient(ctx, mux, NewVisualizationServiceClient(conn))
}
// RegisterVisualizationServiceHandlerClient registers the http handlers for service VisualizationService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "VisualizationServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "VisualizationServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "VisualizationServiceClient" to call the correct interceptors.
func RegisterVisualizationServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client VisualizationServiceClient) error {
mux.Handle("POST", pattern_VisualizationService_CreateVisualization_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_VisualizationService_CreateVisualization_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_VisualizationService_CreateVisualization_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_VisualizationService_CreateVisualization_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "visualizations", "namespace"}, ""))
)
var (
forward_VisualizationService_CreateVisualization_0 = runtime.ForwardResponseMessage
)

@@ -1,136 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewArchiveExperimentParams creates a new ArchiveExperimentParams object
// with the default values initialized.
func NewArchiveExperimentParams() *ArchiveExperimentParams {
var ()
return &ArchiveExperimentParams{
timeout: cr.DefaultTimeout,
}
}
// NewArchiveExperimentParamsWithTimeout creates a new ArchiveExperimentParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewArchiveExperimentParamsWithTimeout(timeout time.Duration) *ArchiveExperimentParams {
var ()
return &ArchiveExperimentParams{
timeout: timeout,
}
}
// NewArchiveExperimentParamsWithContext creates a new ArchiveExperimentParams object
// with the default values initialized, and the ability to set a context for a request
func NewArchiveExperimentParamsWithContext(ctx context.Context) *ArchiveExperimentParams {
var ()
return &ArchiveExperimentParams{
Context: ctx,
}
}
// NewArchiveExperimentParamsWithHTTPClient creates a new ArchiveExperimentParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewArchiveExperimentParamsWithHTTPClient(client *http.Client) *ArchiveExperimentParams {
var ()
return &ArchiveExperimentParams{
HTTPClient: client,
}
}
/*ArchiveExperimentParams contains all the parameters to send to the API endpoint
for the archive experiment operation. Typically these are written to a http.Request.
*/
type ArchiveExperimentParams struct {
/*ID
The ID of the experiment to be archived.
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the archive experiment params
func (o *ArchiveExperimentParams) WithTimeout(timeout time.Duration) *ArchiveExperimentParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the archive experiment params
func (o *ArchiveExperimentParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the archive experiment params
func (o *ArchiveExperimentParams) WithContext(ctx context.Context) *ArchiveExperimentParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the archive experiment params
func (o *ArchiveExperimentParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the archive experiment params
func (o *ArchiveExperimentParams) WithHTTPClient(client *http.Client) *ArchiveExperimentParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the archive experiment params
func (o *ArchiveExperimentParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the archive experiment params
func (o *ArchiveExperimentParams) WithID(id string) *ArchiveExperimentParams {
o.SetID(id)
return o
}
// SetID adds the id to the archive experiment params
func (o *ArchiveExperimentParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *ArchiveExperimentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
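A short sketch of the fluent builder style these setters enable (the experiment ID is a placeholder). The resulting params are passed to Client.ArchiveExperiment (shown further below), which invokes WriteToRequest to serialize them onto the outgoing request:

func newArchiveParams() *ArchiveExperimentParams {
	return NewArchiveExperimentParams().
		WithID("a1b2c3d4"). // placeholder experiment ID
		WithTimeout(30 * time.Second)
}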

@@ -1,110 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// ArchiveExperimentReader is a Reader for the ArchiveExperiment structure.
type ArchiveExperimentReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *ArchiveExperimentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewArchiveExperimentOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewArchiveExperimentDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewArchiveExperimentOK creates a ArchiveExperimentOK with default headers values
func NewArchiveExperimentOK() *ArchiveExperimentOK {
return &ArchiveExperimentOK{}
}
/*ArchiveExperimentOK handles this case with default header values.
A successful response.
*/
type ArchiveExperimentOK struct {
Payload interface{}
}
func (o *ArchiveExperimentOK) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/experiments/{id}:archive][%d] archiveExperimentOK %+v", 200, o.Payload)
}
func (o *ArchiveExperimentOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewArchiveExperimentDefault creates a ArchiveExperimentDefault with default headers values
func NewArchiveExperimentDefault(code int) *ArchiveExperimentDefault {
return &ArchiveExperimentDefault{
_statusCode: code,
}
}
/*ArchiveExperimentDefault handles this case with default header values.
This is the default response for the archive experiment operation, returned for any non-200 status.
*/
type ArchiveExperimentDefault struct {
_statusCode int
Payload *experiment_model.APIStatus
}
// Code gets the status code for the archive experiment default response
func (o *ArchiveExperimentDefault) Code() int {
return o._statusCode
}
func (o *ArchiveExperimentDefault) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/experiments/{id}:archive][%d] ArchiveExperiment default %+v", o._statusCode, o.Payload)
}
func (o *ArchiveExperimentDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
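For callers, the two branches above are asymmetric: a 200 decodes into *ArchiveExperimentOK, while a non-2xx status decodes into *ArchiveExperimentDefault, which implements error and is returned as such by the service client. A sketch of distinguishing the two (client construction omitted):

func archive(c *Client, id string) error {
	ok, err := c.ArchiveExperiment(NewArchiveExperimentParams().WithID(id), nil) // nil auth writer: unauthenticated call
	if err != nil {
		// Non-2xx responses surface as *ArchiveExperimentDefault.
		if apiErr, isDefault := err.(*ArchiveExperimentDefault); isDefault {
			return fmt.Errorf("archive failed with status %d: %+v", apiErr.Code(), apiErr.Payload)
		}
		return err
	}
	_ = ok.Payload // a successful archive carries an empty payload
	return nil
}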

@@ -1,139 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// NewCreateExperimentParams creates a new CreateExperimentParams object
// with the default values initialized.
func NewCreateExperimentParams() *CreateExperimentParams {
var ()
return &CreateExperimentParams{
timeout: cr.DefaultTimeout,
}
}
// NewCreateExperimentParamsWithTimeout creates a new CreateExperimentParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewCreateExperimentParamsWithTimeout(timeout time.Duration) *CreateExperimentParams {
var ()
return &CreateExperimentParams{
timeout: timeout,
}
}
// NewCreateExperimentParamsWithContext creates a new CreateExperimentParams object
// with the default values initialized, and the ability to set a context for a request
func NewCreateExperimentParamsWithContext(ctx context.Context) *CreateExperimentParams {
var ()
return &CreateExperimentParams{
Context: ctx,
}
}
// NewCreateExperimentParamsWithHTTPClient creates a new CreateExperimentParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewCreateExperimentParamsWithHTTPClient(client *http.Client) *CreateExperimentParams {
var ()
return &CreateExperimentParams{
HTTPClient: client,
}
}
/*CreateExperimentParams contains all the parameters to send to the API endpoint
for the create experiment operation. Typically these are written to a http.Request.
*/
type CreateExperimentParams struct {
/*Body
The experiment to be created.
*/
Body *experiment_model.APIExperiment
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the create experiment params
func (o *CreateExperimentParams) WithTimeout(timeout time.Duration) *CreateExperimentParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the create experiment params
func (o *CreateExperimentParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the create experiment params
func (o *CreateExperimentParams) WithContext(ctx context.Context) *CreateExperimentParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the create experiment params
func (o *CreateExperimentParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the create experiment params
func (o *CreateExperimentParams) WithHTTPClient(client *http.Client) *CreateExperimentParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the create experiment params
func (o *CreateExperimentParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithBody adds the body to the create experiment params
func (o *CreateExperimentParams) WithBody(body *experiment_model.APIExperiment) *CreateExperimentParams {
o.SetBody(body)
return o
}
// SetBody adds the body to the create experiment params
func (o *CreateExperimentParams) SetBody(body *experiment_model.APIExperiment) {
o.Body = body
}
// WriteToRequest writes these params to a swagger request
func (o *CreateExperimentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if o.Body != nil {
if err := r.SetBodyParam(o.Body); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
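A sketch of building the request body; Name and Description are assumed fields of experiment_model.APIExperiment, and the values are placeholders:

func newCreateParams() *CreateExperimentParams {
	return NewCreateExperimentParams().
		WithBody(&experiment_model.APIExperiment{
			Name:        "my-experiment",
			Description: "created through the generated client",
		})
}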

@@ -1,112 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// CreateExperimentReader is a Reader for the CreateExperiment structure.
type CreateExperimentReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *CreateExperimentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewCreateExperimentOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewCreateExperimentDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewCreateExperimentOK creates a CreateExperimentOK with default headers values
func NewCreateExperimentOK() *CreateExperimentOK {
return &CreateExperimentOK{}
}
/*CreateExperimentOK handles this case with default header values.
A successful response.
*/
type CreateExperimentOK struct {
Payload *experiment_model.APIExperiment
}
func (o *CreateExperimentOK) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/experiments][%d] createExperimentOK %+v", 200, o.Payload)
}
func (o *CreateExperimentOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIExperiment)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewCreateExperimentDefault creates a CreateExperimentDefault with default headers values
func NewCreateExperimentDefault(code int) *CreateExperimentDefault {
return &CreateExperimentDefault{
_statusCode: code,
}
}
/*CreateExperimentDefault handles this case with default header values.
This is the default response for the create experiment operation, returned for any non-200 status.
*/
type CreateExperimentDefault struct {
_statusCode int
Payload *experiment_model.APIStatus
}
// Code gets the status code for the create experiment default response
func (o *CreateExperimentDefault) Code() int {
return o._statusCode
}
func (o *CreateExperimentDefault) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/experiments][%d] CreateExperiment default %+v", o._statusCode, o.Payload)
}
func (o *CreateExperimentDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}

@@ -1,136 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewDeleteExperimentParams creates a new DeleteExperimentParams object
// with the default values initialized.
func NewDeleteExperimentParams() *DeleteExperimentParams {
var ()
return &DeleteExperimentParams{
timeout: cr.DefaultTimeout,
}
}
// NewDeleteExperimentParamsWithTimeout creates a new DeleteExperimentParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewDeleteExperimentParamsWithTimeout(timeout time.Duration) *DeleteExperimentParams {
var ()
return &DeleteExperimentParams{
timeout: timeout,
}
}
// NewDeleteExperimentParamsWithContext creates a new DeleteExperimentParams object
// with the default values initialized, and the ability to set a context for a request
func NewDeleteExperimentParamsWithContext(ctx context.Context) *DeleteExperimentParams {
var ()
return &DeleteExperimentParams{
Context: ctx,
}
}
// NewDeleteExperimentParamsWithHTTPClient creates a new DeleteExperimentParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewDeleteExperimentParamsWithHTTPClient(client *http.Client) *DeleteExperimentParams {
var ()
return &DeleteExperimentParams{
HTTPClient: client,
}
}
/*DeleteExperimentParams contains all the parameters to send to the API endpoint
for the delete experiment operation. Typically these are written to a http.Request.
*/
type DeleteExperimentParams struct {
/*ID
The ID of the experiment to be deleted.
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the delete experiment params
func (o *DeleteExperimentParams) WithTimeout(timeout time.Duration) *DeleteExperimentParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the delete experiment params
func (o *DeleteExperimentParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the delete experiment params
func (o *DeleteExperimentParams) WithContext(ctx context.Context) *DeleteExperimentParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the delete experiment params
func (o *DeleteExperimentParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the delete experiment params
func (o *DeleteExperimentParams) WithHTTPClient(client *http.Client) *DeleteExperimentParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the delete experiment params
func (o *DeleteExperimentParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the delete experiment params
func (o *DeleteExperimentParams) WithID(id string) *DeleteExperimentParams {
o.SetID(id)
return o
}
// SetID adds the id to the delete experiment params
func (o *DeleteExperimentParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *DeleteExperimentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}

@@ -1,110 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// DeleteExperimentReader is a Reader for the DeleteExperiment structure.
type DeleteExperimentReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *DeleteExperimentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewDeleteExperimentOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewDeleteExperimentDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewDeleteExperimentOK creates a DeleteExperimentOK with default headers values
func NewDeleteExperimentOK() *DeleteExperimentOK {
return &DeleteExperimentOK{}
}
/*DeleteExperimentOK handles this case with default header values.
A successful response.
*/
type DeleteExperimentOK struct {
Payload interface{}
}
func (o *DeleteExperimentOK) Error() string {
return fmt.Sprintf("[DELETE /apis/v1beta1/experiments/{id}][%d] deleteExperimentOK %+v", 200, o.Payload)
}
func (o *DeleteExperimentOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewDeleteExperimentDefault creates a DeleteExperimentDefault with default headers values
func NewDeleteExperimentDefault(code int) *DeleteExperimentDefault {
return &DeleteExperimentDefault{
_statusCode: code,
}
}
/*DeleteExperimentDefault handles this case with default header values.
This is the default response for the delete experiment operation, returned for any non-200 status.
*/
type DeleteExperimentDefault struct {
_statusCode int
Payload *experiment_model.APIStatus
}
// Code gets the status code for the delete experiment default response
func (o *DeleteExperimentDefault) Code() int {
return o._statusCode
}
func (o *DeleteExperimentDefault) Error() string {
return fmt.Sprintf("[DELETE /apis/v1beta1/experiments/{id}][%d] DeleteExperiment default %+v", o._statusCode, o.Payload)
}
func (o *DeleteExperimentDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}

@@ -1,204 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
)
// New creates a new experiment service API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client {
return &Client{transport: transport, formats: formats}
}
/*
Client for experiment service API
*/
type Client struct {
transport runtime.ClientTransport
formats strfmt.Registry
}
/*
ArchiveExperiment archives an experiment and the experiment's runs and jobs
*/
func (a *Client) ArchiveExperiment(params *ArchiveExperimentParams, authInfo runtime.ClientAuthInfoWriter) (*ArchiveExperimentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewArchiveExperimentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "ArchiveExperiment",
Method: "POST",
PathPattern: "/apis/v1beta1/experiments/{id}:archive",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &ArchiveExperimentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*ArchiveExperimentOK), nil
}
/*
CreateExperiment creates a new experiment
*/
func (a *Client) CreateExperiment(params *CreateExperimentParams, authInfo runtime.ClientAuthInfoWriter) (*CreateExperimentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewCreateExperimentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "CreateExperiment",
Method: "POST",
PathPattern: "/apis/v1beta1/experiments",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &CreateExperimentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*CreateExperimentOK), nil
}
/*
DeleteExperiment deletes an experiment without deleting the experiment's runs and jobs. To avoid unexpected behaviors, delete an experiment's runs and jobs before deleting the experiment.
*/
func (a *Client) DeleteExperiment(params *DeleteExperimentParams, authInfo runtime.ClientAuthInfoWriter) (*DeleteExperimentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewDeleteExperimentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "DeleteExperiment",
Method: "DELETE",
PathPattern: "/apis/v1beta1/experiments/{id}",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &DeleteExperimentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*DeleteExperimentOK), nil
}
/*
GetExperiment finds a specific experiment by ID
*/
func (a *Client) GetExperiment(params *GetExperimentParams, authInfo runtime.ClientAuthInfoWriter) (*GetExperimentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewGetExperimentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "GetExperiment",
Method: "GET",
PathPattern: "/apis/v1beta1/experiments/{id}",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &GetExperimentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*GetExperimentOK), nil
}
/*
ListExperiment finds all experiments; supports pagination and sorting on certain fields.
*/
func (a *Client) ListExperiment(params *ListExperimentParams, authInfo runtime.ClientAuthInfoWriter) (*ListExperimentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewListExperimentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "ListExperiment",
Method: "GET",
PathPattern: "/apis/v1beta1/experiments",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &ListExperimentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*ListExperimentOK), nil
}
/*
UnarchiveExperiment restores an archived experiment. The experiment's archived runs and jobs will stay archived.
*/
func (a *Client) UnarchiveExperiment(params *UnarchiveExperimentParams, authInfo runtime.ClientAuthInfoWriter) (*UnarchiveExperimentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewUnarchiveExperimentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "UnarchiveExperiment",
Method: "POST",
PathPattern: "/apis/v1beta1/experiments/{id}:unarchive",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &UnarchiveExperimentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*UnarchiveExperimentOK), nil
}
// SetTransport changes the transport on the client
func (a *Client) SetTransport(transport runtime.ClientTransport) {
a.transport = transport
}
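Wiring the pieces together, a hedged end-to-end sketch that lists experiments; the import path, host, and response payload shape (Payload.Experiments) are assumptions based on the usual go-swagger layout:

package main

import (
	"fmt"
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_client/experiment_service" // assumed import path
)

func main() {
	transport := httptransport.New("localhost:8888", "/", []string{"http"}) // placeholder host
	c := experiment_service.New(transport, strfmt.Default)

	resp, err := c.ListExperiment(experiment_service.NewListExperimentParams(), nil)
	if err != nil {
		log.Fatalf("ListExperiment: %v", err)
	}
	for _, e := range resp.Payload.Experiments {
		fmt.Println(e.ID, e.Name)
	}
}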

@@ -1,136 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewGetExperimentParams creates a new GetExperimentParams object
// with the default values initialized.
func NewGetExperimentParams() *GetExperimentParams {
var ()
return &GetExperimentParams{
timeout: cr.DefaultTimeout,
}
}
// NewGetExperimentParamsWithTimeout creates a new GetExperimentParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewGetExperimentParamsWithTimeout(timeout time.Duration) *GetExperimentParams {
var ()
return &GetExperimentParams{
timeout: timeout,
}
}
// NewGetExperimentParamsWithContext creates a new GetExperimentParams object
// with the default values initialized, and the ability to set a context for a request
func NewGetExperimentParamsWithContext(ctx context.Context) *GetExperimentParams {
var ()
return &GetExperimentParams{
Context: ctx,
}
}
// NewGetExperimentParamsWithHTTPClient creates a new GetExperimentParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewGetExperimentParamsWithHTTPClient(client *http.Client) *GetExperimentParams {
var ()
return &GetExperimentParams{
HTTPClient: client,
}
}
/*GetExperimentParams contains all the parameters to send to the API endpoint
for the get experiment operation. Typically these are written to a http.Request.
*/
type GetExperimentParams struct {
/*ID
The ID of the experiment to be retrieved.
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the get experiment params
func (o *GetExperimentParams) WithTimeout(timeout time.Duration) *GetExperimentParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the get experiment params
func (o *GetExperimentParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the get experiment params
func (o *GetExperimentParams) WithContext(ctx context.Context) *GetExperimentParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the get experiment params
func (o *GetExperimentParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the get experiment params
func (o *GetExperimentParams) WithHTTPClient(client *http.Client) *GetExperimentParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the get experiment params
func (o *GetExperimentParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the get experiment params
func (o *GetExperimentParams) WithID(id string) *GetExperimentParams {
o.SetID(id)
return o
}
// SetID adds the id to the get experiment params
func (o *GetExperimentParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *GetExperimentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}

@@ -1,112 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// GetExperimentReader is a Reader for the GetExperiment structure.
type GetExperimentReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GetExperimentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetExperimentOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewGetExperimentDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewGetExperimentOK creates a GetExperimentOK with default headers values
func NewGetExperimentOK() *GetExperimentOK {
return &GetExperimentOK{}
}
/*GetExperimentOK handles this case with default header values.
A successful response.
*/
type GetExperimentOK struct {
Payload *experiment_model.APIExperiment
}
func (o *GetExperimentOK) Error() string {
return fmt.Sprintf("[GET /apis/v1beta1/experiments/{id}][%d] getExperimentOK %+v", 200, o.Payload)
}
func (o *GetExperimentOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIExperiment)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetExperimentDefault creates a GetExperimentDefault with default headers values
func NewGetExperimentDefault(code int) *GetExperimentDefault {
return &GetExperimentDefault{
_statusCode: code,
}
}
/*GetExperimentDefault handles this case with default header values.
This is the default response for the get experiment operation, returned for any non-200 status.
*/
type GetExperimentDefault struct {
_statusCode int
Payload *experiment_model.APIStatus
}
// Code gets the status code for the get experiment default response
func (o *GetExperimentDefault) Code() int {
return o._statusCode
}
func (o *GetExperimentDefault) Error() string {
return fmt.Sprintf("[GET /apis/v1beta1/experiments/{id}][%d] GetExperiment default %+v", o._statusCode, o.Payload)
}
func (o *GetExperimentDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
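
The default branch above means any non-2xx response is decoded into GetExperimentDefault, which satisfies the error interface through its Error() method. A hedged sketch of unwrapping it, assuming a generated experiment_service.Client with a GetExperiment method analogous to the healthz client later in this diff:

// Sketch only: Client and GetExperiment are assumed to be generated
// alongside this reader; fmt is the only other import needed.
func getExperimentName(client *experiment_service.Client, id string) (string, error) {
	params := experiment_service.NewGetExperimentParams().WithID(id)
	ok, err := client.GetExperiment(params, nil)
	if err != nil {
		// Non-2xx responses surface here as *GetExperimentDefault.
		if def, isDefault := err.(*experiment_service.GetExperimentDefault); isDefault {
			return "", fmt.Errorf("API status %d: %s", def.Code(), def.Payload.Error)
		}
		return "", err
	}
	return ok.Payload.Name, nil
}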


@@ -1,327 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/swag"
strfmt "github.com/go-openapi/strfmt"
)
// NewListExperimentParams creates a new ListExperimentParams object
// with the default values initialized.
func NewListExperimentParams() *ListExperimentParams {
var (
resourceReferenceKeyTypeDefault = string("UNKNOWN_RESOURCE_TYPE")
)
return &ListExperimentParams{
ResourceReferenceKeyType: &resourceReferenceKeyTypeDefault,
timeout: cr.DefaultTimeout,
}
}
// NewListExperimentParamsWithTimeout creates a new ListExperimentParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewListExperimentParamsWithTimeout(timeout time.Duration) *ListExperimentParams {
var (
resourceReferenceKeyTypeDefault = string("UNKNOWN_RESOURCE_TYPE")
)
return &ListExperimentParams{
ResourceReferenceKeyType: &resourceReferenceKeyTypeDefault,
timeout: timeout,
}
}
// NewListExperimentParamsWithContext creates a new ListExperimentParams object
// with the default values initialized, and the ability to set a context for a request
func NewListExperimentParamsWithContext(ctx context.Context) *ListExperimentParams {
var (
resourceReferenceKeyTypeDefault = string("UNKNOWN_RESOURCE_TYPE")
)
return &ListExperimentParams{
ResourceReferenceKeyType: &resourceReferenceKeyTypeDefault,
Context: ctx,
}
}
// NewListExperimentParamsWithHTTPClient creates a new ListExperimentParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewListExperimentParamsWithHTTPClient(client *http.Client) *ListExperimentParams {
var (
resourceReferenceKeyTypeDefault = string("UNKNOWN_RESOURCE_TYPE")
)
return &ListExperimentParams{
ResourceReferenceKeyType: &resourceReferenceKeyTypeDefault,
HTTPClient: client,
}
}
/*ListExperimentParams contains all the parameters to send to the API endpoint
for the list experiment operation. Typically these are written to an http.Request.
*/
type ListExperimentParams struct {
/*Filter
A url-encoded, JSON-serialized Filter protocol buffer (see
[filter.proto](https://github.com/kubeflow/pipelines/blob/master/backend/api/filter.proto)).
*/
Filter *string
/*PageSize
The number of experiments to be listed per page. If there are more
experiments than this number, the response message will contain a
nextPageToken field you can use to fetch the next page.
*/
PageSize *int32
/*PageToken
A page token to request the next page of results. The token is acquired
from the nextPageToken field of the response from the previous
ListExperiment call or can be omitted when fetching the first page.
*/
PageToken *string
/*ResourceReferenceKeyID
The ID of the resource being referred to.
*/
ResourceReferenceKeyID *string
/*ResourceReferenceKeyType
The type of the resource being referred to.
*/
ResourceReferenceKeyType *string
/*SortBy
Can be in the format of "field_name", "field_name asc", or "field_name desc".
Ascending by default.
*/
SortBy *string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the list experiment params
func (o *ListExperimentParams) WithTimeout(timeout time.Duration) *ListExperimentParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the list experiment params
func (o *ListExperimentParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the list experiment params
func (o *ListExperimentParams) WithContext(ctx context.Context) *ListExperimentParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the list experiment params
func (o *ListExperimentParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the list experiment params
func (o *ListExperimentParams) WithHTTPClient(client *http.Client) *ListExperimentParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the list experiment params
func (o *ListExperimentParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithFilter adds the filter to the list experiment params
func (o *ListExperimentParams) WithFilter(filter *string) *ListExperimentParams {
o.SetFilter(filter)
return o
}
// SetFilter adds the filter to the list experiment params
func (o *ListExperimentParams) SetFilter(filter *string) {
o.Filter = filter
}
// WithPageSize adds the pageSize to the list experiment params
func (o *ListExperimentParams) WithPageSize(pageSize *int32) *ListExperimentParams {
o.SetPageSize(pageSize)
return o
}
// SetPageSize adds the pageSize to the list experiment params
func (o *ListExperimentParams) SetPageSize(pageSize *int32) {
o.PageSize = pageSize
}
// WithPageToken adds the pageToken to the list experiment params
func (o *ListExperimentParams) WithPageToken(pageToken *string) *ListExperimentParams {
o.SetPageToken(pageToken)
return o
}
// SetPageToken adds the pageToken to the list experiment params
func (o *ListExperimentParams) SetPageToken(pageToken *string) {
o.PageToken = pageToken
}
// WithResourceReferenceKeyID adds the resourceReferenceKeyID to the list experiment params
func (o *ListExperimentParams) WithResourceReferenceKeyID(resourceReferenceKeyID *string) *ListExperimentParams {
o.SetResourceReferenceKeyID(resourceReferenceKeyID)
return o
}
// SetResourceReferenceKeyID adds the resourceReferenceKeyId to the list experiment params
func (o *ListExperimentParams) SetResourceReferenceKeyID(resourceReferenceKeyID *string) {
o.ResourceReferenceKeyID = resourceReferenceKeyID
}
// WithResourceReferenceKeyType adds the resourceReferenceKeyType to the list experiment params
func (o *ListExperimentParams) WithResourceReferenceKeyType(resourceReferenceKeyType *string) *ListExperimentParams {
o.SetResourceReferenceKeyType(resourceReferenceKeyType)
return o
}
// SetResourceReferenceKeyType adds the resourceReferenceKeyType to the list experiment params
func (o *ListExperimentParams) SetResourceReferenceKeyType(resourceReferenceKeyType *string) {
o.ResourceReferenceKeyType = resourceReferenceKeyType
}
// WithSortBy adds the sortBy to the list experiment params
func (o *ListExperimentParams) WithSortBy(sortBy *string) *ListExperimentParams {
o.SetSortBy(sortBy)
return o
}
// SetSortBy adds the sortBy to the list experiment params
func (o *ListExperimentParams) SetSortBy(sortBy *string) {
o.SortBy = sortBy
}
// WriteToRequest writes these params to a swagger request
func (o *ListExperimentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if o.Filter != nil {
// query param filter
var qrFilter string
if o.Filter != nil {
qrFilter = *o.Filter
}
qFilter := qrFilter
if qFilter != "" {
if err := r.SetQueryParam("filter", qFilter); err != nil {
return err
}
}
}
if o.PageSize != nil {
// query param page_size
var qrPageSize int32
if o.PageSize != nil {
qrPageSize = *o.PageSize
}
qPageSize := swag.FormatInt32(qrPageSize)
if qPageSize != "" {
if err := r.SetQueryParam("page_size", qPageSize); err != nil {
return err
}
}
}
if o.PageToken != nil {
// query param page_token
var qrPageToken string
if o.PageToken != nil {
qrPageToken = *o.PageToken
}
qPageToken := qrPageToken
if qPageToken != "" {
if err := r.SetQueryParam("page_token", qPageToken); err != nil {
return err
}
}
}
if o.ResourceReferenceKeyID != nil {
// query param resource_reference_key.id
var qrResourceReferenceKeyID string
if o.ResourceReferenceKeyID != nil {
qrResourceReferenceKeyID = *o.ResourceReferenceKeyID
}
qResourceReferenceKeyID := qrResourceReferenceKeyID
if qResourceReferenceKeyID != "" {
if err := r.SetQueryParam("resource_reference_key.id", qResourceReferenceKeyID); err != nil {
return err
}
}
}
if o.ResourceReferenceKeyType != nil {
// query param resource_reference_key.type
var qrResourceReferenceKeyType string
if o.ResourceReferenceKeyType != nil {
qrResourceReferenceKeyType = *o.ResourceReferenceKeyType
}
qResourceReferenceKeyType := qrResourceReferenceKeyType
if qResourceReferenceKeyType != "" {
if err := r.SetQueryParam("resource_reference_key.type", qResourceReferenceKeyType); err != nil {
return err
}
}
}
if o.SortBy != nil {
// query param sort_by
var qrSortBy string
if o.SortBy != nil {
qrSortBy = *o.SortBy
}
qSortBy := qrSortBy
if qSortBy != "" {
if err := r.SetQueryParam("sort_by", qSortBy); err != nil {
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
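
Because every query parameter above is pointer-valued and empty strings are skipped in WriteToRequest, pagination reduces to looping until NextPageToken comes back empty. A hedged sketch, assuming a generated client.ListExperiment method in the same shape as GetHealthz later in this diff:

// Sketch only: Client and ListExperiment are assumed generated methods.
func listAllExperiments(client *experiment_service.Client) error {
	pageSize := int32(50)
	pageToken := "" // empty token fetches the first page and is omitted from the query
	for {
		params := experiment_service.NewListExperimentParams().
			WithPageSize(&pageSize).
			WithPageToken(&pageToken)
		resp, err := client.ListExperiment(params, nil)
		if err != nil {
			return err
		}
		for _, e := range resp.Payload.Experiments {
			fmt.Println(e.Name)
		}
		if resp.Payload.NextPageToken == "" {
			return nil
		}
		pageToken = resp.Payload.NextPageToken
	}
}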


@@ -1,112 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// ListExperimentReader is a Reader for the ListExperiment structure.
type ListExperimentReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *ListExperimentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewListExperimentOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewListExperimentDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewListExperimentOK creates a ListExperimentOK with default headers values
func NewListExperimentOK() *ListExperimentOK {
return &ListExperimentOK{}
}
/*ListExperimentOK handles this case with default header values.
A successful response.
*/
type ListExperimentOK struct {
Payload *experiment_model.APIListExperimentsResponse
}
func (o *ListExperimentOK) Error() string {
return fmt.Sprintf("[GET /apis/v1beta1/experiments][%d] listExperimentOK %+v", 200, o.Payload)
}
func (o *ListExperimentOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIListExperimentsResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewListExperimentDefault creates a ListExperimentDefault with default headers values
func NewListExperimentDefault(code int) *ListExperimentDefault {
return &ListExperimentDefault{
_statusCode: code,
}
}
/*ListExperimentDefault handles this case with default header values.
ListExperimentDefault list experiment default
*/
type ListExperimentDefault struct {
_statusCode int
Payload *experiment_model.APIStatus
}
// Code gets the status code for the list experiment default response
func (o *ListExperimentDefault) Code() int {
return o._statusCode
}
func (o *ListExperimentDefault) Error() string {
return fmt.Sprintf("[GET /apis/v1beta1/experiments][%d] ListExperiment default %+v", o._statusCode, o.Payload)
}
func (o *ListExperimentDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}


@@ -1,136 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewUnarchiveExperimentParams creates a new UnarchiveExperimentParams object
// with the default values initialized.
func NewUnarchiveExperimentParams() *UnarchiveExperimentParams {
var ()
return &UnarchiveExperimentParams{
timeout: cr.DefaultTimeout,
}
}
// NewUnarchiveExperimentParamsWithTimeout creates a new UnarchiveExperimentParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewUnarchiveExperimentParamsWithTimeout(timeout time.Duration) *UnarchiveExperimentParams {
var ()
return &UnarchiveExperimentParams{
timeout: timeout,
}
}
// NewUnarchiveExperimentParamsWithContext creates a new UnarchiveExperimentParams object
// with the default values initialized, and the ability to set a context for a request
func NewUnarchiveExperimentParamsWithContext(ctx context.Context) *UnarchiveExperimentParams {
var ()
return &UnarchiveExperimentParams{
Context: ctx,
}
}
// NewUnarchiveExperimentParamsWithHTTPClient creates a new UnarchiveExperimentParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewUnarchiveExperimentParamsWithHTTPClient(client *http.Client) *UnarchiveExperimentParams {
var ()
return &UnarchiveExperimentParams{
HTTPClient: client,
}
}
/*UnarchiveExperimentParams contains all the parameters to send to the API endpoint
for the unarchive experiment operation. Typically these are written to an http.Request.
*/
type UnarchiveExperimentParams struct {
/*ID
The ID of the experiment to be restored.
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the unarchive experiment params
func (o *UnarchiveExperimentParams) WithTimeout(timeout time.Duration) *UnarchiveExperimentParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the unarchive experiment params
func (o *UnarchiveExperimentParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the unarchive experiment params
func (o *UnarchiveExperimentParams) WithContext(ctx context.Context) *UnarchiveExperimentParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the unarchive experiment params
func (o *UnarchiveExperimentParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the unarchive experiment params
func (o *UnarchiveExperimentParams) WithHTTPClient(client *http.Client) *UnarchiveExperimentParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the unarchive experiment params
func (o *UnarchiveExperimentParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the unarchive experiment params
func (o *UnarchiveExperimentParams) WithID(id string) *UnarchiveExperimentParams {
o.SetID(id)
return o
}
// SetID adds the id to the unarchive experiment params
func (o *UnarchiveExperimentParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *UnarchiveExperimentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}


@@ -1,110 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// UnarchiveExperimentReader is a Reader for the UnarchiveExperiment structure.
type UnarchiveExperimentReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *UnarchiveExperimentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewUnarchiveExperimentOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewUnarchiveExperimentDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewUnarchiveExperimentOK creates a UnarchiveExperimentOK with default headers values
func NewUnarchiveExperimentOK() *UnarchiveExperimentOK {
return &UnarchiveExperimentOK{}
}
/*UnarchiveExperimentOK handles this case with default header values.
A successful response.
*/
type UnarchiveExperimentOK struct {
Payload interface{}
}
func (o *UnarchiveExperimentOK) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/experiments/{id}:unarchive][%d] unarchiveExperimentOK %+v", 200, o.Payload)
}
func (o *UnarchiveExperimentOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewUnarchiveExperimentDefault creates a UnarchiveExperimentDefault with default headers values
func NewUnarchiveExperimentDefault(code int) *UnarchiveExperimentDefault {
return &UnarchiveExperimentDefault{
_statusCode: code,
}
}
/*UnarchiveExperimentDefault handles this case with default header values.
UnarchiveExperimentDefault unarchive experiment default
*/
type UnarchiveExperimentDefault struct {
_statusCode int
Payload *experiment_model.APIStatus
}
// Code gets the status code for the unarchive experiment default response
func (o *UnarchiveExperimentDefault) Code() int {
return o._statusCode
}
func (o *UnarchiveExperimentDefault) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/experiments/{id}:unarchive][%d] UnarchiveExperiment default %+v", o._statusCode, o.Payload)
}
func (o *UnarchiveExperimentDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}


@@ -1,135 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// APIExperiment api experiment
// swagger:model apiExperiment
type APIExperiment struct {
// Output. The time that the experiment was created.
// Format: date-time
CreatedAt strfmt.DateTime `json:"created_at,omitempty"`
// Optional input field. Describes the purpose of the experiment.
Description string `json:"description,omitempty"`
// Output. Unique experiment ID. Generated by API server.
ID string `json:"id,omitempty"`
// Required input field. Unique experiment name provided by user.
Name string `json:"name,omitempty"`
// Optional input field. Specifies which resource this experiment belongs to.
// For Experiment, the only valid resource reference is a single Namespace.
ResourceReferences []*APIResourceReference `json:"resource_references"`
// Output. Specifies whether this experiment is in archived or available state.
StorageState APIExperimentStorageState `json:"storage_state,omitempty"`
}
// Validate validates this api experiment
func (m *APIExperiment) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCreatedAt(formats); err != nil {
res = append(res, err)
}
if err := m.validateResourceReferences(formats); err != nil {
res = append(res, err)
}
if err := m.validateStorageState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *APIExperiment) validateCreatedAt(formats strfmt.Registry) error {
if swag.IsZero(m.CreatedAt) { // not required
return nil
}
if err := validate.FormatOf("created_at", "body", "date-time", m.CreatedAt.String(), formats); err != nil {
return err
}
return nil
}
func (m *APIExperiment) validateResourceReferences(formats strfmt.Registry) error {
if swag.IsZero(m.ResourceReferences) { // not required
return nil
}
for i := 0; i < len(m.ResourceReferences); i++ {
if swag.IsZero(m.ResourceReferences[i]) { // not required
continue
}
if m.ResourceReferences[i] != nil {
if err := m.ResourceReferences[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("resource_references" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *APIExperiment) validateStorageState(formats strfmt.Registry) error {
if swag.IsZero(m.StorageState) { // not required
return nil
}
if err := m.StorageState.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("storage_state")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *APIExperiment) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *APIExperiment) UnmarshalBinary(b []byte) error {
var res APIExperiment
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
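
A hedged construction-and-validation sketch using only types from this package (APIResourceReference, APIResourceKey, and the enum values appear later in this diff); the namespace ID is a placeholder:

package main

import (
	"log"

	"github.com/go-openapi/strfmt"

	experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)

func main() {
	exp := &experiment_model.APIExperiment{
		Name:        "demo-experiment",
		Description: "client-side payload sketch",
		ResourceReferences: []*experiment_model.APIResourceReference{{
			Key: &experiment_model.APIResourceKey{
				Type: experiment_model.APIResourceTypeNAMESPACE,
				ID:   "team-a", // hypothetical namespace
			},
			Relationship: experiment_model.APIRelationshipOWNER,
		}},
	}
	// Validate walks created_at, resource_references, and storage_state as
	// implemented above; zero-valued fields are skipped via swag.IsZero.
	if err := exp.Validate(strfmt.Default); err != nil {
		log.Fatal(err)
	}
}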


@@ -1,66 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/validate"
)
// APIExperimentStorageState api experiment storage state
// swagger:model apiExperimentStorageState
type APIExperimentStorageState string
const (
// APIExperimentStorageStateSTORAGESTATEUNSPECIFIED captures enum value "STORAGESTATE_UNSPECIFIED"
APIExperimentStorageStateSTORAGESTATEUNSPECIFIED APIExperimentStorageState = "STORAGESTATE_UNSPECIFIED"
// APIExperimentStorageStateSTORAGESTATEAVAILABLE captures enum value "STORAGESTATE_AVAILABLE"
APIExperimentStorageStateSTORAGESTATEAVAILABLE APIExperimentStorageState = "STORAGESTATE_AVAILABLE"
// APIExperimentStorageStateSTORAGESTATEARCHIVED captures enum value "STORAGESTATE_ARCHIVED"
APIExperimentStorageStateSTORAGESTATEARCHIVED APIExperimentStorageState = "STORAGESTATE_ARCHIVED"
)
// for schema
var apiExperimentStorageStateEnum []interface{}
func init() {
var res []APIExperimentStorageState
if err := json.Unmarshal([]byte(`["STORAGESTATE_UNSPECIFIED","STORAGESTATE_AVAILABLE","STORAGESTATE_ARCHIVED"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
apiExperimentStorageStateEnum = append(apiExperimentStorageStateEnum, v)
}
}
func (m APIExperimentStorageState) validateAPIExperimentStorageStateEnum(path, location string, value APIExperimentStorageState) error {
if err := validate.Enum(path, location, value, apiExperimentStorageStateEnum); err != nil {
return err
}
return nil
}
// Validate validates this api experiment storage state
func (m APIExperimentStorageState) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateAPIExperimentStorageStateEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
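
Since the allowed values are registered in apiExperimentStorageStateEnum at init time, Validate is a pure value check against that list. For example:

// Sketch: enum validation against the three values registered in init().
good := experiment_model.APIExperimentStorageState("STORAGESTATE_ARCHIVED")
fmt.Println(good.Validate(strfmt.Default)) // <nil>

bad := experiment_model.APIExperimentStorageState("NOT_A_STATE") // deliberately invalid
fmt.Println(bad.Validate(strfmt.Default)) // enum validation error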


@@ -1,86 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// APIListExperimentsResponse api list experiments response
// swagger:model apiListExperimentsResponse
type APIListExperimentsResponse struct {
// A list of experiments returned.
Experiments []*APIExperiment `json:"experiments"`
// The token to list the next page of experiments.
NextPageToken string `json:"next_page_token,omitempty"`
// The total number of experiments for the given query.
TotalSize int32 `json:"total_size,omitempty"`
}
// Validate validates this api list experiments response
func (m *APIListExperimentsResponse) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateExperiments(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *APIListExperimentsResponse) validateExperiments(formats strfmt.Registry) error {
if swag.IsZero(m.Experiments) { // not required
return nil
}
for i := 0; i < len(m.Experiments); i++ {
if swag.IsZero(m.Experiments[i]) { // not required
continue
}
if m.Experiments[i] != nil {
if err := m.Experiments[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("experiments" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *APIListExperimentsResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *APIListExperimentsResponse) UnmarshalBinary(b []byte) error {
var res APIListExperimentsResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}


@@ -1,66 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/validate"
)
// APIRelationship api relationship
// swagger:model apiRelationship
type APIRelationship string
const (
// APIRelationshipUNKNOWNRELATIONSHIP captures enum value "UNKNOWN_RELATIONSHIP"
APIRelationshipUNKNOWNRELATIONSHIP APIRelationship = "UNKNOWN_RELATIONSHIP"
// APIRelationshipOWNER captures enum value "OWNER"
APIRelationshipOWNER APIRelationship = "OWNER"
// APIRelationshipCREATOR captures enum value "CREATOR"
APIRelationshipCREATOR APIRelationship = "CREATOR"
)
// for schema
var apiRelationshipEnum []interface{}
func init() {
var res []APIRelationship
if err := json.Unmarshal([]byte(`["UNKNOWN_RELATIONSHIP","OWNER","CREATOR"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
apiRelationshipEnum = append(apiRelationshipEnum, v)
}
}
func (m APIRelationship) validateAPIRelationshipEnum(path, location string, value APIRelationship) error {
if err := validate.Enum(path, location, value, apiRelationshipEnum); err != nil {
return err
}
return nil
}
// Validate validates this api relationship
func (m APIRelationship) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateAPIRelationshipEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}


@@ -1,72 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// APIResourceKey api resource key
// swagger:model apiResourceKey
type APIResourceKey struct {
// The ID of the resource being referred to.
ID string `json:"id,omitempty"`
// The type of the resource being referred to.
Type APIResourceType `json:"type,omitempty"`
}
// Validate validates this api resource key
func (m *APIResourceKey) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateType(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *APIResourceKey) validateType(formats strfmt.Registry) error {
if swag.IsZero(m.Type) { // not required
return nil
}
if err := m.Type.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("type")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *APIResourceKey) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *APIResourceKey) UnmarshalBinary(b []byte) error {
var res APIResourceKey
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}


@@ -1,97 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// APIResourceReference api resource reference
// swagger:model apiResourceReference
type APIResourceReference struct {
// key
Key *APIResourceKey `json:"key,omitempty"`
// The name of the resource being referred to.
Name string `json:"name,omitempty"`
// Required field. The relationship from the referred resource to the object.
Relationship APIRelationship `json:"relationship,omitempty"`
}
// Validate validates this api resource reference
func (m *APIResourceReference) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateKey(formats); err != nil {
res = append(res, err)
}
if err := m.validateRelationship(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *APIResourceReference) validateKey(formats strfmt.Registry) error {
if swag.IsZero(m.Key) { // not required
return nil
}
if m.Key != nil {
if err := m.Key.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("key")
}
return err
}
}
return nil
}
func (m *APIResourceReference) validateRelationship(formats strfmt.Registry) error {
if swag.IsZero(m.Relationship) { // not required
return nil
}
if err := m.Relationship.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("relationship")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *APIResourceReference) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *APIResourceReference) UnmarshalBinary(b []byte) error {
var res APIResourceReference
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}


@@ -1,75 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/validate"
)
// APIResourceType api resource type
// swagger:model apiResourceType
type APIResourceType string
const (
// APIResourceTypeUNKNOWNRESOURCETYPE captures enum value "UNKNOWN_RESOURCE_TYPE"
APIResourceTypeUNKNOWNRESOURCETYPE APIResourceType = "UNKNOWN_RESOURCE_TYPE"
// APIResourceTypeEXPERIMENT captures enum value "EXPERIMENT"
APIResourceTypeEXPERIMENT APIResourceType = "EXPERIMENT"
// APIResourceTypeJOB captures enum value "JOB"
APIResourceTypeJOB APIResourceType = "JOB"
// APIResourceTypePIPELINE captures enum value "PIPELINE"
APIResourceTypePIPELINE APIResourceType = "PIPELINE"
// APIResourceTypePIPELINEVERSION captures enum value "PIPELINE_VERSION"
APIResourceTypePIPELINEVERSION APIResourceType = "PIPELINE_VERSION"
// APIResourceTypeNAMESPACE captures enum value "NAMESPACE"
APIResourceTypeNAMESPACE APIResourceType = "NAMESPACE"
)
// for schema
var apiResourceTypeEnum []interface{}
func init() {
var res []APIResourceType
if err := json.Unmarshal([]byte(`["UNKNOWN_RESOURCE_TYPE","EXPERIMENT","JOB","PIPELINE","PIPELINE_VERSION","NAMESPACE"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
apiResourceTypeEnum = append(apiResourceTypeEnum, v)
}
}
func (m APIResourceType) validateAPIResourceTypeEnum(path, location string, value APIResourceType) error {
if err := validate.Enum(path, location, value, apiResourceTypeEnum); err != nil {
return err
}
return nil
}
// Validate validates this api resource type
func (m APIResourceType) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateAPIResourceTypeEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}


@@ -1,86 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// APIStatus api status
// swagger:model apiStatus
type APIStatus struct {
// code
Code int32 `json:"code,omitempty"`
// details
Details []*ProtobufAny `json:"details"`
// error
Error string `json:"error,omitempty"`
}
// Validate validates this api status
func (m *APIStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateDetails(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *APIStatus) validateDetails(formats strfmt.Registry) error {
if swag.IsZero(m.Details) { // not required
return nil
}
for i := 0; i < len(m.Details); i++ {
if swag.IsZero(m.Details[i]) { // not required
continue
}
if m.Details[i] != nil {
if err := m.Details[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("details" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *APIStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *APIStatus) UnmarshalBinary(b []byte) error {
var res APIStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}


@@ -1,172 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// ProtobufAny `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
// swagger:model protobufAny
type ProtobufAny struct {
// A URL/resource name that uniquely identifies the type of the serialized
// protocol buffer message. This string must contain at least
// one "/" character. The last segment of the URL's path must represent
// the fully qualified name of the type (as in
// `path/google.protobuf.Duration`). The name should be in a canonical form
// (e.g., leading "." is not accepted).
//
// In practice, teams usually precompile into the binary all types that they
// expect it to use in the context of Any. However, for URLs which use the
// scheme `http`, `https`, or no scheme, one can optionally set up a type
// server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
// type.googleapis.com.
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
TypeURL string `json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
// Format: byte
Value strfmt.Base64 `json:"value,omitempty"`
}
// Validate validates this protobuf any
func (m *ProtobufAny) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateValue(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ProtobufAny) validateValue(formats strfmt.Registry) error {
if swag.IsZero(m.Value) { // not required
return nil
}
// Format "byte" (base64 string) is already validated when unmarshalled
return nil
}
// MarshalBinary interface implementation
func (m *ProtobufAny) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ProtobufAny) UnmarshalBinary(b []byte) error {
var res ProtobufAny
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}


@@ -1,113 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package healthz_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewGetHealthzParams creates a new GetHealthzParams object
// with the default values initialized.
func NewGetHealthzParams() *GetHealthzParams {
return &GetHealthzParams{
timeout: cr.DefaultTimeout,
}
}
// NewGetHealthzParamsWithTimeout creates a new GetHealthzParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewGetHealthzParamsWithTimeout(timeout time.Duration) *GetHealthzParams {
return &GetHealthzParams{
timeout: timeout,
}
}
// NewGetHealthzParamsWithContext creates a new GetHealthzParams object
// with the default values initialized, and the ability to set a context for a request
func NewGetHealthzParamsWithContext(ctx context.Context) *GetHealthzParams {
return &GetHealthzParams{
Context: ctx,
}
}
// NewGetHealthzParamsWithHTTPClient creates a new GetHealthzParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewGetHealthzParamsWithHTTPClient(client *http.Client) *GetHealthzParams {
return &GetHealthzParams{
HTTPClient: client,
}
}
/*GetHealthzParams contains all the parameters to send to the API endpoint
for the get healthz operation. Typically these are written to an http.Request.
*/
type GetHealthzParams struct {
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the get healthz params
func (o *GetHealthzParams) WithTimeout(timeout time.Duration) *GetHealthzParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the get healthz params
func (o *GetHealthzParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the get healthz params
func (o *GetHealthzParams) WithContext(ctx context.Context) *GetHealthzParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the get healthz params
func (o *GetHealthzParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the get healthz params
func (o *GetHealthzParams) WithHTTPClient(client *http.Client) *GetHealthzParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the get healthz params
func (o *GetHealthzParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WriteToRequest writes these params to a swagger request
func (o *GetHealthzParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}


@@ -1,112 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package healthz_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
healthz_model "github.com/kubeflow/pipelines/backend/api/go_http_client/healthz_model"
)
// GetHealthzReader is a Reader for the GetHealthz structure.
type GetHealthzReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GetHealthzReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetHealthzOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewGetHealthzDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewGetHealthzOK creates a GetHealthzOK with default headers values
func NewGetHealthzOK() *GetHealthzOK {
return &GetHealthzOK{}
}
/*GetHealthzOK handles this case with default header values.
A successful response.
*/
type GetHealthzOK struct {
Payload *healthz_model.APIGetHealthzResponse
}
func (o *GetHealthzOK) Error() string {
return fmt.Sprintf("[GET /apis/v1beta1/healthz][%d] getHealthzOK %+v", 200, o.Payload)
}
func (o *GetHealthzOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(healthz_model.APIGetHealthzResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetHealthzDefault creates a GetHealthzDefault with default headers values
func NewGetHealthzDefault(code int) *GetHealthzDefault {
return &GetHealthzDefault{
_statusCode: code,
}
}
/*GetHealthzDefault handles this case with default header values.
GetHealthzDefault get healthz default
*/
type GetHealthzDefault struct {
_statusCode int
Payload *healthz_model.APIStatus
}
// Code gets the status code for the get healthz default response
func (o *GetHealthzDefault) Code() int {
return o._statusCode
}
func (o *GetHealthzDefault) Error() string {
return fmt.Sprintf("[GET /apis/v1beta1/healthz][%d] GetHealthz default %+v", o._statusCode, o.Payload)
}
func (o *GetHealthzDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(healthz_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}


@@ -1,59 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package healthz_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
)
// New creates a new healthz service API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client {
return &Client{transport: transport, formats: formats}
}
/*
Client for healthz service API
*/
type Client struct {
transport runtime.ClientTransport
formats strfmt.Registry
}
/*
GetHealthz gets healthz data
*/
func (a *Client) GetHealthz(params *GetHealthzParams, authInfo runtime.ClientAuthInfoWriter) (*GetHealthzOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewGetHealthzParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "GetHealthz",
Method: "GET",
PathPattern: "/apis/v1beta1/healthz",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &GetHealthzReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*GetHealthzOK), nil
}
// SetTransport changes the transport on the client
func (a *Client) SetTransport(transport runtime.ClientTransport) {
a.transport = transport
}
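
Wiring this client end to end with the go-openapi runtime looks roughly like the following sketch; the host, base path, and scheme are placeholders, and the healthz_client import path is assumed from the generated layout:

package main

import (
	"fmt"
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	healthz_service "github.com/kubeflow/pipelines/backend/api/go_http_client/healthz_client/healthz_service"
)

func main() {
	// Point the transport at a reachable KFP API server (placeholder address).
	transport := httptransport.New("localhost:8888", "/", []string{"http"})
	client := healthz_service.New(transport, strfmt.Default)

	// nil authInfo works for unauthenticated setups.
	resp, err := client.GetHealthz(healthz_service.NewGetHealthzParams(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("multi-user mode:", resp.Payload.MultiUser)
}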


@@ -1,43 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package healthz_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// APIGetHealthzResponse api get healthz response
// swagger:model apiGetHealthzResponse
type APIGetHealthzResponse struct {
// Returns whether KFP is in multi-user mode
MultiUser bool `json:"multi_user,omitempty"`
}
// Validate validates this api get healthz response
func (m *APIGetHealthzResponse) Validate(formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *APIGetHealthzResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *APIGetHealthzResponse) UnmarshalBinary(b []byte) error {
var res APIGetHealthzResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}


@@ -1,86 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package healthz_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// APIStatus api status
// swagger:model apiStatus
type APIStatus struct {
// code
Code int32 `json:"code,omitempty"`
// details
Details []*ProtobufAny `json:"details"`
// error
Error string `json:"error,omitempty"`
}
// Validate validates this api status
func (m *APIStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateDetails(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *APIStatus) validateDetails(formats strfmt.Registry) error {
if swag.IsZero(m.Details) { // not required
return nil
}
for i := 0; i < len(m.Details); i++ {
if swag.IsZero(m.Details[i]) { // not required
continue
}
if m.Details[i] != nil {
if err := m.Details[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("details" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *APIStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *APIStatus) UnmarshalBinary(b []byte) error {
var res APIStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}


@@ -1,172 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package healthz_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// ProtobufAny `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
// swagger:model protobufAny
type ProtobufAny struct {
// A URL/resource name that uniquely identifies the type of the serialized
// protocol buffer message. This string must contain at least
// one "/" character. The last segment of the URL's path must represent
// the fully qualified name of the type (as in
// `path/google.protobuf.Duration`). The name should be in a canonical form
// (e.g., leading "." is not accepted).
//
// In practice, teams usually precompile into the binary all types that they
// expect it to use in the context of Any. However, for URLs which use the
// scheme `http`, `https`, or no scheme, one can optionally set up a type
// server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
// type.googleapis.com.
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
TypeURL string `json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
// Format: byte
Value strfmt.Base64 `json:"value,omitempty"`
}
// Validate validates this protobuf any
func (m *ProtobufAny) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateValue(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ProtobufAny) validateValue(formats strfmt.Registry) error {
if swag.IsZero(m.Value) { // not required
return nil
}
// Format "byte" (base64 string) is already validated when unmarshalled
return nil
}
// MarshalBinary interface implementation
func (m *ProtobufAny) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ProtobufAny) UnmarshalBinary(b []byte) error {
var res ProtobufAny
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}


@@ -1,139 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
job_model "github.com/kubeflow/pipelines/backend/api/go_http_client/job_model"
)
// NewCreateJobParams creates a new CreateJobParams object
// with the default values initialized.
func NewCreateJobParams() *CreateJobParams {
var ()
return &CreateJobParams{
timeout: cr.DefaultTimeout,
}
}
// NewCreateJobParamsWithTimeout creates a new CreateJobParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewCreateJobParamsWithTimeout(timeout time.Duration) *CreateJobParams {
var ()
return &CreateJobParams{
timeout: timeout,
}
}
// NewCreateJobParamsWithContext creates a new CreateJobParams object
// with the default values initialized, and the ability to set a context for a request
func NewCreateJobParamsWithContext(ctx context.Context) *CreateJobParams {
var ()
return &CreateJobParams{
Context: ctx,
}
}
// NewCreateJobParamsWithHTTPClient creates a new CreateJobParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewCreateJobParamsWithHTTPClient(client *http.Client) *CreateJobParams {
var ()
return &CreateJobParams{
HTTPClient: client,
}
}
/*CreateJobParams contains all the parameters to send to the API endpoint
for the create job operation. Typically these are written to an http.Request.
*/
type CreateJobParams struct {
/*Body
The job to be created
*/
Body *job_model.APIJob
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the create job params
func (o *CreateJobParams) WithTimeout(timeout time.Duration) *CreateJobParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the create job params
func (o *CreateJobParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the create job params
func (o *CreateJobParams) WithContext(ctx context.Context) *CreateJobParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the create job params
func (o *CreateJobParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the create job params
func (o *CreateJobParams) WithHTTPClient(client *http.Client) *CreateJobParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the create job params
func (o *CreateJobParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithBody adds the body to the create job params
func (o *CreateJobParams) WithBody(body *job_model.APIJob) *CreateJobParams {
o.SetBody(body)
return o
}
// SetBody adds the body to the create job params
func (o *CreateJobParams) SetBody(body *job_model.APIJob) {
o.Body = body
}
// WriteToRequest writes these params to a swagger request
func (o *CreateJobParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if o.Body != nil {
if err := r.SetBodyParam(o.Body); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
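
A usage note the generated file does not include: each With* setter returns the receiver, so parameters are normally built as a fluent chain. A sketch, assuming the kubeflow/pipelines client import paths and a caller-populated job:

// Sketch: fluent construction of CreateJobParams. The import paths and the
// APIJob field used in main are assumptions, not taken from this diff.
package main

import (
	"context"
	"time"

	job_model "github.com/kubeflow/pipelines/backend/api/go_http_client/job_model"
	job_service "github.com/kubeflow/pipelines/backend/api/go_http_client/job_client/job_service" // assumed path
)

func buildCreateJobParams(job *job_model.APIJob) *job_service.CreateJobParams {
	return job_service.NewCreateJobParams().
		WithTimeout(30 * time.Second). // overrides cr.DefaultTimeout
		WithContext(context.Background()).
		WithBody(job)
}

func main() {
	params := buildCreateJobParams(&job_model.APIJob{Name: "nightly-train"}) // Name field assumed on the model
	_ = params // handed to the job service client's CreateJob in practice
}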


@@ -1,112 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
job_model "github.com/kubeflow/pipelines/backend/api/go_http_client/job_model"
)
// CreateJobReader is a Reader for the CreateJob structure.
type CreateJobReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *CreateJobReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewCreateJobOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewCreateJobDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewCreateJobOK creates a CreateJobOK with default header values
func NewCreateJobOK() *CreateJobOK {
return &CreateJobOK{}
}
/*CreateJobOK handles this case with default header values.
A successful response.
*/
type CreateJobOK struct {
Payload *job_model.APIJob
}
func (o *CreateJobOK) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/jobs][%d] createJobOK %+v", 200, o.Payload)
}
func (o *CreateJobOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(job_model.APIJob)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewCreateJobDefault creates a CreateJobDefault with default header values
func NewCreateJobDefault(code int) *CreateJobDefault {
return &CreateJobDefault{
_statusCode: code,
}
}
/*CreateJobDefault handles this case with default header values.
CreateJobDefault handles the default (unexpected) response for the create job operation.
*/
type CreateJobDefault struct {
_statusCode int
Payload *job_model.APIStatus
}
// Code gets the status code for the create job default response
func (o *CreateJobDefault) Code() int {
return o._statusCode
}
func (o *CreateJobDefault) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/jobs][%d] CreateJob default %+v", o._statusCode, o.Payload)
}
func (o *CreateJobDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(job_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
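
Restating the reader above from the caller's perspective: a 200 is returned as *CreateJobOK, and any non-2xx response comes back as an error of concrete type *CreateJobDefault, so the server's APIStatus can be recovered with a type assertion. A hedged sketch, assuming a client and params constructed as elsewhere in this diff:

// Sketch: telling the generated success and default responses apart.
package example

import (
	"log"

	job_service "github.com/kubeflow/pipelines/backend/api/go_http_client/job_client/job_service" // assumed path
)

func createJob(client *job_service.Client, params *job_service.CreateJobParams) error {
	ok, err := client.CreateJob(params, nil) // nil auth writer: assumes an unsecured endpoint
	if err != nil {
		if def, isDefault := err.(*job_service.CreateJobDefault); isDefault {
			log.Printf("server returned %d: %+v", def.Code(), def.Payload) // APIStatus payload
		}
		return err
	}
	log.Printf("created job: %+v", ok.Payload)
	return nil
}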


@@ -1,136 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewDeleteJobParams creates a new DeleteJobParams object
// with the default values initialized.
func NewDeleteJobParams() *DeleteJobParams {
var ()
return &DeleteJobParams{
timeout: cr.DefaultTimeout,
}
}
// NewDeleteJobParamsWithTimeout creates a new DeleteJobParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewDeleteJobParamsWithTimeout(timeout time.Duration) *DeleteJobParams {
var ()
return &DeleteJobParams{
timeout: timeout,
}
}
// NewDeleteJobParamsWithContext creates a new DeleteJobParams object
// with the default values initialized, and the ability to set a context for a request
func NewDeleteJobParamsWithContext(ctx context.Context) *DeleteJobParams {
var ()
return &DeleteJobParams{
Context: ctx,
}
}
// NewDeleteJobParamsWithHTTPClient creates a new DeleteJobParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewDeleteJobParamsWithHTTPClient(client *http.Client) *DeleteJobParams {
var ()
return &DeleteJobParams{
HTTPClient: client,
}
}
/*DeleteJobParams contains all the parameters to send to the API endpoint
for the delete job operation. Typically these are written to an http.Request.
*/
type DeleteJobParams struct {
/*ID
The ID of the job to be deleted
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the delete job params
func (o *DeleteJobParams) WithTimeout(timeout time.Duration) *DeleteJobParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the delete job params
func (o *DeleteJobParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the delete job params
func (o *DeleteJobParams) WithContext(ctx context.Context) *DeleteJobParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the delete job params
func (o *DeleteJobParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the delete job params
func (o *DeleteJobParams) WithHTTPClient(client *http.Client) *DeleteJobParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the delete job params
func (o *DeleteJobParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the delete job params
func (o *DeleteJobParams) WithID(id string) *DeleteJobParams {
o.SetID(id)
return o
}
// SetID adds the id to the delete job params
func (o *DeleteJobParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *DeleteJobParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
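
One mechanical detail worth making explicit: WriteToRequest injects the ID into the {id} segment of the path pattern via SetPathParam, so a caller only ever sets the field. A small sketch with a placeholder ID:

// Sketch: the generated writer turns the ID field into the {id} path
// segment, i.e. DELETE /apis/v1beta1/jobs/123 for the value below.
package example

import (
	job_service "github.com/kubeflow/pipelines/backend/api/go_http_client/job_client/job_service" // assumed path
)

func deleteJobParams() *job_service.DeleteJobParams {
	return job_service.NewDeleteJobParams().WithID("123") // placeholder job ID
}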


@@ -1,110 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
job_model "github.com/kubeflow/pipelines/backend/api/go_http_client/job_model"
)
// DeleteJobReader is a Reader for the DeleteJob structure.
type DeleteJobReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *DeleteJobReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewDeleteJobOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewDeleteJobDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewDeleteJobOK creates a DeleteJobOK with default header values
func NewDeleteJobOK() *DeleteJobOK {
return &DeleteJobOK{}
}
/*DeleteJobOK handles this case with default header values.
A successful response.
*/
type DeleteJobOK struct {
Payload interface{}
}
func (o *DeleteJobOK) Error() string {
return fmt.Sprintf("[DELETE /apis/v1beta1/jobs/{id}][%d] deleteJobOK %+v", 200, o.Payload)
}
func (o *DeleteJobOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewDeleteJobDefault creates a DeleteJobDefault with default header values
func NewDeleteJobDefault(code int) *DeleteJobDefault {
return &DeleteJobDefault{
_statusCode: code,
}
}
/*DeleteJobDefault handles this case with default header values.
DeleteJobDefault handles the default (unexpected) response for the delete job operation.
*/
type DeleteJobDefault struct {
_statusCode int
Payload *job_model.APIStatus
}
// Code gets the status code for the delete job default response
func (o *DeleteJobDefault) Code() int {
return o._statusCode
}
func (o *DeleteJobDefault) Error() string {
return fmt.Sprintf("[DELETE /apis/v1beta1/jobs/{id}][%d] DeleteJob default %+v", o._statusCode, o.Payload)
}
func (o *DeleteJobDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(job_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}


@@ -1,136 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewDisableJobParams creates a new DisableJobParams object
// with the default values initialized.
func NewDisableJobParams() *DisableJobParams {
var ()
return &DisableJobParams{
timeout: cr.DefaultTimeout,
}
}
// NewDisableJobParamsWithTimeout creates a new DisableJobParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewDisableJobParamsWithTimeout(timeout time.Duration) *DisableJobParams {
var ()
return &DisableJobParams{
timeout: timeout,
}
}
// NewDisableJobParamsWithContext creates a new DisableJobParams object
// with the default values initialized, and the ability to set a context for a request
func NewDisableJobParamsWithContext(ctx context.Context) *DisableJobParams {
var ()
return &DisableJobParams{
Context: ctx,
}
}
// NewDisableJobParamsWithHTTPClient creates a new DisableJobParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewDisableJobParamsWithHTTPClient(client *http.Client) *DisableJobParams {
var ()
return &DisableJobParams{
HTTPClient: client,
}
}
/*DisableJobParams contains all the parameters to send to the API endpoint
for the disable job operation. Typically these are written to an http.Request.
*/
type DisableJobParams struct {
/*ID
The ID of the job to be disabled
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the disable job params
func (o *DisableJobParams) WithTimeout(timeout time.Duration) *DisableJobParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the disable job params
func (o *DisableJobParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the disable job params
func (o *DisableJobParams) WithContext(ctx context.Context) *DisableJobParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the disable job params
func (o *DisableJobParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the disable job params
func (o *DisableJobParams) WithHTTPClient(client *http.Client) *DisableJobParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the disable job params
func (o *DisableJobParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the disable job params
func (o *DisableJobParams) WithID(id string) *DisableJobParams {
o.SetID(id)
return o
}
// SetID adds the id to the disable job params
func (o *DisableJobParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *DisableJobParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}


@@ -1,110 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
job_model "github.com/kubeflow/pipelines/backend/api/go_http_client/job_model"
)
// DisableJobReader is a Reader for the DisableJob structure.
type DisableJobReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *DisableJobReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewDisableJobOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewDisableJobDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewDisableJobOK creates a DisableJobOK with default header values
func NewDisableJobOK() *DisableJobOK {
return &DisableJobOK{}
}
/*DisableJobOK handles this case with default header values.
A successful response.
*/
type DisableJobOK struct {
Payload interface{}
}
func (o *DisableJobOK) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/jobs/{id}/disable][%d] disableJobOK %+v", 200, o.Payload)
}
func (o *DisableJobOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewDisableJobDefault creates a DisableJobDefault with default header values
func NewDisableJobDefault(code int) *DisableJobDefault {
return &DisableJobDefault{
_statusCode: code,
}
}
/*DisableJobDefault handles this case with default header values.
DisableJobDefault handles the default (unexpected) response for the disable job operation.
*/
type DisableJobDefault struct {
_statusCode int
Payload *job_model.APIStatus
}
// Code gets the status code for the disable job default response
func (o *DisableJobDefault) Code() int {
return o._statusCode
}
func (o *DisableJobDefault) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/jobs/{id}/disable][%d] DisableJob default %+v", o._statusCode, o.Payload)
}
func (o *DisableJobDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(job_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}


@@ -1,136 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewEnableJobParams creates a new EnableJobParams object
// with the default values initialized.
func NewEnableJobParams() *EnableJobParams {
var ()
return &EnableJobParams{
timeout: cr.DefaultTimeout,
}
}
// NewEnableJobParamsWithTimeout creates a new EnableJobParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewEnableJobParamsWithTimeout(timeout time.Duration) *EnableJobParams {
var ()
return &EnableJobParams{
timeout: timeout,
}
}
// NewEnableJobParamsWithContext creates a new EnableJobParams object
// with the default values initialized, and the ability to set a context for a request
func NewEnableJobParamsWithContext(ctx context.Context) *EnableJobParams {
var ()
return &EnableJobParams{
Context: ctx,
}
}
// NewEnableJobParamsWithHTTPClient creates a new EnableJobParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewEnableJobParamsWithHTTPClient(client *http.Client) *EnableJobParams {
var ()
return &EnableJobParams{
HTTPClient: client,
}
}
/*EnableJobParams contains all the parameters to send to the API endpoint
for the enable job operation. Typically these are written to an http.Request.
*/
type EnableJobParams struct {
/*ID
The ID of the job to be enabled
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the enable job params
func (o *EnableJobParams) WithTimeout(timeout time.Duration) *EnableJobParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the enable job params
func (o *EnableJobParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the enable job params
func (o *EnableJobParams) WithContext(ctx context.Context) *EnableJobParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the enable job params
func (o *EnableJobParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the enable job params
func (o *EnableJobParams) WithHTTPClient(client *http.Client) *EnableJobParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the enable job params
func (o *EnableJobParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the enable job params
func (o *EnableJobParams) WithID(id string) *EnableJobParams {
o.SetID(id)
return o
}
// SetID adds the id to the enable job params
func (o *EnableJobParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *EnableJobParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}


@@ -1,110 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
job_model "github.com/kubeflow/pipelines/backend/api/go_http_client/job_model"
)
// EnableJobReader is a Reader for the EnableJob structure.
type EnableJobReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *EnableJobReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewEnableJobOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewEnableJobDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewEnableJobOK creates an EnableJobOK with default header values
func NewEnableJobOK() *EnableJobOK {
return &EnableJobOK{}
}
/*EnableJobOK handles this case with default header values.
A successful response.
*/
type EnableJobOK struct {
Payload interface{}
}
func (o *EnableJobOK) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/jobs/{id}/enable][%d] enableJobOK %+v", 200, o.Payload)
}
func (o *EnableJobOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewEnableJobDefault creates an EnableJobDefault with default header values
func NewEnableJobDefault(code int) *EnableJobDefault {
return &EnableJobDefault{
_statusCode: code,
}
}
/*EnableJobDefault handles this case with default header values.
EnableJobDefault handles the default (unexpected) response for the enable job operation.
*/
type EnableJobDefault struct {
_statusCode int
Payload *job_model.APIStatus
}
// Code gets the status code for the enable job default response
func (o *EnableJobDefault) Code() int {
return o._statusCode
}
func (o *EnableJobDefault) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/jobs/{id}/enable][%d] EnableJob default %+v", o._statusCode, o.Payload)
}
func (o *EnableJobDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(job_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}


@@ -1,136 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewGetJobParams creates a new GetJobParams object
// with the default values initialized.
func NewGetJobParams() *GetJobParams {
var ()
return &GetJobParams{
timeout: cr.DefaultTimeout,
}
}
// NewGetJobParamsWithTimeout creates a new GetJobParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewGetJobParamsWithTimeout(timeout time.Duration) *GetJobParams {
var ()
return &GetJobParams{
timeout: timeout,
}
}
// NewGetJobParamsWithContext creates a new GetJobParams object
// with the default values initialized, and the ability to set a context for a request
func NewGetJobParamsWithContext(ctx context.Context) *GetJobParams {
var ()
return &GetJobParams{
Context: ctx,
}
}
// NewGetJobParamsWithHTTPClient creates a new GetJobParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewGetJobParamsWithHTTPClient(client *http.Client) *GetJobParams {
var ()
return &GetJobParams{
HTTPClient: client,
}
}
/*GetJobParams contains all the parameters to send to the API endpoint
for the get job operation. Typically these are written to an http.Request.
*/
type GetJobParams struct {
/*ID
The ID of the job to be retrieved
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the get job params
func (o *GetJobParams) WithTimeout(timeout time.Duration) *GetJobParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the get job params
func (o *GetJobParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the get job params
func (o *GetJobParams) WithContext(ctx context.Context) *GetJobParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the get job params
func (o *GetJobParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the get job params
func (o *GetJobParams) WithHTTPClient(client *http.Client) *GetJobParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the get job params
func (o *GetJobParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the get job params
func (o *GetJobParams) WithID(id string) *GetJobParams {
o.SetID(id)
return o
}
// SetID adds the id to the get job params
func (o *GetJobParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *GetJobParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}


@@ -1,112 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
job_model "github.com/kubeflow/pipelines/backend/api/go_http_client/job_model"
)
// GetJobReader is a Reader for the GetJob structure.
type GetJobReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GetJobReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetJobOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewGetJobDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewGetJobOK creates a GetJobOK with default header values
func NewGetJobOK() *GetJobOK {
return &GetJobOK{}
}
/*GetJobOK handles this case with default header values.
A successful response.
*/
type GetJobOK struct {
Payload *job_model.APIJob
}
func (o *GetJobOK) Error() string {
return fmt.Sprintf("[GET /apis/v1beta1/jobs/{id}][%d] getJobOK %+v", 200, o.Payload)
}
func (o *GetJobOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(job_model.APIJob)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetJobDefault creates a GetJobDefault with default header values
func NewGetJobDefault(code int) *GetJobDefault {
return &GetJobDefault{
_statusCode: code,
}
}
/*GetJobDefault handles this case with default header values.
GetJobDefault handles the default (unexpected) response for the get job operation.
*/
type GetJobDefault struct {
_statusCode int
Payload *job_model.APIStatus
}
// Code gets the status code for the get job default response
func (o *GetJobDefault) Code() int {
return o._statusCode
}
func (o *GetJobDefault) Error() string {
return fmt.Sprintf("[GET /apis/v1beta1/jobs/{id}][%d] GetJob default %+v", o._statusCode, o.Payload)
}
func (o *GetJobDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(job_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}


@@ -1,204 +0,0 @@
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
)
// New creates a new job service API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client {
return &Client{transport: transport, formats: formats}
}
/*
Client for job service API
*/
type Client struct {
transport runtime.ClientTransport
formats strfmt.Registry
}
/*
CreateJob creates a new job
*/
func (a *Client) CreateJob(params *CreateJobParams, authInfo runtime.ClientAuthInfoWriter) (*CreateJobOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewCreateJobParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "CreateJob",
Method: "POST",
PathPattern: "/apis/v1beta1/jobs",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &CreateJobReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*CreateJobOK), nil
}
/*
DeleteJob deletes a job
*/
func (a *Client) DeleteJob(params *DeleteJobParams, authInfo runtime.ClientAuthInfoWriter) (*DeleteJobOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewDeleteJobParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "DeleteJob",
Method: "DELETE",
PathPattern: "/apis/v1beta1/jobs/{id}",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &DeleteJobReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*DeleteJobOK), nil
}
/*
DisableJob stops a job and all its associated runs. The job is not deleted.
*/
func (a *Client) DisableJob(params *DisableJobParams, authInfo runtime.ClientAuthInfoWriter) (*DisableJobOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewDisableJobParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "DisableJob",
Method: "POST",
PathPattern: "/apis/v1beta1/jobs/{id}/disable",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &DisableJobReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*DisableJobOK), nil
}
/*
EnableJob restarts a job that was previously stopped. All runs associated with the job will continue.
*/
func (a *Client) EnableJob(params *EnableJobParams, authInfo runtime.ClientAuthInfoWriter) (*EnableJobOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewEnableJobParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "EnableJob",
Method: "POST",
PathPattern: "/apis/v1beta1/jobs/{id}/enable",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &EnableJobReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*EnableJobOK), nil
}
/*
GetJob finds a specific job by ID
*/
func (a *Client) GetJob(params *GetJobParams, authInfo runtime.ClientAuthInfoWriter) (*GetJobOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewGetJobParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "GetJob",
Method: "GET",
PathPattern: "/apis/v1beta1/jobs/{id}",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &GetJobReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*GetJobOK), nil
}
/*
ListJobs finds all jobs
*/
func (a *Client) ListJobs(params *ListJobsParams, authInfo runtime.ClientAuthInfoWriter) (*ListJobsOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewListJobsParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "ListJobs",
Method: "GET",
PathPattern: "/apis/v1beta1/jobs",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &ListJobsReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*ListJobsOK), nil
}
// SetTransport changes the transport on the client
func (a *Client) SetTransport(transport runtime.ClientTransport) {
a.transport = transport
}
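
To tie the pieces together, a hedged end-to-end sketch (none of this appears in the generated sources): wire a go-openapi runtime transport to this client and invoke an operation. Host, scheme, job ID, and import paths are placeholders or assumptions.

// Sketch: constructing the job service client and fetching one job.
package main

import (
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	strfmt "github.com/go-openapi/strfmt"

	job_service "github.com/kubeflow/pipelines/backend/api/go_http_client/job_client/job_service" // assumed path
)

func main() {
	// Placeholder host and base path; point these at a real KFP API server.
	transport := httptransport.New("localhost:8888", "/", []string{"http"})
	client := job_service.New(transport, strfmt.Default)

	params := job_service.NewGetJobParams().WithID("my-job-id") // placeholder ID
	resp, err := client.GetJob(params, nil)                     // nil auth writer: assumes no auth required
	if err != nil {
		log.Fatalf("GetJob failed: %v", err)
	}
	log.Printf("job: %+v", resp.Payload)
}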

Some files were not shown because too many files have changed in this diff.