Compare commits

637 Commits

Author SHA1 Message Date
Tommy Li 0b89419544
chore(owners): Promote rafalbigaj as the approver (#1484)
Signed-off-by: tomcli <tommy.chaoping.li@ibm.com>
2024-04-17 22:13:39 +00:00
Tommy Li 3e7950ffd3
chore(deps): sync kfp deps with the latest commit (#1485)
Signed-off-by: tomcli <tommy.chaoping.li@ibm.com>
2024-04-16 23:32:26 +00:00
Tommy Li bb47bcd892
chore(ci): Add clean up step for tekton ci (#1480) 2024-03-27 17:36:21 +00:00
Tommy Li c0d25310d5
chore(kfp-task): Update driver package to 2.1.0 release (#1478) 2024-03-27 00:11:19 +00:00
Tommy Li b49f959db9
chore(tekton-driver): Update tekton v2 driver to support the latest k8s spec from upstream (#1464)
Signed-off-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2024-03-06 03:38:07 +00:00
Helber Belmiro 16e781dce9
fix(docs): Updated legal info due to migration from CLA to DCO (#1463)
* Updated legal info due to migration from CLA to DCO

Signed-off-by: hbelmiro <helber.belmiro@gmail.com>

* Fixed TOC

Signed-off-by: hbelmiro <helber.belmiro@gmail.com>

---------

Signed-off-by: hbelmiro <helber.belmiro@gmail.com>
2024-03-05 17:18:07 +00:00
Tommy Li 803377e899
chore(sdk): Add sdk 1.9.3 release (#1462)
Signed-off-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2024-02-27 23:12:59 +00:00
Tommy Li db6d85ece6
feat(sdk): add verify_ssl flag to support self cert (#1461)
Signed-off-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2024-02-27 22:52:59 +00:00
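A minimal usage sketch for the flag named in this commit; the exact TektonClient signature is an assumption based on the title of #1461, and the endpoint is hypothetical:

```python
from kfp_tekton import TektonClient

# Assumed keyword per the commit title (#1461); lets the SDK talk to an
# endpoint that presents a self-signed certificate.
client = TektonClient(
    host="https://kfp.example.com/pipeline",  # hypothetical endpoint
    verify_ssl=False,
)
```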
Tommy Li 9f568f2a72
feat(ci): Update github actions to also test python 3.12 (#1456)
* Update github actions to also test python 3.12

* Update setup.py

* Update setup.py

* Update kfp-tekton-unittests.yml

* Update kfp-tekton-unittests.yml

* Update kfp-tekton-unittests.yml

* Update kfp-tekton-unittests.yml

* Update README.md
2024-02-13 18:35:05 +00:00
Tommy Li a9d7df96d2
fix(README): Update instructions to only use kustomize (#1455) 2024-02-13 00:07:04 +00:00
Tommy Li b77e6f38d5
Chore(docs) Update kfp_tekton_install V2 instructions for KFP-Tekton 2.0.5 release (#1453) 2024-01-18 17:35:59 +00:00
Tommy Li bb06e5e721
chore(release): add kfp-tekton backend 1.9.2 release (#1451) 2024-01-18 00:21:58 +00:00
Tommy Li 550a827b05
feat(tekton-kfptask): Update kfptask to support pod metadata (#1449)
* update kfptask to support pod metadata

* fix type
2024-01-17 08:53:54 +00:00
Tommy Li d5fc9fd5c9
chore(README): Remove deprecated MLX wording (#1447) 2024-01-10 22:50:11 +00:00
Tommy Li a71ba164ad
feat(pipelineloop): Update pipelineloop v2 to have failed and cancelled status (#1445)
* update pipelineloop v2 to have failed and cancelled status

* fix execution id update bug
2024-01-05 19:25:09 +00:00
Tommy Li ff8bb50dc4
chore(tests): add unit tests for tekton template v1 (#1444)
* add unit tests for tekton template v1

* update license
2024-01-03 23:34:11 +00:00
Tommy Li 08e438099a
feat(pipelineloop): Optimize pipelineloop get performance with Lister (#1443) 2024-01-02 20:32:14 +00:00
Tommy Li 9d36c8c32d
chore(release): Add sdk 1.9.2 release (#1441) 2023-12-26 17:34:09 +00:00
Tommy Li cab410f886
feat(tekton-catalog): Add basic test to kfptask (#1439) 2023-12-26 14:19:08 +00:00
Tommy Li 21a0171bd0
chore(release): Add kfp-tekton 1.9.1 release (#1437) 2023-12-21 21:17:04 +00:00
Tommy Li 09b39c7a9f
fix(tekton-catalog): Refactor kfptask (#1435)
* add driver to pipelineloop reconcile

* update pipelineloop startup args

* update klog error

* update driver logic

* update image build and fix update logic

* update kfptask to be more module friendly

* revert deps

* add const
2023-12-19 22:37:19 +00:00
dependabot[bot] f93c8d2888
chore(deps): bump golang.org/x/crypto from 0.14.0 to 0.17.0 in /tekton-catalog/pipeline-loops (#1434)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.14.0 to 0.17.0.
- [Commits](https://github.com/golang/crypto/compare/v0.14.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-18 22:32:38 +00:00
dependabot[bot] 6d30324b85
chore(deps): bump golang.org/x/crypto from 0.14.0 to 0.17.0 in /tekton-catalog/tekton-kfptask (#1433)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.14.0 to 0.17.0.
- [Commits](https://github.com/golang/crypto/compare/v0.14.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-18 22:31:37 +00:00
dependabot[bot] 747e6640e7
chore(deps): bump golang.org/x/crypto from 0.14.0 to 0.17.0 in /tekton-catalog/tekton-driver (#1432)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.14.0 to 0.17.0.
- [Commits](https://github.com/golang/crypto/compare/v0.14.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-18 22:30:38 +00:00
Tommy Li b6fcff9f0e
feat(pipeline-loop v2): Merge loop driver and publisher into the pipelineloop controller logic (#1428)
* add driver to pipelineloop reconcile

* update pipelineloop startup args

* update klog error

* update driver logic

* update image build and fix update logic
2023-12-18 22:13:37 +00:00
Tommy Li 11964891c2
chore(README.md): Update kfp upstream version for v2 branch (#1430) 2023-12-18 22:00:37 +00:00
Tommy Li 066940a0f1
fix(sdk): Fix datetime import (#1429) 2023-12-18 21:59:37 +00:00
Tommy Li eaa2de687c
feat(tekton-kfptask): Update kfptask to publish completed dag status (#1426)
* update kfptask to update completed dag

* update comment

* update error message
2023-12-07 17:35:56 +00:00
Tommy Li 191ad3907d
chore(README): update v2 info and new diagrams (#1423)
* update v2 info and new diagrams

* Update README.md

* fix grammar
2023-12-04 21:01:06 +00:00
Tommy Li a75d4b3711
chore(release): Add kfp-tekton 1.9.0 release (#1422) 2023-11-27 19:50:01 +00:00
Tommy Li e7d0cd0680
chore(backend): update to tekton 0.53.2 (#1420) 2023-11-22 22:39:57 +00:00
Tommy Li 509f53c809
chore(ci): remove driver build code since it is no longer necessary (#1418) 2023-11-22 01:55:56 +00:00
Tommy Li 1811dc7fb4
chore(docs): Update v2 dev branch version in main readme (#1414)
* Update v2 dev branch version in main readme

* Update blog link to the latest
2023-11-09 17:41:51 +00:00
Tommy Li 8a7b8b3905
chore(docs): Update kfp-tekton v2 version in the docs (#1413)
* Update kfp-tekton v2 version in the docs

* Update guides/kfp_tekton_install.md

Co-authored-by: Yihong Wang <yh.wang@ibm.com>

---------

Co-authored-by: Yihong Wang <yh.wang@ibm.com>
2023-11-08 01:14:08 +00:00
Tommy Li fcc7996028
feat(tekton-catalog): merge driver and kfptask into one controller (#1411)
* merge driver and kfptask into one controller

* update parameters

* update parameters

* update parameters to hyphen
2023-11-02 22:07:51 +00:00
Tommy Li d183b563fa
fix(tekton-catalog): sync pipeline package to 2.0.3 release (#1405) 2023-10-27 17:10:10 +00:00
Tommy Li 0010c60efa
chore(deps): update golang grpc deps to 1.56.3 (#1404) 2023-10-26 05:15:09 +00:00
Tommy Li bb9806f8be
chore(docs): Update kfp-tekton install versions (#1397) 2023-10-23 22:48:07 +00:00
Humair Khan 333c1c9ede
feat(backend): Add CA injection to step-copy-artifacts step. Fixes #1394. (#1395)
Signed-off-by: Humair Khan <HumairAK@users.noreply.github.com>
2023-10-20 18:25:05 +00:00
Tommy Li 6dc3fd9db0
chore(docs): Update main readme entrypoint (#1393) 2023-10-19 22:01:03 +00:00
Tommy Li 01f1d11f98
feat(tekton-catalog): Move kfp-tekton v2 driver to master branch (#1391) 2023-10-19 21:50:03 +00:00
Tommy Li f3e6a8e7f3
fix(tekton-catalog): fix kfptask custom task (#1389) 2023-10-18 21:46:56 +00:00
dependabot[bot] 943f982fc6
chore(deps): bump urllib3 from 1.26.17 to 1.26.18 in /sdk/python (#1388)
Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.17 to 1.26.18.
- [Release notes](https://github.com/urllib3/urllib3/releases)
- [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst)
- [Commits](https://github.com/urllib3/urllib3/compare/1.26.17...1.26.18)

---
updated-dependencies:
- dependency-name: urllib3
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-18 16:07:57 +00:00
Tommy Li 15bffccc5b
chore(docs): update manual release process and minor ci display name change (#1387)
* update manual release process and minor ci display name change

* fix script
2023-10-17 21:55:55 +00:00
Tommy Li 96d5205326
feat(Tekton CI): add v2 ci image publish pipeline tasks (#1386)
* add v2 ci image publish pipeline tasks

* update tekton catalog version
2023-10-17 20:51:56 +00:00
Tommy Li f038e03625
chore(release): Add kfp-tekton 1.8.1 release (#1385)
* add kfp-tekton 1.8.1 release

* update install docs
2023-10-17 18:37:56 +00:00
Tommy Li e624e9f067
fix(deps): Update to Tekton 0.50.2 to fix regression and security bugs (#1382) 2023-10-17 17:17:56 +00:00
Tommy Li 44df2424d1
chore(github): Update github action deps (#1383) 2023-10-17 17:11:56 +00:00
Tommy Li b9607d5ca9
chore(deps): Fix golang.org/x/net vulnerability (#1377)
* fix golang.org/x/net vulnerability

* fix typo

* fix spacing on makefile
2023-10-12 00:23:21 +00:00
Tommy Li 964c6e53f8
feat(tekton-catalog): Add V2 Tekton kfptask to Tekton catalog (#1373)
* add tekton kfptask to tekton catalog

* fix controller path
2023-10-11 22:26:20 +00:00
dependabot[bot] d41081b459
chore(deps): bump certifi from 2022.12.7 to 2023.7.22 in /sdk/python (#1372)
Bumps [certifi](https://github.com/certifi/python-certifi) from 2022.12.7 to 2023.7.22.
- [Commits](https://github.com/certifi/python-certifi/compare/2022.12.07...2023.07.22)

---
updated-dependencies:
- dependency-name: certifi
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-11 17:04:21 +00:00
dependabot[bot] bbd47b9edf
chore(deps): bump urllib3 from 1.26.15 to 1.26.17 in /sdk/python (#1356)
Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.15 to 1.26.17.
- [Release notes](https://github.com/urllib3/urllib3/releases)
- [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst)
- [Commits](https://github.com/urllib3/urllib3/compare/1.26.15...1.26.17)

---
updated-dependencies:
- dependency-name: urllib3
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-11 16:53:20 +00:00
Tommy Li cf4d57974a
chore(cleanup): remove old manifests and tools to reduce security checklist (#1371) 2023-10-11 16:48:20 +00:00
Tommy Li f94859acae
feat(tekton-catalog): Move v2 tekton-exithandler custom task to tekton catalog (#1370)
* move v2 tekton-exithandler custom task to tekton catalog

* rename podman to docker

* add go.sum
2023-10-11 16:47:20 +00:00
Tommy Li a3c899a1b8
chore(docs): Add SECURITY.md to tell supporting kfp-tekton version (#1368)
* Add SECURITY.md to tell supporting kfp-tekton version

* Update SECURITY.md
2023-10-10 16:45:21 +00:00
Tommy Li fd3b3b1466
chore(docs): Update kfp_tekton_install.md with kfp-tekton v2 install instructions (#1367)
* Update kfp_tekton_install.md with kfp-tekton v2 install instructions

* Update kfp_tekton_install.md
2023-10-10 16:29:20 +00:00
Tommy Li c1510f09a8
fix(manifests): Update istio-authorization-config.yaml for multi-user (#1362) 2023-10-04 19:37:15 +00:00
Tommy Li b0a2ac97d9
fix(sdk): Fix nested loop with cel outputs (#1351) 2023-09-28 14:58:42 +00:00
Tommy Li 8b93ce6719
fix(docs) Update Kind install command to use kubectl -k (#1348) 2023-09-21 17:27:38 +00:00
Tommy Li b71d07b02e
chore(docs): Add grpc gateway version to user guides. (#1346) 2023-09-19 23:17:29 +00:00
Humair Khan 79272ef439
fix(backend): add PR name substitution for scheduled runs. (#1344)
Signed-off-by: Humair Khan <HumairAK@users.noreply.github.com>
2023-09-11 18:21:36 +00:00
Tommy Li 84f2a85023
feat(api): upgrade grpc-gateway to v2 with protoname (#1337)
* upgrade grpc-gateway to v2

* fix common client code for grpc gateway v2

* fix license and test case

* regenerate python client

* fix python http client code

* update healthz class to new python openapi client

* update new python client function class name to new openapi standard;

* overwrite upstream old client function convention

* fix python upload class

* fix python upload class

* update api class

* update upload api swagger codegen manually

* revert old fixes

* fix new sdk payload parameter mapping

* update names to protoname

* revert backend changes to use protoname

* update proto spec

* update tests

* update sdk client to handle new json name

* fix typo
2023-09-01 22:22:26 +00:00
Tommy Li 0fe70dac08
feat(sdk): update new image config via env variables (#1336)
* update new image config via env variables

* add env var for condition task image

* update readme
2023-08-31 21:31:34 +00:00
Tommy Li 7576a6ce47
fix(sdk): update pyyaml dependencies (#1340) 2023-08-31 20:46:08 +00:00
Tommy Li 972c8817f1
feat(sdk): add bash script name config (#1334)
* add bash script name config

* add bash script name config
2023-08-25 21:56:33 +00:00
Tommy Li 4ccd1867a0
chore(cleanup): clean up visualization repo to reduce false positive scan (#1333)
* clean up visualization repo to reduce false positive scan

* clean up visualization repo to reduce false positive scan

* clean up visualization repo to reduce false positive scan

* clean up visualization repo to reduce false positive scan
2023-08-23 20:29:46 +00:00
Giulio Frasca a2007f7389
feat(backend): Source ObjStore Creds from Env in Tekton Template (#1259) 2023-08-22 21:38:45 +00:00
Tommy Li 5007b6bab5
chore(release): kfp-tekton 1.8 release patch and tekton 0.50.1 patch (#1331) 2023-08-22 21:01:09 +00:00
Tommy Li 2f99e8bad3
fix(pipelineloop): update pipelineloop v1 bug (#1330) 2023-08-18 18:16:49 +00:00
Tommy Li 73fb66aa93
chore(test): update ci script (#1329)
* update ci script

* update pipelineloop test for ci
2023-08-17 20:29:38 +00:00
Tommy Li f8741801cb
chore(build): Update .readthedocs.yml to build.os format to avoid warning. (#1328) 2023-08-16 18:19:04 +00:00
Tommy Li 746c570e53
feat(sdk): add tekton pipeline config into sdk client function (#1327) 2023-08-16 16:05:05 +00:00
Tommy Li dc413a30b9
chore(docs): Update dead links and v2 information (#1325)
* Update dead links and v2 information

* Update README.md
2023-08-15 20:50:03 +00:00
Tommy Li dc7dcdd2de
feature(backend): update backend to be backward compatible with old status (#1324) 2023-08-15 15:14:03 +00:00
Tommy Li 136e7a9352
feature(pipelineloop): Tekton v1 migration pipelineloop (#1312)
* update backend to tekton 0.50

* update manifests folder

* update cache server and readme

* migrate backend services to tekton v1 api

* add v1beta1 pipelinerun conversion for backward compatibility

* fix new python lint

* add new function to cover basic pipeline conversion

* update prune script to get more resources

* migrate sdk generation code to tekton v1 only

* migrate pipelineloop to tekton v1 pipeline spec
2023-08-03 16:04:20 +00:00
Tommy Li 213a50874d
feature(sdk): Tekton v1 migration sdk (#1311)
* update backend to tekton 0.50

* update manifests folder

* update cache server and readme

* migrate backend services to tekton v1 api

* add v1beta1 pipelinerun conversion for backward compatibility

* fix new python lint

* add new function to cover basic pipeline conversion

* update prune script to get more resources

* migrate sdk generation code to tekton v1 only
2023-08-02 21:54:18 +00:00
Tommy Li fde417c41a
feature(backend): Tekton v1 migration (#1309)
* update backend to tekton 0.50

* update manifests folder

* update cache server and readme

* migrate backend services to tekton v1 api

* add v1beta1 pipelinerun conversion for backward compatibility

* fix new python lint

* add new function to cover basic pipeline conversion

* update prune script to get more resources

* update status to store in annotations
2023-08-02 18:31:40 +00:00
Tommy Li 933f4afe81
backend(chore): update backend to tekton 0.50 (#1297)
* update backend to tekton 0.50

* update manifests folder

* update cache server and readme
2023-08-01 15:17:08 +00:00
Tommy Li 05617f3f86
chore(readme): Add kfp-tekton v2 branch link (#1305)
* add kfp-tekton v2 branch link

* Update README.md
2023-07-27 18:23:51 +00:00
Tommy Li 77e4754987
chore(samples): Add readme for prompt tuning samples (#1304) 2023-07-27 18:12:51 +00:00
Tommy Li 1f6ed5ee76
fix(ci): fix end to end github action test (#1300)
* test-ci

* trigger backend test

* update prune script

* revert test changes

* update comments
2023-07-26 20:24:33 +00:00
Tommy Li 4fe9611d2b
chore(release): add 1.7.1 backend release (#1295) 2023-07-21 23:34:26 +00:00
Greg Sheremeta 7afbf94753
fix(doc): fix typo tekton -> openshift (#1290) 2023-07-18 18:05:22 +00:00
Tommy Li 2b0e3adcd0
chore(docs): Update kfp-tekton openshift instructions for openshift v1.11 (#1278) 2023-07-10 23:34:07 +00:00
Tommy Li c007dced10
chore(sdk): Add sdk 1.7.1 release (#1267) 2023-06-27 21:24:15 +00:00
Tommy Li 7780820ce4
feat(sdk): Parametrize Tekton condition "task" image name (#1265)
* make condition task parametrized

* update docs
2023-06-26 22:41:14 +00:00
Tommy Li 691f225cf4
fix(frontend): Fix Tensorboard.tsx endpoint (#1263) 2023-06-26 15:56:15 +00:00
Tommy Li 66a129c63f
feat(samples): add peft sample with modelmesh (#1258)
* add peft sample with kserve

* lint files
2023-06-20 21:08:40 +00:00
Yihong Wang 4377646b99
samples(fix): update lightweight component sample (#1257)
use `create_component_from_func` instead

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-06-20 20:07:40 +00:00
Tommy Li 7b916ced28
Upload huggingface demo for tutorial (#1256)
* Add files via upload

* Rename samples/prompt-tuning-demo.py to samples/huggingface-prompt-tuning/prompt-tuning-demo.py
2023-06-16 20:20:37 +00:00
Yihong Wang ca7514c33e
fix(test): update docker image for dind (#1254)
use new docker image for dind

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-06-15 18:40:36 +00:00
Tommy Li 64bf7dba36
Fix(backend): Upgrade Tekton 0.47.1 to address timeout bug (#1253)
* upgrade tekton 0.47.1 to address timeout bug

* upgrade tekton 0.47.1 to address timeout bug
2023-06-08 22:35:58 +00:00
Tommy Li 032db82204
chore(docs): Update kfp_tekton_install.md with new compatibility map (#1249) 2023-05-30 21:25:04 +00:00
Tommy Li f8ebbd47b3
feat(backend): Optimize persistence agent to pass yaml directly to db and minio client (#1244)
* optimize persistence agent to pass yaml directly to db and minio client

* add legacy mode for users to revert back to old kfp behavior
2023-05-24 19:59:19 +00:00
Yihong Wang 3ca939f162
feat(backend): Use Informer in persistence agent (#1247)
use SharedInformer to optimize the query performance

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-05-24 19:58:20 +00:00
Tommy Li 1188e82d7c
chore(sdk): bump kfp sdk version to 1.8.22 (#1241) 2023-05-17 22:54:06 +00:00
Tommy Li 95e23f723a
fix(manifests): Update manifests to make it work on both k8s and openshift (#1239)
* refactor manifests to separate k8s and openshift deployment

* update install readme

* fix lint
2023-05-15 17:33:31 +00:00
Tommy Li 76181ac287
chore(release): Add KFP-Tekton 1.7.0 release files (#1237)
* add 1.7.0 release files

* fix readme
2023-05-11 21:30:30 +00:00
Tommy Li de76970835
chore(release): Add 1.6.6-backend release (#1236) 2023-05-08 18:02:55 +00:00
Tommy Li 48d983b7d0
fix(sdk): Update wait_for_run_completion function to handle tekton status; (#1234)
* update wait_for_run_completion function to handle tekton status;

* optimize status check
2023-05-05 19:08:24 +00:00
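For context, a hedged sketch of the client call being fixed here (host and run id are placeholders):

```python
from kfp_tekton import TektonClient

client = TektonClient(host="https://kfp.example.com/pipeline")  # hypothetical endpoint

# Block until the run finishes or the timeout (in seconds) expires; per this
# fix, the completion check also understands Tekton-style status strings.
result = client.wait_for_run_completion(run_id="<run-id>", timeout=3600)
print(result.run.status)
```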
Tommy Li 63e1e76a1e
fix(sdk): Update kfp version to fix appengine bug (#1235)
* update kfp version to fix appengine bug

* update tests
2023-05-05 18:40:24 +00:00
Tommy Li a6029c73f0
feat(manifests): opt-out sidecar injection feature flag to increase performance (#1230) 2023-05-05 17:43:24 +00:00
Tommy Li 08819b551f
fix(manifests): fix unsaved kustomization.yaml for openshift pipelines (#1229) 2023-05-04 17:49:45 +00:00
Tommy Li e7b5231c9a
fix(backend): Fix global cache flag (#1224)
* fix global cache flag

* update deprecated flag name
2023-05-03 23:03:45 +00:00
Tommy Li 1c1db1d320
feat(backend): Add metadata field to scheduled workflow (#1221)
* add metadata field to scheduled workflow

* add backend changes

* downgrade urllib3 to 1.x
2023-05-03 20:28:27 +00:00
Yihong Wang 2dc9314d8d
fix(backend): add license files (#1222)
Add license/version files to modules under
tekton-catalog directory, including:
- pipeline-loops
- cache
- objectstore

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-05-02 22:30:14 +00:00
Tommy Li 5ce5507434
feat(manifest): Add openshift pipelines kustomize integration (#1220)
* update deployment manifests to work well on the latest openshift pipelines

* fix readme

* add unsaved files
2023-05-02 20:20:44 +00:00
Yihong Wang 030a7c9a12
fix(test): update ibmcloud command (#1217)
remove deprecated options for the ibmcloud CLI
2023-04-27 18:23:39 +00:00
Tommy Li 08044c99fe
chore(docs): Remove inactive community links (#1218) 2023-04-27 18:13:39 +00:00
Tommy Li c72f05d93d
fix(backend): Fix integration test template object bugs (#1216) 2023-04-26 22:34:35 +00:00
Tommy Li 2d7cc156d1
feat(backend): Update backend to support Tekton 0.47 (#1215)
* initial tekton 0.47 patch

* update licenses

* update pipelineloop deps to tekton 0.47
2023-04-25 20:46:45 +00:00
Tommy Li 1bca3555ce
chore(release): Add backend 1.6.5 release (#1214) 2023-04-24 21:55:44 +00:00
Tommy Li 6c5f7f7714
chore(requirements.txt): Remove unused lock files to better scan security vulnerability (#1213) 2023-04-24 18:31:44 +00:00
Tommy Li 98023e4aec
chore(manifests): update mysql log config to align with upstream (#1212) 2023-04-24 16:00:44 +00:00
Yihong Wang 92a09f1fa5
fix(backend): Use childReference instead of taskRuns (#1211)
After Tekton 0.45, status.taskRuns will be removed.
Switch to status.childReferences since we can use
them to get all taskRuns and their pipelineTaskNames.
2023-04-19 17:18:55 +00:00
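A sketch of what consuming status.childReferences looks like; the backend change itself is Go, so this Python reading of a PipelineRun fetched as a dict is illustrative only:

```python
# Each childReferences entry records the kind (TaskRun/Run), the resource
# name, and the pipelineTaskName it implements.
def child_task_runs(pipeline_run: dict):
    for ref in pipeline_run.get("status", {}).get("childReferences", []):
        yield ref["kind"], ref["name"], ref["pipelineTaskName"]
```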
Tommy Li 4597d98715
chore(deps): Update go deps to fix high cves (#1210)
* update go deps to fix high cves

* update license
2023-04-18 23:13:54 +00:00
Tommy Li 647d6b9003
chore(samples): Update data passing samples to a more meaningful folder (#1209) 2023-04-14 20:56:18 +00:00
Tommy Li b0875fac5a
fix(python): Remove python 3.7 support (#1207)
* remove python 3.7 support

* update broken links
2023-04-12 21:55:42 +00:00
Tommy Li 2fdcd81947
fix(sdk): Update sdk to 1.6.5 with new kfp 1.8.20 to address kubernetes cves (#1206)
* update sdk to 1.6.5 with new kfp 1.8.20 to address kubernetes cves

* fix tests to use the latest k8s version
2023-04-11 19:45:55 +00:00
Tommy Li 57d2847bd5
chore(samples): Remove deprecated samples (#1205) 2023-04-11 18:40:54 +00:00
Tommy Li 446e8d751b
chore(cleanup): Remove deprecated api directory (#1204) 2023-04-11 18:38:55 +00:00
Tommy Li e0aecb9bfc
chore(release): Add backend 1.6.4 release (#1203) 2023-04-10 21:01:54 +00:00
Tommy Li 77ebb59f65
fix(deps): Tekton 0.44.2 patch and dependencies update (#1202)
* tekton 0.44.2 patch and dependencies update

* update license and missing tekton package dependencies
2023-04-08 05:54:51 +00:00
Tommy Li d5eb860720
feat(CI): Create periodic codeql code scan to detect possible static bugs (#1201)
* Create codeql.yml

* Update codeql.yml

add comments
2023-04-05 01:12:02 +00:00
Yihong Wang 4764448e76
fix(test): enhance build scripts (#1200)
* increase retry for many edge task

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* fix(test): update the logic of waiting pods

Don't fail the script if there is no pod after the deployment.
Instead, do the check again after the sleep.

* test(fix): randomize the pipeline name

use randomized pipeline name to support retry

---------

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-04-03 20:21:00 +00:00
Tommy Li 1f24ba5bb9
fix(sdk): Remove print statement for compiler to reduce log size (#1198) 2023-03-30 20:04:05 +00:00
Tommy Li 89ccf0bf39
chore(docs): Update kfp-tekton openshift instruction to also include Tekton SCC (#1197) 2023-03-24 15:59:23 +00:00
Tommy Li 70533aab4b
chore(release): Add 1.6.3 backend and 1.6.4 sdk release (#1196)
* add 1.6.3 backend and 1.6.4 sdk release

* add 1.6.3 release yaml
2023-03-22 19:53:43 +00:00
Yihong Wang fd1f6e53ab
fix(test): update GH action and toolchain task (#1195)
update the GH action script to use default kustomize 5.0.0.
update the toolchain task to use newer image as well.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-03-22 15:37:42 +00:00
Tommy Li 0c91fc1df7
fix(backend): Fix metadata writer dependencies (#1193)
* fix metadata writer dependencies

* fix metadata writer dependencies
2023-03-21 19:15:42 +00:00
Yihong Wang 21eaa6df5d
feat(manifests): use kustomize v5 (#1194)
* use kustomize v5

remove deprecated fields in manifests, including:
- base
- vars
- patchesStrategicMerge
- patchesJson6903

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* update manifests for all envs

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

---------

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-03-21 19:02:43 +00:00
Yihong Wang 819005040e
fix(test): fix typo in the toolchain task (#1190)
fix the typo of one of the params in the deploy task

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-03-17 16:13:08 +00:00
Tommy Li 82793c595a
chore(tekton-catalog): Remove legacy v1alpha1 condition since it is no longer supported (#1188) 2023-03-16 16:33:28 +00:00
Yihong Wang 7414106c1d
fix(test): update toolchain image/script (#1189)
update the toolchain image and update the script to
be able to specify the url of public container registry
and scripts

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-03-16 16:17:29 +00:00
Tommy Li 3b006b98b4
fix(sdk): Update aipipeline images to quay (#1186) 2023-03-16 08:37:28 +00:00
Tommy Li d8d6e4a4ad
chore(docs): Update docs on custom task parameters (#1187) 2023-03-15 20:35:11 +00:00
Tommy Li b7a4c03b40
chore(release): Add 1.6.2 backend release, 1.6.3 sdk release (#1184)
* add 1.6.2 backend release, 1.6.3 sdk release

* add release yaml

* update image to quay

* remove tests
2023-03-14 22:52:10 +00:00
Tommy Li c595c32b79
fix(sdk): fix v1 api package typo (#1180) 2023-03-10 22:07:21 +00:00
Tommy Li b766fb4751
fix(sdk): fix v1 api sdk client package bug (#1179) 2023-03-09 17:43:09 +00:00
Tommy Li d196a94d5e
feat(sdk): make metadata component spec gen flag configurable on pipeline level (#1177) 2023-03-06 17:15:37 +00:00
Tommy Li 4e5a49e01f
chore(cleanup): Remove unnecessary cloud build files from google and travis (#1178) 2023-03-02 22:47:00 +00:00
Tommy Li e069b75a37
chore(manifests): upgrade mysql image to kfp v2 standards for cves mitigation (#1175) 2023-03-02 16:47:04 +00:00
Tommy Li ce4ce6f203
Fix(any-sequencer): Make any sequencer able to detect child reference status (#1172)
* Make any sequencer able to detect child reference status

* update test script
2023-02-28 17:47:02 +00:00
Yihong Wang f4298068c5
fix(test): call kfp api instead of kube api (#1171)
Use kfp api to get run details instead of kube api

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-02-27 23:35:40 +00:00
Tommy Li 40a52874b6
chore(manifests): Update embedded status to minimal (#1168) 2023-02-24 17:22:25 +00:00
Yihong Wang 6fe856aa67
fix(backend): proper handle customrun status (#1167)
When setting the embedded-status feature flag to minimal,
a customrun could have either customRef or customSpec, which stores the
kind information. Improve the logic to handle the customruns' statuses
in these cases.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-02-23 15:58:24 +00:00
Tommy Li a04cdae330
chore(sdk): update package dependency source (#1166)
* update package dependencies

* fix lint
2023-02-21 17:42:40 +00:00
Tommy Li 7210555e8f
Chore(release): Add 1.6.1 release patch (#1165)
* add 1.6.1 release patch

* add release yaml
2023-02-20 22:46:39 +00:00
Tommy Li 034d427d38
Chore(backend) update kube and python dependency for backend (#1162)
* update kube and python dependency for backend

* update kind dep
2023-02-20 18:43:39 +00:00
Yihong Wang ac757b44c0
fix(backend): Add the permissions to access customruns (#1161)
Add the permissions to access customruns for the following
roles/clusterroles:
- ml-pipeline
- ml-pipeline-persistenceagent-role
- ml-pipeline-scheduledworkflow-role

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-02-17 21:56:36 +00:00
Tommy Li 1816fc14db
chore(test): add python 3.10 into github actions (#1160)
* add python 3.10 into github actions

* update param to string to prevent github trimming
2023-02-16 17:56:06 +00:00
Tommy Li dbbcd94d4a
chore(release): Update release version to 1.6.0 (#1157) 2023-02-15 22:29:05 +00:00
Tommy Li 1de489cf2c
fix(sdk): optimize find item search for large amount of params (#1159) 2023-02-15 22:24:05 +00:00
Tommy Li 07f3cf7cfc
feat(sdk): Add task workspace support (#1153)
* add task workspace support

* fix lint and docs

* remove unnecessary workspaces annotations
2023-02-13 23:29:24 +00:00
Prashant Sharma cdfdcdd0f1
Feat(pipelineloop-cli): support customruns validation as well. (#1152)
* Feat(pipelineloop-cli): support customruns validation as well.

* fix seg fault.

* fix seg fault.
2023-02-10 17:58:03 +00:00
Tommy Li 464197b4c9
feat(sdk): Add pipeline workspace config (#1151) 2023-02-10 13:18:01 +00:00
Tommy Li ea5821fc68
chore(manifests): Fix broken github link (#1147) 2023-02-07 19:08:32 +00:00
Tommy Li 850e086469
Chore: Update readme and license headers (#1146)
* update readme and license header

* update readme and license header

* update license headers
2023-02-06 22:49:31 +00:00
Tommy Li e4d837ad9b
feat(sdk): Add env support for podtemplate in sdk (#1145)
* add env support for podtemplate in sdk

* update readme usage

* address comment
2023-02-06 21:59:31 +00:00
Tommy Li 37f45c0dde
chore(backend): Update backend to Tekton 0.44 (#1144)
* update backend to tekton 0.44

* update license
2023-02-06 20:10:01 +00:00
Tommy Li fd17202e7c
fix(pipelineloop): Update security context with group id (#1142)
* Update 500-webhook.yaml

* Update 500-controller.yaml
2023-02-02 22:33:01 +00:00
Prashant Sharma 8897d88a97
Feat(pipelineloop): Migration guide for migrating from v1alpha1.Run t… (#1141)
* Feat(pipelineloop): Migration guide for migrating from v1alpha1.Run to v1beta1.CustomRun.

* Update guides/Custom-run-migration-guide.md

---------

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2023-02-02 17:21:30 +00:00
Tommy Li 1fc5d56d50
fix(tests): update e2e tests with new api commands (#1140) 2023-02-01 21:30:29 +00:00
Prashant Sharma cdbe855444
Custom run migration for examples of PipelineLoop custom task. (#1139)
* wip

* Fixed examples to run with tekton v0.44.0

* Fixed test
2023-02-01 19:32:32 +00:00
Prashant Sharma 4637dcb1e3
Feat: switch to v1 API paths for backend. (#1121)
* switch to v1 API paths for backend.

* python api update.

* updated deployment to query v1

* generated swagger api for frontend and replaced previous Api with V1.

* changed remaining API -> V1 api

* Generated python api client

1. from modified swagger definition and python generator script.
2. Also deleted old python client files.

* Updating the requirements to import the kfp-tekton-server-api package containing the V1 API.

* fixed the lint

* add sdk v0 api patch

---------

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2023-02-01 10:12:30 -08:00
Tommy Li aca2280383
Fix(pipelineloop): Manifest flag patch and api version change for pipelineloop (#1137)
* update custom task default version

* update new rbac for customrun

* update owner reference
2023-01-27 22:52:03 +00:00
Tommy Li 5512f1bfb2
Revert "Revert "Feat(pipelineloop): upgrade to tekton version 0.43.2, tekton v1beta1 customRun ."" (#1136)
This reverts commit b3579dd88c.
2023-01-27 17:52:11 +00:00
Yihong Wang 7a45ca124a
fix(backend): Add format arg to printf command (#1135)
add format arg to printf command to avoid cached string
being interpreted.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-01-27 00:17:10 +00:00
Tommy Li 9720b3f534
fix(sdk): add command to resource op spec (#1134) 2023-01-26 21:54:10 +00:00
Tommy Li d37f21d713
chore(release): add 1.5.1 release patch (#1133) 2023-01-26 18:51:10 +00:00
Yihong Wang a7db1c189e
fix(backend): Add securitycontext for k8s 1.25 (#1132)
For k8s 1.25, a securityContext definition is needed for a pod.
Add a proper security context to the pipelineloop controller and webhook.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2023-01-26 18:11:10 +00:00
Tommy Li b3579dd88c
Revert "Feat(pipelineloop): upgrade to tekton version 0.43.2, tekton v1beta1 customRun ." (#1131)
This reverts commit 3425f03a4e.
2023-01-26 01:47:09 +00:00
Tommy Li 95f0abeada
chore(sdk): Update sdk comments (#1128)
* update sdk comments

* fix lint
2023-01-24 21:35:52 +00:00
Tommy Li 218a608ef8
chore(sdk): Update sdk requirement packages (#1127) 2023-01-24 18:35:25 +00:00
Prashant Sharma 3425f03a4e
Feat(pipelineloop): upgrade to tekton version 0.43.2, tekton v1beta1 customRun . (#1119)
* Tekton v1alpha1.Run to v1beta1.CustomRun

* support both runs and customruns
2023-01-24 17:07:25 +00:00
Tommy Li 33fa86d314
chore(backend): Update Tekton version to 0.43.2 (#1123)
* tekton 0.43 patch

* update licenses

* update licenses

* update readme and tekton install
2023-01-18 08:23:22 +00:00
Prashant Sharma 2aa67806ed
Feat: Add V1 endpoint support. (#1085)
* wip: applying patch1

* generating v1 APIs

* code gen

* delete patch reject files.

* WIP: creating both v1 and v1beta1 endpoints.

* removed v1beta1

* updated-license
2023-01-12 17:13:01 +00:00
Tommy Li 08da8cc771
chore(release): Add 1.5.0 release patch (#1118) 2023-01-10 23:42:15 +00:00
Tommy Li 3a20c43dd1
chore(release): Add 1.4.1 release patch (#1117)
* add 1.4.1 release patch

* update kubectl-wrapper to latest to avoid image version conflicts
2023-01-10 21:45:15 +00:00
Tommy Li 1ec34bfd95
feat(backend): Update db table config with Primary key to improve db read performance (#1116) 2023-01-09 22:48:57 +00:00
Tommy Li 466cde61bf
fix(sdk): Update custom task arg parsing for dash arguments (#1114) 2023-01-09 20:26:57 +00:00
Tommy Li fb32672585
feat(sdk): add pipeline_conf.timeout sdk support (#1108)
* add pipeline_conf.timeout sdk support

* update unit test script to take kfp config object
2022-12-14 20:22:21 +00:00
Yihong Wang 05baded0ef
feat(backend): specify the image for result step (#1104)
Allow users to specify the image for `step-move-all-results-to-tekton-home`
step. Add an env variable for the custom image: `MOVERESULTS_IMAGE`
in the api server.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-12-13 19:08:29 +00:00
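The variable name comes straight from the commit; how a server might resolve it is sketched below, with the fallback image as a placeholder rather than the api server's real default:

```python
import os

# MOVERESULTS_IMAGE overrides the image for the result-moving step (#1104);
# the default used here is an assumption for illustration.
moveresults_image = os.environ.get("MOVERESULTS_IMAGE", "busybox:latest")
print(moveresults_image)
```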
Tommy Li d39886de10
chore(sdk): update sdk version to 1.4.1 (#1093) (#1106) 2022-12-13 19:04:29 +00:00
Tommy Li e6027dd14c
fix(sdk): fix nested param logic for pl (#1091) 2022-11-29 21:31:15 +00:00
Tommy Li 0ab85f9762
fix(sdk): fix nested param generation issue (#1089)
* fix nested param generation issue

* change logic to only apply to parent counter
2022-11-28 22:17:42 +00:00
Tommy Li a7f2b04cf0
fix(sdk): Fix inline spec for iterateParamPassStyle and itemPassStyle (#1087)
* cherrypick(sdk): Fix counter param cherrypick (#1080) (#1082)

* fix nested loop counter param bug

* Update _tekton_handler.py

* fix inline spec for new features

* Update _tekton_handler.py
2022-11-21 18:50:13 +00:00
Tommy Li 1a70d222f0
chore(v2): cleanup old v2 content from v1 branch (#1083) 2022-11-15 20:11:27 +00:00
Tommy Li 57676731e3
fix(sdk): fix nested loop counter param bug (#1080)
* fix nested loop counter param bug

* address comments
2022-11-14 23:06:27 +00:00
Tommy Li 6117df5a7e
update tekton version to 0.41 (#1070)
* update tekton 0.41

* Update Dockerfile

* update license

* update license

* update tekton 0.41 manifests

* update kind kustomize

* update kind kubectl ver

* Update kfp-tekton-unittests.yml

* Update kfp-tekton-unittests.yml
2022-11-14 21:18:27 +00:00
Prashant Sharma 5c20b80f05
chore(PipelineLoop): upgrade to tekton v0.41.0 (#1072)
* upgrade to tekton v0.40.0

* fix tests
2022-11-11 21:42:23 +00:00
Tommy Li 1ba195e4dd
fix(sdk): add default labels for caching (#1078) 2022-11-11 21:39:23 +00:00
Tommy Li 2ec7b6e988
fix(bug): fix underscore param by checking pipeline param instead of task param (#1077)
* fix underscore param by checking pipeline param instead of task param

* update test cases
2022-11-11 17:15:51 +00:00
Tommy Li b4e322aa66
fix(sdk): fix nested loop underscore param (#1076) 2022-11-09 21:32:59 +00:00
Yihong Wang 880f37e775
update toolchain image (#1071)
update pkg, utilities, commands to newer versions, including:
- base image ubuntu:20.04
- kubectl 1.25.3
- ibmcloud
- node 18.6
- go v1.19.2

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-11-02 23:16:03 +00:00
Tommy Li a517d08c24
chore(CI): Update go version in CI to 1.19.x (#1069) 2022-11-02 21:11:02 +00:00
Tommy Li 93f726f60e
chore(docs): Move loop to custom feature since it's not Tekton native (#1067) 2022-10-28 17:19:47 +00:00
Tommy Li de32d586a6
fix(sdk): update python protobuf package for security update (#1066) 2022-10-28 17:11:46 +00:00
Tommy Li 25d116603d
feat(backend): add new annotation flags to enable artifact tracking at pipeline and task level (#1065)
* add new annotation flags to enable artifact tracking at pipeline and task level

* fix artifact metrics bug for new tekton version

* fix path bug

* fix path bug

* fix path bug

* fix path

* fix feature flag

* add feature doc
2022-10-28 17:10:47 +00:00
Tommy Li f1ed822920
chore(OWNERS): Remove Animesh as reviewer (#1064) 2022-10-26 21:13:20 +00:00
Tommy Li c49a005304
chore(release): add 1.4.0 release changes (#1063) 2022-10-26 21:12:18 +00:00
Tommy Li 96e573ecf6
fix(sdk): update loop dsl to handle same argument in nested loop (#1052)
* update loop dsl to handle same argument in nested loop

* fix lint

* fix sdk dependency
2022-10-24 21:43:12 +00:00
Prashant Sharma 98a2332cc0
fix(pipelineloop): caching should include the params for making caching key. (#1056)
* fix(pipelineloop): caching should include the params for making the caching key.

* Get params from run spec itself.

* Migrated cache for custom task controllers to gorm v2.

* code cleanup.

* Added retry for cache connect until timeout.

* improved tests to be able to detect config maps. Better error reporting.
2022-10-24 21:39:12 +00:00
Tommy Li 9b9b932ae0
chore(sdk): Remove unnecessary code (#1062) 2022-10-20 23:01:10 +00:00
Tommy Li 7e8d5ade7c
feat(sdk): add iterateParamPassStyle and itemPassStyle params to loop (#1059)
* add iterateParamPassStyle and itemPassStyle params to loop

* update new fields to store in extra configs

* update dsl to take extra_fields as new arguments

* make extra field values configurable

* update error message
2022-10-20 16:21:33 +00:00
Tommy Li d0a0706eaa
feat(sdk): add custom task param support in command spec (#1061) 2022-10-19 21:21:31 +00:00
Tommy Li d005b7bad6
fix(manifests): Patch cache config and break ct to default pl deployment (#1057) 2022-10-12 00:15:00 +00:00
Yihong Wang 11ff90e43a
feature(backend): Add QPS and Burst settings (#1055)
Add QPS and Burst settings for the RestClient
in api-server and tekton-pipelines-controllers.
Set both values to 50 for a medium workload.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-10-07 17:24:25 +00:00
Tommy Li 1f3093c9d0
Update kfp-sdk to 1.8.14 (#1050) 2022-10-05 20:33:22 +00:00
Yihong Wang 5a96b111c1
test: Add a test case for the validation (#1051)
Add a test to verify the performance of webhook
validation. A huge number of edges in the DAG caused
a performance issue in Tekton earlier; adding
this test case avoids performance degradation.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-09-30 20:33:55 +00:00
Prashant Sharma 30eb17811a
feat(pipelineloop): Support last_idx and last_elem (#1044)
* WIP: Support last_idx and last_elem

* WIP: SDK changes to support last-idx variable.

* fix

* fix sdk param

* fix controller to correctly update results

* fix tests

* fix unit tests and lint

* fix test cases to be executable (#5)

* fix unit tests and lint

* fix test cases to be executable

* removed unused code and added test

* fix test

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2022-09-30 16:15:55 +00:00
Tommy Li 69b84e84fb
fix(backend): Update Tekton to 0.38.4 (#1049)
* update tekton to 0.38.4

* update tekton to 0.38.4
2022-09-28 23:30:20 +00:00
Tommy Li 42a433e88e
fix(sdk): Fix nested numeric parameters (#1047)
* fix nested numeric parameters

* fix conflict
2022-09-19 23:54:31 +00:00
Tommy Li 33e3ab2a82
feat(sdk): Remove timeout since we moved to default unlimited timeout (#1043)
* remove timeout since we moved to default unlimited timeout

* remove old global flag

* fix python lint
2022-09-09 19:29:30 +00:00
Yihong Wang bbd72037b0
feat(backend): handle pipelineloop status (#1039)
* handle pipelineloop status

When using `embedded-status: minimal`, the nested pipelineloop status
is missing. Add logic in persistence agent to retrieve runs and taskruns
status for nested pipelineloop.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* Add an arg to specify the kinds

Use an arg to specify the kinds which contain
childReferences information. Persistence agent
uses this list to retrieve taskrun/run status
and embed them into the final PipelineRun yaml.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-09-01 17:43:56 +00:00
Tommy Li b3c1ec417c
Update tekton default timeout in global config (#1042) 2022-08-30 21:49:55 +00:00
Yihong Wang 37efcf09c9
fix(backend): add check for casting (#1040)
Add a check for the type cast; make sure the watcher
receives a Pod event.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-08-29 20:26:53 +00:00
Tommy Li 51e7623c6b
optimize(sdk): Further moving common caching annotations to pipeline level (#1038) 2022-08-26 16:08:50 +00:00
David Wang 5e6e4cf36e
do not mark failed until all running PRs are done (#1037) 2022-08-25 16:07:09 +00:00
Tommy Li 24afc390b6
Add finally context var test case for #1035 (#1036) 2022-08-23 21:09:05 +00:00
Michalina Kotwica 2900f9e772
fix(sdk) include finally tasks in placeholder indir (#1035) 2022-08-23 16:27:25 +00:00
Tommy Li 8dbce394f4
Update kfp-tekton release to 1.3.1 (#1033) 2022-08-22 22:07:25 +00:00
Tommy Li aa3366f28d
Update pipelinerun timeout format to timeouts (#1031) 2022-08-22 19:24:30 +00:00
Tommy Li 5f2e4a3e12
feat(sdk): Add alpha env flag to opt out artifact tracking annotations (#1032)
* add alpha env flag to opt out artifact tracking annotations

* reduce annotations further
2022-08-19 16:53:54 +00:00
Tommy Li 9df587341e
optimize(sdk): Rewrite data passing script into more optimized code (#1029) 2022-08-16 23:04:50 +00:00
Tommy Li 71f1cc1da6
sdk(feat): add metadata support for loop (#1028) 2022-08-15 22:49:05 +00:00
Tommy Li 364d10fb3f
Remove google and argo specific tests to reduce repo size (#1027)
* remove google and argo specific tests

* update removed links
2022-08-15 17:51:33 +00:00
Yihong Wang b2971bf50c
Add test cases for sequence and cache (#1026)
add two test cases to verify
- taskruns/runs execution sequence
- caching function

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-08-15 16:15:34 +00:00
Tommy Li 68bf69758f
Update tekton to 0.38.3 to resolve pipeline cancel issue with custom task (#1025)
* init commit for tekton-0.38.3 dependency

* update tekton 0.38.3 changes

* update pipelineloop to remove old Tekton alpha resources

* update pipelineloop to remove old Tekton alpha resources

* update pipelineloop to remove old Tekton alpha resources

* update missing licenses due to local network issue

* fix step injection bug for custom task
2022-08-12 19:36:54 +00:00
Tommy Li 5d46f229a9
KFP 1.8.4 Rebase (#1024)
* KFP 1.8.4 Rebase

* KFP 1.8.4 Rebase

* KFP  Rebase

* KFP  Rebase

Co-authored-by: kfp-tekton-bot <65624628+kfp-tekton-bot@users.noreply.github.com>
2022-08-11 16:39:03 +00:00
Tommy Li 1abffb662f
optimize(sdk): Move common task labels to pipeline labels to reduce yaml size (#1020)
* move common task labels to pipeline labels

* move common task labels to pipeline labels

* update new python lint requirements
2022-08-03 16:17:42 +00:00
Tommy Li d0079acbe0
Update s3 csi deployment (#1019) 2022-07-29 21:46:45 +00:00
Tommy Li 42408909ad
fix(backend): fix cache server output (#1018) 2022-07-29 00:11:43 +00:00
Tommy Li ea126bf2fa
fix(sdk): fix unit test merging conflicts (#1017) 2022-07-28 21:02:57 +00:00
Tommy Li 144f9e9881
Add kfp-tekton 1.3.0 release template (#1011) 2022-07-28 20:38:55 +00:00
Tommy Li b219b2ee1e
fix(sdk): Add new logic to cover new tekton runafter condition (#1016)
* add new logic to cover new tekton runafter condition

* fix loop condition error
2022-07-28 20:30:55 +00:00
Tommy Li 909c4f2953
fix(sdk): fix exit handler param with empty exit task (#1015) 2022-07-27 19:18:11 +00:00
Tommy Li eaac53de79
Update result name to be case sensitive (#1013)
* update result name to be case sensitive

* add custom task test case
2022-07-26 21:03:16 +00:00
Tommy Li e2aa560a4c
Add alpha feature to output big data passing file path instead of task run name (#993)
* stage

* regenerate tests

* regenerate tests

* update comments

* resolve conflicts

* fix immediate bug

* parametrize path suffix name
2022-07-19 22:20:20 +00:00
Tommy Li 26b445551b
fix(sdk): Fix big data passing with multiple type of volume templates (#1006)
* fix big data passing with multiple type of volume templates

* fix lint and license

* fix multi input step bug

* refactor update volume code

* add missing tests
2022-07-19 17:53:51 +00:00
Federica Filippini a9f148343b
fix(sdk): Support passing directories in step-copy-results-artifacts and step-copy-artifacts. Fixes #996 (#997)
* Added support for directories in the step-copy-results-artifacts script

Check if the provided result (src) is a directory; if so, create a tar.gz archive and compute the ARTIFACT_SIZE on {src}.tar.gz.
Moreover, print the list of files in {src}.tar.gz (namely, the output of `tar -tzf {src}.tar.gz`) to the {dst} file instead of copying {src} directly into {dst}

* Added support for directories in the step-copy-artifacts script

Added the check [ -d "$2" ] to allow directories to be transmitted as artifacts

* Update unit tests

* Update sdk/python/kfp_tekton/compiler/_data_passing_rewriter.py

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>

* reducing script size

* generated tests

* removed whitespace identified by lint test

* fixed error (missing suffix)

* generated tests

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2022-07-19 17:51:49 +00:00
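A rough Python transcription of the directory handling described above; the real change lives in the shell steps, so this is a sketch of the logic, not the step itself:

```python
import os
import tarfile

def copy_result(src: str, dst: str) -> None:
    # If src is a directory, archive it, report the archive size, and write
    # the member list to dst (like `tar -tzf`); otherwise copy the bytes.
    if os.path.isdir(src):
        archive = src + ".tar.gz"
        with tarfile.open(archive, "w:gz") as tar:
            tar.add(src)
        print("ARTIFACT_SIZE:", os.path.getsize(archive))
        with tarfile.open(archive, "r:gz") as tar, open(dst, "w") as out:
            out.write("\n".join(tar.getnames()))
    else:
        with open(src, "rb") as fin, open(dst, "wb") as fout:
            fout.write(fin.read())
```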
Tommy Li 8d694c01a2
Remove old build dependencies (#1005) 2022-07-19 15:51:49 +00:00
Tommy Li 7d9f8c9a30
Add alpha preview feature into compiler (#991)
* add alpha preview feature into compiler

* address comments

* fix typo
2022-07-18 17:07:09 +00:00
Ricardo Martinelli de Oliveira 852b9ac2af
chore(apiserver): Object Storage connection abstraction (#977) 2022-07-14 21:10:34 +00:00
Tommy Li 126021b9d0
sdk(feat): Add big data passing with custom volume on kfp-tekton (#995)
* add big data passing with custom volume on kfp-tekton

* fix vscode typos and address errors

* Update compiler.py
2022-07-13 21:15:33 +00:00
Tommy Li 00339aa99f
Update sdk version to 1.2.3 (#1002)
* update sdk version to 1.2.3

* Update README.md
2022-07-12 21:26:02 +00:00
Alan Chin 243c15c87f
`chore(sdk) Bump kfp dependency to 1.8.13` (#999) 2022-07-12 17:03:31 +00:00
Yihong Wang 933945480e
eliminate the use of `taskSpec.script` (#992)
when using script, it generates an init container to process the script.
we'd like to avoid the init container in pipelinerun.
remove `script` and use `command` and `arg` instead.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-07-08 18:43:52 +00:00
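The rewrite this commit performs, sketched on a step represented as a dict (the structure is assumed from Tekton's TaskSpec; this is not the compiler's actual code):

```python
def script_to_command(step: dict) -> dict:
    # Drop `script` so Tekton does not inject the script-processing init
    # container; run the body through a shell via command/args instead.
    if "script" in step:
        step["command"] = ["sh", "-c"]
        step["args"] = [step.pop("script")]
    return step

print(script_to_command({"image": "busybox", "script": "echo hello"}))
```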
Maximilien de Bayser bce9814651
fix(sdk): Verify binary data in step copy-results-artifacts. Fixes #984 (#985)
* Add binary character check to copy-results-artifacts

* Update unit tests
2022-07-06 19:46:45 +00:00
Tommy Li 52f06b66bf
feat(sdk): Add labels for artifact outputs when tasks are not referred (#967)
* add labels for artifact outputs when tasks are not referred

* do not generate noninline test

* add option to also do individual artifactoutput

* add option to also do individual artifactoutput

* remove all output to artifact label

* fix sorting for test

* fix upstream conflicts
2022-06-20 16:20:37 +00:00
Michalina Kotwica b899035336
fix(backend): nil ptr exception for empty run status in pipeline-loops, fixes #981 (#982) 2022-06-20 16:09:37 +00:00
Tommy Li 7c0cc3a365
Update sdk dependency and release version to address private python package pull (#979)
* update sdk dependency and releases

* update sdk dependency and releases
2022-06-17 22:32:34 +00:00
Yihong Wang 5fd2b73022
feature(backend): support embedded-status: minimal (#976)
* feature(backend): support embedded-status: minimal

Add logic to support embedded-status feature when using
minimal setting. For now, the TaskRun/Run status is retrieved
and inserted into PipelineRun.Status and stored in the ml-pipeline
backend storage.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* Add embedded-status feature flag

Use `full` as the default setting for embedded-status
feature flag.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-06-16 20:15:42 +00:00
Yihong Wang 66c09441a9
fix(sdk): update condition template (#975)
Eliminate the use of `script` and replace it with
command and args. This also removes the init container that
prepares the script.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-06-07 22:11:15 +00:00
David Wang f043833b3f
make cache to support multiple containers in task pod (#974)
* support multiple containers in task pod

* support multiple containers in task pod
2022-06-07 16:04:31 +00:00
Tommy Li 0b00328838
fix(docs): Add explicit commands for Tekton versioned deployment to avoid doc con… (#973) 2022-06-07 14:57:30 +00:00
Tommy Li 58e029797c
fix(pipelineloop): Add param with empty space test (#970)
* add param with empty space test

* add param with empty space test
2022-06-06 13:15:29 +00:00
Yihong Wang 80739c0239
upgrade tekton to 0.36.0 (#938)
* upgrade tekton to 0.35.1

Upgrade tekton to 0.35.1, including:
- tekton manifests
- go.mod to use new pkg and update all related pkgs
- update pipeline-loops to also use new tekton pkg

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* update tekton to 0.36

update tekton, license files, and cache server

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* fix compile error on test code

for taskrun's step, it doesn't use container data struct
any more

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* update compiler

add `type` for the taskSpec.results and update all
yaml files accordingly

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-06-03 21:57:26 +00:00
David Wang 7e6cabce39
handle step from task results (#964)
* handle step from task results

* address review comments
2022-06-01 16:33:49 +00:00
Joanna 514da93c52
update install guide (#965)
* update install guide

* update
2022-05-31 20:41:11 +00:00
Rafał Bigaj 65e203c6ea
fix(pipeline-loops): PipelineLoop consumes significant amount of memory (#963)
Storing PipelineSpec and TaskSpec in PipelineRunStatus is a source of significant memory consumption and OOM failures.
PipelineSpec and TaskSpec seem to be redundant in this place.

Issue: https://github.com/kubeflow/kfp-tekton/issues/962
2022-05-27 18:23:13 +00:00
Tommy Li 566b587a9d
Add pipelinerun pod template support for security context and automou… (#955)
* add pipelinerun pod template support for security context and automount_service_account_token

* update flag to the correct type

* update flag to the correct type

* update flag to the correct type

* update usage docs
2022-05-27 17:34:13 +00:00
David Wang 6b3ef559ec
fix 954, trim whitespace and newline char for range input (#961)
* fix 954, trim whitespace and newline char for range input

* address review comments
2022-05-27 12:14:12 +00:00
Yihong Wang a84363c388
feat(test): add backend integration test (#956)
* [test] tryout kind on github

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* build images

build and use the images inside the kind cluster

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* remove unnecessary step

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* build multiple images in a script

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* check if any change for backend files

check changes for backend files and trigger the integration
testing if any.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-05-26 15:06:05 +00:00
Yihong Wang faa409aa61
fix(backend): refine copy-artifacts step (#942)
* fix(backend): refine copy-artifacts step

To avoid result duplication in the copy-artifacts step, add a step
to move result files to /tekton/home/tep-results.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* disable eof strip if results are moved

When results are moved to /tekton/home/tep-results,
the EOF strip shouldn't be applied; disable it in
this case for now.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-05-24 21:14:33 +00:00
Yihong Wang ca3c417a53
feat(backend): update go version and license files (#949)
update the go version in Dockerfiles, go-license tool,
and license files. these changes come from upstream and
reduce the build time.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-05-24 21:13:33 +00:00
Tommy Li 00eaffc4ea
fix(sdk): Fix param prefix replacement (#952) 2022-05-24 15:36:57 +00:00
Tommy Li 9307b361fc
Update manifests with 1.2.1 release (#948) 2022-05-23 17:50:01 +00:00
Tommy Li 7a0e6bc3e9
Store the uploaded pipeline as original yaml format (#945) 2022-05-23 16:24:01 +00:00
David Xia 16157a021f
fix: typo in frontend message (#946) 2022-05-23 16:05:01 +00:00
David Wang 3ab41efc92
iteration limit support, default limit is 10000 (#944) 2022-05-20 16:11:25 +00:00
Prashant Sharma 27db2e348f
Loop ranges should handle from = to, and generate one iteration. (#941)
* Loop ranges should handle start = 0 and end = 0, and generate one iteration.

* When from = to, generate one iteration, emulating how enumerate works.

* Fixed edge cases.

* If from and to are unset, we should not pick defaults either.
2022-05-18 16:50:26 +00:00
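The edge-case semantics listed in the bullets, as a sketch (the helper name is hypothetical):

```python
def loop_range(start: int, to: int, step: int = 1) -> list:
    # When from == to, emulate enumerate and yield exactly one iteration
    # instead of none.
    if start == to:
        return [start]
    return list(range(start, to, step))

assert loop_range(0, 0) == [0]        # from == to -> one iteration
assert loop_range(0, 3) == [0, 1, 2]
```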
Yihong Wang 8f034ffd9f
[build] update github workflow to use go v1.17 (#939)
the whole repo needs go v1.17 now.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-05-13 16:06:34 +00:00
Yihong Wang 3f1d589892
[build] update toolchain image (#937)
upgrade golang to 1.17.6 and update the
ibmcloud plugins

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-05-13 16:05:35 +00:00
David Wang 425d4be4ee
wait for pipelineloop-break-operation task final status (#936) 2022-05-12 16:11:04 +00:00
Yihong Wang 282353d3f9
feat(sdk): Add helper class AddOnGroup (#920)
* feat(sdk): Add helper class AddOnGroup

add a helper class for users to write a custom
OpsGroup which maps to a custom task in Tekton.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* move finally under pipelineSpec

`finally` shall be under `pipelineSpec`.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* Enhance params of AddOnGroup

make `params` argument of AddOnGroup class support
static and dynamic values.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* Add post_param() api in AddOnGroup

Add `post_param()` api to update the params of
the task

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* Handle intermediate params

Create a dedicated API to create intermediate params
which are only used by downstream Ops/OpsGroup, but not
listed in the spec.params

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* fix incorrect param value

properly handle params that are created
from AddOnGroups in downstream Ops

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* handle nested AddOnGroup case

An AddOnGroup can contain another AddOnGroup
as a downstream OpsGroup. The group name of an AddOnGroup
needs to follow the correct name pattern to allow
the compiler to calculate the nested case properly.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* clean up params inside the taskSpec.pipelineSpec

only keep params that are used by downstream tasks in
taskSpec.pipelineSpec.params

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* support annotations and labels

Add properties in AddOnGroup to store annotations
and labels. Then add their key/value pairs to
metadata.annotation and metadata.labels.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* rename the annotation property to annotations

amend the property name to plural

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-05-10 17:51:07 +00:00
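A condensed usage sketch of the AddOnGroup helper described in #920 above; the import path follows the SDK's module layout, but the subclass, custom-task kind, and constructor keywords shown here are hypothetical:

    from kfp import dsl
    from kfp_tekton.tekton import AddOnGroup  # assumed import path

    class ExceptionHandler(AddOnGroup):
        """Hypothetical AddOnGroup mapping to a custom Tekton task kind."""
        def __init__(self):
            super().__init__(
                kind='Exception',                         # hypothetical kind
                api_version='custom.tekton.dev/v1alpha1', # hypothetical group/version
                params={'retries': 3})                    # static value; dynamic values also supported
            # properties added by this commit, copied into metadata.annotations/labels:
            self.annotations = {'owner': 'demo'}
            self.labels = {'tier': 'addon'}

    @dsl.pipeline(name='addon-demo')
    def addon_demo():
        with ExceptionHandler():
            dsl.ContainerOp(name='step', image='alpine', command=['echo', 'hi'])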
Tommy Li 1ef7b03af4
Update sdk release to 1.2.1 (#929) 2022-05-06 15:04:50 +00:00
Tommy Li ea1221a264
Fix multiple nested condition in loops issue (#928)
* fix multiple nested condition in loops issue

* update test cases to work with community cel custom task;
2022-05-04 20:58:33 +00:00
Michalina Kotwica 284891c8b0
fix(sdk) add test for any-seq inside of loop (#927)
* add test for any-seq inside of loop

* fix style: newline at the end of file

* recur when handling tekton pipeline variables

* update test name

* refactor adding a param

* add type to extracted task params inside loop

* fix style: no spaces in single-elem dict
2022-05-04 16:10:32 +00:00
Tommy Li c854c27108
Add necessary env variables for resource op owner reference (#925) 2022-04-29 19:52:00 +00:00
Tommy Li 7dd94985b3
Add resource op owner reference mapping (#922) 2022-04-28 16:27:32 +00:00
Tommy Li 8ba08e28ef
fix(SDK): Update sanitization logic to sync with kfp dsl (#919)
* map sanitization logic to original kfp

* update test data to reproduce sanitization bug

* fix upstream dsl bugs on nondeterministic params for long names
2022-04-22 18:17:41 +00:00
David Wang 892bbaa8e6
Support the comma as default string param separator (#921)
* support the default string param separator comma

* support the default string param separator comma
2022-04-22 15:25:40 +00:00
Prashant Sharma c7dfca07a7
feat: Support loop ranges with -ve values. (#911)
* feat: Support loop ranges with -ve values.

* Test cases from github issue.
2022-04-15 17:02:40 +00:00
Yihong Wang b14f952ceb
fix(sdk): Apply node_selector in PipelineConf (#918)
* fix(sdk): Apply node_selector in PipelineConf

Apply the node_selector in PipelineConf to spec.podTemplate
instead of taskPodTemplate of each task.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* update Makefile under pipelineloop

when running cli, it should do update target first

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-04-13 19:46:53 +00:00
Tommy Li 51c5cdd3a7
fix(sdk): fix nested loop global param bug (#917)
* fix nested loop global param bug

* fix nested loop global param bug

* add license

* fix recursion bug
2022-04-13 15:44:54 +00:00
Yihong Wang eac02f0633
fix(sdk): handle the node selectors (#916)
Handle the node selector info from PipelineConf in the
compiler. Node selector info at the Op level can override
PipelineConf settings.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-04-12 20:43:56 +00:00
David Wang f083d6f0f2
pass down the podTemplate and serviceAccount from run to loopSpec (#909) 2022-04-07 20:11:15 +00:00
Tommy Li 219f2d0904
Update Tekton client and manifests to v0.31.4 (#905)
* update tekton client to 0.31.4

* update tekton manifests

* add missing go mod files
2022-04-07 00:09:14 +00:00
Prashant Sharma e9f61fcdea
IterateNumeric behavior changed to contain the value of the current iteration item. (#904) 2022-04-04 23:32:36 +00:00
Yihong Wang 53fa9e7521
feat(sdk): extension function `Loop.enumerate()` (#901)
* feat(SDK): extension function `Loop.enumerate()`

Add helper function: `enumerate()` in Loop to
return indices and items. It can be used to get the
current iteration number and item value.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* add logic to handle nested loop

Add logic to handle nested loop and test cases

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-04-01 22:30:05 +00:00
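A minimal DSL sketch of the `Loop.enumerate()` helper from #901 above; the exact call shape (classmethod vs. instance method) is assumed from the commit text:

    from kfp import dsl
    from kfp_tekton.tekton import Loop  # assumed import path

    @dsl.pipeline(name='enumerate-demo')
    def enumerate_demo():
        # enumerate() yields the iteration index alongside the item value,
        # analogous to Python's built-in enumerate().
        with Loop.enumerate(['a', 'b', 'c']) as (idx, item):
            dsl.ContainerOp(name='echo', image='alpine',
                            command=['echo', idx, item])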
Tommy Li 9adc788bb4
Parameterize kubelet path for s3-csi manifests (#900) 2022-04-01 22:29:05 +00:00
Prashant Sharma 7b692b6def
feat: New variable iterationNumberParam to track current item. (#898)
* New variable iterationNumberParam to track current item.

* New variable iterationNumberParam to track iteration no.

* New variable iterationNumberParam to track iteration no.

* review comments
2022-04-01 22:22:05 +00:00
Tommy Li 82aea99741
Add missing csi plugin for multi-user manifests (#899) 2022-03-31 04:29:30 +00:00
Tommy Li aa90528bc8
Add missing openshift standalone permission (#897) 2022-03-29 16:47:22 +00:00
Prashant Sharma 06487acc90
fix(controller): Added fields serviceAccountName and PodTemplate to PipelineLoop. (#891)
* wip

* Added fields serviceAccountName and PodTemplate to PipelineLoop.

* review comment

* Update pipelineloop_types.go
2022-03-28 22:28:21 +00:00
Michalina Kotwica b85d4e146f
fix(sdk) add tests for non-literal separator (#896)
* add tests for non-literal separator

* plug-in the tests

* process 'separator' like 'from'-'step'-'to'

* explicitly allow PipelineParam as a separator
2022-03-28 16:21:51 +00:00
Tommy Li f4086039b2
Add yhwang as approver (#893) 2022-03-26 05:22:14 +00:00
David Wang a7f6c0a634
Add current iteration item to the PipelineLoopPipelineRunStatus (#894) 2022-03-25 18:36:12 +00:00
Prashant Sharma 6a9f88fa8e
fix: objectstore refactoring (#887) 2022-03-24 15:19:47 +00:00
Yihong Wang fed74c1bca
[test] Update mnist-e2e sample (#888)
Use KServe for the model serving

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-03-22 21:12:50 +00:00
Prashant Sharma 763d42c1d4
chore: Objectstore package refactoring. (#884) 2022-03-22 01:30:30 +00:00
Rafał Bigaj 41e50fa61a
fix(PipelineLoop): integer divide by zero (#883)
- Use default `step` value: `1` in case of `0` provided in `PipelineLoop`.

Issue: https://github.com/kubeflow/kfp-tekton/issues/882
2022-03-18 17:53:40 +00:00
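The fix in #883 amounts to a guard before the iteration count is computed; a one-glance sketch of the idea (the real change lives in the Go controller, and the count formula here is illustrative):

    def iteration_count(frm, to, step):
        # Fall back to the default step of 1 when 0 is provided,
        # preventing the integer divide-by-zero reported in #882.
        if step == 0:
            step = 1
        return abs((to - frm) // step) + 1  # inclusive bounds assumed

    assert iteration_count(0, 10, 0) == 11  # no longer panics on step == 0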
Michalina Kotwica d5e2e43992
fix(sdk) add test for custom-tasks with literals. Fixes #880 (#881)
* add test for custom-tasks with literals

* fix orig_params check for custom tasks

* fix: style: min 2 spaces before inline comment

* fix: style: 2 empty line before function decl
2022-03-17 16:19:10 +00:00
Prashant Sharma 3ab7a40055
feat(objectstore) Metadata object store (#876)
* Objectstore v1

* Objectstore v1

* Objectstore v1
2022-03-15 23:11:36 +00:00
Tommy Li be74f6612d
Add 1.2 release changes (#875)
* add 1.2 release changes

* update install readme
2022-03-10 22:46:54 +00:00
Tommy Li b302354298
Patch kfp 1.8.1 new config (#872) 2022-03-09 20:24:11 +00:00
Prashant Sharma b00e8ccf82
Break task sdk update (#871)
* Update SDK to support break task as custom task.

* Added tests

* updated testdata
2022-03-08 22:24:10 +00:00
Tommy Li 94dbd2a65a
Add caching labels for inline loops and custom tasks (#870) 2022-03-08 00:25:28 +00:00
Tommy Li 119e6acf62
Update kfp sdk dependency to 1.8.10+ (#868)
* update kfp sdk dependency to 1.8.10+

* Remove local env details

* update upper bound kfp dependency to 1.8.11 for more stable releases
2022-03-04 18:06:59 +00:00
Prashant Sharma dc2eeff827
Feat(pipelineloop): Break task as custom task. (#854)
* wip

* break task status update

* failed to force update a run.

* wip

* Own a break task and then update it to succeed.

* removed unused code
2022-03-04 17:26:59 +00:00
Tommy Li 559f8b6df0
Kfp 1.8.0 api and frontend rebase (#862)
Co-authored-by: kfp-tekton-bot <65624628+kfp-tekton-bot@users.noreply.github.com>
2022-03-03 00:58:28 +00:00
Tommy Li 9843872b3a
Fix Tekton conditional dependency on sub-dag (#866)
* Fix Tekton conditional dependency on sub-dag

* fix tekton conditional dependency on group ops
2022-03-02 17:35:04 +00:00
Prashant Sharma 50eb94ae76
feat(pipelineloop): Add caching configuration support through config-map. (#861)
* caching flags

* fix tests

* fix the broken link for kustomize.
2022-03-01 20:02:03 +00:00
Michalina Kotwica 42cc312ad0
fix(sdk) add test for empty loops (#864)
* add test for empty loops

* treat no-tasks and tasks-list-empty the same

* add licenses
2022-02-28 17:07:37 +00:00
Yihong Wang 1997919632
[test] fix typo in pipeline-loop example (#860)
add double quotes to the value of `true`
2022-02-24 02:15:04 +00:00
Prashant Sharma 14c619598c
feat: caching for custom task. (#852)
* WIP: caching for custom task.

* Extracted cache as a standalone package.

* added go.mod

* fix tests

* feedback

* Added environment variable check, tests and refactored code.

* updated readme

* Apply suggestions from code review

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>

* Update pipelineloop_types.go

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2022-02-22 19:26:24 +00:00
Tommy Li acd5d53717
Add KFP-Tekton 1.1.1 release (#847)
* add 1.1.1-release patch

* add 1.1.1-release patch

* update patche for openshift permission
2022-02-10 22:27:40 +00:00
Yihong Wang 293b5ca467
[test] replace the cache-server image (#848)
* [test] replace the cache-server image

Replace cache-server image while running testing

Signed-off-by: Yihong Wang <yh.wang@ibm.com>

* [test] Replace frontend image

Replace frontend image while running testing

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2022-02-10 18:04:16 +00:00
Tommy Li c0bf2e970e
Update manifests for multi-users k8s 1.22 support (#846) 2022-02-09 23:28:32 +00:00
Tommy Li 6166c7c2cc
Add loop numeric sdk support (#838)
* add loop numeric sdk support

* address comments

* fix comments

* add pipelineparam as input type for loop range function
2022-02-04 17:52:44 +00:00
Michalina Kotwica eab7b4c606
fix(sdk) separator param. Part of #791 (#842)
* m: typo in a comment

* remove separator param in loop spec

* update test
2022-02-01 17:53:59 +00:00
Tommy Li 0163d4c989
Remove unsynced components (#840)
* remove unsynced components

* add back filesystem components for running unit tests
2022-01-31 18:08:32 +00:00
Tommy Li ca81bf0c75
Add support to parse pipeline param in json string (#841)
* add support to parse pipeline param in json string

* update license
2022-01-27 19:47:11 +00:00
Tommy Li 2923a1e802
Update KFP-Tekton installation to give guidance on GPU worker nodes. (#836) 2022-01-26 23:16:10 +00:00
Michalina Kotwica dc74cb2374
feat(sdk): implement loop DSL ext: from_string. Part of #791 (#837)
* handle DSL for loop item separator

* handle compilation for separator

* add tests for loop with separator

* style: self -> cls in classmethods

* fix: dsl+compile

* update test results

* style: remove unused import

* style: blank lines

* add license to tests

* fix tests: no value passing of str loop src
2022-01-25 17:38:20 +00:00
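A short sketch of the `from_string` extension from #837 above, assuming the `separator` keyword named in the commit (per #896 the separator may itself be a PipelineParam rather than a literal):

    from kfp import dsl
    from kfp_tekton.tekton import Loop  # assumed import path

    @dsl.pipeline(name='from-string-demo')
    def from_string_demo(items: str = 'a|b|c'):
        # The string is split on the separator into individual loop items.
        with Loop.from_string(items, separator='|') as item:
            dsl.ContainerOp(name='echo', image='alpine', command=['echo', item])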
Tommy Li 89f4e418bc
Update performance script to include more metrics (#832) 2022-01-22 02:34:46 +00:00
Prashant Sharma 4b532e3041
feat(objectstorelogger): objectstorelogger as a separate module. (#829)
* cos-logger as a separate module.

* temp

* temporary hack: using scrapcodes as repository.

* correction of import in README.md

* found a golang trick to use local packages.

* naming changes

* renaming update to README.md
2022-01-21 18:14:21 +00:00
ted chang 793854829e
Additional param to enable running copies of same pipeline concurrently (#828)
Signed-off-by: ted chang <htchang@us.ibm.com>
2022-01-20 18:50:53 +00:00
Tommy Li afe05fba76
Clean up owners file (#827) 2022-01-19 18:27:28 +00:00
Tommy Li 1f7507120b
Enable pipelineparam for container env (#825) 2022-01-19 18:26:28 +00:00
Prashant Sharma 6d2682dad6
fix(pipelineloop): break loop should cancel all pipeline runs. (#824)
* fix break loop

* fixed the break loop logic.

* run status added back
2022-01-19 18:16:28 +00:00
Tommy Li b1bb32b4e9
kfp-tekton 1.1 release (#814)
* add kfp-tekton 1.1 release

* update manifest with the latest commits

* migrating rest of v1beta1 to v1 api
2022-01-12 19:11:10 +00:00
Prashant Sharma 50c172fcce
feat(pipelineloop): Support logging to Object store. Built as an extension for zap. (#806)
* Object store logging as an extension for zap.

* Do not add object store logger if it is not enabled.
2022-01-11 23:22:09 +00:00
Tommy Li ed7673cd37
Add option to produce pipelinerun taskspec as taskref (#810)
* add option to produce taskspec as taskref

* add rbac for task templates
2022-01-11 01:19:42 +00:00
Tommy Li c8b258cf5c
Move postcss to version 8 and fix Tekton UI dependency chain (#815)
* move postcss to version 8

* move postcss to version 8
2022-01-11 00:58:42 +00:00
Tommy Li 3d09ad20a8
Update pipelineloop and manifests with k8s 1.22 API (#813)
* update pipelineloop and manifests with k8s 1.22 API

* remove preserveUnknownFields since it already enabled by default
2022-01-10 14:45:17 +00:00
Tommy Li 3bb2767d1b
Chore: regenerate python tests (#812) 2022-01-06 22:01:41 +00:00
Prashant Sharma 4fa2dd97cb
fix(pipelineloop): Nested call stack depth limit (#808)
* wip

* recursion depth cannot be set on the nested pipelineloop resource.

* recursion depth can be set on the nested pipelineloop resource.

* removed an extraneous log statement.

* review feedback - change default stack limit to 30

* fixed tests
2022-01-06 18:58:35 +00:00
Tommy Li 2fac70f1d4
Reduce component spec annotations (#805)
* reduce component spec annotations

* add option to opt out component spec annotations

* fix lint
2022-01-06 18:28:36 +00:00
Tommy Li 77fcaaa34a
update pipelineloop operator to check break tasks with taskrun (#809) 2022-01-06 06:10:36 +00:00
Prashant Sharma 2a65c33b79
feat(pipelineloop): Break operation for pipelineloop. (#807)
* Break operation for pipelineloop.

* Break operation for pipelineloop.
2022-01-04 18:29:53 +00:00
Tommy Li b16bd8863a
Add tekton loop dsl extension skeleton (#799) 2021-12-16 01:55:35 +00:00
Tommy Li c4f52e5f1c
Add taskrun name and uid into big data passing midpath (#771)
* add uid into big data passing midpath and update uid replacement to also replace with Tekton uid variable

* use taskrun name as midpath

* regenerate tests

* fix tests

* fix tests

* fix lint

* fix tests

* update midpath from uid to original pipelinerun

* fix lint

* replace original pipelinerun name in api to make the metadata writer work. Also add big data passing format to pipeline annotations

* update sdk loop logic to pass workspaces to sub-pipelines

* fix tests
2021-12-09 01:32:55 +00:00
Tommy Li e50795f125
Upgrade tekton api client and manifest to 0.30.0 & update default timeout (#798)
* upgrade tekton api client to 0.30.0

* update default timeout to non-zero
2021-12-08 20:41:59 +00:00
Tommy Li 88de0c42a9
Fix performance test script and update testing examples (#795)
* fix performance test script

* fix lint

* add missing license

* revert script changes

* update testing example
2021-12-01 22:45:04 +00:00
Christian Kadner 2c61c8ade5
Load performance test pipelines from files (#796)
* Add perf_test_config.yaml with paths of test pipelines
* Add a 'Status' column to output CSV file
* Update compile_pyfile to remove loaded pipeline module after
  compilation to allow subsequent imports of another module
  with the same name

Resolves #778

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-12-01 21:38:03 +00:00
Yudong Wang 0bb387382d
Update workflow.go (#794)
* Update workflow.go

In verifyParameters, check whether a parameter is on the list of unused parameters, and if so, log a warning instead of raising a hard error.
When there are many parameters it is hard to determine which ones are in use and which are unused, so a warning serves this case better.

* Update workflow.go

* Update workflow.go
2021-11-30 07:56:30 +00:00
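The behavioral change in #794 above, in miniature (illustrative Python; the actual change is in the Go api-server's verifyParameters):

    import logging

    def verify_parameters(provided, used):
        # Unused parameters now produce a warning instead of a hard error:
        # with many parameters it is hard for users to tell which ones
        # are actually consumed.
        for name in provided:
            if name not in used:
                logging.warning("parameter %r is unused; ignoring", name)

    verify_parameters({'lr', 'epochs', 'debug'}, used={'lr', 'epochs'})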
David Wang 6551207729
support separator string for iteration string param (#790)
* support separator string for iteration string param

* address review comments
2021-11-30 02:04:30 +00:00
Tommy Li 61e4f36578
Move loop retry test as a different example (#789) 2021-11-24 22:39:34 +00:00
Yihong Wang 54af3c90ad
[test] Add taskRun and run into the report (#785)
Get taskRun and run details into the output CSV

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-11-24 20:55:47 +00:00
Prashant Sharma e0e60bdc55
feat(pipelineloop): Support workspaces for pipelineLoop. (#787)
* Support workspaces for pipelineLoop.

* fix doc link test
2021-11-24 19:17:48 +00:00
Prashant Sharma 1a861306d8
feat(pipelineloop): Implement retry for pipelineloop controller. (#781)
* update to the latest tekton API i.e. 0.30

* Implement retry for pipelineloop controller.

* fix test after rebase.
2021-11-23 22:23:47 +00:00
Michalina Kotwica e2ea6bf1f1
feat(sdk): factor out the images to use in copying steps (#782) 2021-11-23 16:50:06 +00:00
David Wang a871eba224
expose current iteration item to pipelinerun's annotation (#776)
* expose current iteration item to pipelinerun's annotation

* correct typo
2021-11-22 01:32:36 +00:00
Tommy Li 6dc9d61061
Fix archived logs for custom tasks (#777) 2021-11-19 23:02:34 +00:00
Christian Kadner 121c40f9dc
Add performance test script (#773)
* Add performance test script

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Enable script in single-user mode, unauthenticated

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Use explicit names for duplicate pipeline function names

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Run performance tests in parallel

Pipeline compilation still runs in sequence due to
compiler implementation limitations.

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Improve readability of console output

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Use decorator to track method execution times

This allows accurately recording compile times when running tests
in parallel, despite the restriction that pipeline compilation is
synchronized.

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Assert presence of 'pipeline_name' keyword argument

Instead of raising a ValueError in '@time_it' decorator

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-11-19 23:01:35 +00:00
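A self-contained sketch of the timing decorator described in #773 above; the `@time_it` name and the 'pipeline_name' assertion come from the commit message, while the bookkeeping is illustrative:

    import functools
    import time

    timings = {}

    def time_it(func):
        """Record wall-clock execution time per pipeline, keyed by name."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Assert the keyword's presence instead of raising ValueError.
            assert 'pipeline_name' in kwargs, "'pipeline_name' is required"
            start = time.perf_counter()
            try:
                return func(*args, **kwargs)
            finally:
                timings[kwargs['pipeline_name']] = time.perf_counter() - start
        return wrapper

    @time_it
    def compile_pyfile(path, *, pipeline_name):
        time.sleep(0.01)  # stand-in for the synchronized compile step

    compile_pyfile('demo.py', pipeline_name='demo')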
Tommy Li 5dfc36bb41
Add UI backward compatibility for dsl.container syntax (#768)
* add ui backward compat for dsl.container syntax

* add ui backward compat for dsl.container syntax
2021-11-09 11:58:05 -08:00
Tommy Li 424c79eee8
Add csi-s3 storage class as big data passing storage (#753)
* add csi-s3 as big data passing storage

* update storageclass to kfp-csi-s3 to avoid conflicts with datashim

* Update README.md
2021-11-02 05:08:00 -07:00
Tommy Li c87e70c638
Fix a typo for the openshift docs (#763) 2021-10-28 13:28:51 -07:00
Andrew Butler 27504c3089
Add tag to frontend code with changes for Tekton (#764)
Signed-off-by: Andrew-Butler <Andrew.Butler@ibm.com>
2021-10-28 10:10:26 -07:00
Michalina Kotwica 3e4f4d8d39
fix(sdk): "when" in some ParallelFor loops (#762)
* fix: "when" in some ParallelFor loops

* refactor: split long line
2021-10-27 09:17:28 -07:00
Tommy Li 99d28bbca1
Update trusted AI pipeline to have unique job names (#760) 2021-10-26 03:57:42 -07:00
Prashant Sharma 1eea71606b
feat(sdk): Add an API to return prepared workflow as python dicts. (#757)
* Add an API to return the prepared workflow as python dicts.

* Added guide and improved the prepare workflow signature.
2021-10-22 10:20:03 -07:00
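A sketch of the API's intent; the `prepare_workflow` name is taken from the commit bullets above, but the exact signature and return shape are assumptions:

    from kfp import dsl
    from kfp_tekton.compiler import TektonCompiler

    @dsl.pipeline(name='dict-demo')
    def dict_demo():
        dsl.ContainerOp(name='echo', image='alpine', command=['echo', 'hi'])

    # Returns the compiled PipelineRun as plain Python dicts, so callers can
    # inspect or patch it before serializing to YAML themselves.
    workflow = TektonCompiler().prepare_workflow(dict_demo)
    print(workflow['kind'], workflow['metadata']['name'])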
Jenna Ritten fc7b05e107
Update README.md typos. (#756)
* Update README.md typos.

Signed-off-by: Jenna Ritten <jritten@ibm.com>

* Update README.md typos.

Signed-off-by: Jenna Ritten <jritten@ibm.com>

* Update README.md

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>

* Apply suggestions from code review

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2021-10-22 10:17:03 -07:00
Yihong Wang 478df66d43
[test] use emptyDir for mysql and minio (#759)
for testing, no persistent storage is needed.
Use `emptyDir:{}` for minio and mysql

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-10-22 09:08:03 -07:00
Prashant Sharma 3f13201efe
Proposal: Adding ScrapCodes as approver. (#754)
Some of the recent PRs.

#750 #709 #706 #633 #636 #638 #646
2021-10-20 14:57:37 -07:00
Yihong Wang d3ff4acc46
[test] update build-image-dind.sh (#758)
listing images shouldn't fail the build. update
the script to ignore the return value of those
commands.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-10-20 10:12:14 -07:00
Prashant Sharma 0ba4831d5d
Remove noninlined files for those tests, it is not required. Added a CI check. (#755)
* Remove noninlined files for those tests, it is not required. Added a CI check.

* added the check to CI

* fixed the build

* fixed the build
2021-10-19 22:02:14 -07:00
Tommy Li f2fb08477c
Add artifact mid path to big data passing (#751)
* add artifact mid path to big data passing

* add artifact mid path to big data passing

* address comments
2021-10-19 04:04:33 -07:00
Tommy Li e2468832da
Update install readme with KF 1.4.0 deployment (#752)
* Update install readme with KF 1.4.0 deployment

* Update kfp_tekton_install.md
2021-10-15 10:42:37 -07:00
Prashant Sharma cb7abf1535
Support data passing pipeline. (#750) 2021-10-14 09:44:08 -07:00
Gang Pu 01ed23392b
Add doc for break feature for loop (#749)
* Add doc for break feature for loop

* Fix typos

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2021-10-14 09:12:08 -07:00
Shrey Shrivastava e9749dc2de
Install links update (#741)
* updated troubleshooting links in Installation readme

* updated install links

* Update guides/kfp_tekton_install.md

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>
2021-10-12 08:27:44 -07:00
Tommy Li eb44737195
Enable strip eof and archive logs as default (#747) 2021-10-08 23:06:43 -07:00
Tommy Li 04ce4b08fa
Relax kfp sdk dependencies (#746)
* relax kfp sdk dependencies

* update readme version reference
2021-10-08 17:49:43 -07:00
Yihong Wang c835d1470b
[test] fix variable overriding (#743)
rename the variable in the script to avoid clashing with the variable in
the build.properties
also allow more time for the kfp-tekton deployment to come up

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-10-04 12:40:17 -07:00
Michalina Kotwica d15acfd472
fix(sdk) orig_params being overwritten in custom-tasks (#742) 2021-10-04 09:33:50 -07:00
huixa b5c907d380
[Only review] Fix #733 the pipeline parameter consumed as file (#740) 2021-09-29 15:36:10 -07:00
Michalina Kotwica a9684171c1
fix(sdk) sanitize paths in artifact_items. Fixes #738 (#739)
* sanitize paths in artifact_items

* update tests
2021-09-27 01:36:40 -07:00
Tommy Li 418528496b
Optimize kfp-tekton SDK compile time (#736)
* optimize kfp-tekton compile time

* fix lint
2021-09-22 18:42:35 -07:00
Tommy Li 043d734147
Fix(sdk): chain nested conditions to work on non-dependable condition tasks. (#732)
* chain nested conditions

* add missing license

* add dependency check to remove unnecessary when expression

* minor improvement to check for the exact condition rather than just the condition task
2021-09-15 15:08:55 -07:00
Yihong Wang 1e526f9141
[test] update sample for katib (#730)
Update pip packages and remove the usage of
`dsl.ContainerOp`.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-09-13 17:32:31 -07:00
Tommy Li 5daa744199
Remove Argo template UI reference to avoid UX conflicts with v2 compatible mode (#723)
* remove argo template reference

* address the suggestions
2021-09-01 12:17:31 -07:00
Christian Kadner 5d3c041586
Update SDK versions (#724)
* Reference to `kfp` SDK from 1.7.1 to 1.7.2 in sdk/python/README.md
* PyPI distro comment in sdk/python/setup.py

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-08-31 21:10:44 -07:00
Animesh Singh 264eb0240e
Update reference to 1.0 release (#722) 2021-08-31 11:15:44 -07:00
Tommy Li 8caa81e5a4
Update CI pipeline with new dependency flow (#721) 2021-08-30 23:09:03 -07:00
Tommy Li 1fc59e636b
Fix(sdk): fix KFP 1.7.2 SDK dependency (#719)
* skip license check due to go vanity website is down

* regenerate sdk dependency

* skip copy from generated license

* add back license to verify
2021-08-30 19:06:03 -07:00
Tommy Li e291986106
Update KFP SDK version to 1.7.2, create KFP-TEKTON 1.0.0 release (#718)
* update KFP SDK version to 1.7.2, create KFP-TEKTON 1.0.0 release

* update standalone deployment readme
2021-08-30 17:23:03 -07:00
kfp-tekton-bot 0708a5699b
KFP 1.7.0 Rebase (#713)
* KFP 1.7.0-rc.4 Rebase

* Resolve API and Backend conflicts

* Apply 1.7.0 changes since rc.4

* Revert unwanted SDK changes

* Revert unwanted samples changes

* Revert unwanted components changes

* Fix 1.7.0 backend conflicts

* Revert unwanted SDK changes (2)

* Fix SDK bugs in backend/Dockerfile

* Fix frontend conflicts
2021-08-30 15:02:25 -07:00
Animesh Singh 17ddecab15
name consistency (#717)
refer to the project as Kubeflow Pipelines on Tekton, plus rearrange a bit
2021-08-27 16:15:22 -07:00
Christian Kadner 9938d93271
Update KFP Rebase guide (#716)
* Exclude files/folders that should not get rebased
* Add paragraph on finding undesired code changes
* Add reference to previous rebase PRs for guidance

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-08-26 17:14:05 -07:00
Tommy Li 225877d49e
fix(samples): Fix e2e-mnist sample for kfp-tekton (#714)
* fix e2e-mnist sample for kfp-tekton

* address comments

* update comment for adding auth token
2021-08-25 17:27:04 -07:00
Christian Kadner 1e57f41886
Add KFP Rebase Guide (#712)
* Add KFP Rebase Guide

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>

* Remove Troubleshooting section

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-08-25 12:49:46 -07:00
Prashant Sharma cdbb588c52
feat(sdk): Update SDK to support custom task timeout on Tekton 0.27 (#709) 2021-08-23 17:52:39 -07:00
Tommy Li 386ad234a4
Remove containerOp for pipelineparam and raw input samples (#708)
* remove containerOp for pipelineparam and raw input samples

* fix-typo
2021-08-23 14:13:39 -07:00
Tommy Li 9fbccd6276
Fix(sdk): Tekton copy result step sanitization (#711) 2021-08-23 13:51:39 -07:00
Prashant Sharma 3cb7187d8c
feat(pipelineloop): Upgrade Tekton to 0.27 for pipelineloop controller. (#703) (#706)
* Upgrade Tekton to 0.27 for pipelineloop controller. (#703)

* Upgrade Tekton to 0.27 for pipelineloop controller. (#703)

* Update pipelinelooprun.go
2021-08-23 10:43:57 -07:00
Tommy Li b5fe66cd9c
Make results store in multiple steps if needed (#702)
* make results store in multiple steps if needed

* add warning messages on oversized parameter

* regenerate tests

* fix copy-results format

* optimize the bin packing problem

* add docs and warning test cases
2021-08-20 10:55:54 -07:00
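A sketch of the packing idea behind #702: Tekton caps a step's total result size, so oversized result sets are split across extra copy steps. First-fit-decreasing is one simple strategy (illustrative, not necessarily the compiler's exact algorithm, and the 4096-byte limit is an assumption):

    def pack_results(results, limit=4096):
        """Group (name, size) results into steps that each fit under limit."""
        steps = []  # each entry: [bytes_used, [result names]]
        for name, size in sorted(results, key=lambda r: -r[1]):
            for step in steps:
                if step[0] + size <= limit:   # first step with room wins
                    step[0] += size
                    step[1].append(name)
                    break
            else:
                steps.append([size, [name]])  # open a new copy step
        return [names for _, names in steps]

    print(pack_results([('a', 3000), ('b', 2000), ('c', 1500)]))
    # [['a'], ['b', 'c']] -- the results are copied in two steps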
David Wang 0f6fd191b0
define cancel patch as global var and initialize it at package init (#707) 2021-08-20 00:36:54 -07:00
Tommy Li 1482b8e2ad
Move kfp sdk dependency to 1.7.1 (#705) 2021-08-19 11:16:54 -07:00
Tommy Li 1c7e70eb87
Upgrade Tekton to 0.27 (#700) 2021-08-16 16:03:41 -07:00
Tommy Li fba9ee2241
Update kfp sdk dependency to 1.7 (#699) 2021-08-13 11:53:04 -07:00
Gang Pu 0ff4a8d08b
Fit unittest for loop (#698) 2021-08-13 09:08:55 -07:00
Tommy Li 87d12ce04e
Fix e2e test case (#696)
* fix e2e test case

* fix e2e test case
2021-08-12 15:10:54 -07:00
Tommy Li 8d8d2b3c6d
Migrate volume and loop unit tests to component.yaml and verify with v2 (#692)
* migrate unit tests to component.yaml and verify with v2

* update readme to remove volumesnapshot

* address comments
2021-08-10 13:21:25 -07:00
Yihong Wang 9d4643bd66
remove ContainerOp from test cases - part 3 (#693)
eliminate the usage of ContainerOp in test cases and
update them accordingly, covering filenames starting
with `o` through `u`

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-08-10 10:23:25 -07:00
Yihong Wang 6c7c5d24fe
remove ContainerOp from test cases - part 2 (#691)
eliminate the usage of ContainerOp in test cases and
update them accordingly.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-08-09 17:10:27 -07:00
Tommy Li 37d0021580
Add kfp name and id example (#690)
* add kfp name and id example

* address comments

* fix minor wording
2021-08-06 10:23:43 -07:00
Yihong Wang d463f16450
remove ContainerOp from test cases - part 1 (#685)
eliminate the usage of ContainerOp in test cases and
update them accordingly.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-08-05 11:58:42 -07:00
Tommy Li 58b01f9281
Enhance unit tests with local Tekton pipeline spec validation (#689) 2021-08-04 18:21:18 -07:00
Michalina Kotwica 982ad5565d
fix(sdk): add test for taskRef-vs-taskSpec params (#688)
* add test for taskRef-vs-taskSpec params

* fix: taskSpec accepting params

* add license headers

* m: no spaces around "buzz"

* fix: test file custom_task_spec.yaml

* m: follow links on newlines

* fix: big_data_passing test result yamls

* fix: yamls of other tests

* factor out conditions to process params

* fix big data rewrite for regular tasks
2021-08-03 15:01:41 -07:00
Tommy Li 186a9e301f
Update code to limit loop resource group name (#686)
* update code to limit loop resource group name

* add license
2021-08-02 19:57:11 -07:00
Christian Kadner ba1a9d52dd
Update Argo link (#684)
Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-07-30 12:59:36 -07:00
Tommy Li 008855a3f0
Update custom task to work with v2 compiler (#680)
* update custom task to work with v2 compiler

* fix broken links
2021-07-28 20:22:19 -07:00
Gang Pu 18c9cc8bcc
Change to use component to replace containerOp for any sequencer (#675)
* Change to use component to replace containerOp for any sequencer

* Remove context input and change results path

* 1: Update to use component rather than containerOp
2: Fix lint/license issue
2021-07-28 19:39:19 -07:00
Gang Pu afc8460ef1
Fix broken link (#681) 2021-07-28 08:51:45 -07:00
Andrew Butler 5aa55a5eee
Add check for missing pod in cache (#679) 2021-07-26 16:37:11 -07:00
Tommy Li 2ddcec757e
Convert loop and lightweight example to component yaml (#677)
* convert loop and lightweight example to component yaml

* Update python values to float
2021-07-26 16:36:11 -07:00
Yihong Wang d9496dfc6a
remove ContainerOp in some samples (#676)
update flip-coin, nested-pipeline and trusted-ai

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-07-26 13:04:11 -07:00
Tommy Li df63b7853a
Fix merging issue from #512 (#674) 2021-07-22 12:42:06 -07:00
Tommy Li a90d7a348c
Convert user custom task from containerOp to component yaml (#671) 2021-07-21 18:56:25 -07:00
Tommy Li e1b2d20480
Update sdk and release tags to 0.9.0 (#667)
* update sdk and release tags to 0.9.0

* regenerate tests

* update kubeclient source version

* generate new deployment manifests
2021-07-21 16:33:25 -07:00
Tommy Li d9b8539b0d
feature(backend): Allow KFP apiserver to create recursion pipelineLoop CRs for the users (#512)
* add auto apply custom task CRs before pipelineRun creation

* fix lint

* store loop resource in ordered dict

* Update kfp-admin-guide.md

* sorted pipeline params to avoid randomness

* sorted pipeline params to avoid randomness

* regenerate tests

* convert template string from yaml to json

* regenerate tests

* clean up unnecessary helper function

* resolve merge conflicts

* resolve merge conflicts

* remove unnecessary go mod package

* only opt-in to embed the recursion loop annotations

* only opt-in to embed the recursion loop annotations

* enable opt-in only via package variable

* add example for how to inline recursive loops

* resolve conflicts

* regenerate tests

* make auto-generating and applying recursive pipelineloops in annotations the default

* move parameter sorting to the end

* add parameter sorting for recursion loops

* sort recursive task parameters

* add more details on how to opt-out auto apply custom resource
2021-07-21 00:26:50 -07:00
Prashant Sharma f1f1a1cae6
fix(pipelineloop): sort params to fix flakiness in tests. (#669) 2021-07-19 09:18:17 -07:00
Tommy Li 1a29e1dedd
Regenerate tests for #665 (#666) 2021-07-14 19:24:50 -07:00
Tommy Li 42ee2fa20f
Update loop instructions to use inline spec (#658) 2021-07-14 19:21:51 -07:00
Gang Pu 592b5d2205
Fix the yaml dump issue of boolean value (#665)
* Fix the yaml dump issue of boolean value

* update conditions_with_global_params.py file
2021-07-12 22:12:17 -07:00
Prashant Sharma 9d79f8438c
feat(sdk): Add pipelineloop validation for testdata yamls. (#660) 2021-07-08 10:53:49 -07:00
Prashant Sharma 0ad68be3b3
fix(pipelineloop): restored pipelineloop conditions test. (#663) 2021-07-08 10:07:49 -07:00
Prashant Sharma bd2f5f669e
fix(sdk): sort params to fix flaky tests. (#662) 2021-07-08 06:35:49 -07:00
Tommy Li d80fbedada
Make default terminate API configurable to support graceful termination (#631)
* make terminate API status configurable

* make terminate API status configurable
2021-07-06 19:44:21 -07:00
Prashant Sharma da32ba6404
Add Prashant(scrapcodes) to reviewers (#655) 2021-07-06 12:34:20 -07:00
Michalina Kotwica 95a961c2d1
fix(sdk): loop argument jsonified, not stringified (#657)
Signed-off-by: Michalina Kotwica <michalina.kotwica.ibm@gmail.com>
2021-07-06 09:32:47 -07:00
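The distinction the fix in #657 rests on, in two lines: `str()` on a Python list yields single-quoted items, which cannot be parsed as JSON, while `json.dumps` produces valid JSON:

    import json

    items = [{'a': 1}, {'a': 2}]
    print(str(items))         # [{'a': 1}, {'a': 2}]  -- not valid JSON
    print(json.dumps(items))  # [{"a": 1}, {"a": 2}]  -- what the loop CR needs
    json.loads(json.dumps(items))  # round-trips cleanly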
Jin Chi He 77963c3cea
Enhance Any Sequencer to handle cases where some tasks are skipped (#653) 2021-07-05 01:25:51 -07:00
Tommy Li 54e8aaf59f
Fix argo redirect link in the docs (#654) 2021-07-02 17:32:49 -07:00
Prashant Sharma dbd92cb7ec
feat(sdk): inline user defined custom tasks (#636) 2021-07-02 10:26:11 -07:00
Prashant Sharma 3611c44b81
Fix: go tests for pipelineLoop controller. (#646)
* Fix: go tests for pipelineLoop controller.

* Skip adding timeout, if custom task.
2021-07-02 09:49:10 -07:00
Gang Pu 791fe5304a
Fix the issue when loop/graph inside a graph (#650) 2021-07-01 17:09:10 -07:00
Tommy Li 2cfc78da65
Remove runid labels from pipelineloop's child pipelinerun (#648) 2021-06-30 19:44:23 -07:00
Animesh Singh 3163801e6b
add mlx-reference (#649) 2021-06-30 18:00:42 -07:00
Prashant Sharma 64d4324f2b
feat(sdk): Sdk ParallelFor task spec inline (#633)
* TODO1: PipelineLoop inlined including nested loops and added tests.

* Supported recursive and parallel tasks inlining.

    For recursive tasks, we simply skip them.
    For Parallel tasks, relevant tests were added.

* Run all tests against inlined taskSpec and normal taskRefs

* Added CLI Flag disable/enable spec inlining.
2021-06-29 19:07:36 -07:00
Gang Pu 2fc23fa87d
Add Gang Pu as approver (#645) 2021-06-29 11:46:35 -07:00
Jin Chi He 8bfe60d0f1
Add jinchihe in the OWNER list (#643) 2021-06-29 11:44:36 -07:00
Tommy Li 6d16a95e89
Update manifests and standalone deployment to 0.9.0-rc0 (#635) 2021-06-28 16:20:45 -07:00
Prashant Sharma 12acff8af5
fix: PipelineLoop nested validation logic fixed. (#638)
* PipelineLoop nested validation logic fixed.

It was missing enablement of feature-flag enable-custom-task.
Also added nested task validation.

* added an example.

* add feature flag to reconciler context

* Added test, passing

Co-authored-by: tomcli <tommy.chaoping.li@ibm.com>
2021-06-28 10:12:15 -07:00
Jin Chi He da87237b4d
Support custom image for any Seq (#642) 2021-06-28 02:19:14 -07:00
Jin Chi He ad25492212
Enhance Any Sequencer (#641) 2021-06-27 23:46:14 -07:00
Tommy Li f76951e34f
Update Github Action PR trigger (#632) 2021-06-22 12:28:31 -07:00
Tommy Li aae4671e7b
Fix nested recursion with condition custom task and parameter mapping (#625)
* fix nested recursion with condition custom task and parameter mapping

* fix alpine commands and add licenses
2021-06-18 14:56:14 -07:00
Prashant Sharma 078c7428d2
fix(pipelineLoop): Update to latest tekton release v0.25.0. (#623) 2021-06-18 13:54:14 -07:00
Tommy Li 67c6d43d13
Migrate Travis tests to Github Actions (#619)
* add init github actions

* Remove old comments

* Update test_kfp_samples.sh

* add condition for venv
2021-06-18 12:00:14 -07:00
Tommy Li 15918434c4
Make kustomize patch backward compatible with v3.2 (#629) 2021-06-17 11:06:14 -07:00
Jin Chi He 39ba87c832
Add original PipelineRun name (#628) 2021-06-17 01:51:28 -07:00
Tommy Li f444a31bea
Update tekton client and deployment to 0.25.0 (#626) 2021-06-16 20:22:28 -07:00
Prashant Sharma 87dd3a728d
fix(scripts): deploy-ibm-vpc.sh public gateway detection. (#622)
A VPC may have more than one public gateway, and they may be in
different cluster zones. So now we filter on cluster_zone and
pick the first public gateway.
2021-06-16 09:12:10 -07:00
Abiola Asojo 22354b364a
doc: Update README.md typos (#616)
* Update README.md

* Update README.md
2021-06-14 15:33:37 -07:00
Prashant Sharma b507dca6d6
feat(pipelineLoop): Create a validation CLI for commonly used Tekton custom task CRDs (#614) 2021-06-14 10:16:37 -07:00
Christian Kadner 5a3ef4d6fc
Upgrade Python SDK to kfp==1.6.3 (#617)
Resolves #595

Signed-off-by: Christian Kadner <ckadner@us.ibm.com>
2021-06-14 10:03:37 -07:00
Yihong Wang be2f5b97e7
[test] Add pipeline-loop flag (#613)
Add a flag to turn on/off the pipeline-loop e2e test case.
By default, the pipeline-loop test case is turned on. Use
`skip-pipeline-loop:1` to turn it off.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-06-08 06:51:46 -07:00
Tommy Li af9f03d000
fix(sdk): Fix nested recursion runafter and param mapping; (#609) 2021-06-03 20:07:37 -07:00
Prashant Sharma 052726a2f0
feat(pipelineloop) : Support latest tektoncd pipeline version. (#602)
* Support latest tektoncd pipeline version.

* update readme for support latest tektoncd pipeline version.
2021-06-02 08:13:02 -07:00
Gang Pu 7c42346afa
Change status for successful from "Succeeded" to "Completed" (#604) 2021-06-01 09:10:04 -07:00
Gang Pu a033f33b30
Fix and verify nested recursive workflow (#600)
* Fix and verify nested recursive workflow

* Fix unit_test error

* Update compiler.py

* Try to fix travis error

* Refresh yaml

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2021-06-01 00:25:04 -07:00
Gang Pu 9a83eb9a43
Support expose status for any sequencer(backend) (#597) 2021-05-24 19:29:08 -07:00
Tommy Li 5b29a7f1b5
Add Custom Task Docs and Usage (#588)
* add initial custom task docs

* remove regex

* add custom task condition benefits

* add toc

* Apply suggestions from code review

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>

* add hyperlinks

* add descriptions for custom tasks

* Update advanced_user_guide.md

* Update README.md

* Update advanced_user_guide.md

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>
2021-05-24 13:15:08 -07:00
Gang Pu c05fc4bbeb
Support expose status for any sequencer(DSL side) (#598) 2021-05-24 03:55:32 -07:00
Gang Pu 0ebcd7e1b3
Permit pipeline-runner to operate on runs (#596) 2021-05-22 14:36:30 -07:00
Tommy Li 73d5445482
Add support to generate custom task with custom task cr (#593) 2021-05-22 02:53:30 -07:00
Gang Pu 95557c6d36
Support parallelism feature on DSL side (#592) 2021-05-20 00:51:46 -07:00
Gang Pu 7a6423edbc
Add Parallelism support on backend. (#589)
- Parallelism defaults to 1
- if Parallelism is bigger than the total number of iterations, start them all
- otherwise start as many pipelineruns as the Parallelism value
2021-05-18 22:52:42 -07:00
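The scheduling rule in the three bullets above reduces to one expression (plain-Python illustration of the controller behavior):

    def pipelineruns_to_start(total_iterations, parallelism=1):
        # Defaults to 1; if parallelism exceeds the iteration count, start
        # everything; otherwise start `parallelism` runs and top up as
        # earlier iterations finish.
        return min(parallelism, total_iterations)

    assert pipelineruns_to_start(10) == 1
    assert pipelineruns_to_start(3, parallelism=5) == 3
    assert pipelineruns_to_start(10, parallelism=4) == 4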
Tommy Li 1bf5aa5d13
Fix custom task condition I/O behaviors on recursion and for loop (#587)
* fix custom task I/O behaviors on recursion and while loop

* Fix comment typo
2021-05-15 02:00:29 -07:00
Yihong Wang 4ae035e7f6
[test] Add trusted-ai into e2e testing (#586)
Add trusted-ai into the e2e testing and a flag (extra-test-cases)
to enable it. Also refactor the e2e-test task to support more test cases.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-05-13 10:47:27 -07:00
Jin Chi He 8c22f682dc
Enhance Any Sequencer to support watching run (#583) 2021-05-07 06:31:06 -07:00
Tommy Li 4c09f46de6
Fix /logs api and add tests (#582) 2021-05-06 16:33:06 -07:00
Tommy Li c1b4d84809
Add instructions on upgrade kfp-tekton multi-users and fix new multi-users permission (#581)
* add instructions on upgrading kfp-tekton and fix multi-user permissions

* Apply suggestions from code review

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>

* Fix markdown links after suggestions

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>
2021-05-06 14:06:06 -07:00
Tommy Li 633848134d
Sanitize Tekton result parameters when passing to custom task inputs (#579) 2021-05-06 12:07:06 -07:00
Andrew Butler de00ccadef
Add taskSpec check to account for custom tasks without a taskSpec (#576) 2021-05-04 16:27:05 -07:00
Andrew Butler 8b289ceba4
fix(ui, backend) Add pipelineRun to cache to assist in finding artifact data (#574)
* Add pipelineRun to cache for finding artifact data

* Remove extra container and account for custom tasks in frontend

* Apply suggestions from code review

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2021-05-03 13:01:04 -07:00
Prashant Sharma 26eaee2eb6
feat(script): Deploy kubeflow to IBM Cloud IKS vpc-gen2, using this script. (#500)
* Deploy to IBM Cloud IKS vpc-gen2, using this script.

* added user guide

* Added kf deploy.

* Fixed bugs while testing.

* Based on feedback, fixed the script. Improved documentation to clarify the behaviour.

* Changed the handling of config file to be per cluster/vpc.

* Added an option to choose, whether to deploy kubeflow or not.

* Improved the script to wait for the cluster delete and improved UX.
Updated README.md accordingly.

* Suppressing errors for cluster resources delete.

* Review feedback.

* Review feedback.
2021-04-30 10:01:31 -07:00
Tommy Li 8db274ada2
Update release manifests and openshift docs (#575)
* update release manifests and openshift docs

* update release manifests and openshift docs

* add optional kustomization.yaml
2021-04-29 15:46:30 -07:00
Tommy Li 76e249d4d9
Enhance error messages for resource op and artifact tracking (#572)
* enhance error messages

* update logs

* update logs
2021-04-29 14:53:30 -07:00
Christian Kadner 3179556504
Add bagdes to SDK Readme (#573) 2021-04-29 14:42:30 -07:00
Christian Kadner 649152cddd
Regenerate E2E test logs for release 0.8.0 (#566)
* Regenerate E2E test logs for release 0.8.0

* Regenerate "golden" log files

* Regenerate "golden" logs after PR #567

* Update list of ignored tests

* Add cond_recur.yaml to list of ignored tests

* Regenerate E2E logs after updating "golden" YAML files of unit tests

* Regenerate E2E logs after enabling auto-strip EOF

* Regenerate "golden" YAML for unit tests

* Rename loop-static CRDs

* Regenerate E2E logs, enable EOF stripping only for E2E test
2021-04-29 13:00:30 -07:00
Yihong Wang 6672261f76
[test] move kubeclient test case (#571)
move kubeclient test case to e2e toolchain pipelines
2021-04-28 12:34:29 -07:00
Yihong Wang bfa869c179
[test] add more test cases to toolchain pipelines (#568)
Add pipelineloop-controller, pipelineloop-webhook and
anysequencer to the toolchain pipelines
2021-04-26 10:20:46 -07:00
Tommy Li a9d49bca81
Clean up and revert cache image env on manifests (#567) 2021-04-23 16:18:44 -07:00
Gang Pu 23e4acc79c
Apply to be reviewer (#565) 2021-04-23 11:49:43 -07:00
Gang Pu 98b356cbb1
Fix the issue when there's a conditional loop (#562) 2021-04-23 09:44:43 -07:00
Andrew Butler 1d5582fae7
(ui) Update any-sequencer dependency parsing (#564)
* Update any-sequencer dependency parsing

* Update formatting

* Add checks for tasks that have not ran yet
2021-04-23 09:36:43 -07:00
Tommy Li 8f2d07b5e3
Fix broken Kubeflow doc links to map with the KF 1.3 website (#563) 2021-04-22 12:24:42 -07:00
Gang Pu adc87507ee
Fix multiple any-sequencer issues (#557)
2: Fix the issue when the task list is not provided.
3: Fix the issue when multiple results are provided
4: Update the document
2021-04-22 10:29:43 -07:00
Tommy Li a68b6c172e
Create release 0.8.0 standalone install (#559) 2021-04-22 02:39:42 -07:00
Yihong Wang cf5910259c
Update katib sample (#560)
Update the notebook to use newer packages and image.
Also add instructions for multi-user environment.
2021-04-21 17:02:42 -07:00
kfp-tekton-bot 1e91649958
KFP 1.5.0 Rebase (#555)
* Merge KFP changes between 1.5.0-rc.2 and 1.5.0

Resolves #554

* Resolve backend and manifest conflicts

* Resolve frontend conflicts
2021-04-21 12:06:42 -07:00
Tommy Li e68e03e6c3
Fix recursive loop bug with no sub_group.ops (#556) 2021-04-21 06:15:41 -07:00
Tommy Li 103ae3b79b
Fix long param compile errors (#549) 2021-04-20 10:13:41 -07:00
Gang Pu 8bbcd26a53
Adjust the any sequencer example to make it more reasonable (#550) 2021-04-20 09:29:40 -07:00
Gang Pu ae56385ab2
Support the simple condition for any sequencer (#539) 2021-04-19 06:32:16 -07:00
Tommy Li 0f1147e55e
Support Tekton PipelineRunStopping status (#545) 2021-04-16 08:30:14 -07:00
Andrew Butler bf46e8edd1
(ui) Update UI to render custom tasks (#544)
* Add parsing for custom tasks

* Account for empty custom task runs and add formatting

* Add check for pipeline entirely made of custom tasks
2021-04-15 15:57:13 -07:00
Tommy Li e3c3013891
Fix viper flags and labels/annotations injections on custom task (#537) 2021-04-15 14:44:13 -07:00
Peng Li 7dd6e00374
Add extra label to sub pr (#543) 2021-04-15 05:53:12 -07:00
Michalina Kotwica b370cbd1bc
fix(sdk): fix artifacts of ops with long names (#541)
* add test for artifacts of ops with long names

* add test on correct artifact_items

* fix: stepTemplate generated

* fix: artifact_items name collision

* refactor: mount_path to a separate variable

* add license to the new test file

* m: two free lines after imports in testcase

* m: other linting
2021-04-14 12:29:12 -07:00
Yihong Wang 24c2e5bcac
[test] Remove tekton installation task (#538)
the kfp-tekton install manifest already includes
tekton; therefore, remove the tekton deploy/undeploy tasks.
also add tasks to build the cache server and frontend
2021-04-13 16:19:11 -07:00
Tommy Li 6adfcc64f3
feature(sdk): Optimize unnecessary condition task when using User definable CEL Custom task (#478)
* add cel custom task poc

* apply suggestions with minor fixes

* add initial version of plugable cel template

* fix lint

* update staging code for converting containerOp to custom task

* update staging code for converting containerOp to custom task

* remove duplicate cel conditions

* update flip-coin example to use custom task

* update flip-coin example to use custom task

* remove dsl dependencies;

* add POC comments

* fix lint

* fix self-defined CEL custom task mapping

* move custom task condition example to a separate folder

* fix tests

* make the new custom task spec and CEL condition feature as opt-in

* update instructions and generalize custom task images

* only let custom task result comparison skip the condition container, to provide backward compatibility

* only let custom task result comparison skip the condition container, to provide backward compatibility

* update example

* update tests

* fix comment
2021-04-13 09:43:11 -07:00
Tommy Li d7bd9b7144
add manifest for 0.8.0-rc0 (#531)
* add manifest for 0.8.0-rc0

* fix document links

* remove knative conflicted crd
2021-04-09 12:56:04 -07:00
Yihong Wang 600aa89bc8
[test] fix tekton-catalog test script (#533)
update the script to adopt a change in
`open-toolchain/tekton-catalog` repo
2021-04-09 11:05:03 -07:00
kfp-tekton-bot 4fcabb8390
KFP 1.5.0-rc.2 Rebase (#513)
* KFP 1.5.0-rc.0 Rebase

* Resolve backend and API conflicts

* Resolve UI conflicts

* Apply changes from KFP 1.5.0-rc.1

* Resolve backend and API conflicts from RC1

* Resolve UI conflicts from RC1

* Apply changes from KFP 1.5.0-rc.2

* Resolve backend conflicts from RC2

* Build SDK based on kfp RC2 from Github instead of PyPI

* Regenerate unittest's Golden YAML files
2021-04-08 16:49:03 -07:00
Tommy Li b74d4a5cbf
fix(sdk): Fix regex parsing for custom task to handle multiple pipeline params (#528)
* fix regex parsing for custom task

* fix lint
2021-04-07 18:48:02 -07:00
Tommy Li 48ac3bce5b
feature(sdk): Add custom task definition support (#526)
* add custom task template support

* fix typo

* fix lint

* update cel example to use official custom task

* add troubleshooting for example
2021-04-06 18:53:02 -07:00
Feng Li 8d5d584cb7
Fix(sdk): Fix nested loop params issue, and ensure multi nested loop works well (#525)
* fix nested loop params issue, and ensure multi nested loop works well

* address the issue of ut failed on python3.6

* Trigger CI

* address the issue of ut failed on python3.6
2021-04-06 16:10:01 -07:00
Tommy Li b55bea07e3
Clean up sdk (#524) 2021-04-01 10:40:19 -07:00
Feng Li 7fa902bff0
Make pipelineloop test cases work well after upgrade to tekton-v0.20 (#523) 2021-04-01 10:11:19 -07:00
Tommy Li 991d7dcf16
fix(sdk): Fix long resource name (#521)
* fix long resource name;

* fix python 3.6 test
2021-03-31 20:21:19 -07:00
Andrew Butler f522706dc3
Add single-user cluster role binding for Trusted AI sample (#520) 2021-03-30 17:48:18 -07:00
Gang Pu 504dc25c63
Support graph and recursion for tekton backend (#515)
* 1: Support graph dsl for tekton backend
2: Support recursive graph for tekton backend

* Make check license and lint pass
2021-03-30 02:13:17 -07:00
Gang Pu 79b972f447
Roll back the default_flow_style to fix the double quote issue (#517) 2021-03-30 02:10:17 -07:00
Andrew Butler 4b5230b694
Add automated labels and annotations for pipeline caching (#507)
* Add disable-cache flag and default caching for compiler

* Add updated testdata and features doc

* Update docs

* Add task level cache enabling

* Remove disable-cache flag and add warning

* Update docs

* fix typo

* Overwrite pipeline level label with task level label and add examples

* Update imports

* update format

* Update features docs
2021-03-24 11:07:45 -07:00
Tommy Li cdda44e0c3
Fix broken links from kubeflow website (#510) 2021-03-24 10:25:45 -07:00
Tommy Li 16db79be28
Fix any sequencer typo (#503)
* fix any sequencer typo

* Update sdk/FEATURES.md

Co-authored-by: Animesh Singh <singhan@us.ibm.com>

* update any_sequencer location

* update test

* rephrase code comments

* fix lint

* fix lint

* rephrase wording

Co-authored-by: Animesh Singh <singhan@us.ibm.com>
2021-03-17 17:21:16 -07:00
Gang Pu 05e6f4aba5
Enhance AnySequencer to make it an op (#499) 2021-03-16 19:24:16 -07:00
Peng Li f838e5ce24
Upgrade tekton version to v0.20.0 for pipeline-loop (#502) 2021-03-16 17:38:15 -07:00
Andrew Butler 631d7766ff
(ui) Fix duration of runs not showing (#495) 2021-03-11 13:53:24 -08:00
Gang Pu 7c1e4ec54e
Fix the copy result issue when it's already tekton result path (#494) 2021-03-10 20:34:24 -08:00
Feng Li 19c779b378
fix(sdk): fix issue of nested dict param defined (#491) 2021-03-10 18:14:24 -08:00
Tommy Li 440cc3d042
Add unique uuid to loop resources for pipeline versioning (#485)
* add uuid to loop resources for versioning

* add back license

* add uuid comment

* stage nested loop changes

* fix dependencies mapping

* fix name matching

* add back license

* remove test files

* fix previous typo filename

* Update compiler.py

* regenerate test yaml
2021-03-08 12:44:23 -08:00
Tommy Li 6b1e02a46b
add configuration for tekton pipeline spec (#487)
* add configuration for tekton pipeline spec

* sort annotation order
2021-03-05 17:20:49 -08:00
Tommy Li d73cafbab5
Add nested loops example (#484)
* add nested loops example

* update sample to generate unique loop cr

* Update wording for tekton backend

* update link to the ibm tekton kfdef
2021-03-05 16:54:49 -08:00
Christian Kadner af861733e6
Update E2E tests to use kfp client (#301) (#480)
* Use kfp client to run E2E tests

* Update Tekton server and tkn client versions

* Regenerate pipelinerun logs

* Give unique names to pipelineruns resembling file names

* Delete previous pipelineruns

* Mark tests as skipped instead of not generating them

* Regenerate pipeline-loop CRDs, update copyright statements

   for f in sdk/python/tests/compiler/testdata/*_pipelineloop_cr*.yaml; do
     echo ${f/_pipelineloop_cr*.yaml/.py};
   done | sort -u | while read f; do
     echo $f; dsl-compile-tekton --py $f --output ${f/.py/.yaml};
   done;
   make unit_test GENERATE_GOLDEN_YAML=True
2021-03-05 15:02:49 -08:00
Prashant Sharma 4ebca9d502
fix: update the api to latest kubeflow pipelines for katib sample [Fixes #467] (#468)
* fix: update the api to latest kubeflow pipelines for katib sample [Fixes #467]

* Interesting set of changes seemed to fix that error.

* added return statements.

* Looks like a bug in compiler forces us to do this.

* move contrib/katib to sample/katib.

* review feedback

* Remove mpi operator based samples as the operator does not come installed by default.

* resolve k8s client version conflict by rearranging the deps in notebook.

* Contains all the outputs of a successfully executed notebook.
2021-03-03 13:33:48 -08:00
Prashant Sharma df59ee4034
fix: Migrate away from old katib api to ResourceOp. (#483)
* fix: Migrate away from old katib api to ResourceOp.

* added success and failure condition.
2021-03-02 09:19:48 -08:00
Tommy Li 9e36c9d316
Add 0.7.0 release template (#479)
* add 0.7.0 release template

* remove manifest headers to be consistent

* remove extra empty line

* update the missing version to 0.7.0

* move custom controller to tekton-pipelines for consistency

* generate kfp 1.4.0 yaml
2021-03-01 10:57:25 -08:00
kfp-tekton-bot ede6750224
KFP 1.4.0 Rebase (#481)
* KFP 1.4.0 Rebase

Resolves #469

* Remove duplicated KFP samples

* Resolve api and backend and SDK test conflicts

* Upgrade kfp requirements to 1.4.0

* Update testdata regenerate YAML files

* Resolve CONTRIBUTING.md

* Regenerate KFP samples compile report

* Update README.md files for KFP version 1.4.0

* Resolve frontend conflicts

* Revert list-wrapped loop arguments

* Remove compiler_tests.py.rej
2021-03-01 10:21:26 -08:00
Tommy Li bd49a2da7f
Update Tekton API client to 0.21 (#476)
* test

* update tekton reference to 0.21
2021-02-19 13:07:41 -08:00
Evan Hataishi a1022cbad6
test: Add delivery pipeline scripts for tekton-catalog images. (#472)
* Add delivery pipeline scripts for tekton-catalog images

* Combine build binaries tasks and scripts into generic

* Delete secret during cleanup
2021-02-19 09:22:25 -08:00
Tommy Li e105d36bb2
Fix deprecated link (#473) 2021-02-18 17:44:24 -08:00
Christian Kadner f096b95d9f
Fix parallel_join_with_argo_vars pipeline (#470) (#471)
* Fix parallel_join_with_argo_vars pipeline

Resolves #470

* Fix broken doc links for IKS install
2021-02-16 14:29:06 -08:00
Prashant Sharma 14a1db203c
docs: Fixed the code formatting of the echo example. (#464) 2021-02-11 12:15:28 -08:00
Yihong Wang faaabf1f67
test: Send results to slack channel (#466)
Send the e2e-test results to a slack channel. The
message also includes a link to the details page of the
pipeline run.
2021-02-11 09:32:15 -08:00
Christian Kadner 8170d59d48
Remove Python 3.5 support (2) (#465)
Updating a few SDK files remaining after #463

Related #463
2021-02-10 11:02:15 -08:00
Tommy Li 624a1f803c
Remove python 3.5 support (#463) 2021-02-10 10:34:15 -08:00
Feng Li 99e506c244
feat(backend): Support loop pipelineruns of defining dictionaries params (#460) 2021-02-08 10:37:37 -08:00
Yihong Wang ac8b69741f
fix: failed to build api-server image (#462)
Some python dependency changes cause the api-server image
build to fail. Some dependencies of the kfp sdk need a higher
version than the requirements.txt in the `backend` directory pins.
Since the second stage compiles the samples using the kfp sdk,
directly install the requirements.txt from `sdk/python` instead of
`backend`.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-02-08 09:30:31 -08:00
Yihong Wang 895d5b9f51
test: Add pipeline yaml for tekton pipeline (#459)
In order to run a tekton pipeline on IKS, we need to define
listener, pipeline and task YAMLs. The pipeline YAML
defines a series of tasks to git clone, run unit tests, build images,
deploy tekton, deploy kfp-tekton, run the e2e test and undeploy.

Signed-off-by: Yihong Wang <yh.wang@ibm.com>
2021-02-04 17:10:52 -08:00
Tommy Li 35185d9f34
Add CI and makefile tests for cache server (#458) 2021-02-04 16:44:52 -08:00
Peng Li daf17b9948
Enable cache server for tekton (#424) 2021-02-03 18:06:28 -08:00
Feng Li 1f943852ae
fix(backend): Fix condition with loop together issue (#456)
* enhance the when condition UI display

* Fix condition with loop together issue
2021-02-03 18:04:28 -08:00
Yihong Wang 2986643629
test: Add IKS toolchains scripts (#450)
Add scripts to run tests and build images via `ibmcloud cr`.
Also add a Dockerfile which contains all the tools/utilities
needed by the scripts for IKS
2021-02-03 15:54:28 -08:00
Feng Li 6e1f367841
fix(backend): Fix condition with multi parameter issue (#455)
* enhance the when condition UI display

* Fix condition with multi parameter issue
2021-02-03 08:10:29 -08:00
Andrew Butler 8c04dfb259
fix(ui) Use task_display_name annotation when displaying run metrics (#451) 2021-02-02 11:10:28 -08:00
Feng Li 9b89a082bc
Fix loop issue that the loop param is from task output (#452)
* enhance the when condition UI display

* Fix loop issue that the loop param is from task output
2021-02-02 00:16:28 -08:00
Jin Chi He 71c649453e
Enhance Email notification according to feedback (#444) 2021-02-01 18:26:27 -08:00
Evan Hataishi 71c5f8ba28
Add iks deployment scripts (#435)
* Add deployment scripts

* Small fixes

* Add kustomize image edit during deploy

* Fix issues due to kustomize/kubectl compatibility

* Fix copyright year

* Remove unused script
2021-02-01 10:15:49 -08:00
Animesh Singh c5d3ad4912
fix AnySequencer syntax (#449)
* fix AnySequencer syntax

* Update tekton-catalog/any-sequencer/README.md

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2021-01-29 19:09:48 -08:00
Tommy Li 7feb3aae5d
Rephrase sidecars doc and fix some grammar (#448) 2021-01-29 18:41:48 -08:00
Animesh Singh 9e24225a9a
followon- fix loops example syntax (#446) 2021-01-29 17:15:48 -08:00
Animesh Singh 14ec21893b
fix references and syntax for Loops (#445) 2021-01-29 17:09:50 -08:00
Feng Li 3f750f12a0
feat(backend): Support dsl to define the pipeline loops via tekton (#440)
* enhance the when condition UI display

* Support dsl to define the pipeline loops via tekton
2021-01-29 14:25:48 -08:00
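
As a rough sketch of the DSL feature referenced here, assuming the v1-era kfp/kfp-tekton APIs documented by the project (this is not the PR's own code):

    import kfp.dsl as dsl
    from kfp_tekton.compiler import TektonCompiler

    @dsl.pipeline(name="loop-example")
    def loop_pipeline():
        # Fan out one echo task per item; the compiler lowers this
        # to a Tekton loop construct.
        with dsl.ParallelFor([1, 2, 3]) as item:
            dsl.ContainerOp(
                name="echo",
                image="busybox",
                command=["echo"],
                arguments=[item],
            )

    if __name__ == "__main__":
        TektonCompiler().compile(loop_pipeline, "loop_pipeline.yaml")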
Andrew Butler 43c895df0a
(ui) Fix status parser bug (#441)
* fix runtime execution graph status parser bug

* remove logging

* Fix error when condition status is missing
2021-01-29 11:47:48 -08:00
Christian Kadner 7286c839a6
Move kubernetes from SDK requirements to test requirements (#442) 2021-01-28 15:55:40 -08:00
Tommy Li 40ca3e4398
Add user helper functions for loading env from k8s secrets (#436)
* add public k8s helper functions

* fix typo
2021-01-27 17:17:07 -08:00
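
The PR's helper names are not shown in this log; as a hedged illustration of the underlying idea using the official kubernetes Python client:

    import base64
    import os
    from kubernetes import client, config

    def load_env_from_secret(name: str, namespace: str = "kubeflow") -> None:
        # Use load_kube_config() instead when running outside a cluster.
        config.load_incluster_config()
        secret = client.CoreV1Api().read_namespaced_secret(name, namespace)
        for key, value in (secret.data or {}).items():
            os.environ[key] = base64.b64decode(value).decode("utf-8")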
Yihong Wang 22203dcd14
fix(backend): Update python version in Dockerfile (#437)
fix the get-pip.py error when using an old version
of python (3.5)
2021-01-26 13:33:40 -08:00
Christian Kadner d2f4328b6d
Update docs for release 0.6.0 (#433) 2021-01-25 16:44:59 -08:00
Tommy Li 549049af26
generate init 0.6.0 release manifests (#434) 2021-01-25 16:38:59 -08:00
Tommy Li 142c9d1256
Add tekton 0.20 api support and update the corresponding go client packages (#428)
* add tekton 0.20 support and upgrade the corresponding go client packages

* add tekton 0.20 pointer
2021-01-24 18:00:53 -08:00
kfp-tekton-bot d5e86d763e
KFP 1.3.0 Rebase (#423)
* KFP 1.3.0 Rebase

Resolves #410

* Fix license headers, markdown ToC, hyperlinks

* Fix merge conflicts in frontend and backend

* Update KFP version references in doc files

* Regenerate "Golden" YAML files any_sequencer, big_data_passing, create_component_from_func

* Regenerate compile report

* Regenerate "golden" YAML files for any_sequencer and parallel_join_with_argo_vars
2021-01-19 13:43:59 -08:00
Feng Li 34c168f407
feat(backend): Support pipeline loops via tekton custom task (#412)
* enhance the when condition UI display

* Support pipeline loops via tekton custom task

* Address comments from Tommy
2021-01-19 10:19:45 -08:00
Jin Chi He 33cde6399a
Enhance Any Sequencer by using Tekton Pipeline Variables Feature (#422) 2021-01-17 22:19:44 -08:00
Jin Chi He 6c1d68cce6
Handle Tekton pipeline level variables (#421) 2021-01-17 18:27:43 -08:00
Evan Hataishi 3825b192e2
test: include persistence agent unit tests in makefile run (#420)
* Add persistence agent unit tests command

* Fix spelling
2021-01-13 10:42:37 -08:00
Animesh Singh df000288c2
nit fix (#419) 2021-01-12 16:50:25 -08:00
Animesh Singh 1fa2ad585c
add any sequencer design pointer (#417)
point to any sequencer design, and correct some verbiage
2021-01-12 16:46:35 -08:00
Jin Chi He dc83ce8634
Enhance Any Sequencer (#414) 2021-01-12 15:08:36 -08:00
Evan Hataishi 1a4bdea06d
test: update persistence agent unit tests. Part of #388 (#403)
* Update metrics reporter unit tests

* Update persistence worker unit tests

* Update workflow saver unit tests

* Fix spelling typo
2021-01-12 13:00:37 -08:00
Tommy Li 99636983df
Move argo fake client to Tekton fake client. Also update apiserver, common, and crd tests (#395)
* update apiserver and common tests

* fix crd tests

* update travis tests

* update twitter endpoints to https

* revert contributing readme

* update test comments

* update function locations for the records
2021-01-12 12:36:35 -08:00
Tommy Li d8a9d5ba2b
Add SMTP server email notification example (#386)
* add email notification example

* add workaround for passing possible files

* Update send-email.py
2021-01-10 19:19:07 -08:00
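
As a minimal sketch of what the send-email step can look like with Python's standard library (server settings and variable names are assumptions, not the example's actual code):

    import os
    import smtplib
    from email.message import EmailMessage

    def send_notification(subject: str, body: str) -> None:
        msg = EmailMessage()
        msg["Subject"] = subject
        msg["From"] = os.environ["SMTP_FROM"]  # assumed configuration
        msg["To"] = os.environ["SMTP_TO"]      # assumed configuration
        msg.set_content(body)
        with smtplib.SMTP(os.environ["SMTP_SERVER"], 587) as server:
            server.starttls()
            server.login(os.environ["SMTP_USER"], os.environ["SMTP_PASSWORD"])
            server.send_message(msg)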
Andrew Butler 7563d2f11b
fix(frontend) fixes tasks being erroneously marked as 'skipped' (#408) 2021-01-07 11:38:52 -08:00
Evan Hataishi 744d817f48
test: fix backend docker build used in travis. Fixes #405 (#407) 2021-01-06 14:11:45 -08:00
Andrew Butler 4bec15fa5e
Add a check for parent condition status in nested conditions (#404) 2021-01-06 09:13:51 -08:00
Feng Li 4174b64080
fix(frontend): Any Sequencer UI support (#401)
* enhance the when condition UI display

* address the comments from drewbutlerbb4
2021-01-06 08:01:52 -08:00
Jin Chi He fb765134d6
Any Sequencer implements (#399) 2021-01-05 22:11:52 -08:00
Tommy Li dc6014ded8
fix(sdk): Fix nested condition bug (#400)
* fix nested condition bug

* fix lint
2021-01-04 15:29:57 -08:00
Christian Kadner 66f33514d8
Limit scope of Markdown link verification to 404 errors (#396) 2020-12-18 13:00:25 -08:00
Evan Hataishi defdbcc7c7
test: Add docker image build tests for backend (#385) 2020-12-11 16:13:26 -08:00
Tommy Li 11d9962960
Update 0.5 release reference (#394)
* update 0.5 release reference

* Update guides/kfp_tekton_install.md

Co-authored-by: Animesh Singh <singhan@us.ibm.com>
2020-12-11 15:59:26 -08:00
Christian Kadner 47cefe00ba
Prepare SDK 0.5.0 release on PyPI (#393) 2020-12-11 14:01:27 -08:00
Tommy Li 904577d50a
Update the backend test instructions with go test (#387)
* Update the backend test instructions with go test

* Apply suggestions from code review

Co-authored-by: Animesh Singh <singhan@us.ibm.com>

Co-authored-by: Animesh Singh <singhan@us.ibm.com>
2020-12-11 13:27:27 -08:00
Tommy Li 0ab8117aee
Use fixed version of minio image (#392) 2020-12-11 12:43:27 -08:00
Michalina Kotwica edaea2dc85
fix(sdk): dump "y" and "n" in enforced string mode (#390)
* fix: dump "y" and "n" in enforced string mode

* import OrderedDict from collections instead of typing

* m: remove unused import in compiler.py

* m: formatting issues in yaml_utils (from linter)

* add copyrights to yaml_utils.py
2020-12-10 11:40:14 -08:00
Tommy Li a81ae715d7
Update broken link to pass travis test (#391) 2020-12-10 10:42:14 -08:00
Tommy Li 51b9824382
fix(ui and sdk): Support display name annotations (#383)
* fix ui and sdk to support display name

* lint test file
2020-12-04 12:25:26 -08:00
Evan Hataishi 7f05014167
test: Add build test for apiserver, agent, and workflow (#384)
* Add build test for apiserver, agent, and workflow

* Separate backend build steps

* Rename backend build targets
2020-12-04 10:37:58 -08:00
Tommy Li 449314060a
Fix race condition on stand alone deployment (#379)
* update stand alone deployment

* add warning note
2020-12-02 15:37:00 -08:00
Andrew Butler 142e0cf28e
fix(frontend): Fixes "Report an issue" link and cleanup UI warnings (#378) 2020-12-02 14:21:00 -08:00
Evan Hataishi c4831d3e9e
Fix broken clone link (#380) 2020-12-02 14:15:00 -08:00
Feng Li 1f4db492a0
fix(frontend): Fix UI display issue if custom task defined for Tekton Task (#377)
* enhance the when condition UI display

* Fix UI display issue if custom task defined for Tekton Task
2020-12-02 08:28:51 -08:00
Feng Li a497ebfcd2
fix(frontend): enhance the when condition UI display (#373) 2020-12-01 11:42:51 -08:00
Feng Li 5c444f4129
Fix the yaml cannot show in Safari browser issue (#369) 2020-12-01 08:58:50 -08:00
Tommy Li bbff3ee7f7
Simplify deployment docs for 1.2 (#374) 2020-11-30 10:40:50 -08:00
Animesh Singh 3fd5253bc5
add reference to watson blog post (#370) 2020-11-23 17:41:00 -08:00
Andrew Butler 29bab7f4e7
feat(ui) Update UI to account for the When expression (#328)
* Add edges for 'when' expressions

* format fix

* Add 'Skipped' tasks for when expression
2020-11-13 11:35:06 -08:00
Tommy Li 7cd39a7335
Feature branch for moving to Tekton 0.16 (#321)
* add tekton 0.16.3 go mod

* update dev setup

* update dev docs

* update readme

* support more argo variable mapping

* update feature doc on the list of supported argo variables translation

* add task-wise metadata support

* add beta whenexpression for condition, unblock runAfter for conditional task

* update condition docs

* update ui image for tekton-0.16

* Apply suggestions from code review

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>

* address comments and fix conflicts

* update doc reference to tekton 0.16

* regenerate tests

* updated latest images with tekton 0.16

* regenerate tests

Co-authored-by: Andrew Butler <Andrew.Butler@ibm.com>
2020-11-13 11:33:04 -08:00
Christian Kadner 65bb814d6f
Verify all Markdown links are valid (#363)
* Verify all Markdown links are valid

* Add Python script to verify links in Markdown files are valid
* Add check_doc_links target to Makefile
* Add check_doc_links job to Travis/CI

* Fix the 4 broken Markdown links
2020-11-12 15:10:25 -08:00
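
A hedged sketch in the spirit of the link checker described above (the actual check_doc_links script is not reproduced here): extract Markdown links, request each URL, and report the ones that come back as 404.

    import re
    import urllib.error
    import urllib.request

    LINK_RE = re.compile(r"\[[^\]]*\]\((https?://[^)\s]+)\)")

    def broken_links(markdown_text: str) -> list:
        broken = []
        for url in LINK_RE.findall(markdown_text):
            try:
                status = urllib.request.urlopen(url, timeout=10).status
            except urllib.error.HTTPError as err:
                status = err.code
            except Exception:
                continue  # treat network errors as inconclusive, not broken
            if status == 404:
                broken.append(url)
        return broken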
Feng Li cdd5625c7c
Fix #340: handle big data passing with multi dependencies (#360) 2020-11-10 17:09:56 -08:00
Christian Kadner 4545ff9b7a
Fix broken doc links (#361)
* Fix broken doc links

* Remove "&" from generated ToC links

* Use Permalink for input artifact copy step
2020-11-10 16:44:42 -08:00
Animesh Singh 7a14599cc5
Making Andrew Butler a committer in KFP-Tekton project (#356) 2020-11-10 16:42:41 -08:00
Animesh Singh f6e61444a5
quick fix for SDK instructions (#358)
* Quick fix in SDK documentation

Takes some excerpts from Christian's website PR. 

I will be revisiting after release to figure out the overlap with User guide.

* SDK Packages Overview (#359)

Co-authored-by: Christian Kadner <ckadner@us.ibm.com>
2020-11-09 15:46:41 -08:00
Christian Kadner e6f2eb4247
Update docs for KFP 1.0.4 and prepare for release 0.4.0 (#357) 2020-11-09 14:18:29 -08:00
Animesh Singh f60887d665
restructure installation and user instructions (#355)
* reconcile .md files in a single place

* add guides folder

* update install links

* update install links

* add all guides
2020-11-06 17:43:37 -08:00
jfigura 816d33730a
docs: migration from Argo (#351)
* add docs on migration from Argo

* Update samples/kfp-user-guide/README.md

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>

* Update README.md

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2020-11-06 13:02:29 -08:00
Tommy Li f3b315f9f3
fix(sdk): Address default pipeline timeout behavior (#354)
* address timeout bug

* update default timeout behavior

* add flag to use tekton global default
2020-11-04 13:18:54 -08:00
Alex 3d3ecfb05f
feat(sdk): added sanitizing k8s object process and test. Fixes: #332 (#345)
* added sanitizing k8s object process and test

* fix lint error by adding blank line at last

* fix lint error by adding blank line at last

* fix lint error remove not used import

* fix lint errors

* removed unnecessary _to_str() and use str() directly
2020-11-02 14:08:15 -08:00
Animesh Singh eaeb9a8e99
add a warning label to kustomize page (#350) 2020-10-30 09:08:11 -07:00
Animesh Singh 53dc4f2e15
make user guide prominent (#349)
* make the heading more readable

* top heading should match file intent

* surface user guide

* Update README.md
2020-10-30 07:53:04 -07:00
Rafał Bigaj 905079b338
feat(backend): ARTIFACT_COPY_STEP_TEMPLATE - custom template for copy step (#336)
Issue: https://github.com/kubeflow/kfp-tekton/issues/333
2020-10-29 15:52:07 -07:00
Tommy Li 64a9dbbabd
Add KFP Tekton release deployment yaml (#347)
* add release deployment yaml

* Apply suggestions from code review

Co-authored-by: Animesh Singh <singhan@us.ibm.com>

* update title to match with suggestions

Co-authored-by: Animesh Singh <singhan@us.ibm.com>
2020-10-28 15:48:59 -07:00
Tommy Li aab779374b
Fix UI merge conflict that blocks the UI image build. (#346) 2020-10-28 08:29:57 -07:00
kfp-tekton-bot ae655577e9
KFP 1.0.4 Rebase (#337)
* KFP 1.0.3 Rebase

* comment out lines 130-142 in metrics_reporter.go (not supported in Tekton)

* fix merge conflicts in metrics_reporter.go#readNodeMetricsJSONOrEmpty()

* remove OWNERS file

* include changes from KFP 1.0.4
2020-10-26 15:35:59 -07:00
Tommy Li 058c6c95f7
fix(sdk): Quick fix for sanitizing conditional variables (#344) 2020-10-26 13:52:00 -07:00
Tommy Li 9877c92fdf
fix(backend): cover nil status race condition from Tekton (#329) 2020-10-26 08:21:02 -07:00
Animesh Singh aaec18c49a
updated titanic-ml generated with kale 0.5.1 (#330)
* updated titanic-ml generated with kale 0.5.1

* fix root directory path

Co-authored-by: Tommy Li <Tommy.chaoping.li@ibm.com>
2020-10-23 17:13:01 -07:00
Tommy Li 33493a23ea
Add s3 endpoint instructions for admin use case (#323)
* add s3 endpoint instructions for admin use case

* Update kfp-admin-guide.md

* Apply suggestions from code review

Co-authored-by: Animesh Singh <singhan@us.ibm.com>

* Align with new suggestions

Co-authored-by: Animesh Singh <singhan@us.ibm.com>
2020-10-21 16:05:34 -07:00
Tommy Li a1161328f8
Add best practice for tekton and other argo executors (#338)
* Add best practice for tekton and other argo executors

* Update README.md

* Apply suggestions from code review

Co-authored-by: Animesh Singh <singhan@us.ibm.com>

Co-authored-by: Animesh Singh <singhan@us.ibm.com>
2020-10-21 16:03:35 -07:00
Rafał Bigaj c0bc9b5757
fix(backend): MetricsReporter fails on `nil` nodeStatus.Status (#335)
Issue: https://github.com/kubeflow/kfp-tekton/issues/334
2020-10-19 14:53:18 -07:00
Tommy Li 4889307948
fix(samples): Add compilable python file for lightweight and e2e-mnist example (#316)
* add compilable python file for lightweight and e2e-mnist example

* Update single quote comments to double quotes
2020-10-13 12:46:28 -07:00
Tommy Li 0bc378bc90
fix(api): Update list of Tekton final status for api and persistent agent. (#322) 2020-10-01 13:57:19 -07:00
Tommy Li 5f189b7ac5
Fix big data annotations and update docs/samples (#318) 2020-10-01 13:39:19 -07:00
kunalpatel1793 e0b759b5a5
chore: Update the README to expand and link to CRDs (#320) 2020-09-30 17:34:54 -07:00
Tommy Li 18db488d54
Improve development manifests and docs (#314)
* improve dev manifests and docs

* Address comments
2020-09-22 14:52:10 -07:00
Tommy Li 1854b92ae0
fix(sdk): Check whether the dependent component is a tekton task (#313) 2020-09-22 09:26:49 -07:00
Tommy Li b1807476d1
fix(sdk): fixes outputfile key formatting (#308)
* fix outputfile formatting

* ignore python 3.5 test with unsortable characters

* Add python 3.5 error comments

* lint tailing whitespace
2020-09-17 17:32:45 -07:00
Andrew Butler 55a90853e3
feat(ui) Update UI to get archived logs if available (#305)
* Update UI to get archived logs if available

* Fix formatting

* Add note in admin guide

* Add note in admin guide

* Fix typo from #299 and lint format

* Add bracket
2020-09-17 10:58:45 -07:00
Rafał Bigaj b77a1ea987
feat(backend): new server API to read run log. Fixes #298. (#299)
* New server API: read run log

- The new server API endpoint (/apis/v1beta1/runs/{run_id}/nodes/{node_id}/log) to fetch run log
- UI (RunDetails) changes to fetch log from server API instead of direct access to cluster and ObjectStore

* Redundant comment

* New server API: read run log

- ml-pipeline rbac update: allow access to the log

* Read run log: enhanced error handling

- log message on Pod access errors

* Merged changes from upstream: 6135ca5389

- `ARCHIVE_LOG_FILE_NAME` and `ARCHIVE_LOG_PATH_PREFIX` options allow controlling the archive log path
- UI Server fetches logs from server API or directly from k8s depending on `STREAM_LOGS_FROM_SERVER_API` option

* Cherry-pick 76ba10b0ce67693f2909a56fd7bfac88f74b45a8 from rafalbigaj:master

* Log archive - tekton adaptations

* Archival default log file name: step-main.tgz
2020-09-17 08:38:47 -07:00
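
The endpoint path below comes straight from the commit description; host, port, and auth are deployment-specific assumptions. A minimal client sketch:

    import requests  # any HTTP client works; requests is assumed available

    def fetch_run_log(host: str, run_id: str, node_id: str) -> str:
        url = f"{host}/apis/v1beta1/runs/{run_id}/nodes/{node_id}/log"
        resp = requests.get(url)
        resp.raise_for_status()
        return resp.text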
Feng Li 8ec1d51abf
Replace persistentVolumeClaim with volumeClaimTemplate (#309)
* Replace persistentVolumeClaim with volumeClaimTemplate

* address the comments from tomcli
2020-09-16 14:13:21 -07:00
Animesh Singh 20627aea21
updating the intro (#311)
some folks were confused, thinking it's still not ready.
2020-09-16 10:55:20 -07:00
Christian Kadner 777194a2a0
Prepare PyPi distribution for release 0.3.0 (#303) 2020-09-11 13:10:10 -07:00
3494 changed files with 663456 additions and 237147 deletions

View File

@ -1,298 +0,0 @@
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test before submit:
# gcloud builds submit --config=.cloudbuild.yaml --substitutions=COMMIT_SHA="$(git rev-parse HEAD)" --project=ml-pipeline-test
steps:
# Build the Python SDK
- name: 'python:3-alpine'
entrypoint: '/bin/sh'
args: ['-c', 'cd /workspace/sdk/python/; python3 setup.py sdist --format=gztar; cp dist/*.tar.gz /workspace/kfp.tar.gz']
id: 'preparePythonSDK'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp.tar.gz', 'gs://$PROJECT_ID/builds/$COMMIT_SHA/kfp.tar.gz']
id: 'copyPythonSDK'
waitFor: ['preparePythonSDK']
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp.tar.gz', 'gs://$PROJECT_ID/builds/latest/kfp.tar.gz']
id: 'copyPythonSDKToLatest'
waitFor: ['preparePythonSDK']
# Build the Python Component SDK
- name: 'python:2-alpine'
entrypoint: '/bin/sh'
args: ['-c', 'cd /workspace/components/gcp/container/component_sdk/python;python setup.py sdist --format=gztar; cp dist/*.tar.gz /workspace/kfp-component.tar.gz']
id: 'preparePythonComponentSDK'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp-component.tar.gz', 'gs://$PROJECT_ID/builds/$COMMIT_SHA/kfp-component.tar.gz']
id: 'copyPythonComponentSDK'
waitFor: ['preparePythonComponentSDK']
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp-component.tar.gz', 'gs://$PROJECT_ID/builds/latest/kfp-component.tar.gz']
id: 'copyPythonComponentSDKToLatest'
waitFor: ['preparePythonComponentSDK']
# Build the pipeline system images
- name: 'gcr.io/cloud-builders/docker'
entrypoint: /bin/bash
args:
- -ceux
- |
sed -i -e "s/ARG DATE/ENV DATE \"$(date -u)\"/" /workspace/frontend/Dockerfile
docker build -t gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA \
--build-arg COMMIT_HASH=$COMMIT_SHA \
--build-arg TAG_NAME="$(cat /workspace/VERSION)" \
-f /workspace/frontend/Dockerfile \
/workspace
id: 'buildFrontend'
waitFor: ['-']
- name: 'gcr.io/cloud-builders/docker'
entrypoint: /bin/bash
args:
- -ceux
- |
docker build -t gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA \
--build-arg COMMIT_SHA=$COMMIT_SHA \
--build-arg TAG_NAME="$(cat /workspace/VERSION)" \
-f /workspace/backend/Dockerfile /workspace
id: 'buildApiServer'
waitFor: ['copyPythonSDK']
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.scheduledworkflow', '/workspace']
id: 'buildScheduledWorkflow'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.viewercontroller', '/workspace']
id: 'buildViewerCrdController'
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.persistenceagent', '/workspace']
id: 'buildPersistenceAgent'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', '-f',
'/workspace/proxy/Dockerfile', '/workspace/proxy']
id: 'buildInverseProxyAgent'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.visualization', '/workspace']
id: 'buildVisualizationServer'
waitFor: ["-"]
- id: 'buildMetadataWriter'
name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/backend/metadata_writer/Dockerfile', '/workspace']
waitFor: ["-"]
- id: 'buildCacheServer'
name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.cacheserver', '/workspace']
waitFor: ["-"]
- id: 'buildCacheDeployer'
name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/backend/src/cache/deployer/Dockerfile', '/workspace']
waitFor: ["-"]
# Build marketplace deployer
- id: 'buildMarketplaceDeployer'
name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/manifests/gcp_marketplace/deployer/Dockerfile', '/workspace/manifests/gcp_marketplace']
waitFor: ["-"]
# Build the Kubeflow-based pipeline component images
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-deployer:$COMMIT_SHA',
'/workspace/components/kubeflow/deployer']
id: 'buildDeployer'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/kubeflow/launcher && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildLauncher'
waitFor: ["-"]
- id: 'buildCpuTrainer'
name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/kubeflow/dnntrainer && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA -l ml-pipeline-kubeflow-tf-trainer -b 1.6.0']
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/kubeflow/dnntrainer && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA -l ml-pipeline-kubeflow-tf-trainer-gpu -b 1.6.0-gpu']
id: 'buildGpuTrainer'
waitFor: ["-"]
# Build the Dataproc-based pipeline component images
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/deprecated/dataproc/analyze && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildDataprocAnalyze'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/deprecated/dataproc/create_cluster && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildDataprocCreateCluster'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/deprecated/dataproc/delete_cluster && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildDataprocDeleteCluster'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/deprecated/dataproc/predict && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildDataprocPredict'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/deprecated/dataproc/transform && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildDataprocTransform'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/deprecated/dataproc/train && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildDataprocTrain'
waitFor: ["-"]
# Build the Generic GCP component image
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/gcp/container/ && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildGcpGenericComponent'
waitFor: ["-"]
# Build the local pipeline component images
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/local/confusion_matrix && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildConfusionMatrix'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/local/roc && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildROC'
waitFor: ["-"]
# Build third_party images
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', '-f',
'/workspace/third_party/metadata_envoy/Dockerfile', '/workspace']
id: 'buildMetadataEnvoy'
# Pull third_party images
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/tfx-oss-public/ml_metadata_store_server:0.21.1']
id: 'pullMetadataServer'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance']
id: 'pullMinio'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/mysql:5.6']
id: 'pullMysql'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/cloudsql-docker/gce-proxy:1.14']
id: 'pullCloudsqlProxy'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/argoexec:v2.7.5-license-compliance']
id: 'pullArgoExecutor'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/workflow-controller:v2.7.5-license-compliance']
id: 'pullArgoWorkflowController'
# Tag for Hosted - SemVersion to Major.Minor parsing
- id: "parseMajorMinorVersion"
waitFor: ["-"]
name: gcr.io/cloud-builders/docker
entrypoint: /bin/bash
args:
- -ceux
- |
# Parse major minor version and save to a file for reusing in other steps.
# e.g. 1.0.0-rc.1 and 1.0.1 are parsed as 1.0
cat /workspace/VERSION | sed -e "s#\([0-9]\+[.][0-9]\+\)[.].*#\1#" > /workspace/mm.ver
# Tag for Hosted - Tag to hosted folder with MKP friendly name
- id: 'tagForHosted'
waitFor: ['parseMajorMinorVersion', 'buildFrontend', 'buildApiServer', 'buildScheduledWorkflow',
'buildViewerCrdController', 'buildPersistenceAgent', 'buildInverseProxyAgent', 'buildVisualizationServer',
'buildMetadataWriter', 'buildCacheServer', 'buildCacheDeployer', 'buildMetadataEnvoy',
'buildMarketplaceDeployer', 'pullMetadataServer', 'pullMinio', 'pullMysql', 'pullCloudsqlProxy',
'pullArgoExecutor', 'pullArgoWorkflowController']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
/workspace/test/tag_for_hosted.sh $PROJECT_ID $COMMIT_SHA $(cat /workspace/VERSION) $(cat /workspace/mm.ver)
images:
# Images for the pipeline system itself
- 'gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA'
# Images for Marketplace
- 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA'
# Images for the Kubeflow-based pipeline components
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-deployer:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tf-trainer:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tf-trainer-gpu:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tfjob:$COMMIT_SHA'
# Images for the Dataproc-based pipeline components
- 'gcr.io/$PROJECT_ID/ml-pipeline-dataproc-analyze:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-dataproc-create-cluster:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-dataproc-delete-cluster:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-dataproc-predict:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-dataproc-transform:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-dataproc-train:$COMMIT_SHA'
# Images for the GCP generic pipeline components
- 'gcr.io/$PROJECT_ID/ml-pipeline-gcp:$COMMIT_SHA'
# Images for the local components
- 'gcr.io/$PROJECT_ID/ml-pipeline-local-confusion-matrix:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-local-roc:$COMMIT_SHA'
# Images for the third_party components
- 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA'
timeout: '3600s'
options:
diskSizeGb: 300
machineType: 'N1_HIGHCPU_8'
tags:
- build-each-commit
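
For reference, a hedged Python equivalent of the sed-based parseMajorMinorVersion step above, which reduces a semantic version to its major.minor prefix:

    import re

    def major_minor(version: str) -> str:
        # e.g. 1.0.0-rc.1 and 1.0.1 are both parsed as 1.0
        match = re.match(r"(\d+\.\d+)\.", version)
        return match.group(1) if match else version

    assert major_minor("1.0.0-rc.1") == "1.0"
    assert major_minor("1.0.1") == "1.0"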

View File

@ -1,4 +1,6 @@
.git
bower_components
dist
node_modules
**/node_modules
backend/build
v2/build

View File

@ -12,7 +12,7 @@ Resolves #
**Checklist:**
- [ ] The title for your pull request (PR) should follow our title convention. [Learn more about the pull request title convention used in this repository](https://github.com/kubeflow/pipelines/blob/master/CONTRIBUTING.md#pull-request-title-convention).
<!--
PR titles examples:
* `fix(frontend): fixes empty page. Fixes #1234`
Use `fix` to indicate that this PR fixes a bug.
@ -22,10 +22,4 @@ Resolves #
Use `chore` to indicate that this PR makes some changes that users don't need to know.
* `test: fix CI failure. Part of #1234`
Use `part of` to indicate that a PR is working on an issue, but shouldn't close the issue when merged.
- [ ] Do you want this pull request (PR) cherry-picked into the current release branch?
If yes, use one of the following options:
* **(Recommended.)** Ask the PR approver to add the `cherrypick-approved` label to this PR. The release manager adds this PR to the release branch in a batch update.
* After this PR is merged, create a cherry-pick PR to add these changes to the release branch. (For more information about creating a cherry-pick PR, see the [Kubeflow Pipelines release guide](https://github.com/kubeflow/pipelines/blob/master/RELEASE.md#option--git-cherry-pick).)
-->

.github/renovate.json5 vendored Normal file (62 changed lines)
View File

@ -0,0 +1,62 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": [
    "config:base",
    "schedule:weekly",
    ":dependencyDashboard",
    ":semanticCommits",
    ":semanticCommitScope(deps)",
    "group:linters",
    "group:googleapis",
    "group:goOpenapi"
  ],
  "packageRules": [
    {
      "datasources": ["docker"],
      "updateTypes": ["patch"],
      "groupName": "docker patch updates",
      "groupSlug": "docker-updates"
    },
    {
      "datasources": ["go"],
      "updateTypes": ["patch", "minor"],
      "groupName": "go.mod dependencies",
      "groupSlug": "go-mod-updates"
    },
    {
      "datasources": ["npm"],
      "rangeStrategy": "update-lockfile",
    },
    {
      "datasources": ["npm"],
      "updateTypes": ["patch", "minor"],
      "groupName": "npm dependencies",
      "groupSlug": "npm-updates",
    },
    { // major updates do not create PRs automatically
      "updateTypes": ["major"],
      "dependencyDashboardApproval": true,
    },
    { // disable python package updates initially, because they are not stable
      "languages": ["python"],
      "enabled": false
    },
    { // we've deprecated bazel
      "managers": ["bazel"],
      "enabled": false
    }
  ],
  "ignorePaths": [
    "components/deprecated/",
    "third_party/",
    "components/ibm-components/",
    "components/azure/",
    "components/presto/",
    "components/arena/",
    "components/aws/",
    "backend/api/python_http_client/",
    "manifests/kustomize/"
  ],
  "dependencyDashboardApproval": true, // require approval for all updates initially
  "separateMinorPatch": true
}

.github/semantic.yml vendored Normal file (27 changed lines)
View File

@ -0,0 +1,27 @@
# Configuration for https://github.com/zeke/semantic-pull-requests
#
# Kubeflow Pipelines PR title convention is documented in
# https://github.com/kubeflow/pipelines/blob/master/CONTRIBUTING.md#pull-request-title-convention
# Always validate the PR title, and ignore the commits.
titleOnly: true
# TODO: define a list of valid scopes.
# scopes:
# - scope1
# - scope2
# ...
# By default types specified in commitizen/conventional-commit-types is used.
# See: https://github.com/commitizen/conventional-commit-types/blob/v3.0.0/index.json
types:
- feat
- fix
- docs
- style
- refactor
- perf
- test
- build
- chore
- revert
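
A toy sketch of the title check this configuration enables, using the types listed above (the regex is illustrative, not the semantic-pull-requests implementation):

    import re

    TYPES = ("feat", "fix", "docs", "style", "refactor",
             "perf", "test", "build", "chore", "revert")

    def valid_title(title: str) -> bool:
        pattern = r"^(%s)(\([\w-]+\))?: .+" % "|".join(TYPES)
        return re.match(pattern, title) is not None

    assert valid_title("fix(frontend): fixes empty page. Fixes #1234")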

.github/stale.yml vendored Normal file (21 changed lines)
View File

@ -0,0 +1,21 @@
# Configuration for stale probot
# https://probot.github.io/apps/stale/
#
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 90
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 90
# Issues with these labels will never be considered stale
exemptLabels:
- lifecycle/frozen
# Label to use when marking an issue as stale
staleLabel: lifecycle/stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you
  for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: >
  This issue has been automatically closed because it has not had recent
  activity. Please comment "/reopen" to reopen it.
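
A toy sketch of the lifecycle these settings describe (the bot's real logic also accounts for comments and label changes, which this ignores):

    from datetime import datetime, timedelta

    def stale_state(last_activity: datetime, labels: set, now: datetime) -> str:
        if "lifecycle/frozen" in labels:       # exemptLabels above
            return "exempt"
        idle = now - last_activity
        if idle > timedelta(days=90 + 90):     # daysUntilStale + daysUntilClose
            return "close"
        if idle > timedelta(days=90):          # daysUntilStale
            return "stale"
        return "active"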

.github/workflows/codeql.yml vendored Normal file (72 changed lines)
View File

@ -0,0 +1,72 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
schedule:
# Every Friday at 19:39
- cron: '39 19 * * 5'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go', 'javascript', 'python' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Use only 'java' to analyze code written in Java, Kotlin or both
# Use only 'javascript' to analyze code written in JavaScript, TypeScript or both
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
steps:
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
with:
category: "/language:${{matrix.language}}"

View File

@ -0,0 +1,182 @@
name: KFP Tekton Unit Tests

on:
  push:
    branches: [master]
  # Run tests for any PRs.
  pull_request:

env:
  GITHUB_ACTION: "true"

jobs:
  python-unittest:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: python -m pip install -e sdk/python
      - name: Unit Tests
        run: VENV=$VIRTUAL_ENV make ci_unit_test

  validate-testdata:
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v4
        with:
          go-version: 1.19.x
      - uses: actions/checkout@v4
      - name: Set up Python 3.8
        uses: actions/setup-python@v4
        with:
          python-version: 3.8
      - name: Install dependencies
        run: python -m pip install -e sdk/python
      - name: "Generate testdata yaml files."
        run: VENV=$VIRTUAL_ENV make unit_test GENERATE_GOLDEN_YAML=True
      - name: "Test only required noninlined yaml files are generated."
        run: make validate-generated-test-yamls
      - name: "Tekton validation for testdata."
        run: make validate-testdata
      - name: "Validation for examples data."
        run: make validate-pipelineloop-examples

  progress-report:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.8
        uses: actions/setup-python@v4
        with:
          python-version: 3.8
      - name: Install dependencies
        run: python -m pip install -e sdk/python
      - name: "Progress report on compiling KFP DSL test scripts"
        run: VENV=$VIRTUAL_ENV make report

  python-lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.8
        uses: actions/setup-python@v4
        with:
          python-version: 3.8
      - name: "Lint Python code with flake8"
        run: VENV=$VIRTUAL_ENV make lint

  check-license:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: "Verify source files contain the license header"
        run: make check_license

  check-mdtoc:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: "Verify Markdown files have current table of contents"
        run: make check_mdtoc

  check-doc-links:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: "Verify Markdown files have valid links"
        run: make check_doc_links

  run-go-unittests:
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v4
        with:
          go-version: 1.19.x
      - name: Checkout code
        uses: actions/checkout@v4
      - name: "run go unit tests"
        run: make run-go-unittests
      - name: "Verify apiserver, agent, and workflow build"
        run: make build-backend

  run-pipelineloop-unittests:
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v4
        with:
          go-version: 1.19.x
      - name: Checkout code
        uses: actions/checkout@v4
      - name: "run go pipelineLoop unit tests"
        run: cd tekton-catalog/pipeline-loops && make test-all

  run-v2-custom-controller-image-builds:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - id: changes
        uses: jitterbit/get-changed-files@v1
        with:
          format: 'json'
      - name: backend changes
        id: backend-changes
        run: |
          readarray -t backend_files <<<"$(jq -r '.[]|select(startswith("tekton-catalog"))|select(endswith("README.md")|not)' <<<'${{ steps.changes.outputs.all }}')"
          echo "::set-output name=all::"
          if [[ ${#backend_files[@]} -gt 0 ]]; then
            if [[ -n "${backend_files[0]}" ]]; then
              echo "::set-output name=all::yes"
            fi
          fi
      - name: build images
        if: ${{ steps.backend-changes.outputs.all }}
        run: make build-v2-custom-controller-images

  backend-integration:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - id: changes
        uses: jitterbit/get-changed-files@v1
        with:
          format: 'json'
      - name: backend changes
        id: backend-changes
        run: |
          readarray -t backend_files <<<"$(jq -r '.[]|select(startswith("backend") or startswith("go.mod"))|select(endswith("README.md")|not)' <<<'${{ steps.changes.outputs.all }}')"
          echo "::set-output name=all::"
          if [[ ${#backend_files[@]} -gt 0 ]]; then
            if [[ -n "${backend_files[0]}" ]]; then
              echo "::set-output name=all::yes"
            fi
          fi
      - name: Create k8s Kind Cluster
        if: ${{ steps.backend-changes.outputs.all }}
        uses: container-tools/kind-action@v2
        with:
          cluster_name: kfp-tekton
          kubectl_version: v1.26.1
          version: v0.17.0
          node_image: kindest/node:v1.26.0
      - name: build images
        if: ${{ steps.backend-changes.outputs.all }}
        run: ./scripts/deploy/github/build-images.sh
      - name: Set up Python 3.9
        if: ${{ steps.backend-changes.outputs.all }}
        uses: actions/setup-python@v4
        with:
          python-version: 3.9
      - name: "deploy kfp-tekton"
        if: ${{ steps.backend-changes.outputs.all }}
        run: ./scripts/deploy/github/deploy-kfp.sh
      - name: Install sdk
        if: ${{ steps.backend-changes.outputs.all }}
        run: python -m pip install -e sdk/python
      - name: "flip coin test"
        if: ${{ steps.backend-changes.outputs.all }}
        run: ./scripts/deploy/github/e2e-test.sh
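
A minimal Python rendering of the jq change-detection filter in the backend-integration job above: keep changed files under backend/ or go.mod while ignoring README.md files, and trigger only when something relevant remains. The input list stands in for the JSON emitted by the get-changed-files action.

    def backend_changed(files: list) -> bool:
        relevant = [
            f for f in files
            if (f.startswith("backend") or f.startswith("go.mod"))
            and not f.endswith("README.md")
        ]
        return len(relevant) > 0

    assert backend_changed(["backend/api/run.go"])
    assert not backend_changed(["backend/README.md", "docs/guide.md"])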

.gitignore vendored (17 changed lines)
View File

@ -69,6 +69,7 @@ frontend/test/ui/visual-regression/screenshots/screen
# go vendor
vendor
tekton-catalog/pipeline-loops/go.sum
# Go module cache
backend/pkg/mod/cache
@ -98,3 +99,19 @@ docs/_build
# sed backups
*.bak
# virtualenv
.venv/
# python sdk package
*.tar.gz
# Copy from kubeflow/frontend
coverage/
# Python cache
__pycache__
.pytest_cache
# Coverage
.coverage
.coverage*

.kfp-rebase-version Normal file (1 changed line)
View File

@ -0,0 +1 @@
1.8.4

View File

@ -216,7 +216,7 @@ max-module-lines=1000
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
indent-string=' '
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4

.readthedocs.yml Normal file (8 changed lines)
View File

@ -0,0 +1,8 @@
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
version: 2
sphinx:
  configuration: docs/conf.py
build:
  os: "ubuntu-22.04"
  tools:
    python: "3.10"

View File

@ -1,653 +0,0 @@
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
steps:
# Marketplace Major.Minor parsing
- id: "parseMajorMinorVersion"
name: gcr.io/cloud-builders/docker
entrypoint: /bin/bash
args:
- -ceux
- |
# Parse major minor version and save to a file for reusing in other steps.
# e.g. 1.0.0-rc.1 and 1.0.1 are parsed as 1.0
echo $TAG_NAME | sed -e "s#\([0-9]\+[.][0-9]\+\)[.].*#\1#" > /workspace/mm.ver
# Pull and retag images for pipeline components
- id: 'retagComponentImages'
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
waitFor: ['-']
args:
- -ceux
- |
images=(
"ml-pipeline-kubeflow-deployer"
"ml-pipeline-kubeflow-tf-trainer"
"ml-pipeline-kubeflow-tf-trainer-gpu"
"ml-pipeline-kubeflow-tfjob"
"ml-pipeline-dataproc-analyze"
"ml-pipeline-dataproc-create-cluster"
"ml-pipeline-dataproc-delete-cluster"
"ml-pipeline-dataproc-predict"
"ml-pipeline-dataproc-transform"
"ml-pipeline-dataproc-train"
"ml-pipeline-local-confusion-matrix"
"ml-pipeline-local-roc"
"ml-pipeline-gcp"
)
for image in "${images[@]}"
do
from_image="gcr.io/$PROJECT_ID/$image:$COMMIT_SHA"
target_image="gcr.io/ml-pipeline/$image:$TAG_NAME"
docker pull $from_image
docker tag $from_image $target_image
docker push $target_image
done
# Pull and retag the images for the pipeline system
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA']
id: 'pullFrontend'
- id: 'tagFrontendForMarketplaceMajorMin'
waitFor: ['pullFrontend', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/frontend:$TAG_NAME
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/frontend:$COMMIT_SHA
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/frontend:$TAG_NAME
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/frontend:$TAG_NAME
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/frontend:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/frontend:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/frontend:$TAG_NAME
docker push gcr.io/ml-pipeline/frontend:$COMMIT_SHA
docker push gcr.io/ml-pipeline/google/pipelines/frontend:$TAG_NAME
docker push gcr.io/ml-pipeline/google/pipelines-test/frontend:$TAG_NAME
docker push gcr.io/ml-pipeline/google/pipelines/frontend:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/frontend:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA']
id: 'pullAPIServer'
- id: 'tagAPIServerForMarketplaceMajorMinor'
waitFor: ['pullAPIServer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA 'gcr.io/ml-pipeline/api-server:$TAG_NAME'
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA 'gcr.io/ml-pipeline/api-server:$COMMIT_SHA'
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA 'gcr.io/ml-pipeline/google/pipelines/apiserver:$TAG_NAME'
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA 'gcr.io/ml-pipeline/google/pipelines-test/apiserver:$TAG_NAME'
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/apiserver:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/apiserver:$(cat /workspace/mm.ver)
docker push 'gcr.io/ml-pipeline/api-server:$TAG_NAME'
docker push 'gcr.io/ml-pipeline/api-server:$COMMIT_SHA'
docker push 'gcr.io/ml-pipeline/google/pipelines/apiserver:$TAG_NAME'
docker push 'gcr.io/ml-pipeline/google/pipelines-test/apiserver:$TAG_NAME'
docker push gcr.io/ml-pipeline/google/pipelines/apiserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/apiserver:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA']
id: 'pullScheduledworkflow'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/scheduledworkflow:$TAG_NAME']
id: 'tagScheduledworkflowVersionNumber'
waitFor: ['pullScheduledworkflow']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/scheduledworkflow:$COMMIT_SHA']
id: 'tagScheduledworkflowCommitSHA'
waitFor: ['pullScheduledworkflow']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$TAG_NAME']
id: 'tagScheduledworkflowForMarketplace'
waitFor: ['pullScheduledworkflow']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$TAG_NAME']
id: 'tagScheduledworkflowForMarketplaceTest'
waitFor: ['pullScheduledworkflow']
- id: 'tagScheduledworkflowForMarketplaceMajorMinor'
waitFor: ['pullScheduledworkflow', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA']
id: 'pullViewerCrdController'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/viewer-crd-controller:$TAG_NAME']
id: 'tagViewerCrdControllerVersionNumber'
waitFor: ['pullViewerCrdController']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/viewer-crd-controller:$COMMIT_SHA']
id: 'tagViewerCrdControllerCommitSHA'
waitFor: ['pullViewerCrdController']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/viewercrd:$TAG_NAME']
id: 'tagViewerCrdControllerForMarketplace'
waitFor: ['pullViewerCrdController']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$TAG_NAME']
id: 'tagViewerCrdControllerForMarketplaceTest'
waitFor: ['pullViewerCrdController']
- id: 'tagViewerCrdControllerForMarketplaceMajorMinor'
waitFor: ['pullViewerCrdController', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/viewercrd:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/viewercrd:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA']
id: 'pullPersistenceagent'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/persistenceagent:$TAG_NAME']
id: 'tagPersistenceagentVersionNumber'
waitFor: ['pullPersistenceagent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/persistenceagent:$COMMIT_SHA']
id: 'tagPersistenceagentCommitSHA'
waitFor: ['pullPersistenceagent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/persistenceagent:$TAG_NAME']
id: 'tagPersistenceagentForMarketplace'
waitFor: ['pullPersistenceagent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$TAG_NAME']
id: 'tagPersistenceagentForMarketplaceTest'
waitFor: ['pullPersistenceagent']
- id: 'tagPersistenceagentForMarketplaceMajorMinor'
waitFor: ['pullPersistenceagent', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/persistenceagent:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/persistenceagent:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA']
id: 'pullInverseProxyAgent'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/inverse-proxy-agent:$TAG_NAME']
id: 'tagInverseProxyAgentVersionNumber'
waitFor: ['pullInverseProxyAgent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/inverse-proxy-agent:$COMMIT_SHA']
id: 'tagInverseProxyAgentCommitSHA'
waitFor: ['pullInverseProxyAgent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/proxyagent:$TAG_NAME']
id: 'tagInverseProxyAgentForMarketplace'
waitFor: ['pullInverseProxyAgent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$TAG_NAME']
id: 'tagInverseProxyAgentForMarketplaceTest'
waitFor: ['pullInverseProxyAgent']
- id: 'tagInverseProxyAgentForMarketplaceMajorMinor'
waitFor: ['pullInverseProxyAgent', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/proxyagent:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/proxyagent:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA']
id: 'pullVisualizationServer'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/visualization-server:$TAG_NAME']
id: 'tagVisualizationServerVersionNumber'
waitFor: ['pullVisualizationServer']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/visualization-server:$COMMIT_SHA']
id: 'tagVisualizationServerCommitSHA'
waitFor: ['pullVisualizationServer']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/visualizationserver:$TAG_NAME']
id: 'tagVisualizationServerForMarketplace'
waitFor: ['pullVisualizationServer']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$TAG_NAME']
id: 'tagVisualizationServerForMarketplaceTest'
waitFor: ['pullVisualizationServer']
- id: 'tagVisualizationServerForMarketplaceMajorMinor'
waitFor: ['pullVisualizationServer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/visualizationserver:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/visualizationserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/tfx-oss-public/ml_metadata_store_server:0.21.1']
id: 'pullMetadataServer'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/tfx-oss-public/ml_metadata_store_server:0.21.1', 'gcr.io/ml-pipeline/google/pipelines/metadataserver:$TAG_NAME']
id: 'tagMetadataServerForMarketplace'
waitFor: ['pullMetadataServer']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/tfx-oss-public/ml_metadata_store_server:0.21.1', 'gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$TAG_NAME']
id: 'tagMetadataServerForMarketplaceTest'
waitFor: ['pullMetadataServer']
- id: 'tagMetadataServerForMarketplaceMajorMinor'
waitFor: ['pullMetadataServer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/tfx-oss-public/ml_metadata_store_server:0.21.1 gcr.io/ml-pipeline/google/pipelines/metadataserver:$(cat /workspace/mm.ver)
docker tag gcr.io/tfx-oss-public/ml_metadata_store_server:0.21.1 gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/metadataserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$(cat /workspace/mm.ver)
- id: 'pullMetadataWriter'
name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA']
waitFor: ['-']
- id: 'tagMetadataWriterVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-writer:$TAG_NAME']
waitFor: ['pullMetadataWriter']
- id: 'tagMetadataWriterCommitSHA'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-writer:$COMMIT_SHA']
waitFor: ['pullMetadataWriter']
- id: 'tagMetadataWriterForMarketplace'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/metadatawriter:$TAG_NAME']
waitFor: ['pullMetadataWriter']
- id: 'tagMetadataWriterForMarketplaceTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$TAG_NAME']
waitFor: ['pullMetadataWriter']
- id: 'tagMetadataWriterForMarketplaceMajorMinor'
waitFor: ['pullMetadataWriter', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/metadatawriter:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/metadatawriter:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$(cat /workspace/mm.ver)
- id: 'pullCacheServer'
name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA']
waitFor: ['-']
- id: 'tagCacheServerVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/cache-server:$TAG_NAME']
waitFor: ['pullCacheServer']
- id: 'tagCacheServerCommitSHA'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/cache-server:$COMMIT_SHA']
waitFor: ['pullCacheServer']
- id: 'tagCacheServerForMarketplace'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/cacheserver:$TAG_NAME']
waitFor: ['pullCacheServer']
- id: 'tagCacheServerForMarketplaceTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/cacheserver:$TAG_NAME']
waitFor: ['pullCacheServer']
- id: 'tagCacheServerForMarketplaceMajorMinor'
waitFor: ['pullCacheServer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/cacheserver:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/cacheserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/cacheserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/cacheserver:$(cat /workspace/mm.ver)
- id: 'pullCacheDeployer'
name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA']
waitFor: ['-']
- id: 'tagCacheDeployerVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/cache-deployer:$TAG_NAME']
waitFor: ['pullCacheDeployer']
- id: 'tagCacheDeployerCommitSHA'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/cache-deployer:$COMMIT_SHA']
waitFor: ['pullCacheDeployer']
- id: 'tagCacheDeployerForMarketplace'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/cachedeployer:$TAG_NAME']
waitFor: ['pullCacheDeployer']
- id: 'tagCacheDeployerForMarketplaceTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/cachedeployer:$TAG_NAME']
waitFor: ['pullCacheDeployer']
- id: 'tagCacheDeployerForMarketplaceMajorMinor'
waitFor: ['pullCacheDeployer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/cachedeployer:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/cachedeployer:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/cachedeployer:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/cachedeployer:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA']
id: 'pullMetadataEnvoy'
- id: 'tagMetadataEnvoyVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-envoy:$TAG_NAME']
waitFor: ['pullMetadataEnvoy']
- id: 'tagMetadataEnvoyCommitSHA'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-envoy:$COMMIT_SHA']
waitFor: ['pullMetadataEnvoy']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$TAG_NAME']
id: 'tagMetadataEnvoyForMarketplace'
waitFor: ['pullMetadataEnvoy']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$TAG_NAME']
id: 'tagMetadataEnvoyForMarketplaceTest'
waitFor: ['pullMetadataEnvoy']
- id: 'tagMetadataEnvoyForMarketplaceMajorMinor'
waitFor: ['pullMetadataEnvoy', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance']
id: 'pullMinio'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance', 'gcr.io/ml-pipeline/google/pipelines/minio:$TAG_NAME']
id: 'tagMinioForMarketplace'
waitFor: ['pullMinio']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance', 'gcr.io/ml-pipeline/google/pipelines-test/minio:$TAG_NAME']
id: 'tagMinioForMarketplaceTest'
waitFor: ['pullMinio']
- id: 'tagMinioForMarketplaceMajorMinor'
waitFor: ['pullMinio', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance gcr.io/ml-pipeline/google/pipelines/minio:$(cat /workspace/mm.ver)
docker tag gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance gcr.io/ml-pipeline/google/pipelines-test/minio:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/minio:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/minio:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/mysql:5.6']
id: 'pullMysql'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/mysql:5.6', 'gcr.io/ml-pipeline/google/pipelines/mysql:$TAG_NAME']
id: 'tagMySqlForMarketplace'
waitFor: ['pullMysql']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/mysql:5.6', 'gcr.io/ml-pipeline/google/pipelines-test/mysql:$TAG_NAME']
id: 'tagMySqlForMarketplaceTest'
waitFor: ['pullMysql']
- id: 'tagMySqlForMarketplaceMajorMinor'
waitFor: ['pullMysql', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/ml-pipeline/mysql:5.6 gcr.io/ml-pipeline/google/pipelines/mysql:$(cat /workspace/mm.ver)
docker tag gcr.io/ml-pipeline/mysql:5.6 gcr.io/ml-pipeline/google/pipelines-test/mysql:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/mysql:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/mysql:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/cloudsql-docker/gce-proxy:1.14']
id: 'pullCloudsqlProxy'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/cloudsql-docker/gce-proxy:1.14', 'gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$TAG_NAME']
id: 'tagCloudSqlProxyForMarketplace'
waitFor: ['pullCloudsqlProxy']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/cloudsql-docker/gce-proxy:1.14', 'gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$TAG_NAME']
id: 'tagCloudSqlProxyForMarketplaceTest'
waitFor: ['pullCloudsqlProxy']
- id: 'tagCloudSqlProxyForMarketplaceMajorMinor'
waitFor: ['pullCloudsqlProxy', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/cloudsql-docker/gce-proxy:1.14 gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$(cat /workspace/mm.ver)
docker tag gcr.io/cloudsql-docker/gce-proxy:1.14 gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/argoexec:v2.7.5-license-compliance']
id: 'pullArgoExecutor'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/argoexec:v2.7.5-license-compliance', 'gcr.io/ml-pipeline/google/pipelines/argoexecutor:$TAG_NAME']
id: 'tagArgoExecutorForMarketplace'
waitFor: ['pullArgoExecutor']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/argoexec:v2.7.5-license-compliance', 'gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$TAG_NAME']
id: 'tagArgoExecutorForMarketplaceTest'
waitFor: ['pullArgoExecutor']
- id: 'tagArgoExecutorForMarketplaceMajorMinor'
waitFor: ['pullArgoExecutor', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/ml-pipeline/argoexec:v2.7.5-license-compliance gcr.io/ml-pipeline/google/pipelines/argoexecutor:$(cat /workspace/mm.ver)
docker tag gcr.io/ml-pipeline/argoexec:v2.7.5-license-compliance gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/argoexecutor:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/workflow-controller:v2.7.5-license-compliance']
id: 'pullArgoWorkflowController'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/workflow-controller:v2.7.5-license-compliance', 'gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$TAG_NAME']
id: 'tagArgoWorkflowControllerForMarketplace'
waitFor: ['pullArgoWorkflowController']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/workflow-controller:v2.7.5-license-compliance', 'gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$TAG_NAME']
id: 'tagArgoWorkflowControllerForMarketplaceTest'
waitFor: ['pullArgoWorkflowController']
- id: 'tagArgoWorkflowControllerForMarketplaceMajorMinor'
waitFor: ['pullArgoWorkflowController', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/ml-pipeline/workflow-controller:v2.7.5-license-compliance gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$(cat /workspace/mm.ver)
docker tag gcr.io/ml-pipeline/workflow-controller:v2.7.5-license-compliance gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$(cat /workspace/mm.ver)
# Marketplace-specific deployer and primary image
- id: 'pullMarketplaceDeployer'
name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA']
waitFor: ['-']
- id: 'tagMarketplaceDeployerVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/deployer:$TAG_NAME']
waitFor: ['pullMarketplaceDeployer']
- id: 'tagMarketplaceDeployerVersionNumberTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/deployer:$TAG_NAME']
waitFor: ['pullMarketplaceDeployer']
- id: 'tagMarketplaceDeployerVersionNumberMajorMinor'
waitFor: ['pullMarketplaceDeployer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/deployer:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/deployer:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/deployer:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/deployer:$(cat /workspace/mm.ver)
- id: 'tagMarketplacePrimaryVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines:$TAG_NAME']
waitFor: ['pullMarketplaceDeployer']
- id: 'tagMarketplacePrimaryVersionNumberTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test:$TAG_NAME']
waitFor: ['pullMarketplaceDeployer']
- id: 'tagMarketplacePrimaryVersionNumberMajorMinor'
waitFor: ['pullMarketplaceDeployer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test:$(cat /workspace/mm.ver)
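# The deployer image above doubles as the solution's primary image: the same
# $COMMIT_SHA build is retagged at the repository roots
# gcr.io/ml-pipeline/google/pipelines and .../pipelines-test, which is where
# GCP Marketplace expects a solution's top-level image to live.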
# Copy the Python SDK
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', 'gs://$PROJECT_ID/builds/$COMMIT_SHA/kfp.tar.gz', '/workspace/']
id: 'copyPythonSDKLocal'
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp.tar.gz', 'gs://ml-pipeline/release/$TAG_NAME/kfp.tar.gz']
id: 'copyPythonSDK'
waitFor: ['copyPythonSDKLocal']
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp.tar.gz', 'gs://ml-pipeline/release/latest/kfp.tar.gz']
id: 'copyPythonSDKToLatest'
waitFor: ['copyPythonSDKLocal']
# Copy the Python Component SDK
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', 'gs://$PROJECT_ID/builds/$COMMIT_SHA/kfp-component.tar.gz', '/workspace/']
id: 'copyPythonComponentSDKLocal'
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp-component.tar.gz', 'gs://ml-pipeline/release/$TAG_NAME/kfp-component.tar.gz']
id: 'copyPythonComponentSDK'
waitFor: ['copyPythonComponentSDKLocal']
- name: 'gcr.io/cloud-builders/gsutil'
args: ['cp', '/workspace/kfp-component.tar.gz', 'gs://ml-pipeline/release/latest/kfp-component.tar.gz']
id: 'copyPythonComponentSDKToLatest'
waitFor: ['copyPythonComponentSDKLocal']
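# Cloud Build pushes every image listed under `images` below to the registry
# only after all steps succeed, so a failed step publishes nothing.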
images:
- 'gcr.io/ml-pipeline/scheduledworkflow:$TAG_NAME'
- 'gcr.io/ml-pipeline/scheduledworkflow:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/persistenceagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/persistenceagent:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/viewer-crd-controller:$TAG_NAME'
- 'gcr.io/ml-pipeline/viewer-crd-controller:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/inverse-proxy-agent:$TAG_NAME'
- 'gcr.io/ml-pipeline/inverse-proxy-agent:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/visualization-server:$TAG_NAME'
- 'gcr.io/ml-pipeline/visualization-server:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/metadata-envoy:$TAG_NAME'
- 'gcr.io/ml-pipeline/metadata-envoy:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/metadata-writer:$TAG_NAME'
- 'gcr.io/ml-pipeline/metadata-writer:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/cache-server:$TAG_NAME'
- 'gcr.io/ml-pipeline/cache-server:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/cache-deployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/cache-deployer:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/viewercrd:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/persistenceagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/proxyagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/visualizationserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/metadataserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/minio:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/mysql:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/argoexecutor:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/metadatawriter:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/deployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/cacheserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/cachedeployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/minio:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/mysql:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/cacheserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/cachedeployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/deployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test:$TAG_NAME'
timeout: '1200s'
tags:
- release-on-tag
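
For reference, this config is clearly meant to run from a tag-driven trigger (the release-on-tag tag and the built-in $TAG_NAME/$COMMIT_SHA substitutions). A minimal sketch of invoking it by hand, where the config filename and tag value are assumptions, and the built-in substitutions must be passed explicitly because manual builds leave them empty:

  gcloud builds submit . \
    --config=cloudbuild.release.yaml \
    --substitutions=TAG_NAME=1.0.0,COMMIT_SHA=$(git rev-parse HEAD)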


@@ -1,9 +1,5 @@
 [style]
-based_on_style = google
-blank_line_before_nested_class_or_def = true
-indent_width = 4
-split_before_named_assigns = true
+based_on_style = yapf
 column_limit = 80
-continuation_indent_width = 4
-dedent_closing_brackets = true
-coalesce_brackets = true
+indent_width = 2
+split_before_first_argument = true

.tekton/listener.yaml Normal file (186 lines)

@@ -0,0 +1,186 @@
apiVersion: tekton.dev/v1beta1
kind: TriggerTemplate
metadata:
name: template
spec:
params:
- name: repository
description: The git repo
- name: revision
description: the branch for the git repo
- name: apikey
description: the ibmcloud api key
- name: registry-url
description: ibm container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: region
description: the ibmcloud registry region
default: us-south
- name: org
description: organization
- name: space
description: space
default: dev
- name: resource-group
description: resource group
default: default
- name: docker-username
description: ibm container registry username
default: iamapikey
- name: docker-password
description: iam api key
- name: public-cr-username
description: username for docker hub
- name: public-cr-password
description: password/token for public container registry
- name: public-cr
description: public container registry
default: docker.io
- name: tekton-version
description: tekton version
- name: tekton-ns
description: tekton namespace
default: tekton-pipeline
- name: kubernetes-cluster
description: cluster name
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: slack-webhook
description: webhook URL
default: ""
- name: slack-channel
description: slack channel
default: ""
- name: publish-to-public-cr
description: publish images to public container registry
default: "0"
- name: extra-test-cases
description: run extra test cases
default: "0"
- name: skip-pipeline-loop
description: skip the pipeline loop test case
default: "0"
- name: image-tag
description: image tag
default: "nightly"
- name: public-cr-namespace
description: namespace for public container registry
default: aipipeline
- name: images
description: an image list for publishing
default: "api-server persistenceagent metadata-writer scheduledworkflow cache-server frontend pipelineloop-controller pipelineloop-webhook kubeclient"
- name: v2-images
description: a v2 image list for publishing
default: "tekton-kfptask-controller tekton-kfptask-webhook tekton-exithandler-controller tekton-exithandler-webhook tekton-driver"
- name: many-edge-duration
description: duration threshold for many edge pipeline
value: "7"
- name: v2-image-tag
description: v2 tekton catalog image tag
default: "nightly"
resourcetemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pipelinerun-$(uid)-pvc
spec:
resources:
requests:
storage: 5Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
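# $(uid) is a unique ID that the trigger machinery generates per event, so each
# firing gets its own PVC and a matching PipelineRun below.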
- apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: pipelinerun-$(uid)
spec:
pipelineRef:
name: pipeline
workspaces:
- name: pipeline-pvc
persistentVolumeClaim:
claimName: pipelinerun-$(uid)-pvc
params:
- name: repository
value: $(params.repository)
- name: revision
value: $(params.revision)
- name: apikey
value: $(params.apikey)
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: region
value: $(params.region)
- name: docker-username
value: $(params.docker-username)
- name: docker-password
value: $(params.docker-password)
- name: public-cr-username
value: $(params.public-cr-username)
- name: public-cr-password
value: $(params.public-cr-password)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: space
value: $(params.space)
- name: tekton-version
value: $(params.tekton-version)
- name: tekton-ns
value: $(params.tekton-ns)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: image-tag
value: $(params.image-tag)
- name: public-cr
value: $(params.public-cr)
- name: public-cr-namespace
value: $(params.public-cr-namespace)
- name: publish-to-public-cr
value: $(params.publish-to-public-cr)
- name: extra-test-cases
value: $(params.extra-test-cases)
- name: skip-pipeline-loop
value: $(params.skip-pipeline-loop)
- name: images
value: $(params.images)
- name: v2-images
value: $(params.v2-images)
- name: many-edge-duration
value: $(params.many-edge-duration)
- name: v2-image-tag
value: $(params.v2-image-tag)
---
apiVersion: tekton.dev/v1beta1
kind: TriggerBinding
metadata:
name: binding
spec:
params:
- name: repository
value: "https://github.com/kubeflow/kfp-tekton"
- name: revision
value: "master"
---
apiVersion: tekton.dev/v1beta1
kind: EventListener
metadata:
name: listener
spec:
triggers:
- binding:
name: binding
template:
name: template
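
In upstream Tekton Triggers an EventListener like this is exposed through a generated el-<name> Service. A sketch of firing it manually might look as follows, with the namespace and the empty payload as assumptions (IBM Cloud Continuous Delivery, which these .tekton manifests appear to target, normally drives the trigger through its toolchain instead):

  kubectl -n tekton-pipeline port-forward svc/el-listener 8080:8080 &
  curl -s -X POST -H 'Content-Type: application/json' -d '{}' http://localhost:8080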

.tekton/pipeline.yaml Normal file (934 lines)

@@ -0,0 +1,934 @@
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
name: pipeline
spec:
params:
- name: repository
description: the git repo
- name: revision
description: the revision
default: master
- name: apikey
description: the ibmcloud api key
- name: registry-url
description: ibm container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: public-cr-username
description: username for public container registry
- name: public-cr-password
description: password/token for public container registry
- name: public-cr
description: public container registry
default: docker.io
- name: resource-group
description: resource group
default: default
- name: org
description: organization
- name: region
description: region
- name: space
description: space
- name: tekton-version
description: tekton version
- name: tekton-ns
description: tekton namespace
default: tekton-pipeline
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: kubernetes-cluster
description: cluster name
- name: slack-webhook
description: webhook URL
default: ""
- name: slack-channel
description: slack channel
default: ""
- name: publish-to-public-cr
description: publish images to public container registry
default: "0"
- name: extra-test-cases
description: execute extra test cases
default: "0"
- name: skip-pipeline-loop
description: skip the pipeline loop test case
default: "0"
- name: image-tag
description: image tag
default: "nightly"
- name: public-cr-namespace
description: namespace for public container registry
default: aipipeline
- name: images
description: an image list for publishing
default: "api-server persistenceagent metadata-writer scheduledworkflow cache-server frontend pipelineloop-controller pipelineloop-webhook kubeclient"
- name: v2-images
description: a v2 image list for publishing
default: "tekton-kfptask-controller tekton-kfptask-webhook tekton-exithandler-controller tekton-exithandler-webhook"
- name: many-edge-duration
description: duration threshold for many edge pipeline
value: "7"
- name: v2-image-tag
description: v2 tekton catalog image tag
default: "nightly"
workspaces:
- name: pipeline-pvc
tasks:
- name: test
taskRef:
name: test
params:
- name: repository
value: $(params.repository)
- name: revision
value: $(params.revision)
- name: apikey
value: $(params.apikey)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: $(params.docker-username)
- name: docker-password
value: $(params.docker-password)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-artifact
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: api-server
- name: docker-root
value: .
- name: docker-file
value: ""
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: artifact
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-api-server
retries: 1
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: api-server
- name: docker-root
value: .
- name: docker-file
value: backend/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-persistenceagent
retries: 1
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: persistenceagent
- name: docker-root
value: .
- name: docker-file
value: backend/Dockerfile.persistenceagent
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-metadata-writer
retries: 1
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: metadata-writer
- name: docker-root
value: .
- name: docker-file
value: backend/metadata_writer/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-scheduledworkflow
retries: 1
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: scheduledworkflow
- name: docker-root
value: .
- name: docker-file
value: backend/Dockerfile.scheduledworkflow
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-cacheserver
retries: 1
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: cache-server
- name: docker-root
value: .
- name: docker-file
value: backend/Dockerfile.cacheserver
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-frontend
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: frontend
- name: docker-root
value: .
- name: docker-file
value: frontend/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
- name: build-args
value: "COMMIT_HASH=$(git rev-parse HEAD) DATE=\"$(date -u)\""
workspaces:
- name: task-pvc
workspace: pipeline-pvc
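# $(git rev-parse HEAD) and $(date -u) above are not Tekton param substitutions;
# the string reaches the task verbatim and is expanded later by the build step's shell.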
- name: build-pipeline-loops-binaries
taskRef:
name: build-binaries
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
params:
- name: build-dir
value: tekton-catalog/pipeline-loops
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-any-sequencer-binaries
taskRef:
name: build-binaries
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
params:
- name: build-dir
value: tekton-catalog/any-sequencer
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-kubectl-wrapper-binaries
taskRef:
name: build-binaries
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
params:
- name: build-dir
value: tekton-catalog/kubectl-wrapper
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-pipelineloop-controller
runAfter:
- build-pipeline-loops-binaries
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: pipelineloop-controller
- name: docker-root
value: tekton-catalog/pipeline-loops
- name: docker-file
value: tekton-catalog/pipeline-loops/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: build-args
value: "bin_name=pipelineloop-controller"
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-tekton-kfptask-controller
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: tekton-kfptask-controller
- name: docker-root
value: tekton-catalog/tekton-kfptask
- name: docker-file
value: tekton-catalog/tekton-kfptask/Dockerfile.tekton-kfptask.controller
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-tekton-kfptask-webhook
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: tekton-kfptask-webhook
- name: docker-root
value: tekton-catalog/tekton-kfptask
- name: docker-file
value: tekton-catalog/tekton-kfptask/Dockerfile.tekton-kfptask.webhook
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-tekton-exithandler-controller
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: tekton-exithandler-controller
- name: docker-root
value: tekton-catalog/tekton-exithandler
- name: docker-file
value: tekton-catalog/tekton-exithandler/Dockerfile.tekton-exithandler.controller
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-tekton-exithandler-webhook
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: tekton-exithandler-webhook
- name: docker-root
value: tekton-catalog/tekton-exithandler
- name: docker-file
value: tekton-catalog/tekton-exithandler/Dockerfile.tekton-exithandler.webhook
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-pipelineloop-webhook
runAfter:
- build-pipeline-loops-binaries
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: pipelineloop-webhook
- name: docker-root
value: tekton-catalog/pipeline-loops
- name: docker-file
value: tekton-catalog/pipeline-loops/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: build-args
value: "bin_name=pipelineloop-webhook"
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-any-sequencer
runAfter:
- build-any-sequencer-binaries
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: any-sequencer
- name: docker-root
value: tekton-catalog/any-sequencer
- name: docker-file
value: tekton-catalog/any-sequencer/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-kubectl-wrapper
runAfter:
- build-kubectl-wrapper-binaries
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: kubeclient
- name: docker-root
value: tekton-catalog/kubectl-wrapper
- name: docker-file
value: tekton-catalog/kubectl-wrapper/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: deploy
taskRef:
name: deploy
runAfter:
- build-images-api-server
- build-images-persistenceagent
- build-images-metadata-writer
- build-images-scheduledworkflow
- build-images-cacheserver
- build-images-frontend
params:
- name: apikey
value: $(params.apikey)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: public-cr
value: $(params.public-cr)
# - name: tekton-version
# value: $(params.tekton-version)
# - name: tekton-ns
# value: $(params.tekton-ns)
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: setup-pipeline-loops-deploy
taskRef:
name: setup-pipeline-loops-deploy
runAfter:
- containerize-pipelineloop-controller
- containerize-pipelineloop-webhook
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: setup-kubectl-wrapper-deploy
taskRef:
name: setup-kubectl-wrapper-deploy
runAfter:
- containerize-kubectl-wrapper
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: deploy-pipeline-loops-e2e
retries: 1
taskRef:
name: iks-test
runAfter:
- setup-pipeline-loops-deploy
- deploy
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
- name: test-commands
value: |
export SKIP_PIPELINE_LOOP=$(params.skip-pipeline-loop);
source scripts/deploy/iks/tekton-catalog/deploy-pipeline-loops-e2e.sh;
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: deploy-any-sequencer-e2e
retries: 1
taskRef:
name: iks-test
runAfter:
- deploy
- containerize-any-sequencer
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
- name: test-commands
value: |
export NEW_IMAGE_URL="${REGISTRY_URL}/${REGISTRY_NAMESPACE}/any-sequencer"
export NEW_IMAGE_TAG="${IMAGE_TAG}"
kubectl create clusterrolebinding pipeline-runner-extend --clusterrole=cluster-admin --serviceaccount=default:default || true
source scripts/deploy/iks/tekton-catalog/deploy-any-sequencer-e2e.sh
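# `|| true` keeps the clusterrolebinding creation idempotent when it already
# exists from a previous run.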
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: deploy-kubectl-wrapper-e2e
retries: 1
taskRef:
name: iks-test
runAfter:
- deploy
- setup-kubectl-wrapper-deploy
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
- name: test-commands
value: |
export MANIFEST="resourceop_basic.yaml"
source scripts/deploy/iks/tekton-catalog/deploy-kubectl-wrapper-e2e.sh
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-flip-coin
retries: 1
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-flip-coin.sh"
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-cond-dep
retries: 1
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-condition-depend.sh"
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-cache
retries: 1
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-cache.sh"
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-many-edges
retries: 2
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-many-edges.sh"
- name: many-edge-duration
value: $(params.many-edge-duration)
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-trusted-ai
retries: 1
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
when:
- input: $(params.extra-test-cases)
operator: in
values:
- '1'
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-trusted-ai.sh"
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: publish-images
taskRef:
name: publish-images
runAfter:
- e2e-test-flip-coin
- deploy-any-sequencer-e2e
- deploy-kubectl-wrapper-e2e
when:
- input: $(params.publish-to-public-cr)
operator: in
values:
- '1'
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: $(params.docker-username)
- name: docker-password
value: $(params.docker-password)
- name: public-cr-username
value: $(params.public-cr-username)
- name: public-cr-password
value: $(params.public-cr-password)
- name: image-tag
value: $(params.image-tag)
- name: public-cr-namespace
value: $(params.public-cr-namespace)
- name: public-cr
value: $(params.public-cr)
- name: images
value: $(params.images)
- name: v2-images
value: $(params.v2-images)
- name: v2-image-tag
value: $(params.v2-image-tag)
workspaces:
- name: task-pvc
workspace: pipeline-pvc
finally:
- name: cleanup-pipeline-loops
taskRef:
name: iks-test
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
- name: test-commands
value: |
kubectl delete -f tekton-catalog/pipeline-loops/config || true
kubectl delete -f tekton-catalog/pipeline-loops/examples/loop-example-basic.yaml || true
kubectl delete -f sdk/python/tests/compiler/testdata/any_sequencer.yaml || true
kubectl delete -f sdk/python/tests/compiler/testdata/resourceop_basic.yaml || true
kubectl delete job --all -n default || true
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: undeploy
taskRef:
name: undeploy
params:
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: apikey
value: $(params.apikey)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
workspaces:
- name: task-pvc
workspace: pipeline-pvc
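
The taskRefs above (test, build-images-dnd, build-binaries, iks-test, e2e-test, deploy, publish-images, undeploy) resolve to Tasks such as the ones in .tekton/task.yaml below. A minimal sketch of applying the directory by hand, assuming a cluster running plain Tekton and a tekton-pipeline namespace (IBM Cloud Continuous Delivery otherwise ingests the .tekton directory itself):

  kubectl apply -n tekton-pipeline -f .tekton/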

.tekton/task.yaml Normal file (774 lines)

@@ -0,0 +1,774 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: test
spec:
params:
- name: repository
description: the git repo
- name: revision
description: the revision
default: master
- name: apikey
description: the ibmcloud api key
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: resource-group
description: resource group
default: default
- name: org
description: organization
- name: region
description: region
- name: space
description: space
- name: archive-dir
description: archive directory
default: "."
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: run-go-unittests
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: GIT_URL
value: $(params.repository)
- name: REGION
value: $(params.region)
- name: ORG
value: $(params.org)
- name: SPACE
value: $(params.space)
- name: RESOURCE_GROUP
value: $(params.resource-group)
- name: GIT_BRANCH
value: $(params.revision)
- name: REGISTER_NAMESPACE
value: $(params.registry-namespace)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
- name: ARCHIVE_DIR
value: $(params.archive-dir)
command: ["/bin/bash", "-c"]
args:
- set -ex;
rm -r /artifacts/*;
cd /artifacts && git clone -q -b $GIT_BRANCH $GIT_URL .;
GIT_COMMIT=$(git rev-parse HEAD);
source ./scripts/deploy/iks/run-test.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-images
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: image-name
description: image name
- name: docker-root
description: root directory for docker
default: .
- name: docker-file
description: dockerfile
- name: registry-url
description: container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: run-task
description: "execution task: artifact or image"
- name: archive-dir
description: archive directory
default: "."
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-image
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: IMAGE_NAME
value: $(params.image-name)
- name: DOCKER_ROOT
value: $(params.docker-root)
- name: DOCKER_FILE
value: $(params.docker-file)
- name: REGISTRY_URL
value: $(params.registry-url)
- name: REGISTRY_NAMESPACE
value: $(params.registry-namespace)
- name: RUN_TASK
value: $(params.run-task)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
source ./scripts/deploy/iks/build-image.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-images-args
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: image-name
description: image name
- name: docker-root
description: root directory for docker
default: .
- name: docker-file
description: dockerfile
- name: registry-url
description: container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: run-task
description: "execution task: artifact or image"
- name: archive-dir
description: archive directory
default: "."
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-image
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: IMAGE_NAME
value: $(params.image-name)
- name: DOCKER_ROOT
value: $(params.docker-root)
- name: DOCKER_FILE
value: $(params.docker-file)
- name: REGISTRY_URL
value: $(params.registry-url)
- name: REGISTRY_NAMESPACE
value: $(params.registry-namespace)
- name: RUN_TASK
value: $(params.run-task)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
export BUILD_ARG_LIST="COMMIT_HASH=$(git rev-parse HEAD) DATE=\"$(date -u)\"";
source ./scripts/deploy/iks/build-image.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-images-dnd
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: image-name
description: image name
- name: docker-root
description: root directory for docker
default: .
- name: docker-file
description: dockerfile
- name: registry-url
description: container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: run-task
description: "execution task: artifact or image"
- name: archive-dir
description: archive directory
default: "."
- name: build-args
description: docker build args
default: ""
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-image
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: IMAGE_NAME
value: $(params.image-name)
- name: DOCKER_ROOT
value: $(params.docker-root)
- name: DOCKER_FILE
value: $(params.docker-file)
- name: REGISTRY_URL
value: $(params.registry-url)
- name: REGISTRY_NAMESPACE
value: $(params.registry-namespace)
- name: RUN_TASK
value: $(params.run-task)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
- name: DOCKER_HOST
value: "tcp://localhost:2376"
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: "1"
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
export BUILD_ARG_LIST="$(params.build-args)";
source scripts/deploy/iks/build-image-dind.sh;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
sidecars:
- name: server
image: icr.io/continuous-delivery/pipeline/docker:20.10.22-dind
securityContext:
privileged: true
command: ["sh", "-c"]
env:
# Write generated certs to the path shared with the client.
- name: DOCKER_TLS_CERTDIR
value: /certs
args:
# Set the MTU to a value that fits within the IBM Cloud Calico MTU.
# References:
# - https://liejuntao001.medium.com/fix-docker-in-docker-network-issue-in-kubernetes-cc18c229d9e5
# - https://cloud.ibm.com/docs/containers?topic=containers-kernel#calico-mtu
#
# Work around virtio-fs on Continuous Delivery shared workers by backing
# /var/lib/docker with an ext4 loopback image
- if [[ $(df -PT /var/lib/docker | awk 'NR==2 {print $2}') == virtiofs ]]; then
apk add e2fsprogs &&
truncate -s 20G /tmp/disk.img &&
mkfs.ext4 /tmp/disk.img &&
mount /tmp/disk.img /var/lib/docker; fi &&
dockerd-entrypoint.sh --mtu=1400;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
# Wait for the dind daemon to generate the certs it will share with the client.
readinessProbe:
initialDelaySeconds: 2
periodSeconds: 1
exec:
command: ["ls", "/certs/client/ca.pem"]
volumes:
- name: dind-certs
emptyDir: {}
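# The client step and the dind sidecar share the dind-certs volume: the daemon
# writes its TLS material under /certs, and the step consumes it via
# DOCKER_CERT_PATH once the readiness probe sees ca.pem appear.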
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: deploy
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: kubernetes-cluster
description: cluster name
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
# - name: tekton-version
# description: tekton version
# - name: tekton-ns
# description: tekton namespace
# default: tekton-pipeline
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: public-cr
description: public container registry
default: quay.io
- name: archive-dir
description: archive directory
default: "."
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: deploy-kfp-tekton
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: PUBLIC_CR
value: $(params.public-cr)
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: KUBEFLOW_NS
value: $(params.kubeflow-ns)
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
source ./scripts/deploy/iks/deploy-kfp.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: e2e-test
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: kubernetes-cluster
description: cluster name
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: archive-dir
description: archive directory
default: "."
- name: slack-webhook
description: webhook URL
default: ""
- name: slack-channel
description: slack channel
default: ""
- name: test-script
description: a shell script to run the test case
default: ""
- name: many-edge-duration
description: duration threshold for pipeline
default: "5"
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: run-test
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: KUBEFLOW_NS
value: $(params.kubeflow-ns)
- name: SLACK_WEBHOOK
value: $(params.slack-webhook)
- name: SLACK_CHANNEL
value: $(params.slack-channel)
- name: PIPELINE_URL
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/pipeline-run-url']
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
- name: TEST_SCRIPT
value: $(params.test-script)
- name: MANY_EDGE_DURATION
value: $(params.many-edge-duration)
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
source ./scripts/deploy/iks/e2e-test.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: publish-images
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: kubernetes-cluster
description: cluster name
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: registry-url
description: container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: ibm container registry username
- name: docker-password
description: ibm container registry password
- name: public-cr-username
description: username for public container registry
- name: public-cr-password
description: password/token for public container registry
- name: public-cr
description: public container registry url
default: docker.io
- name: images
description: image list to publish
default: "api-server persistenceagent metadata-writer scheduledworkflow cache-server frontend"
- name: image-tag
description: image tag
default: "nightly"
- name: v2-images
description: v2 image list to publish
default: "tekton-kfptask-controller tekton-kfptask-webhook tekton-exithandler-controller tekton-exithandler-webhook"
- name: v2-image-tag
description: v2 image tag
default: "nightly"
- name: dind-ns
description: dind namespace
default: docker-build
- name: public-cr-namespace
description: namespace for public container registry
default: aipipeline
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: publish-images-to-cr
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: KUBEFLOW_NS
value: $(params.kubeflow-ns)
- name: REGISTRY_URL
value: $(params.registry-url)
- name: REGISTRY_NAMESPACE
value: $(params.registry-namespace)
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: PUBLIC_CR_USERNAME
value: $(params.public-cr-username)
- name: PUBLIC_CR_TOKEN
value: $(params.public-cr-password)
- name: PUBLIC_CR_NAMESPACE
value: $(params.public-cr-namespace)
- name: PUBLIC_CR
value: $(params.public-cr)
- name: IMAGES
value: $(params.images)
- name: PUBLISH_TAG
value: $(params.image-tag)
- name: V2_IMAGES
value: $(params.v2-images)
- name: V2_PUBLISH_TAG
value: $(params.v2-image-tag)
- name: DIND_NS
value: $(params.dind-ns)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
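# Talk to the docker daemon in the dind sidecar.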
- name: DOCKER_HOST
value: "tcp://localhost:2376"
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: "1"
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
source scripts/deploy/iks/publish-image.sh;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
sidecars:
- image: icr.io/continuous-delivery/pipeline/docker:19.03.15-dind
name: server
securityContext:
privileged: true
command: ["sh", "-c"]
env:
# Write generated certs to the path shared with the client.
- name: DOCKER_TLS_CERTDIR
value: /certs
args:
# Set the MTU to a value that fits within the ibmcloud Calico MTU.
# References:
# - https://liejuntao001.medium.com/fix-docker-in-docker-network-issue-in-kubernetes-cc18c229d9e5
# - https://cloud.ibm.com/docs/containers?topic=containers-kernel#calico-mtu
#
# Use a workaround to bypass virtio-fs for Continuous Delivery shared workers
- if [[ $(df -PT /var/lib/docker | awk 'NR==2 {print $2}') == virtiofs ]]; then
apk add e2fsprogs &&
truncate -s 20G /tmp/disk.img &&
mkfs.ext4 /tmp/disk.img &&
mount /tmp/disk.img /var/lib/docker; fi &&
dockerd-entrypoint.sh --mtu=1400;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
# Wait for the dind daemon to generate the certs it will share with the client.
readinessProbe:
initialDelaySeconds: 2
periodSeconds: 1
exec:
command: ["ls", "/certs/client/ca.pem"]
volumes:
- name: dind-certs
emptyDir: {}
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: undeploy
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: archive-dir
description: archive directory
default: "."
- name: kubernetes-cluster
description: cluster name
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: undeploy-kfp-tekton
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: KUBEFLOW_NS
value: $(params.kubeflow-ns)
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
source ./scripts/deploy/iks/undeploy-kfp.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-binaries
spec:
params:
- name: build-dir
description: directory with makefile
default: tekton-catalog/feature
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-binaries
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: BUILD_DIR
value: $(params.build-dir)
command:
- /bin/bash
- '-c'
args:
- set -ex;
cd /artifacts && source build.properties;
source scripts/deploy/iks/tekton-catalog/build-binaries.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-pipeline-loops-deploy
spec:
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: setup-deploy
image: quay.io/aipipeline/pipeline-base-image:1.6
command:
- /bin/bash
- '-c'
args:
- |
set -ex;
cd /artifacts && source build.properties;
export CONTROLLER_IMAGE_URL=${REGISTRY_URL}/${REGISTRY_NAMESPACE}/pipelineloop-controller;
export WEBHOOK_IMAGE_URL=${REGISTRY_URL}/${REGISTRY_NAMESPACE}/pipelineloop-webhook;
source scripts/deploy/iks/tekton-catalog/setup-pipeline-loops-deploy.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-kubectl-wrapper-deploy
spec:
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: setup-deploy
image: quay.io/aipipeline/pipeline-base-image:1.6
command:
- /bin/bash
- '-c'
args:
- >
cd /artifacts && source build.properties;
export KUBECTL_WRAPPER_IMAGE_URL=${REGISTRY_URL}/${REGISTRY_NAMESPACE}/kubeclient;
source scripts/deploy/iks/tekton-catalog/setup-kubectl-wrapper-deploy.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: iks-test
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: kubernetes-cluster
description: cluster name
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: resource-group
description: resource group
default: default
- name: org
description: organization
- name: region
description: region
- name: space
description: space
- name: archive-dir
description: archive directory
default: "."
- name: test-commands
description: testing commands
default: ""
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: iks-script
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: KUBEFLOW_NS
value: $(params.kubeflow-ns)
- name: REGION
value: $(params.region)
- name: ORG
value: $(params.org)
- name: SPACE
value: $(params.space)
- name: RESOURCE_GROUP
value: $(params.resource-group)
- name: PIPELINE_URL
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/pipeline-run-url']
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
command: ["/bin/bash", "-c"]
args:
- |
set -ex;
cd /artifacts && source build.properties
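# retry <max_attempts> <interval_seconds> <command...>: re-run the command until it succeeds, sleeping between attempts; fails after max_attempts.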
retry() {
local max=$1; shift
local interval=$1; shift
until "$@"; do
echo "trying.."
max=$((max-1))
if [[ "$max" -eq 0 ]]; then
return 1
fi
sleep "$interval"
done
}
# Set up kubernetes config
retry 3 3 ibmcloud login --apikey "${IBM_CLOUD_API_KEY}" --no-region
retry 3 3 ibmcloud target -r "$REGION" -g "$RESOURCE_GROUP"
retry 3 3 ibmcloud ks cluster config -c "$PIPELINE_KUBERNETES_CLUSTER_NAME"
$(params.test-commands)

View File

@ -0,0 +1,7 @@
# Tekton Catalog Pipeline Manifests
Directory containing the pipeline manifests to build, test, and publish the [tekton-catalog](../../tekton-catalog) images.
Scripts related to this pipeline run can be found under [scripts/deploy/iks/tekton-catalog](../../scripts/deploy/iks/tekton-catalog).
This pipeline also relies on some of the tasks defined in IBM's Open Toolchain [tekton-catalog](https://github.com/open-toolchain/tekton-catalog).
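As a rough sketch of how these manifests might be exercised (assuming `kubectl` access to a cluster with Tekton Pipelines and Triggers installed; the manifest file names below are illustrative, not the actual file names in this directory):

```shell
# Apply the task, pipeline, and trigger manifests (file names are assumptions).
kubectl apply -f tasks.yaml -f pipeline.yaml -f triggers.yaml

# Tekton Triggers fronts the "manual-run" EventListener with a Service
# (conventionally named el-<listener-name>); an empty POST is enough to
# start a run, since the manual TriggerBinding carries no parameters.
kubectl port-forward svc/el-manual-run 8080:8080 &
curl -s -X POST -H 'Content-Type: application/json' -d '{}' http://localhost:8080
```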

View File

@ -0,0 +1,110 @@
apiVersion: tekton.dev/v1beta1
kind: TriggerTemplate
metadata:
name: trigger-template
spec:
params:
# Passed in through secure-properties: apikey, dockerhub-token
- name: repository
description: git repository url
default: https://github.com/kubeflow/kfp-tekton
- name: branch
description: repository branch
default: master
- name: directory-name
description: Directory to clone repository into
default: kfp-tekton
- name: registry-region
description: The IBM Cloud region for image registry
- name: registry-namespace
description: Container registry namespace
- name: dev-region
default: ibm:yp:us-south
- name: dev-resource-group
default: default
- name: cluster-name
description: the name of the cluster to target
default: iks-cluster
- name: push-to-dockerhub
description: flag to signal if images should be pushed to dockerhub
default: '1'
- name: dockerhub-username
description: Dockerhub username
default: kfptektonbot
- name: docker-namespace
description: Image namespace
default: aipipeline
- name: image-tag
description: Tag for all images pushed to dockerhub/ibmcloud
default: nightly
- name: pipeline-debug
description: Flag used by tekton-catalog tasks
default: '0'
resourcetemplates:
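# $(uid) is generated by Tekton Triggers for each triggered event, so every run gets a uniquely named PVC and PipelineRun.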
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pipelinerun-$(uid)-pvc
spec:
resources:
requests:
storage: 5Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
- apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: pipelinerun-$(uid)
spec:
pipelineRef:
name: tekton-catalog-publish-pipeline
params:
- name: repository
value: $(params.repository)
- name: branch
value: $(params.branch)
- name: directory-name
value: $(params.directory-name)
- name: registry-region
value: $(params.registry-region)
- name: registry-namespace
value: $(params.registry-namespace)
- name: dev-region
value: $(params.dev-region)
- name: dev-resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: push-to-dockerhub
value: $(params.push-to-dockerhub)
- name: dockerhub-username
value: $(params.dockerhub-username)
- name: docker-namespace
value: $(params.docker-namespace)
- name: image-tag
value: $(params.image-tag)
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: pipeline-ws
persistentVolumeClaim:
claimName: pipelinerun-$(uid)-pvc
---
apiVersion: tekton.dev/v1beta1
kind: TriggerBinding
metadata:
name: trigger-binding-manual
spec:
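# No parameters are bound; this binding exists only so runs can be triggered manually.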
params: null
---
apiVersion: tekton.dev/v1beta1
kind: EventListener
metadata:
name: manual-run
spec:
triggers:
- binding:
name: trigger-binding-manual
template:
name: trigger-template

View File

@ -0,0 +1,504 @@
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
name: tekton-catalog-publish-pipeline
timeout: 30m
spec:
params:
- name: repository
description: git repository url
default: https://github.com/kubeflow/kfp-tekton
- name: branch
description: repository branch
default: master
- name: directory-name
description: Directory to clone repository into
default: kfp-tekton
- name: registry-region
description: The IBM Cloud region for image registry
- name: registry-namespace
description: Container registry namespace
- name: dev-region
default: ibm:yp:us-south
- name: dev-resource-group
default: default
- name: cluster-name
description: the name of the cluster to target
default: iks-cluster
- name: push-to-dockerhub
description: flag to signal if images should be pushed to dockerhub
default: '0'
- name: dockerhub-username
description: Dockerhub username
default: kfptektonbot
- name: docker-namespace
description: Image namespace
default: aipipeline
- name: image-tag
description: Tag for all images pushed to dockerhub/ibmcloud
default: nightly
- name: pipeline-debug
description: Flag used by tekton-catalog tasks
default: '0'
# pipeline-loops specific parameters
- name: path-to-pipeline-loops-context
description: Path to pipeline-loops docker build directory.
default: tekton-catalog/pipeline-loops
- name: path-to-pipeline-loops-dockerfile
description: Path to directory with pipeline-loops dockerfile
default: tekton-catalog/pipeline-loops
# any-sequencer specific parameters
- name: path-to-any-sequencer-context
description: >-
Path to any-sequencer docker build directory.
default: tekton-catalog/any-sequencer
- name: path-to-any-sequencer-dockerfile
description: Path to directory with any-sequencer dockerfile
default: tekton-catalog/any-sequencer
# kubectl-wrapper specific parameters
- name: path-to-kubectl-wrapper-context
description: >-
Path to kubectl-wrapper docker build directory.
default: tekton-catalog/kubectl-wrapper
- name: path-to-kubectl-wrapper-dockerfile
description: Path to directory with kubectl-wrapper dockerfile
default: tekton-catalog/kubectl-wrapper
workspaces:
- name: pipeline-ws
tasks:
- name: git-clone
taskRef:
name: git-clone-repo
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: ibmcloud-apikey-secret-key
value: toolchain-apikey
- name: repository
value: $(params.repository)
- name: branch
value: $(params.branch)
- name: directory-name
value: $(params.directory-name)
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: output
workspace: pipeline-ws
# ==========================================================
# Pipeline Loops Tasks
# ==========================================================
- name: build-pipeline-loops-binaries
taskRef:
name: build-binaries
runAfter:
- git-clone
params:
- name: directory-name
value: $(params.directory-name)
- name: build-dir
value: tekton-catalog/pipeline-loops
workspaces:
- name: task-pvc
workspace: pipeline-ws
- name: containerize-pipelineloop-controller
runAfter:
- build-pipeline-loops-binaries
taskRef:
name: icr-containerize
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: container-registry-apikey-secret-key
value: apikey
- name: registry-region
value: $(params.registry-region)
- name: registry-namespace
value: $(params.registry-namespace)
- name: image-name
value: "pipelineloop-controller"
- name: path-to-context
value: $(params.directory-name)/$(params.path-to-pipeline-loops-context)
- name: path-to-dockerfile
value: $(params.directory-name)/$(params.path-to-pipeline-loops-dockerfile)
- name: build-args
value: "bin_name=pipelineloop-controller"
- name: additional-tags-script
value: >
IMAGE_TAG=$(params.image-tag)
echo "$IMAGE_TAG"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: source
workspace: pipeline-ws
- name: containerize-pipelineloop-webhook
runAfter:
- build-pipeline-loops-binaries
taskRef:
name: icr-containerize
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: container-registry-apikey-secret-key
value: apikey
- name: registry-region
value: $(params.registry-region)
- name: registry-namespace
value: $(params.registry-namespace)
- name: image-name
value: "pipelineloop-webhook"
- name: path-to-context
value: $(params.directory-name)/$(params.path-to-pipeline-loops-context)
- name: path-to-dockerfile
value: $(params.directory-name)/$(params.path-to-pipeline-loops-dockerfile)
- name: build-args
value: "bin_name=pipelineloop-webhook"
- name: additional-tags-script
value: >
IMAGE_TAG=$(params.image-tag)
echo "$IMAGE_TAG"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: source
workspace: pipeline-ws
- name: setup-pipeline-loops-deploy
taskRef:
name: setup-pipeline-loops-deploy
runAfter:
- containerize-pipelineloop-controller
- containerize-pipelineloop-webhook
params:
- name: controller-image-url
value: $(tasks.containerize-pipelineloop-controller.results.image-repository)
- name: webhook-image-url
value: $(tasks.containerize-pipelineloop-webhook.results.image-repository)
# Controller and webhook should have been built with same tag
- name: image-tag
value: $(tasks.containerize-pipelineloop-controller.results.image-tags)
workspaces:
- name: task-pvc
workspace: pipeline-ws
- name: deploy-pipeline-loops-e2e
taskRef:
name: iks-deploy-to-kubernetes
runAfter:
- setup-pipeline-loops-deploy
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: script
value: |
#!/bin/bash
set -euxo pipefail
cd $(params.directory-name)
source scripts/deploy/iks/tekton-catalog/deploy-pipeline-loops-e2e.sh
APP_URL="null"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: artifacts
workspace: pipeline-ws
- name: publish-pipeline-loops-images-to-dockerhub
taskRef:
name: publish-images-to-dockerhub
runAfter:
- deploy-pipeline-loops-e2e
when:
- input: $(params.push-to-dockerhub)
operator: in
values:
- '1'
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: dockerhub-token-secret-key
value: dockerhub-token
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: directory-name
value: $(params.directory-name)
- name: dockerhub-username
value: $(params.dockerhub-username)
- name: docker-namespace
value: $(params.docker-namespace)
- name: image-tag
value: $(params.image-tag)
- name: publish-script
value: scripts/deploy/iks/tekton-catalog/publish-pipeline-loops-images.sh
workspaces:
- name: task-pvc
workspace: pipeline-ws
# ==========================================================
# Any Sequencer Tasks
# ==========================================================
- name: build-any-sequencer-binaries
taskRef:
name: build-binaries
runAfter:
- git-clone
params:
- name: directory-name
value: $(params.directory-name)
- name: build-dir
value: tekton-catalog/any-sequencer
workspaces:
- name: task-pvc
workspace: pipeline-ws
- name: containerize-any-sequencer
runAfter:
- build-any-sequencer-binaries
taskRef:
name: icr-containerize
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: container-registry-apikey-secret-key
value: apikey
- name: registry-region
value: $(params.registry-region)
- name: registry-namespace
value: $(params.registry-namespace)
- name: image-name
value: any-sequencer
- name: path-to-context
value: $(params.directory-name)/$(params.path-to-any-sequencer-context)
- name: path-to-dockerfile
value: $(params.directory-name)/$(params.path-to-any-sequencer-dockerfile)
- name: additional-tags-script
value: >
IMAGE_TAG=$(params.image-tag)
echo "$IMAGE_TAG"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: source
workspace: pipeline-ws
- name: deploy-any-sequencer-e2e
taskRef:
name: iks-deploy-to-kubernetes
runAfter:
- build-any-sequencer-binaries
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: script
value: |
cd $(params.directory-name)
export NEW_IMAGE_URL="$(tasks.containerize-any-sequencer.results.image-repository)"
export NEW_IMAGE_TAG=$(tasks.containerize-any-sequencer.results.image-tags)
source scripts/deploy/iks/tekton-catalog/deploy-any-sequencer-e2e.sh
APP_URL="null"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: artifacts
workspace: pipeline-ws
- name: publish-any-sequencer-image-to-dockerhub
taskRef:
name: publish-images-to-dockerhub
runAfter:
- deploy-any-sequencer-e2e
when:
- input: $(params.push-to-dockerhub)
operator: in
values:
- '1'
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: dockerhub-token-secret-key
value: dockerhub-token
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: directory-name
value: $(params.directory-name)
- name: dockerhub-username
value: $(params.dockerhub-username)
- name: docker-namespace
value: $(params.docker-namespace)
- name: image-tag
value: $(params.image-tag)
- name: publish-script
value: scripts/deploy/iks/tekton-catalog/publish-any-sequencer-image.sh
workspaces:
- name: task-pvc
workspace: pipeline-ws
# ==========================================================
# Kubectl Wrapper Tasks
# ==========================================================
- name: build-kubectl-wrapper-binaries
taskRef:
name: build-binaries
runAfter:
- git-clone
params:
- name: directory-name
value: $(params.directory-name)
- name: build-dir
value: tekton-catalog/kubectl-wrapper
workspaces:
- name: task-pvc
workspace: pipeline-ws
- name: containerize-kubectl-wrapper
runAfter:
- build-kubectl-wrapper-binaries
taskRef:
name: icr-containerize
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: container-registry-apikey-secret-key
value: apikey
- name: registry-region
value: $(params.registry-region)
- name: registry-namespace
value: $(params.registry-namespace)
- name: image-name
value: "kubeclient"
- name: path-to-context
value: $(params.directory-name)/$(params.path-to-kubectl-wrapper-context)
- name: path-to-dockerfile
value: $(params.directory-name)/$(params.path-to-kubectl-wrapper-dockerfile)
- name: additional-tags-script
value: >
IMAGE_TAG=$(params.image-tag)
echo "$IMAGE_TAG"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: source
workspace: pipeline-ws
- name: setup-kubectl-wrapper-deploy
taskRef:
name: setup-kubectl-wrapper-deploy
runAfter:
- containerize-kubectl-wrapper
params:
- name: kubectl-wrapper-image-url
value: $(tasks.containerize-kubectl-wrapper.results.image-repository)
- name: image-tag
value: $(tasks.containerize-kubectl-wrapper.results.image-tags)
workspaces:
- name: task-pvc
workspace: pipeline-ws
- name: deploy-kubectl-wrapper-e2e
taskRef:
name: iks-deploy-to-kubernetes
runAfter:
- setup-kubectl-wrapper-deploy
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: script
value: |
#!/bin/bash
set -euxo pipefail
cd $(params.directory-name)
source scripts/deploy/iks/tekton-catalog/deploy-kubectl-wrapper-e2e.sh
APP_URL="null"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: artifacts
workspace: pipeline-ws
- name: publish-kubectl-wrapper-image-to-dockerhub
taskRef:
name: publish-images-to-dockerhub
runAfter:
- deploy-kubectl-wrapper-e2e
when:
- input: $(params.push-to-dockerhub)
operator: in
values:
- '1'
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: dockerhub-token-secret-key
value: dockerhub-token
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: directory-name
value: $(params.directory-name)
- name: dockerhub-username
value: $(params.dockerhub-username)
- name: docker-namespace
value: $(params.docker-namespace)
- name: image-tag
value: $(params.image-tag)
- name: publish-script
value: scripts/deploy/iks/tekton-catalog/publish-kubectl-wrapper-image.sh
workspaces:
- name: task-pvc
workspace: pipeline-ws
# ==========================================================
# Final Task
# ==========================================================
finally:
- name: cleanup
taskRef:
name: cleanup
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: directory-name
value: $(params.directory-name)
workspaces:
- name: task-pvc
workspace: pipeline-ws

View File

@ -0,0 +1,357 @@
# ==========================================================
# Shared Tasks
# ==========================================================
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-binaries
spec:
params:
- name: directory-name
description: Repository directory
default: kfp-tekton
- name: build-dir
description: directory with makefile
default: tekton-catalog/feature
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-binaries
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: BUILD_DIR
value: $(params.build-dir)
command:
- /bin/bash
- '-c'
args:
- >
cd artifacts/$(params.directory-name);
source scripts/deploy/iks/tekton-catalog/build-binaries.sh;
---
# Adapted and modified from https://github.com/open-toolchain/tekton-catalog/blob/master/container-registry/task-execute-in-dind.yaml
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: publish-images-to-dockerhub
spec:
params:
- name: ibmcloud-api
description: the ibmcloud api
default: 'https://cloud.ibm.com'
- name: continuous-delivery-context-secret
description: >-
name of the configmap containing the continuous delivery pipeline
context secrets
default: secure-properties
- name: kubernetes-service-apikey-secret-key
description: >-
field in the secret that contains the api key used to login to
ibmcloud kubernetes service
default: apikey
- name: dockerhub-token-secret-key
description: >-
field in the secret that contains the token used to log in to
Dockerhub
default: dockerhub-token
- name: cluster-region
description: >
the ibmcloud region hosting the cluster
default: 'us-south'
- name: resource-group
description: target resource group (name or id) for the ibmcloud login operation
default: "default"
- name: cluster-name
description: >-
name of the cluster
default: 'iks-cluster'
- name: directory-name
description: Repository directory
default: kfp-tekton
- name: dockerhub-username
description: Dockerhub username
default: ''
- name: dockerhub-config-dir
description: Directory to store docker's config.json in
default: /steps
- name: docker-registry-secret-name
description: Name of docker-registry secret
default: registry-dockerconfig-secret
- name: docker-registry
description: Image registry
default: docker.io
- name: docker-namespace
description: Image namespace
default: ''
- name: image-tag
description: Image tag
default: ''
- name: publish-script
description: Script to build/tag/publish/... image
default: run.sh
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: create-docker-registry-secret
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: API_KEY
valueFrom:
secretKeyRef:
name: $(params.continuous-delivery-context-secret)
key: $(params.kubernetes-service-apikey-secret-key)
- name: IBM_CLOUD_API
value: $(params.ibmcloud-api)
- name: IBM_CLOUD_REGION
value: $(params.cluster-region)
- name: IBMCLOUD_RESOURCE_GROUP
value: $(params.resource-group)
- name: CLUSTER_NAME
value: $(params.cluster-name)
- name: DOCKERHUB_USERNAME
value: $(params.dockerhub-username)
- name: DOCKERHUB_TOKEN
valueFrom:
secretKeyRef:
name: $(params.continuous-delivery-context-secret)
key: $(params.dockerhub-token-secret-key)
- name: DOCKER_CONFIG_DIR
value: $(params.dockerhub-config-dir)
- name: SECRET_NAME
value: $(params.docker-registry-secret-name)
command:
- /bin/bash
- '-c'
args:
- >
cd artifacts/$(params.directory-name);
source scripts/deploy/iks/tekton-catalog/iks-authenticate.sh;
source scripts/deploy/iks/tekton-catalog/create-dockerconfig-secret.sh;
volumeMounts:
- mountPath: /steps
name: steps-volume
- name: run-docker-commands
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
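# Talk to the docker daemon in the dind sidecar.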
- name: DOCKER_HOST
value: "tcp://localhost:2376"
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: "1"
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
# The location of the client configuration files.
- name: DOCKER_CONFIG
value: /steps
- name: DOCKER_REGISTRY
value: $(params.docker-registry)
- name: DOCKER_NAMESPACE
value: $(params.docker-namespace)
- name: IMAGE_TAG
value: $(params.image-tag)
command:
- "/bin/bash"
- "-c"
args:
- |
cd artifacts/$(params.directory-name);
source $(params.publish-script)
volumeMounts:
- mountPath: /steps
name: steps-volume
- mountPath: /certs/client
name: dind-certs
sidecars:
- image: docker:dind
name: server
securityContext:
privileged: true
command: ["sh", "-c"]
env:
# Write generated certs to the path shared with the client.
- name: DOCKER_TLS_CERTDIR
value: /certs
args:
# Set the MTU to a value that fits within the ibmcloud Calico MTU.
# References:
# - https://liejuntao001.medium.com/fix-docker-in-docker-network-issue-in-kubernetes-cc18c229d9e5
# - https://cloud.ibm.com/docs/containers?topic=containers-kernel#calico-mtu
#
# Use a workaround to bypass virtio-fs for Continuous Delivery shared workers
- if [[ $(df -PT /var/lib/docker | awk 'NR==2 {print $2}') == virtiofs ]]; then
apk add e2fsprogs &&
truncate -s 20G /tmp/disk.img &&
mkfs.ext4 /tmp/disk.img &&
mount /tmp/disk.img /var/lib/docker; fi &&
dockerd-entrypoint.sh --mtu=1400;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
# Wait for the dind daemon to generate the certs it will share with the client.
readinessProbe:
initialDelaySeconds: 2
periodSeconds: 1
exec:
command: ["ls", "/certs/client/ca.pem"]
volumes:
- name: steps-volume
emptyDir: {}
- name: dind-certs
emptyDir: {}
# ==========================================================
# Final Tasks
# ==========================================================
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: cleanup
spec:
params:
- name: ibmcloud-api
description: the ibmcloud api
default: 'https://cloud.ibm.com'
- name: continuous-delivery-context-secret
description: >-
name of the configmap containing the continuous delivery pipeline
context secrets
default: secure-properties
- name: kubernetes-service-apikey-secret-key
description: >-
field in the secret that contains the api key used to login to
ibmcloud kubernetes service
default: apikey
- name: cluster-region
description: >
the ibmcloud region hosting the cluster
default: 'us-south'
- name: resource-group
description: target resource group (name or id) for the ibmcloud login operation
default: "default"
- name: cluster-name
description: >-
name of the cluster
default: 'iks-cluster'
- name: directory-name
description: Repository directory
default: kfp-tekton
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: cleanup
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: API_KEY
valueFrom:
secretKeyRef:
name: $(params.continuous-delivery-context-secret)
key: $(params.kubernetes-service-apikey-secret-key)
- name: IBM_CLOUD_API
value: $(params.ibmcloud-api)
- name: IBM_CLOUD_REGION
value: $(params.cluster-region)
- name: IBMCLOUD_RESOURCE_GROUP
value: $(params.resource-group)
- name: CLUSTER_NAME
value: $(params.cluster-name)
command:
- /bin/bash
- '-c'
args:
- >
cd artifacts/$(params.directory-name);
source scripts/deploy/iks/tekton-catalog/iks-authenticate.sh;
source scripts/deploy/iks/tekton-catalog/cleanup.sh;
# ==========================================================
# Pipeline Loops Tasks
# ==========================================================
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-pipeline-loops-deploy
spec:
params:
- name: directory-name
description: Repository directory
default: kfp-tekton
- name: controller-image-url
description: Controller image url of form registry/repository/image
default: us.icr.io/kfp-tekton/pipelineloop-controller
- name: webhook-image-url
description: Webhook image url of form registry/repository/image
default: us.icr.io/kfp-tekton/pipelineloop-webhook
- name: image-tag
description: Image tag SHARED by controller and webhook
default: nightly
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: setup-deploy
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: CONTROLLER_IMAGE_URL
value: $(params.controller-image-url)
- name: WEBHOOK_IMAGE_URL
value: $(params.webhook-image-url)
- name: IMAGE_TAG
value: $(params.image-tag)
command:
- /bin/bash
- '-c'
args:
- >
cd artifacts/$(params.directory-name);
source scripts/deploy/iks/tekton-catalog/setup-pipeline-loops-deploy.sh;
# ==========================================================
# Any Sequencer Tasks
# ==========================================================
# ==========================================================
# Kubectl Wrapper Tasks
# ==========================================================
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-kubectl-wrapper-deploy
spec:
params:
- name: directory-name
description: Repository directory
default: kfp-tekton
- name: kubectl-wrapper-image-url
description: Kubectl wrapper image url of form registry/repository/image
default: us.icr.io/kfp-tekton/kubeclient
- name: image-tag
description: Image tag of kubectl-wrapper image
default: nightly
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: setup-deploy
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: KUBECTL_WRAPPER_IMAGE_URL
value: $(params.kubectl-wrapper-image-url)
- name: IMAGE_TAG
value: $(params.image-tag)
command:
- /bin/bash
- '-c'
args:
- >
cd artifacts/$(params.directory-name);
source scripts/deploy/iks/tekton-catalog/setup-kubectl-wrapper-deploy.sh;
# ==========================================================
# Imported Tasks - https://github.com/open-toolchain/tekton-catalog
# ==========================================================
# git-clone
# icr-containerize
# iks-deploy-to-kubernetes

View File

@ -1,152 +0,0 @@
# Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
matrix:
include:
- name: "Unit tests, Python 3.5"
language: python
python: "3.5"
env: TOXENV=py35
install: &0
- python3 -m pip install -e sdk/python
script: &1
- VENV=$VIRTUAL_ENV make unit_test
- name: "Unit tests, Python 3.6"
language: python
python: "3.6"
env: TOXENV=py36
install: *0
script: *1
- name: "Unit tests, Python 3.7"
language: python
python: "3.7"
env: TOXENV=py37
install: *0
script: *1
- name: "Progress report on compiling KFP DSL test scripts"
language: python
python: "3.7"
install: *0
script:
- VENV=$VIRTUAL_ENV make report
- name: "Lint Python code with flake8"
language: python
python: "3.7"
script:
- VENV=$VIRTUAL_ENV make lint
- name: "Verify source files contain the license header"
language: bash
script:
- make check_license
- name: "Verify Markdown files have current table of contents"
language: bash
script:
- make check_mdtoc
# =================================
# TODO: merge with config from KFP
# =================================
#matrix:
# include:
# - language: python
# python: "3.5"
# env: TOXENV=py35
# before_install:
# - export PYTHONPATH=$PYTHONPATH:/home/travis/.local/lib/python3.5/site-packages/
# install: &0
# - python3 -m pip install --upgrade pip
# - python3 -m pip install -r $TRAVIS_BUILD_DIR/sdk/python/requirements.txt
# # Additional dependencies
# - pip3 install coverage==4.5.4 coveralls==1.9.2 six>=1.13.0
# # Sample test infra dependencies
# - pip3 install minio
# - pip3 install junit_xml
# # Visualization test dependencies
# - cd $TRAVIS_BUILD_DIR/backend/src/apiserver/visualization
# - pip3 install -r requirements-test.txt
# # Using Argo to lint all compiled workflows
# - export LOCAL_BIN="${HOME}/.local/bin"
# - mkdir -p "$LOCAL_BIN"
# - export PATH="${PATH}:$LOCAL_BIN" # Unnecessary - Travis already has it in PATH
# - wget --quiet -O "${LOCAL_BIN}/argo" https://github.com/argoproj/argo/releases/download/v2.4.3/argo-linux-amd64 && chmod +x "${LOCAL_BIN}/argo"
# script: &1 # DSL tests
# - cd $TRAVIS_BUILD_DIR/sdk/python
# - python3 -m pip install -e .
# - cd $TRAVIS_BUILD_DIR # Changing the current directory to the repo root for correct coverall paths
# - coverage run --source=kfp --append -m unittest discover --verbose --start-dir sdk/python/tests --top-level-directory=sdk/python
# #- coveralls
#
# # Test against TFX
# # Compile and setup protobuf
# - PROTOC_ZIP=protoc-3.7.1-linux-x86_64.zip
# - curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/$PROTOC_ZIP
# - sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc
# - sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*'
# - rm -f $PROTOC_ZIP
# # Install TFX from head
# - cd $TRAVIS_BUILD_DIR
# - git clone https://github.com/tensorflow/tfx.git
# - cd $TRAVIS_BUILD_DIR/tfx
# - pip3 install --upgrade pip
# - pip3 install --upgrade 'numpy>=1.16,<1.17'
# # Specify transitive dependency to get around: https://github.com/pypa/pip/issues/8583
# - pip3 install --upgrade 'google-auth>=1.18.0'
# - set -x
# - set -e
# - python3 setup.py bdist_wheel
# - WHEEL_PATH=$(find dist -name "tfx-*.whl")
# - python3 -m pip install "${WHEEL_PATH}" --upgrade
# - set +e
# - set +x
# # Three KFP-related unittests
# - cd $TRAVIS_BUILD_DIR/tfx/tfx/orchestration/kubeflow
# - python3 kubeflow_dag_runner_test.py
# - cd $TRAVIS_BUILD_DIR/tfx/tfx/examples/chicago_taxi_pipeline
# - python3 taxi_pipeline_kubeflow_gcp_test.py
# - python3 taxi_pipeline_kubeflow_local_test.py
#
# # Visualization test
# - cd $TRAVIS_BUILD_DIR/backend/src/apiserver/visualization
# - python3 test_exporter.py
# - python3 test_server.py
#
# # Test loading all component.yaml definitions
# - $TRAVIS_BUILD_DIR/components/test_load_all_components.sh
#
# # Component SDK tests
# - cd $TRAVIS_BUILD_DIR/components/gcp/container/component_sdk/python
# - ./run_test.sh
#
# # Sample test unittests.
# - cd $TRAVIS_BUILD_DIR/test/sample-test/unittests
# - python3 -m unittest utils_tests.py
# - language: python
# python: "3.6"
# env: TOXENV=py36
# before_install:
# - export PYTHONPATH=$PYTHONPATH:/home/travis/.local/lib/python3.6/site-packages/
# install: *0
# script: *1
# - language: python
# python: "3.7"
# env: TOXENV=py37
# before_install:
# - export PYTHONPATH=$PYTHONPATH:/home/travis/.local/lib/python3.7/site-packages/
# install: *0
# script: *1
# - name: "Lint Python code with flake8"
# language: python
# python: "3.7"
# install: pip install flake8
# script: flake8 . --count --exclude=backend/src/apiserver/visualization/types/*.py --select=E9,F63,F7,F82 --show-source --statistics || true

View File

@ -1,8 +0,0 @@
load("@bazel_gazelle//:def.bzl", "gazelle")
# gazelle:prefix github.com/kubeflow/pipelines
# gazelle:resolve proto protoc-gen-swagger/options/annotations.proto @com_github_grpc_ecosystem_grpc_gateway//protoc-gen-swagger/options:options_proto
# gazelle:resolve proto go protoc-gen-swagger/options/annotations.proto @com_github_grpc_ecosystem_grpc_gateway//protoc-gen-swagger/options:go_default_library
# gazelle:resolve go github.com/kubeflow/pipelines/backend/api/go_client //backend/api:go_default_library
# gazelle:exclude vendor/
gazelle(name = "gazelle")

View File

@ -8,9 +8,11 @@ just a few small guidelines you need to follow.
<!-- START of ToC generated by running ./tools/mdtoc.sh CONTRIBUTING.md -->
- [Contributor License Agreement](#contributor-license-agreement)
- [Development Guidelines](#development-guidelines)
- [Project Structure](#project-structure)
- [Legal](#legal)
- [Coding Style](#coding-style)
- [Unit Testing Best Practices](#unit-testing-best-practices)
- [Golang](#golang)
- [Code Reviews](#code-reviews)
- [Pull Requests](#pull-requests)
- [Pull Request Title Convention](#pull-request-title-convention)
@ -19,25 +21,25 @@ just a few small guidelines you need to follow.
- [PR Scope](#pr-scope)
- [Get Involved](#get-involved)
<!-- END of ToC generated by running ./tools/mdtoc.sh sdk/README.md -->
## Project Structure
## Contributor License Agreement
Kubeflow Pipelines consists of multiple components. Before you begin, learn how
to [build the Kubeflow Pipelines component container images](./guides/developer_guide.md#development-building-from-source-code).
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution,
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
To get started, see the development guides:
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
* [Frontend development guide](./frontend/README.md)
* [Backend development guide](./backend/README.md)
* [SDK development guide](./sdk/python/README.md)
## Development Guidelines
## Legal
Please take a look at the [KFP-Tekton Developer Guide](sdk/python/README.md) for
details about how to make code contributions to the KFP-Tekton project.
Kubeflow uses Developer Certificate of Origin ([DCO](https://github.com/apps/dco/)).
Please see https://github.com/kubeflow/community/tree/master/dco-signoff-hook#signing-off-commits to learn how to sign off your commits.
## Coding Style
@ -50,6 +52,22 @@ The frontend part of the project uses [prettier](https://prettier.io/) for
formatting, read [frontend/README.md#code-style](frontend/README.md#code-style)
for more details.
## Unit Testing Best Practices
* Testing via Public APIs
### Golang
* Put your tests in a different package: Moving your test code out of the package
allows you to write tests as though you were a real user of the package. You
cannot fiddle around with the internals, instead you focus on the exposed
interface and are always thinking about any noise that you might be adding to
your API. Usually the test code will be put under the same folder but with a
package suffix of `_test`. https://golang.org/src/go/ast/example_test.go (example)
* Internal tests go in a different file: If you do need to unit test some internals,
create another file with `_internal_test.go` as the suffix.
* Write table-driven tests: https://github.com/golang/go/wiki/TableDrivenTests (example)
## Code Reviews
All submissions, including submissions by project members, require review. We
@ -96,7 +114,7 @@ PR titles should use the following structure.
Replace the following:
* **`<type>`**: The PR type describes the reason for the change, such as `fix` to indicate that the PR fixes a bug. More information about PR types is available in the next section.
* **`[optional scope]`**: (Optional.) The PR scope describes the part of Kubeflow Pipelines that this PR changes, such as `frontend` to indicate that the change affects the user interface. Choose a scope according to [PR Scope section](#pr-scope).
* **`<description>`**: A user friendly description of this change.
* **`[ Fixes #<issues-number>]`**: (Optional.) Specifies the issues fixed by this PR.

170
Makefile
View File

@ -1,4 +1,4 @@
# Copyright 2020 kubeflow.org
# Copyright 2020-2021 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -16,13 +16,16 @@
# - The help target was derived from https://stackoverflow.com/a/35730328/5601796
VENV ?= .venv
KFP_TEKTON_RELEASE ?= v1.9.2
export VIRTUAL_ENV := $(abspath ${VENV})
export PATH := ${VIRTUAL_ENV}/bin:${PATH}
DOCKER_REGISTRY ?= aipipeline
GITHUB_ACTION ?= false
.PHONY: help
help: ## Display the Make targets
@grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}'
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-25s\033[0m %s\n", $$1, $$2}'
.PHONY: venv
venv: $(VENV)/bin/activate ## Create and activate virtual environment
@ -33,12 +36,38 @@ $(VENV)/bin/activate: sdk/python/setup.py
@echo "VENV=$(VENV)"
@test -d $(VENV) || python3 -m venv $(VENV)
@$(VENV)/bin/pip show kfp-tekton >/dev/null 2>&1 || $(VENV)/bin/pip install -e sdk/python
@touch $(VENV)/bin/activate
@if [ "$(GITHUB_ACTION)" = "false" ]; then touch $(VENV)/bin/activate; fi
.PHONY: install
install: venv ## Install the kfp_tekton compiler in a virtual environment
@echo "Run 'source $(VENV)/bin/activate' to activate the virtual environment."
.PHONY: validate-generated-test-yamls
validate-generated-test-yamls:
@echo "=================================================================="
@echo "Reporting files with same non-inlined and inlined generated yamls in testdata:"
@find sdk/python/tests/compiler/testdata \
\( -type f -name "*yaml" -name "*noninlined.yaml" \) | sort -z >/tmp/validate-generated-test-yamls_total
@find sdk/python/tests/compiler/testdata \
\( -type f -name "*yaml" -name "*noninlined.yaml" \) | \
sed -n -e 's/\(.*\)_noninlined.yaml/\1/p' | \
xargs -n1 -I '{}' diff -q '{}.yaml' '{}_noninlined.yaml' | cut -f4 -d' ' | \
sort -z >/tmp/validate-generated-test-yamls_valid
@echo "=================================================================="
@echo "Noninlined and inlined testdata yamls, having same content."
@diff -a /tmp/validate-generated-test-yamls_total /tmp/validate-generated-test-yamls_valid
@echo "$@: OK"
.PHONY: validate-testdata
validate-testdata:
@cd tekton-catalog/pipeline-loops/ && make validate-testdata-python-sdk
@echo "$@: OK"
.PHONY: validate-pipelineloop-examples
validate-pipelineloop-examples:
@cd tekton-catalog/pipeline-loops/ && make validate-examples
@echo "$@: OK"
.PHONY: unit_test
unit_test: venv ## Run compiler unit tests
@echo "=================================================================="
@ -49,6 +78,9 @@ unit_test: venv ## Run compiler unit tests
@sdk/python/tests/run_tests.sh
@echo "$@: OK"
.PHONY: ci_unit_test
ci_unit_test: unit_test
.PHONY: e2e_test
e2e_test: venv ## Run compiler end-to-end tests (requires kubectl and tkn CLI)
@echo "=================================================================="
@ -76,7 +108,7 @@ lint: venv ## Check Python code style compliance
@which flake8 > /dev/null || pip install flake8
@flake8 sdk/python --show-source --statistics \
--select=E9,E2,E3,E5,F63,F7,F82,F4,F841,W291,W292 \
--per-file-ignores sdk/python/tests/compiler/testdata/*:F841 \
--per-file-ignores sdk/python/tests/compiler/testdata/*:F841,F821 \
--max-line-length=140
@echo "$@: OK"
@ -90,7 +122,7 @@ check_license: ## Check for license header in source files
.PHONY: check_mdtoc
check_mdtoc: ## Check Markdown files for a valid Table of Contents
@find samples sdk *.md -type f -name '*.md' -exec \
@find guides samples sdk *.md -type f -name '*.md' -exec \
grep -l -i 'Table of Contents' {} \; | sort | \
while read -r md_file; do \
grep -oE '^ *[-+*] \[[^]]+\]\(#[^)]+\)' "$${md_file}" | sed -e 's/[-+*] /- /g' > md_file_toc; \
@ -100,8 +132,14 @@ check_mdtoc: ## Check Markdown files for valid the Table of Contents
done | grep . && echo "Run './tools/mdtoc.sh <md-file>' to update the 'Table of Contents' in the Markdown files reported above." && exit 1 || \
echo "$@: OK"
.PHONY: check_doc_links
check_doc_links: ## Check Markdown files for valid links
@pip3 show requests > /dev/null || pip install requests
@python3 tools/python/verify_doc_links.py
@echo "$@: OK"
.PHONY: verify
verify: check_license check_mdtoc lint unit_test report ## Run all verification targets: check_license, check_mdtoc, lint, unit_test, report
verify: check_license check_mdtoc check_doc_links lint unit_test report ## Run all verification targets: check_license, check_mdtoc, check_doc_links, lint, unit_test, report
@echo "$@: OK"
.PHONY: distribution
@ -123,3 +161,123 @@ build: ## Create GO vendor directories with all dependencies
licext --mode merge --source vendor/ --target third_party/license.txt --overwrite
# Delete vendor directory
rm -rf vendor
.PHONY: build-release-template
build-release-template: ## Build KFP Tekton release deployment templates
@mkdir -p install/$(KFP_TEKTON_RELEASE)
@kustomize build manifests/kustomize/env/kfp-template -o install/$(KFP_TEKTON_RELEASE)/kfp-tekton.yaml
.PHONY: build-backend
build-backend: build-apiserver build-agent build-workflow build-cacheserver ## Verify apiserver, agent, and workflow build
@echo "$@: OK"
.PHONY: build-apiserver
build-apiserver: ## Build apiserver
go build -o apiserver ./backend/src/apiserver
.PHONY: build-agent
build-agent: ## Build agent
go build -o agent ./backend/src/agent/persistence
.PHONY: build-workflow
build-workflow: ## Build workflow
go build -o workflow ./backend/src/crd/controller/scheduledworkflow/*.go
.PHONY: build-cacheserver
build-cacheserver: ## Build cache
go build -o cache ./backend/src/cache/*.go
.PHONY: build-backend-images
build-backend-images: \
build-api-server-image \
build-persistenceagent-image \
build-metadata-writer-image \
build-scheduledworkflow-image \
build-cacheserver-image \
## Build backend docker images
@echo "$@: OK"
.PHONY: build-v2-custom-controller-images
build-v2-custom-controller-images: \
build-tekton-exithandler-controller-image \
build-tekton-exithandler-webhook-image \
build-tekton-kfptask-controller-image \
build-tekton-kfptask-webhook-image \
## Build V2 Tekton custom task controller images
@echo "$@: OK"
.PHONY: build-api-server-image
build-api-server-image: ## Build api-server docker image
docker build -t ${DOCKER_REGISTRY}/api-server -f backend/Dockerfile .
.PHONY: build-persistenceagent-image
build-persistenceagent-image: ## Build persistenceagent docker image
docker build -t ${DOCKER_REGISTRY}/persistenceagent -f backend/Dockerfile.persistenceagent .
.PHONY: build-metadata-writer-image
build-metadata-writer-image: ## Build metadata-writer docker image
docker build -t ${DOCKER_REGISTRY}/metadata-writer -f backend/metadata_writer/Dockerfile .
.PHONY: build-scheduledworkflow-image
build-scheduledworkflow-image: ## Build scheduledworkflow docker image
docker build -t ${DOCKER_REGISTRY}/scheduledworkflow -f backend/Dockerfile.scheduledworkflow .
.PHONY: build-cacheserver-image
build-cacheserver-image: ## Build cacheserver docker image
docker build -t ${DOCKER_REGISTRY}/cache-server -f backend/Dockerfile.cacheserver .
.PHONY: build-tekton-exithandler-controller-image
build-tekton-exithandler-controller-image: ## Build tekton-exithandler-controller docker image
@cd tekton-catalog/tekton-exithandler/ && docker build -t ${DOCKER_REGISTRY}/tekton-exithandler-controller -f Dockerfile.tekton-exithandler.controller .
.PHONY: build-tekton-exithandler-webhook-image
build-tekton-exithandler-webhook-image: ## Build tekton-exithandler-webhook docker image
@cd tekton-catalog/tekton-exithandler/ && docker build -t ${DOCKER_REGISTRY}/tekton-exithandler-webhook -f Dockerfile.tekton-exithandler.webhook .
.PHONY: build-tekton-kfptask-controller-image
build-tekton-kfptask-controller-image: ## Build tekton-kfptask-controller docker image
@cd tekton-catalog/tekton-kfptask/ && docker build -t ${DOCKER_REGISTRY}/tekton-kfptask-controller -f Dockerfile.tekton-kfptask.controller .
.PHONY: build-tekton-kfptask-webhook-image
build-tekton-kfptask-webhook-image: ## Build tekton-kfptask-webhook docker image
@cd tekton-catalog/tekton-kfptask/ && docker build -t ${DOCKER_REGISTRY}/tekton-kfptask-webhook -f Dockerfile.tekton-kfptask.webhook .
.PHONY: build-tekton-driver-image
build-tekton-driver-image: ## Build tekton-driver docker image
@cd tekton-catalog/tekton-driver/ && docker build -t ${DOCKER_REGISTRY}/tekton-driver -f Dockerfile.tektondriver .
.PHONY: run-go-unittests
run-go-unittests: \
run-apiserver-unittests \
run-common-unittests \
run-crd-unittests \
run-persistenceagent-unittests \
run-cacheserver-unittests \
run-tekton-exithandler-unittests \
run-tekton-kfptask-unittests \
## Verify go backend unit tests
@echo "$@: OK"
run-apiserver-unittests: # apiserver golang unit tests
go test -v -cover ./backend/src/apiserver/...
run-common-unittests: # common golang unit tests
go test -v -cover ./backend/src/common/...
run-crd-unittests: # crd golang unit tests
go test -v -cover ./backend/src/crd/...
run-persistenceagent-unittests: # persistence agent golang unit tests
go test -v -cover ./backend/src/agent/...
run-cacheserver-unittests: # cache golang unit tests
go test -v -cover ./backend/src/cache/...
run-tekton-exithandler-unittests: # tekton-exithandler golang unit tests
@cd tekton-catalog/tekton-exithandler/ && go test -v -cover ./...
run-tekton-kfptask-unittests: # tekton-kfptask golang unit tests
@cd tekton-catalog/tekton-kfptask/ && go test -v -cover ./...
run-tekton-driver-unittests: # tekton-driver golang unit tests
@cd tekton-catalog/tekton-driver/ && go test -v -cover ./...

OWNERS
View File

@ -3,10 +3,15 @@ approvers:
- ckadner
- Tomcli
- fenglixa
- pugangxa
- scrapcodes
- yhwang
- rafalbigaj
reviewers:
- animeshsingh
- ckadner
- Tomcli
- fenglixa
- drewbutlerbb4
- pugangxa
- jinchihe
- scrapcodes
- yhwang

View File

@ -1,29 +1,39 @@
# Kubeflow Pipelines with Tekton
# Kubeflow Pipelines on Tekton
Project to bring Kubeflow Pipelines and Tekton together. The project is split in phases and driven according to this [design doc](http://bit.ly/kfp-tekton). The current code allows you run Kubeflow Pipelines with Tekton backend end to end.
Project bringing Kubeflow Pipelines and Tekton together. The project is driven
according to this [design doc](http://bit.ly/kfp-tekton). The current code allows you to run Kubeflow Pipelines with the Tekton backend end to end.
* Create your Pipeline using Kubeflow Pipelines DSL, and compile it to Tekton YAML.
* Upload the compiled Tekton YAML to KFP engine (API and UI), and run end to end with logging and artifacts tracking enabled.
* Create your Pipeline using Kubeflow Pipelines DSL, and compile it to Tekton
YAML.
* Upload the compiled Tekton YAML to KFP engine (API and UI), and run end to end
with logging and artifacts tracking enabled.
* In KFP-Tekton V2, the SDK compiler will generate the same intermediate representation as the main Kubeflow Pipelines SDK. All the Tekton-related implementations are embedded in the V2 backend API service.
For more details about the project, including demos, please look at these [slides](https://www.slideshare.net/AnimeshSingh/kubeflow-pipelines-with-tekton-236769976) and the [deep dive presentation](https://www.youtube.com/watch?v=AYIeNtXLT_k).
For more details about the project, please follow this detailed [blog post](https://developer.ibm.com/blogs/awb-tekton-optimizations-for-kubeflow-pipelines-2-0). For the latest KFP-Tekton V2 implementation and [supported offerings](https://developer.ibm.com/articles/advance-machine-learning-workflows-with-ibm-watson-pipelines/), please follow our latest [Kubecon Talk](https://www.youtube.com/watch?v=ecx-yp4g7YU) and [slides](https://docs.google.com/presentation/d/1Su42ApXzZvVwhNSYRAk3bd0heHOtrdEX/edit?usp=sharing&ouid=103716780892927252554&rtpof=true&sd=true). For information on the KFP-Tekton V1 implementation, look at these [slides](https://www.slideshare.net/AnimeshSingh/kubeflow-pipelines-with-tekton-236769976) as well as this [deep dive presentation](https://www.youtube.com/watch?v=AYIeNtXLT_k) for demos.
## Tekton
## Architecture
The Tekton Pipelines project provides Kubernetes-style resources for declaring CI/CD-style pipelines. Tekton introduces
several new CRDs including Task, Pipeline, TaskRun, and PipelineRun. A PipelineRun represents a single running instance
of a Pipeline and is responsible for creating a Pod for each of its Tasks and as many containers within each Pod as it
has Steps. Please look for more details in [Tekton repo](https://github.com/tektoncd/pipeline).
We are currently using [Kubeflow Pipelines 1.8.4](https://github.com/kubeflow/pipelines/releases/tag/1.8.4) and
[Tekton >= 0.53.2](https://github.com/tektoncd/pipeline/releases/tag/v0.53.2)
in the master branch for this project.
## Kubeflow Pipeline with Tekton Backend
We are currently using [Kubeflow Pipelines 1.0.0](https://github.com/kubeflow/pipelines/releases/tag/1.0.0) and
[Tekton >= 0.14.0](https://github.com/tektoncd/pipeline/releases/tag/v0.14.0) for this project.
For [Kubeflow Pipelines 2.0.5](https://github.com/kubeflow/pipelines/releases/tag/2.0.5) and
[Tekton >= 0.53.2](https://github.com/tektoncd/pipeline/releases/tag/v0.53.2)
integration, please check out the [kfp-tekton v2-integration](https://github.com/kubeflow/kfp-tekton/tree/v2-integration) branch and [KFP-Tekton V2 deployment](/guides/kfp_tekton_install.md#standalone-kubeflow-pipelines-v2-with-tekton-backend-deployment) instead.
![kfp-tekton](images/kfp-tekton.png)
Kubeflow Pipelines is a platform for building and deploying portable, scalable machine learning (ML) workflows. More architectural details about Kubeflow Pipelines can be found on the [Kubeflow website](https://www.kubeflow.org/docs/components/pipelines/overview/).
The Tekton Pipelines project provides Kubernetes-style resources for declaring CI/CD-style pipelines. Tekton introduces several [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) (CRDs), including Task, Pipeline, TaskRun, and PipelineRun. A PipelineRun represents a single running instance of a Pipeline and is responsible for creating a Pod for each of its Tasks and as many containers within each Pod as it has Steps. See the [Tekton repo](https://github.com/tektoncd/pipeline) for more details.
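To tie the pieces together, here is a hedged sketch of submitting a compiled Tekton YAML to the KFP engine with the V1 SDK client; the host URL and file name are illustrative and assume a reachable KFP-Tekton API endpoint:
```python
# Minimal sketch: upload a compiled Tekton YAML to the KFP API and start a run.
# Assumes the V1 kfp-tekton SDK; host and file name are illustrative.
from kfp_tekton import TektonClient

client = TektonClient(host='http://localhost:8888')
# The run then appears in the KFP UI with logs and artifact tracking enabled.
run = client.create_run_from_pipeline_package('echo_pipeline.yaml', arguments={})
print('Started run:', run.run_id)
```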
### Get Started using Kubeflow Pipelines on Tekton
[Install Kubeflow Pipelines with Tekton backend](/guides/kfp_tekton_install.md)
[KFP Tekton Pipelines User Guide](/guides/kfp-user-guide/README.md)
[Use KFP Tekton SDK](/sdk/README.md)
[Available KFP DSL Features](/sdk/FEATURES.md)
[Tekton Specific Features](/guides/advanced_user_guide.md)
### Development Guides
[Backend Developer Guide](/guides/developer_guide.md)
[SDK Developer Guide](/sdk/python/README.md)
[Compilation Tests Status Report](/sdk/python/tests/README.md)
[Kubeflow Slack](https://join.slack.com/t/kubeflow/shared_invite/zt-cpr020z4-PfcAue_2nw67~iIDy7maAQ)
[CD Foundation MLOps Sig](https://cd.foundation/blog/2020/02/11/announcing-the-cd-foundation-mlops-sig/).
[Instructions to join](https://github.com/cdfoundation/sig-mlops)
### References
[Kubeflow and TFX Pipelines](/samples/kfp-tfx)

SECURITY.md
@@ -0,0 +1,17 @@
# Security Policy
## Supported Versions
Below is the list of KFP-Tekton versions that are supported with security and bug fixes.
| Version | Supported |
| ------- | ------------------ |
| 2.0.x | :white_check_mark: |
| 1.9.x | :white_check_mark: |
| 1.8.x | :white_check_mark: |
| 1.7.x | :white_check_mark: |
| < 1.7 | :x: |
## Reporting a Vulnerability
Please submit [an issue](https://github.com/kubeflow/kfp-tekton/issues) in the KFP-Tekton repo for any vulnerability you find in this project. If the vulnerability should not be disclosed publicly, please report it in the Kubeflow Slack channel `#external-kfp-tekton` or via a direct message to `Tommy Li`.

VERSION
@@ -1 +1 @@
0.3.0
1.9.2

WORKSPACE
File diff suppressed because it is too large.
@@ -1,46 +1,40 @@
### Updated base image to golang in order to build with go modules
### Bazel build cannot work with the Tekton library because the current
### KFP Bazel does not support go.mod "replace" on key dependencies.
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ARG BAZEL_IMAGE=golang:1.13.0
FROM $BAZEL_IMAGE as builder
RUN apt-get update && \
apt-get install -y cmake clang musl-dev openssl
# 1. Build api server application
FROM golang:1.19.3-buster as builder
RUN apt-get update && apt-get install -y cmake clang musl-dev openssl
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
RUN GO111MODULE=on go build -o /bin/apiserver backend/src/apiserver/*.go
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/apiserver
RUN go-licenses csv ./backend/src/apiserver > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/apiserver.csv && \
go-licenses save ./backend/src/apiserver --save_path /tmp/NOTICES
ARG google_application_credentials
ARG use_remote_build=false
# RUN bazel build -c opt --action_env=PATH --define=grpc_no_ares=true backend/src/apiserver:apiserver
# RUN if [ "$use_remote_build" = "true" ]; then \
# echo "Using remote build execution ..." && \
# printf "%s" "$google_application_credentials" > /credentials.json && \
# bazel --bazelrc=tools/bazel_builder/bazelrc \
# build -c opt backend/src/apiserver:apiserver --config=remote \
# --google_credentials=/credentials.json; \
# else \
# echo "Using local build execution..." && \
# bazel --bazelrc=tools/bazel_builder/bazelrc \
# build -c opt backend/src/apiserver:apiserver; \
# fi
RUN mkdir -p /go/src/github.com/kubeflow/pipelines/bazel-bin/backend/src/apiserver/
RUN go build -o /go/src/github.com/kubeflow/pipelines/bazel-bin/backend/src/apiserver/apiserver ./backend/src/apiserver
# Compile
FROM python:3.5 as compiler
RUN apt-get update -y && \
apt-get install --no-install-recommends -y -q default-jdk python3-setuptools python3-dev jq
# 2. Compile preloaded pipeline samples
FROM python:3.8 as compiler
RUN apt-get update -y && apt-get install --no-install-recommends -y -q default-jdk python3-setuptools python3-dev jq
RUN wget https://bootstrap.pypa.io/get-pip.py && python3 get-pip.py
COPY backend/requirements.txt .
RUN python3 -m pip install -r requirements.txt
# Downloading Argo CLI so that the samples are validated
#ADD https://github.com/argoproj/argo/releases/download/v2.7.5/argo-linux-amd64 /usr/local/bin/argo
ADD https://github.com/argoproj/argo/releases/download/v2.4.3/argo-linux-amd64 /usr/local/bin/argo
RUN chmod +x /usr/local/bin/argo
COPY sdk/python/requirements.txt .
RUN python3 -m pip install -r requirements.txt --no-cache-dir
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY sdk sdk
@@ -56,12 +50,16 @@ COPY backend/src/apiserver/config/sample_config.json /samples/
RUN set -e; \
< /samples/sample_config.json jq .[].file --raw-output | while read pipeline_yaml; do \
pipeline_py="${pipeline_yaml%.yaml}.py"; \
mode=`< /samples/sample_config.json jq ".[] | select(.file == \"${pipeline_yaml}\") | (if .mode == null then \"V1\" else .mode end)" --raw-output`; \
mv "$pipeline_py" "${pipeline_py}.tmp"; \
echo 'import kfp; kfp.components.default_base_image_or_builder="gcr.io/google-appengine/python:2020-03-31-141326"' | cat - "${pipeline_py}.tmp" > "$pipeline_py"; \
python3 "$pipeline_py"; \
dsl-compile-tekton --py "$pipeline_py" --output "$pipeline_yaml" || python3 "$pipeline_py"; \
done
FROM golang:1.13.0
# 3. Start api web server
FROM debian:stable
ARG COMMIT_SHA=unknown
ENV COMMIT_SHA=${COMMIT_SHA}
@@ -70,18 +68,17 @@ ENV TAG_NAME=${TAG_NAME}
WORKDIR /bin
COPY third_party/license.txt /bin/license.txt
COPY --from=builder /go/src/github.com/kubeflow/pipelines/bazel-bin/backend/src/apiserver/ /usr/local/apiserver
RUN cp /usr/local/apiserver/apiserver /bin/apiserver && \
rm -rf /usr/local/apiserver
COPY backend/src/apiserver/config/ /config
COPY --from=builder /bin/apiserver /bin/apiserver
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
COPY --from=compiler /samples/ /samples/
RUN chmod +x /bin/apiserver
# Adding CA certificate so API server can download pipeline through URL
RUN apt-get update && apt-get install -y ca-certificates \
# wget is used for liveness/readiness probe command
wget
# Adding CA certificate so API server can download pipeline through URL and wget is used for liveness/readiness probe command
RUN apt-get update && apt-get install -y ca-certificates wget
# Pin sample doc links to the commit that built the backend image
# Commented out due to no commit sha for non-release build
@@ -92,4 +89,4 @@ RUN apt-get update && apt-get install -y ca-certificates \
EXPOSE 8888
# Start the apiserver
CMD apiserver --config=/config --sampleconfig=/config/sample_config.json -logtostderr=true
CMD /bin/apiserver --config=/config --sampleconfig=/config/sample_config.json -logtostderr=true

@@ -1,30 +0,0 @@
FROM ubuntu:18.04 as builder
RUN apt-get update \
&& apt-get install -y --no-install-recommends software-properties-common \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
RUN apt-get update \
&& apt-get install -y curl \
openjdk-11-jdk \
openjdk-11-jre-headless \
pkg-config \
zip \
g++ \
git \
zlib1g-dev \
unzip \
python \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
ENV BAZEL_VERSION=0.24.0
RUN mkdir -p /bazel \
&& cd /bazel \
&& curl -fSsL -O https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-dist.zip \
&& unzip bazel-$BAZEL_VERSION-dist.zip \
&& ./compile.sh \
&& cp output/bazel /usr/local/bin \
&& rm -rf /bazel

@@ -1,5 +1,19 @@
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Dockerfile for building the source code of cache_server
FROM golang:1.11-alpine3.7 as builder
FROM golang:1.19.3-alpine3.15 as builder
RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev
@@ -8,13 +22,21 @@ WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
RUN GO111MODULE=on go build -o /bin/cache_server backend/src/cache/*.go
RUN git clone https://github.com/hashicorp/golang-lru.git /kfp/cache/golang-lru/
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/cache
RUN go-licenses csv ./backend/src/cache > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/cache_server.csv && \
go-licenses save ./backend/src/cache --save_path /tmp/NOTICES
FROM alpine:3.8
WORKDIR /bin
COPY --from=builder /bin/cache_server /bin/cache_server
COPY --from=builder /go/src/github.com/kubeflow/pipelines/third_party/license.txt /bin/license.txt
COPY --from=builder /kfp/cache/golang-lru/* /bin/golang-lru/
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
ENTRYPOINT [ "/bin/cache_server" ]
ENTRYPOINT [ "/bin/cache_server" ]

@@ -1,7 +1,20 @@
# Updated golang image to 1.13 since Tekton 0.13 onwards
# requires golang 1.13 in order to build certain packages.
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.13-alpine3.11 as builder
FROM golang:1.19.3-alpine3.15 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
@@ -11,16 +24,35 @@ RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev
RUN GO111MODULE=on go build -o /bin/persistence_agent backend/src/agent/persistence/*.go
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/agent/persistence
RUN go-licenses csv ./backend/src/agent/persistence > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/persistence_agent.csv && \
go-licenses save ./backend/src/agent/persistence --save_path /tmp/NOTICES
FROM alpine:3.11
WORKDIR /bin
COPY backend/src/apiserver/config/ /config
COPY --from=builder /bin/persistence_agent /bin/persistence_agent
COPY --from=builder /go/src/github.com/kubeflow/pipelines/third_party/license.txt /bin/license.txt
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
ENV NAMESPACE ""
# Set Workflow TTL to 7 days
ENV TTL_SECONDS_AFTER_WORKFLOW_FINISH 604800
# Set Workflow TTL to 1 day. The way to use a different value for a particular Kubeflow Pipelines deployment is demonstrated in manifests/kustomize/base/pipeline/ml-pipeline-persistenceagent-deployment.yaml
ENV TTL_SECONDS_AFTER_WORKFLOW_FINISH 86400
CMD persistence_agent --logtostderr=true --namespace=${NAMESPACE} --ttlSecondsAfterWorkflowFinish=${TTL_SECONDS_AFTER_WORKFLOW_FINISH}
# NUM_WORKERS indicates how many worker goroutines to run
ENV NUM_WORKERS 2
# CHILDREFERENCES_KINDS: the kinds of runs to search for in the childReferences status
ENV CHILDREFERENCES_KINDS ""
# LEGACY_STATUS_UPDATE: use the legacy status update method that passes updates via the apiserver
ENV LEGACY_STATUS_UPDATE "false"
CMD persistence_agent --logtostderr=true --namespace=${NAMESPACE} --ttlSecondsAfterWorkflowFinish=${TTL_SECONDS_AFTER_WORKFLOW_FINISH} --numWorker=${NUM_WORKERS} --childReferencesKinds=${CHILDREFERENCES_KINDS} --legacyStatusUpdate=${LEGACY_STATUS_UPDATE} --config=/config

@@ -1,4 +1,18 @@
FROM golang:1.13-alpine3.11 as builder
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.19.3-alpine3.15 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
@@ -8,13 +22,23 @@ RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev
RUN GO111MODULE=on go build -o /bin/controller backend/src/crd/controller/scheduledworkflow/*.go
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/crd/controller/scheduledworkflow
RUN go-licenses csv ./backend/src/crd/controller/scheduledworkflow > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/swf.csv && \
go-licenses save ./backend/src/crd/controller/scheduledworkflow --save_path /tmp/NOTICES
FROM alpine:3.11
WORKDIR /bin
COPY --from=builder /bin/controller /bin/controller
COPY --from=builder /go/src/github.com/kubeflow/pipelines/third_party/license.txt /bin/license.txt
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
RUN chmod +x /bin/controller
RUN apk --no-cache add tzdata
ENV NAMESPACE ""

@@ -1,4 +1,18 @@
FROM golang:alpine as builder
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.17.6-alpine3.15 as builder
RUN apk update && apk upgrade
RUN apk add --no-cache git gcc musl-dev
@@ -6,19 +20,25 @@ RUN apk add --no-cache git gcc musl-dev
WORKDIR /src/github.com/kubeflow/pipelines
COPY . .
RUN go mod vendor
RUN go build -o /bin/controller backend/src/crd/controller/viewer/*.go
RUN GO111MODULE=on go build -o /bin/controller backend/src/crd/controller/viewer/*.go
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/crd/controller/viewer
RUN go-licenses csv ./backend/src/crd/controller/viewer > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/viewer.csv && \
go-licenses save ./backend/src/crd/controller/viewer --save_path /tmp/NOTICES
FROM alpine
WORKDIR /src
COPY --from=builder /src/github.com/kubeflow/pipelines/vendor vendor
WORKDIR /bin
COPY --from=builder /bin/controller /bin/controller
COPY --from=builder /src/github.com/kubeflow/pipelines/third_party/license.txt /bin/license.txt
RUN chmod +x /bin/controller
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
ENV MAX_NUM_VIEWERS "50"
ENV NAMESPACE "kubeflow"

@@ -1,46 +0,0 @@
# This docker file starts server.py (located at src/apiserver/visualization)
# which accepts a post request that resolves to html that depicts a specified
# visualization. More details about this process can be found in the server.py
# and exporter.py files in the directory specified above.
# Copyright 2019-2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM tensorflow/tensorflow:2.1.0-py3
RUN apt-get update \
&& apt-get install -y wget curl tar \
pkg-config libcairo2-dev libgirepository1.0-dev # For the pygobject and pycairo package setup due to licensing
RUN curl https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.tar.gz > /tmp/google-cloud-sdk.tar.gz
RUN mkdir -p /usr/local/gcloud
RUN tar -C /usr/local/gcloud -xf /tmp/google-cloud-sdk.tar.gz
RUN /usr/local/gcloud/google-cloud-sdk/install.sh
ENV PATH $PATH:/usr/local/gcloud/google-cloud-sdk/bin
WORKDIR /src
COPY backend/src/apiserver/visualization/requirements.txt /src
RUN pip3 install -r requirements.txt
COPY backend/src/apiserver/visualization/license.sh /src
COPY backend/src/apiserver/visualization/third_party_licenses.csv /src
COPY backend/src/apiserver/visualization/third_party_licenses /usr/licenses
RUN ./license.sh third_party_licenses.csv /usr/licenses
COPY backend/src/apiserver/visualization /src
ENTRYPOINT [ "python3", "server.py" ]

backend/Makefile
@@ -0,0 +1,60 @@
BUILD=build
MOD_ROOT=..
CSV_PATH=backend/third_party_licenses
# Whenever the build command for any of the binaries changes, we should update it both here and in the backend Dockerfiles.
.PHONY: all
all: license_apiserver license_persistence_agent license_cache_server license_swf license_viewer
.PHONY: clean
clean:
rm -rf $(BUILD)
$(BUILD)/apiserver:
GO111MODULE=on go build -o $(BUILD)/apiserver github.com/kubeflow/pipelines/backend/src/apiserver
$(BUILD)/persistence_agent:
GO111MODULE=on go build -o $(BUILD)/persistence_agent github.com/kubeflow/pipelines/backend/src/agent/persistence
$(BUILD)/cache_server:
GO111MODULE=on go build -o $(BUILD)/cache_server github.com/kubeflow/pipelines/backend/src/cache
$(BUILD)/swf:
GO111MODULE=on go build -o $(BUILD)/swf github.com/kubeflow/pipelines/backend/src/crd/controller/scheduledworkflow
$(BUILD)/viewer:
GO111MODULE=on go build -o $(BUILD)/viewer github.com/kubeflow/pipelines/backend/src/crd/controller/viewer
# Update licenses info after dependencies have changed.
# See README.md#updating-licenses-info section for more details.
.PHONY: license_apiserver
license_apiserver: $(BUILD)/apiserver
cd $(MOD_ROOT) && go-licenses csv ./backend/src/apiserver > $(CSV_PATH)/apiserver.csv
.PHONY: license_persistence_agent
license_persistence_agent: $(BUILD)/persistence_agent
cd $(MOD_ROOT) && go-licenses csv ./backend/src/agent/persistence > $(CSV_PATH)/persistence_agent.csv
.PHONY: license_cache_server
license_cache_server: $(BUILD)/cache_server
cd $(MOD_ROOT) && go-licenses csv ./backend/src/cache > $(CSV_PATH)/cache_server.csv
.PHONY: license_swf
license_swf: $(BUILD)/swf
cd $(MOD_ROOT) && go-licenses csv ./backend/src/crd/controller/scheduledworkflow > $(CSV_PATH)/swf.csv
.PHONY: license_viewer
license_viewer: $(BUILD)/viewer
cd $(MOD_ROOT) && go-licenses csv ./backend/src/crd/controller/viewer > $(CSV_PATH)/viewer.csv
.PHONY: image_all
image_all: image_apiserver image_persistence_agent image_cache image_swf image_viewer image_visualization
.PHONY: image_apiserver
image_apiserver:
cd $(MOD_ROOT) && docker build -t apiserver -f backend/Dockerfile .
.PHONY: image_persistence_agent
image_persistence_agent:
cd $(MOD_ROOT) && docker build -t persistence-agent -f backend/Dockerfile.persistenceagent .
.PHONY: image_cache
image_cache:
cd $(MOD_ROOT) && docker build -t cache-server -f backend/Dockerfile.cacheserver .
.PHONY: image_swf
image_swf:
cd $(MOD_ROOT) && docker build -t scheduledworkflow -f backend/Dockerfile.scheduledworkflow .
.PHONY: image_viewer
image_viewer:
cd $(MOD_ROOT) && docker build -t viewercontroller -f backend/Dockerfile.viewercontroller .

@@ -3,17 +3,23 @@ Pipelines backend.
## Building & Testing
To run all unit tests for backend:
```
go test -v -cover ./backend/...
```
To run the API server unit tests:
```
go test -v -cover ./backend/src/apiserver/...
```
The API server itself can be built using:
```
go build -o /tmp/apiserver backend/src/apiserver/*.go
```
## Building APIServer Image using Remote Build Execution
@@ -26,28 +32,37 @@ speeding up the build. To do so, execute the following command:
./build_api_server.sh -i gcr.io/cloud-ml-pipelines-test/api-server:dev
```
## Building APIServer image locally
The API server image can be built from the root folder of the repo using:
```
export API_SERVER_IMAGE=api_server
docker build -f backend/Dockerfile . --tag $API_SERVER_IMAGE
```
## Building client library and swagger files
After making changes to proto files, the Go client libraries, Python client libraries and swagger files
need to be regenerated and checked-in. Refer to [backend/api](./api/README.md) for details.
## Updating licenses info
1. Install go-licenses tool and refer to [its documentation](https://github.com/google/go-licenses) for how to use it.
```bash
go install github.com/google/go-licenses@d483853
```
2. Run the tool to update all licenses:
```bash
make all
```
## Updating python dependencies
[pip-tools](https://github.com/jazzband/pip-tools) is used to manage python
dependencies. To update dependencies, edit [requirements.in](requirements.in)
and run `./update_requirements.sh` to update and pin the transitive dependencies.

backend/api/.gitignore
@@ -0,0 +1,2 @@
# Makefile local token to indicate whether latest image has been built
.image-built

@@ -1,63 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_path")
load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
load("@com_github_grpc_ecosystem_grpc_gateway//protoc-gen-swagger:defs.bzl", "protoc_gen_swagger")
proto_library(
name = "go_client_proto",
srcs = [
"auth.proto",
"error.proto",
"experiment.proto",
"filter.proto",
"job.proto",
"parameter.proto",
"pipeline.proto",
"pipeline_spec.proto",
"report.proto",
"resource_reference.proto",
"run.proto",
"visualization.proto",
],
visibility = ["//visibility:public"],
deps = [
"@com_github_grpc_ecosystem_grpc_gateway//protoc-gen-swagger/options:options_proto",
"@com_google_protobuf//:any_proto",
"@com_google_protobuf//:empty_proto",
"@com_google_protobuf//:timestamp_proto",
"@go_googleapis//google/api:annotations_proto",
],
)
go_proto_library(
name = "go_client_go_proto",
compilers = [
"@io_bazel_rules_go//proto:go_grpc",
"@com_github_grpc_ecosystem_grpc_gateway//protoc-gen-grpc-gateway:go_gen_grpc_gateway",
],
importpath = "github.com/kubeflow/pipelines/backend/api/go_client", # keep
proto = ":go_client_proto",
visibility = ["//visibility:public"],
deps = [
"@com_github_grpc_ecosystem_grpc_gateway//protoc-gen-swagger/options:go_default_library",
"@go_googleapis//google/api:annotations_go_proto",
],
)
go_library(
name = "go_default_library",
embed = [":go_client_go_proto"],
importpath = "github.com/kubeflow/pipelines/backend/api/go_client",
visibility = ["//visibility:public"],
)
protoc_gen_swagger(
name = "api_swagger",
proto = ":go_client_proto",
)
go_path(
name = "api_generated_go_sources",
deps = [
":go_client_go_proto",
],
)

backend/api/Dockerfile
@@ -0,0 +1,59 @@
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generate client code (go & json) from API protocol buffers
FROM golang:1.15.10 as generator
ENV GRPC_GATEWAY_VERSION v2.11.3
ENV GO_SWAGGER_VERSION v0.30.4
ENV GOLANG_PROTOBUF_VERSION v1.5.2
ENV GRPC_VERSION v1.48.0
ENV PROTOC_VERSION 3.17.3
ENV GOBIN=/go/bin
# Install protoc.
RUN apt-get update -y && apt-get install -y jq sed unzip
RUN curl -L -o protoc.zip https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip
RUN unzip -o protoc.zip -d /usr/ bin/protoc
RUN unzip -o protoc.zip -d /usr/ 'include/*'
RUN rm -f protoc.zip
ENV PROTOCCOMPILER /usr/bin/protoc
ENV PROTOCINCLUDE /usr/include/google/protobuf
# Need grpc-gateway source code for -I in protoc command.
WORKDIR /go/src/github.com
RUN mkdir grpc-ecosystem && cd grpc-ecosystem && git clone --depth 1 --branch $GRPC_GATEWAY_VERSION https://github.com/grpc-ecosystem/grpc-gateway.git
RUN mkdir grpc && git clone --depth 1 --branch $GRPC_VERSION https://github.com/grpc/grpc-go
# Install protoc-gen-rpc-gateway && protoc-gen-swagger.
RUN cd grpc-ecosystem/grpc-gateway && GO111MODULE=on go mod vendor
RUN go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
RUN go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-openapiv2
# Download go-swagger binary.
# swagger doesn't exist for openapiv2 yet
RUN curl -LO "https://github.com/go-swagger/go-swagger/releases/download/${GO_SWAGGER_VERSION}/swagger_linux_amd64"
RUN chmod +x swagger_linux_amd64 && mv swagger_linux_amd64 /usr/bin/swagger
# Need protobuf source code for -I in protoc command.
RUN mkdir golang && cd golang && git clone --depth 1 --branch $GOLANG_PROTOBUF_VERSION https://github.com/golang/protobuf.git
# Install protoc-gen-go.
RUN cd golang/protobuf && GO111MODULE=on go mod vendor
RUN go install github.com/golang/protobuf/protoc-gen-go
# WORKAROUND: https://github.com/docker-library/golang/issues/225#issuecomment-403170792
ENV XDG_CACHE_HOME /tmp/.cache
# Make all files accessible to non-root users.
RUN chmod -R 777 /usr/bin/
RUN chmod -R 777 /usr/include/google
RUN chmod -R 777 /go

@@ -1,10 +1,10 @@
# Copyright 2018 Google LLC
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,119 +12,46 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Makefile to generate KFP api clients from proto.
# Building requires protobuf version 3.6.0 or above. Unzip this directory to a
# suitable location and modify the values of the following two variables before
# running make.
protocompiler = ${HOME}/protobuf-3.6.1/bin/protoc
protoinclude = ${HOME}/protobuf-3.6.1/include
IMAGE_TAG=kfp-api-generator
# Contact Bobgy or zijianjoy if this remote image needs an update.
REMOTE_IMAGE=gcr.io/ml-pipeline-test/api-generator
PREBUILT_REMOTE_IMAGE=aipipeline/api-generator:openapiv2.11.3
# PREBUILT_REMOTE_IMAGE=aipipeline/api-generator:test
API_VERSION=v1
TMPDIR := $(shell mktemp -d)
# Generate clients using a pre-built api-generator image.
.PHONY: generate
generate: hack/generator.sh $(API_VERSION)/*.proto
docker run --interactive --rm \
-e API_VERSION=$(API_VERSION) \
--user $$(id -u):$$(id -g) \
--mount type=bind,source="$$(pwd)/../..",target=/go/src/github.com/kubeflow/pipelines \
$(PREBUILT_REMOTE_IMAGE) /go/src/github.com/kubeflow/pipelines/backend/api/hack/generator.sh
default: all
# Generate clients starting by building api-generator image locally.
# Note, this should only be used for local development purposes. Once any change is made to the Dockerfile,
# we should push the new image remotely to ensure everyone is using the same tools.
.PHONY: generate-from-scratch
generate-from-scratch: .image-built hack/generator.sh $(API_VERSION)/*.proto
docker run --interactive --rm \
-e API_VERSION=$(API_VERSION) \
--user $$(id -u):$$(id -g) \
--mount type=bind,source="$$(pwd)/../..",target=/go/src/github.com/kubeflow/pipelines \
$(IMAGE_TAG) /go/src/github.com/kubeflow/pipelines/backend/api/hack/generator.sh
dependencies:
@echo "Using temporary directory: $(TMPDIR)"
GOBIN=$(TMPDIR) go install ../../vendor/github.com/go-swagger/go-swagger/cmd/swagger
GOBIN=$(TMPDIR) go install ../../vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
GOBIN=$(TMPDIR) go install ../../vendor/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
GOBIN=$(TMPDIR) go install ../../vendor/github.com/golang/protobuf/protoc-gen-go
# Build a local api-generator image.
.PHONY: image
image: .image-built
all: dependencies
# Push api-generator image remotely.
.PHONY: push
push: image
docker tag $(IMAGE_TAG) $(REMOTE_IMAGE)
docker push $(REMOTE_IMAGE)
# Delete currently generated code.
rm -r -f go_http_client/*
rm -r -f go_client/*
# Compile the *.proto files into *.pb.go (grpc client).
$(protocompiler) -I$(protoinclude) -I/usr/local/include -I. \
-I../../vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
-I../../vendor/github.com/grpc-ecosystem/grpc-gateway/ \
--plugin=protoc-gen-go=${TMPDIR}/protoc-gen-go \
--go_out=plugins=grpc:go_client \
*.proto
# Compile the *.proto files into *.pb.gw.go (grpc client).
$(protocompiler) -I$(protoinclude) -I/usr/local/include -I. \
-I../../vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
-I../../vendor/github.com/grpc-ecosystem/grpc-gateway/ \
--plugin=protoc-gen-grpc-gateway=${TMPDIR}/protoc-gen-grpc-gateway \
--grpc-gateway_out=logtostderr=true:go_client \
*.proto
# Filter.proto is implicitly used by clients and server, and transmitted as a
# serialized string in ListXXX requests. This hack defines a dummy service to
# coerce protoc-gen-swagger to generate Swagger JSON definitions for
# filter.proto.
cp -f filter.proto filter.proto.bak
printf "service DummyFilterService {\nrpc DoFilter(Filter) returns (Filter) {\n option (google.api.http) = { \nget: \"/apis/v1beta1/filters\"\n};\n }}" >> filter.proto
# Compile the *.proto files into *.swagger.json (swagger specification).
$(protocompiler) -I$(protoinclude) -I/usr/local/include -I. \
-I../../vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
-I../../vendor/github.com/grpc-ecosystem/grpc-gateway/ \
--plugin=protoc-gen-swagger=${TMPDIR}/protoc-gen-swagger \
--swagger_out=logtostderr=true:swagger \
*.proto
# Revert the dummy service definition.
mv -f filter.proto.bak filter.proto
# Compile the *.swagger.json into go REST clients.
# Instructions to install the 'swagger' command are located here:
# https://github.com/go-swagger/go-swagger
# swagger v0.17.0 was last used.
$(TMPDIR)/swagger generate client \
-f swagger/job.swagger.json \
-A job \
--principal models.Principal \
-c job_client \
-m job_model \
-t go_http_client
$(TMPDIR)/swagger generate client \
-f swagger/run.swagger.json \
-A run \
--principal models.Principal \
-c run_client \
-m run_model \
-t go_http_client
$(TMPDIR)/swagger generate client \
-f swagger/experiment.swagger.json \
-A experiment \
--principal models.Principal \
-c experiment_client \
-m experiment_model \
-t go_http_client
$(TMPDIR)/swagger generate client \
-f swagger/pipeline.upload.swagger.json \
-A pipeline_upload \
--principal models.Principal \
-c pipeline_upload_client \
-m pipeline_upload_model \
-t go_http_client
$(TMPDIR)/swagger generate client \
-f swagger/pipeline.swagger.json \
-A pipeline \
--principal models.Principal \
-c pipeline_client \
-m pipeline_model \
-t go_http_client
# Hack to fix an issue with go-swagger
# See https://github.com/go-swagger/go-swagger/issues/1381 for details.
sed -i -- 's/MaxConcurrency int64 `json:"max_concurrency,omitempty"`/MaxConcurrency int64 `json:"max_concurrency,omitempty,string"`/g' ./go_http_client/job_model/api_job.go
sed -i -- 's/IntervalSecond int64 `json:"interval_second,omitempty"`/IntervalSecond int64 `json:"interval_second,omitempty,string"`/g' ./go_http_client/job_model/api_periodic_schedule.go
sed -i -- 's/MaxConcurrency string `json:"max_concurrency,omitempty"`/MaxConcurrency int64 `json:"max_concurrency,omitempty,string"`/g' ./go_http_client/job_model/api_job.go
sed -i -- 's/IntervalSecond string `json:"interval_second,omitempty"`/IntervalSecond int64 `json:"interval_second,omitempty,string"`/g' ./go_http_client/job_model/api_periodic_schedule.go
# Executes the //go:generate directives in the generated code.
go generate ./...
# Add licenses to the generated files.
# Instructions to install the 'autogen' command are located here:
# https://github.com/mbrukman/autogen
find ./ -name "*.go" -exec autogen.sh -i --no-tlc -c "Google LLC" -l apache {} \;
@echo "Cleaning $(TMPDIR)"
rm -r -f $(TMPDIR)
# .image-built is a local token file to help Make determine the latest successful build.
.image-built: Dockerfile
docker build ../.. -t $(IMAGE_TAG) -f Dockerfile
touch .image-built

backend/api/README.md
@@ -0,0 +1,55 @@
# Kubeflow Pipelines API
## Before You Start
Tools needed:
* Docker
* Make
## Auto-generation of Go client and swagger definitions
Use the `make generate` command to generate clients using a pre-built api-generator image:
```bash
make generate
```
Code will be generated in:
* `./${API_VERSION}/go_client`
* `./${API_VERSION}/go_http_client`
* `./${API_VERSION}/swagger`
## Auto-generation of Python client
This will generate the Python client for the API version specified in the `API_VERSION` environment variable.
```bash
./build_kfp_server_api_python_package.sh
```
Code will be generated in `./${API_VERSION}/python_http_client`.
## Auto-generation of API reference documentation
This directory contains API definitions. They are used to generate [the API reference on kubeflow.org](https://www.kubeflow.org/docs/pipelines/reference/api/kubeflow-pipeline-api-spec/).
* Use the tools [bootprint-openapi](https://github.com/bootprint/bootprint-monorepo/tree/master/packages/bootprint-openapi) and [html-inline](https://github.com/substack/html-inline) to generate the API reference from [kfp_api_single_file.swagger.json](https://github.com/kubeflow/pipelines/blob/master/backend/api/${API_VERSION}/swagger/kfp_api_single_file.swagger.json). These [instructions](https://github.com/bootprint/bootprint-monorepo/tree/master/packages/bootprint-openapi#bootprint-openapi) show how to generate *a single self-contained html file*, the API reference, from a json file.
* Use the above generated html to replace the html section, which is below the title section, in the file [kubeflow-pipeline-api-spec.html](https://github.com/kubeflow/website/blob/master/content/en/docs/pipelines/reference/api/kubeflow-pipeline-api-spec.html)
Note: whenever the API definition changes (i.e., the file [kfp_api_single_file.swagger.json](https://github.com/kubeflow/pipelines/blob/master/backend/api/${API_VERSION}/swagger/kfp_api_single_file.swagger.json) changes), the API reference needs to be updated.
## Auto-generation of api generator image
```bash
make push
```
When you update the [Dockerfile](./Dockerfile), do the following to make sure others are using the same image as you do:
1. push a new version of the api generator image to gcr.io/ml-pipeline-test/api-generator:latest.
2. update the PREBUILT_REMOTE_IMAGE var in Makefile to point to your new image.
3. push a new version of the release tools image to gcr.io/ml-pipeline-test/release:latest, run `make push` in [test/release/Makefile](../../test/release/Makefile).

@@ -1,6 +1,6 @@
#!/bin/bash -e
#
# Copyright 2018-2020 Google LLC
# Copyright 2018-2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -34,6 +34,8 @@ if [ -z "$VERSION" ]; then
echo "ERROR: $REPO_ROOT/VERSION is empty"
exit 1
fi
API_VERSION=v1
codegen_file=/tmp/openapi-generator-cli.jar
# Browse all versions in: https://repo1.maven.org/maven2/org/openapitools/openapi-generator-cli/
@@ -45,17 +47,17 @@ fi
pushd "$(dirname "$0")"
CURRENT_DIR="$(pwd)"
DIR="$CURRENT_DIR/python_http_client"
swagger_file="$CURRENT_DIR/swagger/kfp_api_single_file.swagger.json"
DIR="$CURRENT_DIR/$API_VERSION/python_http_client"
swagger_file="$CURRENT_DIR/$API_VERSION/swagger/kfp_api_single_file.swagger.json"
echo "Removing old content in DIR first."
rm -rf "$DIR"
echo "Generating python code from swagger json in $DIR."
java -jar "$codegen_file" generate -g python -t "$CURRENT_DIR/python_http_client_template" -i "$swagger_file" -o "$DIR" -c <(echo '{
"packageName": "kfp_server_api",
java -jar "$codegen_file" generate -g python -t "$CURRENT_DIR/$API_VERSION/python_http_client_template" -i "$swagger_file" -o "$DIR" -c <(echo '{
"packageName": "'"kfp_tekton_server_api"'",
"packageVersion": "'"$VERSION"'",
"packageUrl": "https://github.com/kubeflow/pipelines"
"packageUrl": "https://github.com/kubeflow/kfp-tekton"
}')
echo "Copying LICENSE to $DIR"
@@ -66,10 +68,6 @@ pushd "$DIR"
python3 setup.py --quiet sdist
popd
echo "Adding license header for generated python files in $DIR."
go get -u github.com/google/addlicense
addlicense "$DIR"
echo "Run the following commands to update the package on PyPI"
echo "python3 -m pip install twine"
echo "python3 -m twine upload --username kubeflow-pipelines $DIR/dist/*"

@@ -1,136 +0,0 @@
#!/bin/bash
# Copyright 2018-2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file generates API sources from the protocol buffers defined in this
# directory using Bazel, then copies them back into the source tree so they can
# be checked-in.
set -ex
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null && pwd)"
REPO_ROOT="$DIR/../.."
VERSION="$(cat $REPO_ROOT/VERSION)"
if [ -z "$VERSION" ]; then
echo "ERROR: $REPO_ROOT/VERSION is empty"
exit 1
fi
BAZEL_BINDIR=$(bazel info bazel-bin)
SWAGGER_CMD=${DIR}/../../bazel-bin/external/com_github_go_swagger/cmd/swagger/*stripped/swagger
AUTOGEN_CMD="${DIR}/../../bazel-bin/external/com_github_mbrukman_autogen/autogen_tool"
GENERATED_GO_PROTO_FILES="${BAZEL_BINDIR}/backend/api/api_generated_go_sources/src/github.com/kubeflow/pipelines/backend/api/go_client/*.go"
# TODO this script should be able to be run from anywhere, not just within .../backend/api/
# Delete currently generated code.
rm -r -f ${DIR}/go_http_client/*
rm -r -f ${DIR}/go_client/*
# Build required tools.
bazel build @com_github_mbrukman_autogen//:autogen_tool
bazel build @com_github_go_swagger//cmd/swagger
# Build .pb.go and .gw.pb.go files from the proto sources.
bazel build //backend/api:api_generated_go_sources
# Copy the generated files into the source tree and add license.
for f in $GENERATED_GO_PROTO_FILES; do
target=${DIR}/go_client/$(basename ${f})
cp $f $target
chmod 766 $target
${AUTOGEN_CMD} -i --no-tlc -c "Google LLC" -l apache $target
done
# Generate and copy back into source tree .swagger.json files.
bazel build //backend/api:api_swagger
cp ${BAZEL_BINDIR}/backend/api/*.swagger.json ${DIR}/swagger
jq -s '
reduce .[] as $item ({}; . * $item) |
.info.title = "Kubeflow Pipelines API" |
.info.description = "This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition." |
.info.version = "'$VERSION'" |
.info.contact = { "name": "google", "email": "kubeflow-pipelines@google.com", "url": "https://www.google.com" } |
.info.license = { "name": "Apache 2.0", "url": "https://raw.githubusercontent.com/kubeflow/pipelines/master/LICENSE" }
' ${DIR}/swagger/{run,job,pipeline,experiment,pipeline.upload}.swagger.json > "${DIR}/swagger/kfp_api_single_file.swagger.json"
# Generate Go HTTP client from the swagger files.
${SWAGGER_CMD} generate client \
-f ${DIR}/swagger/job.swagger.json \
-A job \
--principal models.Principal \
-c job_client \
-m job_model \
-t ${DIR}/go_http_client
${SWAGGER_CMD} generate client \
-f ${DIR}/swagger/run.swagger.json \
-A run \
--principal models.Principal \
-c run_client \
-m run_model \
-t ${DIR}/go_http_client
${SWAGGER_CMD} generate client \
-f ${DIR}/swagger/experiment.swagger.json \
-A experiment \
--principal models.Principal \
-c experiment_client \
-m experiment_model \
-t ${DIR}/go_http_client
${SWAGGER_CMD} generate client \
-f ${DIR}/swagger/pipeline.upload.swagger.json \
-A pipeline_upload \
--principal models.Principal \
-c pipeline_upload_client \
-m pipeline_upload_model \
-t ${DIR}/go_http_client
${SWAGGER_CMD} generate client \
-f ${DIR}/swagger/pipeline.swagger.json \
-A pipeline \
--principal models.Principal \
-c pipeline_client \
-m pipeline_model \
-t ${DIR}/go_http_client
${SWAGGER_CMD} generate client \
-f ${DIR}/swagger/visualization.swagger.json \
-A visualization \
--principal models.Principal \
-c visualization_client \
-m visualization_model \
-t ${DIR}/go_http_client
# Hack to fix an issue with go-swagger
# See https://github.com/go-swagger/go-swagger/issues/1381 for details.
sed -i -- 's/MaxConcurrency int64 `json:"max_concurrency,omitempty"`/MaxConcurrency int64 `json:"max_concurrency,omitempty,string"`/g' ${DIR}/go_http_client/job_model/api_job.go
sed -i -- 's/IntervalSecond int64 `json:"interval_second,omitempty"`/IntervalSecond int64 `json:"interval_second,omitempty,string"`/g' ${DIR}/go_http_client/job_model/api_periodic_schedule.go
sed -i -- 's/MaxConcurrency string `json:"max_concurrency,omitempty"`/MaxConcurrency int64 `json:"max_concurrency,omitempty,string"`/g' ${DIR}/go_http_client/job_model/api_job.go
sed -i -- 's/IntervalSecond string `json:"interval_second,omitempty"`/IntervalSecond int64 `json:"interval_second,omitempty,string"`/g' ${DIR}/go_http_client/job_model/api_periodic_schedule.go
# Executes the //go:generate directives in the generated code.
go generate ./...
# Add license to files in go_http_client.
find ${DIR}/go_http_client/ -name "*.go" -exec ${AUTOGEN_CMD} -i --no-tlc -c "Google LLC" -l apache {} \;
# Finally, run gazelle to add BUILD files for the generated code.
bazel run //:gazelle
# HACK: remove unnecessary BUILD.bazels
rm -f "$REPO_ROOT/sdk/python/kfp/components/structures/BUILD.bazel" "$REPO_ROOT/tools/metadatastore-upgrade/BUILD.bazel"

@@ -1,43 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"auth.pb.go",
"auth.pb.gw.go",
"error.pb.go",
"experiment.pb.go",
"experiment.pb.gw.go",
"filter.pb.go",
"job.pb.go",
"job.pb.gw.go",
"parameter.pb.go",
"pipeline.pb.go",
"pipeline.pb.gw.go",
"pipeline_spec.pb.go",
"report.pb.go",
"report.pb.gw.go",
"resource_reference.pb.go",
"run.pb.go",
"run.pb.gw.go",
"visualization.pb.go",
"visualization.pb.gw.go",
],
importpath = "github.com/kubeflow/pipelines/backend/api/go_client",
visibility = ["//visibility:public"],
deps = [
"@com_github_golang_protobuf//proto:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway//protoc-gen-swagger/options:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway//runtime:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway//utilities:go_default_library",
"@go_googleapis//google/api:annotations_go_proto",
"@io_bazel_rules_go//proto/wkt:any_go_proto",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_google_grpc//grpclog:go_default_library",
"@org_golang_google_grpc//status:go_default_library",
"@org_golang_x_net//context:go_default_library",
],
)

@@ -1,260 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/auth.proto
package go_client // import "github.com/kubeflow/pipelines/backend/api/go_client"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import empty "github.com/golang/protobuf/ptypes/empty"
import _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type AuthorizeRequest_Resources int32
const (
AuthorizeRequest_UNASSIGNED_RESOURCES AuthorizeRequest_Resources = 0
AuthorizeRequest_VIEWERS AuthorizeRequest_Resources = 1
)
var AuthorizeRequest_Resources_name = map[int32]string{
0: "UNASSIGNED_RESOURCES",
1: "VIEWERS",
}
var AuthorizeRequest_Resources_value = map[string]int32{
"UNASSIGNED_RESOURCES": 0,
"VIEWERS": 1,
}
func (x AuthorizeRequest_Resources) String() string {
return proto.EnumName(AuthorizeRequest_Resources_name, int32(x))
}
func (AuthorizeRequest_Resources) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_auth_b463ef3269931e86, []int{0, 0}
}
type AuthorizeRequest_Verb int32
const (
AuthorizeRequest_UNASSIGNED_VERB AuthorizeRequest_Verb = 0
AuthorizeRequest_CREATE AuthorizeRequest_Verb = 1
AuthorizeRequest_GET AuthorizeRequest_Verb = 2
AuthorizeRequest_DELETE AuthorizeRequest_Verb = 3
)
var AuthorizeRequest_Verb_name = map[int32]string{
0: "UNASSIGNED_VERB",
1: "CREATE",
2: "GET",
3: "DELETE",
}
var AuthorizeRequest_Verb_value = map[string]int32{
"UNASSIGNED_VERB": 0,
"CREATE": 1,
"GET": 2,
"DELETE": 3,
}
func (x AuthorizeRequest_Verb) String() string {
return proto.EnumName(AuthorizeRequest_Verb_name, int32(x))
}
func (AuthorizeRequest_Verb) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_auth_b463ef3269931e86, []int{0, 1}
}
type AuthorizeRequest struct {
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
Resources AuthorizeRequest_Resources `protobuf:"varint,2,opt,name=resources,proto3,enum=api.AuthorizeRequest_Resources" json:"resources,omitempty"`
Verb AuthorizeRequest_Verb `protobuf:"varint,3,opt,name=verb,proto3,enum=api.AuthorizeRequest_Verb" json:"verb,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AuthorizeRequest) Reset() { *m = AuthorizeRequest{} }
func (m *AuthorizeRequest) String() string { return proto.CompactTextString(m) }
func (*AuthorizeRequest) ProtoMessage() {}
func (*AuthorizeRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_auth_b463ef3269931e86, []int{0}
}
func (m *AuthorizeRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AuthorizeRequest.Unmarshal(m, b)
}
func (m *AuthorizeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AuthorizeRequest.Marshal(b, m, deterministic)
}
func (dst *AuthorizeRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_AuthorizeRequest.Merge(dst, src)
}
func (m *AuthorizeRequest) XXX_Size() int {
return xxx_messageInfo_AuthorizeRequest.Size(m)
}
func (m *AuthorizeRequest) XXX_DiscardUnknown() {
xxx_messageInfo_AuthorizeRequest.DiscardUnknown(m)
}
var xxx_messageInfo_AuthorizeRequest proto.InternalMessageInfo
func (m *AuthorizeRequest) GetNamespace() string {
if m != nil {
return m.Namespace
}
return ""
}
func (m *AuthorizeRequest) GetResources() AuthorizeRequest_Resources {
if m != nil {
return m.Resources
}
return AuthorizeRequest_UNASSIGNED_RESOURCES
}
func (m *AuthorizeRequest) GetVerb() AuthorizeRequest_Verb {
if m != nil {
return m.Verb
}
return AuthorizeRequest_UNASSIGNED_VERB
}
func init() {
proto.RegisterType((*AuthorizeRequest)(nil), "api.AuthorizeRequest")
proto.RegisterEnum("api.AuthorizeRequest_Resources", AuthorizeRequest_Resources_name, AuthorizeRequest_Resources_value)
proto.RegisterEnum("api.AuthorizeRequest_Verb", AuthorizeRequest_Verb_name, AuthorizeRequest_Verb_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// AuthServiceClient is the client API for AuthService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type AuthServiceClient interface {
Authorize(ctx context.Context, in *AuthorizeRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}
type authServiceClient struct {
cc *grpc.ClientConn
}
func NewAuthServiceClient(cc *grpc.ClientConn) AuthServiceClient {
return &authServiceClient{cc}
}
func (c *authServiceClient) Authorize(ctx context.Context, in *AuthorizeRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/api.AuthService/Authorize", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// AuthServiceServer is the server API for AuthService service.
type AuthServiceServer interface {
Authorize(context.Context, *AuthorizeRequest) (*empty.Empty, error)
}
func RegisterAuthServiceServer(s *grpc.Server, srv AuthServiceServer) {
s.RegisterService(&_AuthService_serviceDesc, srv)
}
func _AuthService_Authorize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AuthorizeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AuthServiceServer).Authorize(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.AuthService/Authorize",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AuthServiceServer).Authorize(ctx, req.(*AuthorizeRequest))
}
return interceptor(ctx, in, info, handler)
}
var _AuthService_serviceDesc = grpc.ServiceDesc{
ServiceName: "api.AuthService",
HandlerType: (*AuthServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Authorize",
Handler: _AuthService_Authorize_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/auth.proto",
}
func init() { proto.RegisterFile("backend/api/auth.proto", fileDescriptor_auth_b463ef3269931e86) }
var fileDescriptor_auth_b463ef3269931e86 = []byte{
// 460 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0xc1, 0x6e, 0xd3, 0x40,
0x14, 0x8c, 0x9d, 0x2a, 0xc1, 0x2f, 0x94, 0x9a, 0x6d, 0x29, 0x91, 0x09, 0x6a, 0x94, 0x53, 0x0f,
0xd4, 0x56, 0xd3, 0x2b, 0x1c, 0x92, 0x76, 0x55, 0x55, 0x82, 0x22, 0xad, 0xd3, 0x20, 0xf5, 0x52,
0xad, 0xdd, 0x17, 0x67, 0x55, 0xc7, 0x6b, 0xd6, 0xeb, 0x54, 0x70, 0x44, 0xe2, 0x03, 0x80, 0x4f,
0xe3, 0x17, 0xf8, 0x10, 0xe4, 0x4d, 0x9a, 0x46, 0x90, 0xd3, 0x6a, 0xdf, 0xcc, 0x9b, 0x19, 0x69,
0x1e, 0xec, 0x47, 0x3c, 0xbe, 0xc3, 0xec, 0x36, 0xe0, 0xb9, 0x08, 0x78, 0xa9, 0xa7, 0x7e, 0xae,
0xa4, 0x96, 0xa4, 0xce, 0x73, 0xe1, 0x75, 0x12, 0x29, 0x93, 0x14, 0x17, 0x58, 0x96, 0x49, 0xcd,
0xb5, 0x90, 0x59, 0xb1, 0xa0, 0x78, 0xaf, 0x96, 0xa8, 0xf9, 0x45, 0xe5, 0x24, 0xc0, 0x59, 0xae,
0xbf, 0x2c, 0xc1, 0x97, 0xeb, 0xba, 0xa8, 0x94, 0x54, 0x4b, 0xe0, 0x8d, 0x79, 0xe2, 0xa3, 0x04,
0xb3, 0xa3, 0xe2, 0x9e, 0x27, 0x09, 0xaa, 0x40, 0xe6, 0x46, 0xf7, 0x7f, 0x8f, 0xde, 0x0f, 0x1b,
0xdc, 0x41, 0xa9, 0xa7, 0x52, 0x89, 0xaf, 0xc8, 0xf0, 0x73, 0x89, 0x85, 0x26, 0x1d, 0x70, 0x32,
0x3e, 0xc3, 0x22, 0xe7, 0x31, 0xb6, 0xad, 0xae, 0x75, 0xe8, 0xb0, 0xc7, 0x01, 0x79, 0x07, 0x8e,
0xc2, 0x42, 0x96, 0x2a, 0xc6, 0xa2, 0x6d, 0x77, 0xad, 0xc3, 0x67, 0xfd, 0x03, 0x9f, 0xe7, 0xc2,
0xff, 0x57, 0xc7, 0x67, 0x0f, 0x34, 0xf6, 0xb8, 0x41, 0x7c, 0xd8, 0x9a, 0xa3, 0x8a, 0xda, 0x75,
0xb3, 0xe9, 0x6d, 0xde, 0x1c, 0xa3, 0x8a, 0x98, 0xe1, 0xf5, 0xfa, 0xe0, 0xac, 0x74, 0x48, 0x1b,
0xf6, 0xae, 0x2e, 0x07, 0x61, 0x78, 0x71, 0x7e, 0x49, 0xcf, 0x6e, 0x18, 0x0d, 0x3f, 0x5e, 0xb1,
0x53, 0x1a, 0xba, 0x35, 0xd2, 0x82, 0xe6, 0xf8, 0x82, 0x7e, 0xa2, 0x2c, 0x74, 0xad, 0xde, 0x5b,
0xd8, 0xaa, 0x14, 0xc8, 0x2e, 0xec, 0xac, 0xd1, 0xc7, 0x94, 0x0d, 0xdd, 0x1a, 0x01, 0x68, 0x9c,
0x32, 0x3a, 0x18, 0x51, 0xd7, 0x22, 0x4d, 0xa8, 0x9f, 0xd3, 0x91, 0x6b, 0x57, 0xc3, 0x33, 0xfa,
0x9e, 0x8e, 0xa8, 0x5b, 0xef, 0x23, 0xb4, 0xaa, 0x40, 0x21, 0xaa, 0xb9, 0x88, 0x91, 0x8c, 0xc1,
0x59, 0xe5, 0x23, 0x2f, 0x36, 0xe6, 0xf5, 0xf6, 0xfd, 0x45, 0x57, 0xfe, 0x43, 0x57, 0x3e, 0xad,
0xba, 0xea, 0x79, 0xdf, 0x7e, 0xff, 0xf9, 0x65, 0xef, 0x11, 0x52, 0xd5, 0x54, 0x04, 0xf3, 0xe3,
0x08, 0x35, 0x3f, 0x36, 0x77, 0x30, 0xfc, 0x6e, 0xfd, 0x1c, 0x7c, 0x60, 0x1d, 0x68, 0xde, 0xe2,
0x84, 0x97, 0xa9, 0x26, 0xcf, 0xc9, 0x0e, 0x6c, 0x7b, 0x2d, 0xe3, 0x10, 0x6a, 0xae, 0xcb, 0xe2,
0xfa, 0x00, 0x5e, 0x43, 0x63, 0x88, 0x5c, 0xa1, 0x22, 0xbb, 0x4f, 0x6c, 0x6f, 0x9b, 0x2f, 0x9d,
0x4d, 0x89, 0x5d, 0x3b, 0x7a, 0x0a, 0xb0, 0x22, 0xd4, 0xae, 0x4f, 0x12, 0xa1, 0xa7, 0x65, 0xe4,
0xc7, 0x72, 0x16, 0xdc, 0x95, 0x11, 0x4e, 0x52, 0x79, 0x1f, 0xe4, 0x22, 0xc7, 0x54, 0x64, 0x58,
0x04, 0xeb, 0x27, 0x93, 0xc8, 0x9b, 0x38, 0x15, 0x98, 0xe9, 0xa8, 0x61, 0x32, 0x9f, 0xfc, 0x0d,
0x00, 0x00, 0xff, 0xff, 0x8e, 0x47, 0x2d, 0x41, 0xaa, 0x02, 0x00, 0x00,
}
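
The listing above defines the full client surface for AuthService. As a minimal usage sketch (not part of this diff; the server address, timeout, and empty AuthorizeRequest are illustrative placeholders):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"

	api "github.com/kubeflow/pipelines/backend/api/go_client"
)

func main() {
	// Placeholder address; the real API server location depends on the deployment.
	conn, err := grpc.Dial("localhost:8887", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Authorize returns (*empty.Empty, error); a nil error means the request was authorized.
	if _, err := api.NewAuthServiceClient(conn).Authorize(ctx, &api.AuthorizeRequest{}); err != nil {
		log.Fatalf("authorize: %v", err)
	}
	log.Println("authorized")
}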

@@ -1,129 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/auth.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var (
filter_AuthService_Authorize_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_AuthService_Authorize_0(ctx context.Context, marshaler runtime.Marshaler, client AuthServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq AuthorizeRequest
var metadata runtime.ServerMetadata
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_AuthService_Authorize_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Authorize(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterAuthServiceHandlerFromEndpoint is the same as RegisterAuthServiceHandler but
// automatically dials "endpoint" and closes the connection when "ctx" is done.
func RegisterAuthServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterAuthServiceHandler(ctx, mux, conn)
}
// RegisterAuthServiceHandler registers the http handlers for service AuthService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterAuthServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterAuthServiceHandlerClient(ctx, mux, NewAuthServiceClient(conn))
}
// RegisterAuthServiceHandlerClient registers the http handlers for service AuthService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AuthServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "AuthServiceClient" to call the correct interceptors.
func RegisterAuthServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AuthServiceClient) error {
mux.Handle("GET", pattern_AuthService_Authorize_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_AuthService_Authorize_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_AuthService_Authorize_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_AuthService_Authorize_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "auth"}, ""))
)
var (
forward_AuthService_Authorize_0 = runtime.ForwardResponseMessage
)
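
A hedged sketch of how this registration is typically wired up: create a runtime.ServeMux, register the handlers against the gRPC endpoint, and serve plain HTTP. Both addresses below are placeholders, not values from the repository.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	api "github.com/kubeflow/pipelines/backend/api/go_client"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	// Dials the gRPC backend and mounts GET /apis/v1beta1/auth on mux,
	// per pattern_AuthService_Authorize_0 above.
	if err := api.RegisterAuthServiceHandlerFromEndpoint(ctx, mux, "localhost:8887", opts); err != nil {
		log.Fatalf("register: %v", err)
	}
	log.Fatal(http.ListenAndServe(":8888", mux))
}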

@@ -1,160 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/error.proto
package go_client // import "github.com/kubeflow/pipelines/backend/api/go_client"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import any "github.com/golang/protobuf/ptypes/any"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Error struct {
ErrorMessage string `protobuf:"bytes,1,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
ErrorDetails string `protobuf:"bytes,2,opt,name=error_details,json=errorDetails,proto3" json:"error_details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Error) Reset() { *m = Error{} }
func (m *Error) String() string { return proto.CompactTextString(m) }
func (*Error) ProtoMessage() {}
func (*Error) Descriptor() ([]byte, []int) {
return fileDescriptor_error_c958bf85e2691313, []int{0}
}
func (m *Error) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Error.Unmarshal(m, b)
}
func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Error.Marshal(b, m, deterministic)
}
func (dst *Error) XXX_Merge(src proto.Message) {
xxx_messageInfo_Error.Merge(dst, src)
}
func (m *Error) XXX_Size() int {
return xxx_messageInfo_Error.Size(m)
}
func (m *Error) XXX_DiscardUnknown() {
xxx_messageInfo_Error.DiscardUnknown(m)
}
var xxx_messageInfo_Error proto.InternalMessageInfo
func (m *Error) GetErrorMessage() string {
if m != nil {
return m.ErrorMessage
}
return ""
}
func (m *Error) GetErrorDetails() string {
if m != nil {
return m.ErrorDetails
}
return ""
}
type Status struct {
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Status) Reset() { *m = Status{} }
func (m *Status) String() string { return proto.CompactTextString(m) }
func (*Status) ProtoMessage() {}
func (*Status) Descriptor() ([]byte, []int) {
return fileDescriptor_error_c958bf85e2691313, []int{1}
}
func (m *Status) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Status.Unmarshal(m, b)
}
func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Status.Marshal(b, m, deterministic)
}
func (dst *Status) XXX_Merge(src proto.Message) {
xxx_messageInfo_Status.Merge(dst, src)
}
func (m *Status) XXX_Size() int {
return xxx_messageInfo_Status.Size(m)
}
func (m *Status) XXX_DiscardUnknown() {
xxx_messageInfo_Status.DiscardUnknown(m)
}
var xxx_messageInfo_Status proto.InternalMessageInfo
func (m *Status) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func (m *Status) GetCode() int32 {
if m != nil {
return m.Code
}
return 0
}
func (m *Status) GetDetails() []*any.Any {
if m != nil {
return m.Details
}
return nil
}
func init() {
proto.RegisterType((*Error)(nil), "api.Error")
proto.RegisterType((*Status)(nil), "api.Status")
}
func init() { proto.RegisterFile("backend/api/error.proto", fileDescriptor_error_c958bf85e2691313) }
var fileDescriptor_error_c958bf85e2691313 = []byte{
// 228 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0x4d, 0x4b, 0x03, 0x31,
0x10, 0x86, 0xa9, 0xeb, 0x56, 0x8c, 0x7a, 0x09, 0x05, 0xab, 0xa7, 0x52, 0x2f, 0x3d, 0x25, 0x60,
0xf1, 0x07, 0x28, 0x7a, 0xf4, 0xe0, 0x7a, 0xf3, 0x52, 0x92, 0xec, 0x34, 0x86, 0xa6, 0x99, 0x90,
0x0f, 0x64, 0xff, 0xbd, 0x98, 0xb8, 0x74, 0x6f, 0xc9, 0x33, 0xcf, 0xbc, 0xbc, 0x0c, 0xb9, 0x95,
0x42, 0x1d, 0xc0, 0xf5, 0x5c, 0x78, 0xc3, 0x21, 0x04, 0x0c, 0xcc, 0x07, 0x4c, 0x48, 0x1b, 0xe1,
0xcd, 0xfd, 0x9d, 0x46, 0xd4, 0x16, 0x78, 0x41, 0x32, 0xef, 0xb9, 0x70, 0x43, 0x9d, 0xaf, 0x3f,
0x48, 0xfb, 0xf6, 0xa7, 0xd3, 0x07, 0x72, 0x53, 0xf6, 0x76, 0x47, 0x88, 0x51, 0x68, 0x58, 0xce,
0x56, 0xb3, 0xcd, 0x65, 0x77, 0x5d, 0xe0, 0x7b, 0x65, 0x27, 0xa9, 0x87, 0x24, 0x8c, 0x8d, 0xcb,
0xb3, 0x89, 0xf4, 0x5a, 0xd9, 0x5a, 0x92, 0xf9, 0x67, 0x12, 0x29, 0x47, 0xba, 0x20, 0x6d, 0x99,
0xfc, 0x67, 0xd5, 0x0f, 0xa5, 0xe4, 0x5c, 0x61, 0x0f, 0x65, 0xb7, 0xed, 0xca, 0x9b, 0x32, 0x72,
0x31, 0x46, 0x36, 0xab, 0x66, 0x73, 0xf5, 0xb8, 0x60, 0xb5, 0x33, 0x1b, 0x3b, 0xb3, 0x67, 0x37,
0x74, 0xa3, 0xf4, 0xf2, 0xf4, 0xb5, 0xd5, 0x26, 0x7d, 0x67, 0xc9, 0x14, 0x1e, 0xf9, 0x21, 0x4b,
0xd8, 0x5b, 0xfc, 0xe1, 0xde, 0x78, 0xb0, 0xc6, 0x41, 0xe4, 0xd3, 0x7b, 0x68, 0xdc, 0x29, 0x6b,
0xc0, 0x25, 0x39, 0x2f, 0x69, 0xdb, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, 0x2e, 0x20, 0xa9,
0x2f, 0x01, 0x00, 0x00,
}
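
To make the message shapes concrete, here is an assumption-laden sketch (not repository code) that packs an Error into the repeated Any Details field of a Status, using the ptypes helper from the same protobuf family:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"

	api "github.com/kubeflow/pipelines/backend/api/go_client"
)

func main() {
	// Pack an Error message into a google.protobuf.Any for Status.Details.
	detail, err := ptypes.MarshalAny(&api.Error{
		ErrorMessage: "experiment not found",
		ErrorDetails: "no experiment with the given id",
	})
	if err != nil {
		log.Fatal(err)
	}

	st := &api.Status{
		Error: "experiment not found",
		Code:  5, // illustrative; 5 is gRPC's NotFound code
	}
	st.Details = append(st.Details, detail)

	// The generated getters are nil-safe, as the listing above shows.
	fmt.Println(st.GetError(), st.GetCode(), len(st.GetDetails()))
}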

@@ -1,17 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build ignore
package ignore

@@ -1,773 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/experiment.proto
package go_client // import "github.com/kubeflow/pipelines/backend/api/go_client"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import empty "github.com/golang/protobuf/ptypes/empty"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
import _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Experiment_StorageState int32
const (
Experiment_STORAGESTATE_UNSPECIFIED Experiment_StorageState = 0
Experiment_STORAGESTATE_AVAILABLE Experiment_StorageState = 1
Experiment_STORAGESTATE_ARCHIVED Experiment_StorageState = 2
)
var Experiment_StorageState_name = map[int32]string{
0: "STORAGESTATE_UNSPECIFIED",
1: "STORAGESTATE_AVAILABLE",
2: "STORAGESTATE_ARCHIVED",
}
var Experiment_StorageState_value = map[string]int32{
"STORAGESTATE_UNSPECIFIED": 0,
"STORAGESTATE_AVAILABLE": 1,
"STORAGESTATE_ARCHIVED": 2,
}
func (x Experiment_StorageState) String() string {
return proto.EnumName(Experiment_StorageState_name, int32(x))
}
func (Experiment_StorageState) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_experiment_b177a3d23629b3a5, []int{5, 0}
}
type CreateExperimentRequest struct {
Experiment *Experiment `protobuf:"bytes,1,opt,name=experiment,proto3" json:"experiment,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateExperimentRequest) Reset() { *m = CreateExperimentRequest{} }
func (m *CreateExperimentRequest) String() string { return proto.CompactTextString(m) }
func (*CreateExperimentRequest) ProtoMessage() {}
func (*CreateExperimentRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_experiment_b177a3d23629b3a5, []int{0}
}
func (m *CreateExperimentRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateExperimentRequest.Unmarshal(m, b)
}
func (m *CreateExperimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateExperimentRequest.Marshal(b, m, deterministic)
}
func (dst *CreateExperimentRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateExperimentRequest.Merge(dst, src)
}
func (m *CreateExperimentRequest) XXX_Size() int {
return xxx_messageInfo_CreateExperimentRequest.Size(m)
}
func (m *CreateExperimentRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreateExperimentRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CreateExperimentRequest proto.InternalMessageInfo
func (m *CreateExperimentRequest) GetExperiment() *Experiment {
if m != nil {
return m.Experiment
}
return nil
}
type GetExperimentRequest struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetExperimentRequest) Reset() { *m = GetExperimentRequest{} }
func (m *GetExperimentRequest) String() string { return proto.CompactTextString(m) }
func (*GetExperimentRequest) ProtoMessage() {}
func (*GetExperimentRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_experiment_b177a3d23629b3a5, []int{1}
}
func (m *GetExperimentRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetExperimentRequest.Unmarshal(m, b)
}
func (m *GetExperimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetExperimentRequest.Marshal(b, m, deterministic)
}
func (dst *GetExperimentRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetExperimentRequest.Merge(dst, src)
}
func (m *GetExperimentRequest) XXX_Size() int {
return xxx_messageInfo_GetExperimentRequest.Size(m)
}
func (m *GetExperimentRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetExperimentRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetExperimentRequest proto.InternalMessageInfo
func (m *GetExperimentRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
type ListExperimentsRequest struct {
PageToken string `protobuf:"bytes,1,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
SortBy string `protobuf:"bytes,3,opt,name=sort_by,json=sortBy,proto3" json:"sort_by,omitempty"`
Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
ResourceReferenceKey *ResourceKey `protobuf:"bytes,5,opt,name=resource_reference_key,json=resourceReferenceKey,proto3" json:"resource_reference_key,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListExperimentsRequest) Reset() { *m = ListExperimentsRequest{} }
func (m *ListExperimentsRequest) String() string { return proto.CompactTextString(m) }
func (*ListExperimentsRequest) ProtoMessage() {}
func (*ListExperimentsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_experiment_b177a3d23629b3a5, []int{2}
}
func (m *ListExperimentsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListExperimentsRequest.Unmarshal(m, b)
}
func (m *ListExperimentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListExperimentsRequest.Marshal(b, m, deterministic)
}
func (dst *ListExperimentsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListExperimentsRequest.Merge(dst, src)
}
func (m *ListExperimentsRequest) XXX_Size() int {
return xxx_messageInfo_ListExperimentsRequest.Size(m)
}
func (m *ListExperimentsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListExperimentsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ListExperimentsRequest proto.InternalMessageInfo
func (m *ListExperimentsRequest) GetPageToken() string {
if m != nil {
return m.PageToken
}
return ""
}
func (m *ListExperimentsRequest) GetPageSize() int32 {
if m != nil {
return m.PageSize
}
return 0
}
func (m *ListExperimentsRequest) GetSortBy() string {
if m != nil {
return m.SortBy
}
return ""
}
func (m *ListExperimentsRequest) GetFilter() string {
if m != nil {
return m.Filter
}
return ""
}
func (m *ListExperimentsRequest) GetResourceReferenceKey() *ResourceKey {
if m != nil {
return m.ResourceReferenceKey
}
return nil
}
type ListExperimentsResponse struct {
Experiments []*Experiment `protobuf:"bytes,1,rep,name=experiments,proto3" json:"experiments,omitempty"`
TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListExperimentsResponse) Reset() { *m = ListExperimentsResponse{} }
func (m *ListExperimentsResponse) String() string { return proto.CompactTextString(m) }
func (*ListExperimentsResponse) ProtoMessage() {}
func (*ListExperimentsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_experiment_b177a3d23629b3a5, []int{3}
}
func (m *ListExperimentsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListExperimentsResponse.Unmarshal(m, b)
}
func (m *ListExperimentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListExperimentsResponse.Marshal(b, m, deterministic)
}
func (dst *ListExperimentsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListExperimentsResponse.Merge(dst, src)
}
func (m *ListExperimentsResponse) XXX_Size() int {
return xxx_messageInfo_ListExperimentsResponse.Size(m)
}
func (m *ListExperimentsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListExperimentsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ListExperimentsResponse proto.InternalMessageInfo
func (m *ListExperimentsResponse) GetExperiments() []*Experiment {
if m != nil {
return m.Experiments
}
return nil
}
func (m *ListExperimentsResponse) GetTotalSize() int32 {
if m != nil {
return m.TotalSize
}
return 0
}
func (m *ListExperimentsResponse) GetNextPageToken() string {
if m != nil {
return m.NextPageToken
}
return ""
}
type DeleteExperimentRequest struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DeleteExperimentRequest) Reset() { *m = DeleteExperimentRequest{} }
func (m *DeleteExperimentRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteExperimentRequest) ProtoMessage() {}
func (*DeleteExperimentRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_experiment_b177a3d23629b3a5, []int{4}
}
func (m *DeleteExperimentRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DeleteExperimentRequest.Unmarshal(m, b)
}
func (m *DeleteExperimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DeleteExperimentRequest.Marshal(b, m, deterministic)
}
func (dst *DeleteExperimentRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DeleteExperimentRequest.Merge(dst, src)
}
func (m *DeleteExperimentRequest) XXX_Size() int {
return xxx_messageInfo_DeleteExperimentRequest.Size(m)
}
func (m *DeleteExperimentRequest) XXX_DiscardUnknown() {
xxx_messageInfo_DeleteExperimentRequest.DiscardUnknown(m)
}
var xxx_messageInfo_DeleteExperimentRequest proto.InternalMessageInfo
func (m *DeleteExperimentRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
type Experiment struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
CreatedAt *timestamp.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"`
ResourceReferences []*ResourceReference `protobuf:"bytes,5,rep,name=resource_references,json=resourceReferences,proto3" json:"resource_references,omitempty"`
StorageState Experiment_StorageState `protobuf:"varint,6,opt,name=storage_state,json=storageState,proto3,enum=api.Experiment_StorageState" json:"storage_state,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Experiment) Reset() { *m = Experiment{} }
func (m *Experiment) String() string { return proto.CompactTextString(m) }
func (*Experiment) ProtoMessage() {}
func (*Experiment) Descriptor() ([]byte, []int) {
return fileDescriptor_experiment_b177a3d23629b3a5, []int{5}
}
func (m *Experiment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Experiment.Unmarshal(m, b)
}
func (m *Experiment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Experiment.Marshal(b, m, deterministic)
}
func (dst *Experiment) XXX_Merge(src proto.Message) {
xxx_messageInfo_Experiment.Merge(dst, src)
}
func (m *Experiment) XXX_Size() int {
return xxx_messageInfo_Experiment.Size(m)
}
func (m *Experiment) XXX_DiscardUnknown() {
xxx_messageInfo_Experiment.DiscardUnknown(m)
}
var xxx_messageInfo_Experiment proto.InternalMessageInfo
func (m *Experiment) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Experiment) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Experiment) GetDescription() string {
if m != nil {
return m.Description
}
return ""
}
func (m *Experiment) GetCreatedAt() *timestamp.Timestamp {
if m != nil {
return m.CreatedAt
}
return nil
}
func (m *Experiment) GetResourceReferences() []*ResourceReference {
if m != nil {
return m.ResourceReferences
}
return nil
}
func (m *Experiment) GetStorageState() Experiment_StorageState {
if m != nil {
return m.StorageState
}
return Experiment_STORAGESTATE_UNSPECIFIED
}
type ArchiveExperimentRequest struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ArchiveExperimentRequest) Reset() { *m = ArchiveExperimentRequest{} }
func (m *ArchiveExperimentRequest) String() string { return proto.CompactTextString(m) }
func (*ArchiveExperimentRequest) ProtoMessage() {}
func (*ArchiveExperimentRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_experiment_b177a3d23629b3a5, []int{6}
}
func (m *ArchiveExperimentRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ArchiveExperimentRequest.Unmarshal(m, b)
}
func (m *ArchiveExperimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ArchiveExperimentRequest.Marshal(b, m, deterministic)
}
func (dst *ArchiveExperimentRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ArchiveExperimentRequest.Merge(dst, src)
}
func (m *ArchiveExperimentRequest) XXX_Size() int {
return xxx_messageInfo_ArchiveExperimentRequest.Size(m)
}
func (m *ArchiveExperimentRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ArchiveExperimentRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ArchiveExperimentRequest proto.InternalMessageInfo
func (m *ArchiveExperimentRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
type UnarchiveExperimentRequest struct {
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *UnarchiveExperimentRequest) Reset() { *m = UnarchiveExperimentRequest{} }
func (m *UnarchiveExperimentRequest) String() string { return proto.CompactTextString(m) }
func (*UnarchiveExperimentRequest) ProtoMessage() {}
func (*UnarchiveExperimentRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_experiment_b177a3d23629b3a5, []int{7}
}
func (m *UnarchiveExperimentRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UnarchiveExperimentRequest.Unmarshal(m, b)
}
func (m *UnarchiveExperimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UnarchiveExperimentRequest.Marshal(b, m, deterministic)
}
func (dst *UnarchiveExperimentRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_UnarchiveExperimentRequest.Merge(dst, src)
}
func (m *UnarchiveExperimentRequest) XXX_Size() int {
return xxx_messageInfo_UnarchiveExperimentRequest.Size(m)
}
func (m *UnarchiveExperimentRequest) XXX_DiscardUnknown() {
xxx_messageInfo_UnarchiveExperimentRequest.DiscardUnknown(m)
}
var xxx_messageInfo_UnarchiveExperimentRequest proto.InternalMessageInfo
func (m *UnarchiveExperimentRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func init() {
proto.RegisterType((*CreateExperimentRequest)(nil), "api.CreateExperimentRequest")
proto.RegisterType((*GetExperimentRequest)(nil), "api.GetExperimentRequest")
proto.RegisterType((*ListExperimentsRequest)(nil), "api.ListExperimentsRequest")
proto.RegisterType((*ListExperimentsResponse)(nil), "api.ListExperimentsResponse")
proto.RegisterType((*DeleteExperimentRequest)(nil), "api.DeleteExperimentRequest")
proto.RegisterType((*Experiment)(nil), "api.Experiment")
proto.RegisterType((*ArchiveExperimentRequest)(nil), "api.ArchiveExperimentRequest")
proto.RegisterType((*UnarchiveExperimentRequest)(nil), "api.UnarchiveExperimentRequest")
proto.RegisterEnum("api.Experiment_StorageState", Experiment_StorageState_name, Experiment_StorageState_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// ExperimentServiceClient is the client API for ExperimentService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ExperimentServiceClient interface {
CreateExperiment(ctx context.Context, in *CreateExperimentRequest, opts ...grpc.CallOption) (*Experiment, error)
GetExperiment(ctx context.Context, in *GetExperimentRequest, opts ...grpc.CallOption) (*Experiment, error)
ListExperiment(ctx context.Context, in *ListExperimentsRequest, opts ...grpc.CallOption) (*ListExperimentsResponse, error)
DeleteExperiment(ctx context.Context, in *DeleteExperimentRequest, opts ...grpc.CallOption) (*empty.Empty, error)
ArchiveExperiment(ctx context.Context, in *ArchiveExperimentRequest, opts ...grpc.CallOption) (*empty.Empty, error)
UnarchiveExperiment(ctx context.Context, in *UnarchiveExperimentRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}
type experimentServiceClient struct {
cc *grpc.ClientConn
}
func NewExperimentServiceClient(cc *grpc.ClientConn) ExperimentServiceClient {
return &experimentServiceClient{cc}
}
func (c *experimentServiceClient) CreateExperiment(ctx context.Context, in *CreateExperimentRequest, opts ...grpc.CallOption) (*Experiment, error) {
out := new(Experiment)
err := c.cc.Invoke(ctx, "/api.ExperimentService/CreateExperiment", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *experimentServiceClient) GetExperiment(ctx context.Context, in *GetExperimentRequest, opts ...grpc.CallOption) (*Experiment, error) {
out := new(Experiment)
err := c.cc.Invoke(ctx, "/api.ExperimentService/GetExperiment", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *experimentServiceClient) ListExperiment(ctx context.Context, in *ListExperimentsRequest, opts ...grpc.CallOption) (*ListExperimentsResponse, error) {
out := new(ListExperimentsResponse)
err := c.cc.Invoke(ctx, "/api.ExperimentService/ListExperiment", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *experimentServiceClient) DeleteExperiment(ctx context.Context, in *DeleteExperimentRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/api.ExperimentService/DeleteExperiment", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *experimentServiceClient) ArchiveExperiment(ctx context.Context, in *ArchiveExperimentRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/api.ExperimentService/ArchiveExperiment", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *experimentServiceClient) UnarchiveExperiment(ctx context.Context, in *UnarchiveExperimentRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/api.ExperimentService/UnarchiveExperiment", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ExperimentServiceServer is the server API for ExperimentService service.
type ExperimentServiceServer interface {
CreateExperiment(context.Context, *CreateExperimentRequest) (*Experiment, error)
GetExperiment(context.Context, *GetExperimentRequest) (*Experiment, error)
ListExperiment(context.Context, *ListExperimentsRequest) (*ListExperimentsResponse, error)
DeleteExperiment(context.Context, *DeleteExperimentRequest) (*empty.Empty, error)
ArchiveExperiment(context.Context, *ArchiveExperimentRequest) (*empty.Empty, error)
UnarchiveExperiment(context.Context, *UnarchiveExperimentRequest) (*empty.Empty, error)
}
func RegisterExperimentServiceServer(s *grpc.Server, srv ExperimentServiceServer) {
s.RegisterService(&_ExperimentService_serviceDesc, srv)
}
func _ExperimentService_CreateExperiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateExperimentRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExperimentServiceServer).CreateExperiment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ExperimentService/CreateExperiment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExperimentServiceServer).CreateExperiment(ctx, req.(*CreateExperimentRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ExperimentService_GetExperiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetExperimentRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExperimentServiceServer).GetExperiment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ExperimentService/GetExperiment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExperimentServiceServer).GetExperiment(ctx, req.(*GetExperimentRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ExperimentService_ListExperiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListExperimentsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExperimentServiceServer).ListExperiment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ExperimentService/ListExperiment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExperimentServiceServer).ListExperiment(ctx, req.(*ListExperimentsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ExperimentService_DeleteExperiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DeleteExperimentRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExperimentServiceServer).DeleteExperiment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ExperimentService/DeleteExperiment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExperimentServiceServer).DeleteExperiment(ctx, req.(*DeleteExperimentRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ExperimentService_ArchiveExperiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ArchiveExperimentRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExperimentServiceServer).ArchiveExperiment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ExperimentService/ArchiveExperiment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExperimentServiceServer).ArchiveExperiment(ctx, req.(*ArchiveExperimentRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ExperimentService_UnarchiveExperiment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UnarchiveExperimentRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ExperimentServiceServer).UnarchiveExperiment(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ExperimentService/UnarchiveExperiment",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ExperimentServiceServer).UnarchiveExperiment(ctx, req.(*UnarchiveExperimentRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ExperimentService_serviceDesc = grpc.ServiceDesc{
ServiceName: "api.ExperimentService",
HandlerType: (*ExperimentServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateExperiment",
Handler: _ExperimentService_CreateExperiment_Handler,
},
{
MethodName: "GetExperiment",
Handler: _ExperimentService_GetExperiment_Handler,
},
{
MethodName: "ListExperiment",
Handler: _ExperimentService_ListExperiment_Handler,
},
{
MethodName: "DeleteExperiment",
Handler: _ExperimentService_DeleteExperiment_Handler,
},
{
MethodName: "ArchiveExperiment",
Handler: _ExperimentService_ArchiveExperiment_Handler,
},
{
MethodName: "UnarchiveExperiment",
Handler: _ExperimentService_UnarchiveExperiment_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/experiment.proto",
}
func init() {
proto.RegisterFile("backend/api/experiment.proto", fileDescriptor_experiment_b177a3d23629b3a5)
}
var fileDescriptor_experiment_b177a3d23629b3a5 = []byte{
// 889 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x51, 0x53, 0xdb, 0x46,
0x10, 0x8e, 0x4c, 0x70, 0xc2, 0x1a, 0x83, 0x39, 0x52, 0x5b, 0x08, 0x53, 0x5c, 0x4d, 0x87, 0xba,
0x4c, 0xb0, 0x0a, 0x79, 0x6a, 0xde, 0x0c, 0x38, 0x94, 0x86, 0xb6, 0x19, 0xd9, 0xc9, 0x43, 0x5e,
0x3c, 0x67, 0x79, 0x6d, 0x6e, 0xb0, 0x75, 0xea, 0xdd, 0x89, 0xc4, 0x74, 0x3a, 0xd3, 0xe9, 0x4c,
0xff, 0x40, 0xf3, 0xb3, 0x3a, 0x7d, 0xea, 0x5f, 0xe8, 0xef, 0xe8, 0x74, 0x74, 0x96, 0x41, 0xb6,
0xec, 0x84, 0x27, 0xb8, 0xdd, 0xcf, 0xb7, 0xf7, 0x7d, 0xfb, 0xed, 0x0a, 0xca, 0x1d, 0xea, 0x5d,
0xa1, 0xdf, 0x75, 0x68, 0xc0, 0x1c, 0x7c, 0x1f, 0xa0, 0x60, 0x43, 0xf4, 0x55, 0x2d, 0x10, 0x5c,
0x71, 0xb2, 0x44, 0x03, 0x66, 0x95, 0xa6, 0x20, 0x42, 0x70, 0x31, 0xce, 0x5a, 0x5f, 0x26, 0x13,
0x02, 0x25, 0x0f, 0x85, 0x87, 0x6d, 0x81, 0x3d, 0x14, 0xe8, 0x7b, 0x18, 0xa3, 0xca, 0x7d, 0xce,
0xfb, 0x03, 0xd4, 0x20, 0xea, 0xfb, 0x5c, 0x51, 0xc5, 0xb8, 0x2f, 0xe3, 0xec, 0x76, 0x9c, 0xd5,
0xa7, 0x4e, 0xd8, 0x73, 0x70, 0x18, 0xa8, 0x51, 0x9c, 0xdc, 0x9d, 0x4d, 0x2a, 0x36, 0x44, 0xa9,
0xe8, 0x30, 0x88, 0x01, 0x4f, 0xf5, 0x1f, 0xef, 0xa0, 0x8f, 0xfe, 0x81, 0x7c, 0x47, 0xfb, 0x7d,
0x14, 0x0e, 0x0f, 0xf4, 0xfd, 0xe9, 0x5a, 0xf6, 0xf7, 0x50, 0x3a, 0x11, 0x48, 0x15, 0x36, 0x6e,
0x79, 0xba, 0xf8, 0x73, 0x88, 0x52, 0x11, 0x07, 0xe0, 0x8e, 0xbc, 0x69, 0x54, 0x8c, 0x6a, 0xee,
0x68, 0xbd, 0x46, 0x03, 0x56, 0x4b, 0x60, 0x13, 0x10, 0x7b, 0x0f, 0x9e, 0x9c, 0xa1, 0x4a, 0x5f,
0xb4, 0x06, 0x19, 0xd6, 0xd5, 0x17, 0xac, 0xb8, 0x19, 0xd6, 0xb5, 0xff, 0x36, 0xa0, 0x78, 0xc1,
0x64, 0x02, 0x29, 0x27, 0xd0, 0x1d, 0x80, 0x80, 0xf6, 0xb1, 0xad, 0xf8, 0x15, 0xfa, 0xf1, 0x4f,
0x56, 0xa2, 0x48, 0x2b, 0x0a, 0x90, 0x6d, 0xd0, 0x87, 0xb6, 0x64, 0x37, 0x68, 0x66, 0x2a, 0x46,
0x75, 0xd9, 0x7d, 0x1c, 0x05, 0x9a, 0xec, 0x06, 0x49, 0x09, 0x1e, 0x49, 0x2e, 0x54, 0xbb, 0x33,
0x32, 0x97, 0xf4, 0x0f, 0xb3, 0xd1, 0xf1, 0x78, 0x44, 0x8a, 0x90, 0xed, 0xb1, 0x81, 0x42, 0x61,
0x3e, 0x1c, 0xc7, 0xc7, 0x27, 0xf2, 0x02, 0x8a, 0xe9, 0x0e, 0xb5, 0xaf, 0x70, 0x64, 0x2e, 0x6b,
0xb2, 0x05, 0x4d, 0xd6, 0x8d, 0x21, 0x2f, 0x71, 0xe4, 0x3e, 0x99, 0xe0, 0xdd, 0x09, 0xfc, 0x25,
0x8e, 0xec, 0x0f, 0x06, 0x94, 0x52, 0x7c, 0x64, 0xc0, 0x7d, 0x89, 0xe4, 0x10, 0x72, 0x77, 0x0a,
0x49, 0xd3, 0xa8, 0x2c, 0xcd, 0x53, 0x31, 0x89, 0x89, 0x34, 0x50, 0x5c, 0xd1, 0xc1, 0x98, 0xe5,
0x92, 0x66, 0xb9, 0xa2, 0x23, 0x9a, 0xe6, 0x1e, 0xac, 0xfb, 0xf8, 0x5e, 0xb5, 0x13, 0x3a, 0x65,
0x34, 0xad, 0x7c, 0x14, 0x7e, 0x35, 0xd1, 0xca, 0xfe, 0x1a, 0x4a, 0xa7, 0x38, 0xc0, 0x79, 0x9d,
0x9d, 0x6d, 0xc8, 0x7f, 0x19, 0x80, 0x3b, 0xd4, 0x6c, 0x9a, 0x10, 0x78, 0xe8, 0xd3, 0x21, 0xc6,
0x65, 0xf4, 0xff, 0xa4, 0x02, 0xb9, 0x2e, 0x4a, 0x4f, 0x30, 0xed, 0xac, 0x58, 0xf0, 0x64, 0x88,
0x7c, 0x0b, 0xe0, 0x69, 0x67, 0x75, 0xdb, 0x54, 0x69, 0xe5, 0x73, 0x47, 0x56, 0x6d, 0xec, 0xde,
0xda, 0xc4, 0xbd, 0xb5, 0xd6, 0xc4, 0xbd, 0xee, 0x4a, 0x8c, 0xae, 0x2b, 0x72, 0x06, 0x9b, 0xe9,
0xc6, 0x48, 0x73, 0x59, 0x8b, 0x57, 0x9c, 0xea, 0xca, 0x6d, 0x23, 0x5c, 0x92, 0xea, 0x8d, 0x24,
0x75, 0xc8, 0x4b, 0xc5, 0x85, 0xb6, 0x8c, 0xa2, 0x0a, 0xcd, 0x6c, 0xc5, 0xa8, 0xae, 0x1d, 0x95,
0x67, 0xf4, 0xaf, 0x35, 0xc7, 0xa0, 0x66, 0x84, 0x71, 0x57, 0x65, 0xe2, 0x64, 0x7b, 0xb0, 0x9a,
0xcc, 0x92, 0x32, 0x98, 0xcd, 0xd6, 0x4f, 0x6e, 0xfd, 0xac, 0xd1, 0x6c, 0xd5, 0x5b, 0x8d, 0xf6,
0xeb, 0x1f, 0x9b, 0xaf, 0x1a, 0x27, 0xe7, 0x2f, 0xce, 0x1b, 0xa7, 0x85, 0x07, 0xc4, 0x82, 0xe2,
0x54, 0xb6, 0xfe, 0xa6, 0x7e, 0x7e, 0x51, 0x3f, 0xbe, 0x68, 0x14, 0x0c, 0xb2, 0x05, 0x9f, 0x4d,
0xe7, 0xdc, 0x93, 0xef, 0xce, 0xdf, 0x34, 0x4e, 0x0b, 0x19, 0x7b, 0x1f, 0xcc, 0xba, 0xf0, 0x2e,
0xd9, 0xf5, 0x3d, 0x9a, 0xf5, 0x14, 0xac, 0xd7, 0x3e, 0xbd, 0x27, 0xfa, 0xe8, 0xaf, 0x65, 0xd8,
0xb8, 0x43, 0x35, 0x51, 0x5c, 0x33, 0x0f, 0x49, 0x00, 0x85, 0xd9, 0xa9, 0x27, 0x63, 0x51, 0x16,
0x2c, 0x03, 0x6b, 0xd6, 0xb2, 0xf6, 0xc1, 0xef, 0xff, 0xfc, 0xfb, 0x21, 0xf3, 0x95, 0xbd, 0x15,
0x2d, 0x31, 0xe9, 0x5c, 0x1f, 0x76, 0x50, 0xd1, 0xc3, 0xc4, 0xba, 0x94, 0xcf, 0x13, 0xbb, 0x81,
0x78, 0x90, 0x9f, 0xda, 0x0d, 0x64, 0x4b, 0x5f, 0x38, 0x6f, 0x5f, 0xa4, 0x6b, 0xed, 0xe9, 0x5a,
0x15, 0xf2, 0xf9, 0xc2, 0x5a, 0xce, 0x2f, 0xac, 0xfb, 0x2b, 0xf1, 0x61, 0x6d, 0x7a, 0x0e, 0xc9,
0xb6, 0xbe, 0x6a, 0xfe, 0xb2, 0xb1, 0xca, 0xf3, 0x93, 0xe3, 0xc9, 0xb5, 0xbf, 0xd0, 0x45, 0xb7,
0xc9, 0x62, 0x82, 0x91, 0x8c, 0xb3, 0x23, 0x16, 0xcb, 0xb8, 0x60, 0xf2, 0xac, 0x62, 0x6a, 0x00,
0x1a, 0xd1, 0x6e, 0x9f, 0x30, 0xdc, 0xff, 0x14, 0xc3, 0x1b, 0xd8, 0x48, 0x19, 0x85, 0xec, 0xe8,
0x92, 0x8b, 0x0c, 0xb4, 0xb0, 0x66, 0x4d, 0xd7, 0xac, 0xda, 0x7b, 0x1f, 0xaf, 0xf9, 0x3c, 0xf6,
0x1a, 0xf9, 0xcd, 0x80, 0xcd, 0x39, 0xce, 0x23, 0xbb, 0xba, 0xfc, 0x62, 0x4f, 0x2e, 0x7c, 0xc0,
0x37, 0xfa, 0x01, 0xfb, 0x76, 0xf5, 0x13, 0x0f, 0x08, 0x27, 0x57, 0x1f, 0xff, 0x61, 0xfc, 0x59,
0xff, 0xc1, 0x2d, 0xc3, 0xa3, 0x2e, 0xf6, 0x68, 0x38, 0x50, 0x64, 0x83, 0xac, 0x43, 0xde, 0xca,
0xe9, 0x17, 0x44, 0xf3, 0x19, 0xca, 0xb7, 0xbb, 0xb0, 0x03, 0xd9, 0x63, 0xa4, 0x02, 0x05, 0xd9,
0x7c, 0x9c, 0xb1, 0xf2, 0x34, 0x54, 0x97, 0x5c, 0xb0, 0x1b, 0xfd, 0xdd, 0xab, 0x64, 0x3a, 0xab,
0x00, 0xb7, 0x80, 0x07, 0x6f, 0x9f, 0xf5, 0x99, 0xba, 0x0c, 0x3b, 0x35, 0x8f, 0x0f, 0x9d, 0xab,
0xb0, 0x83, 0xbd, 0x01, 0x7f, 0xe7, 0x04, 0x2c, 0xc0, 0x01, 0xf3, 0x51, 0x3a, 0xc9, 0xcf, 0x79,
0x9f, 0xb7, 0xbd, 0x01, 0x43, 0x5f, 0x75, 0xb2, 0x9a, 0xc9, 0xb3, 0xff, 0x03, 0x00, 0x00, 0xff,
0xff, 0x0b, 0x8f, 0xfa, 0xad, 0x2a, 0x08, 0x00, 0x00,
}
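
The request/response pair above implies a token-based pagination contract: pass PageToken from the previous response, and stop when NextPageToken comes back empty. A hypothetical helper (the package name, page size, and an already-constructed client are assumptions):

// Package clientutil is a hypothetical home for this sketch.
package clientutil

import (
	"context"

	api "github.com/kubeflow/pipelines/backend/api/go_client"
)

// ListAllExperiments pages through ExperimentService.ListExperiment until
// the server returns an empty NextPageToken.
func ListAllExperiments(ctx context.Context, client api.ExperimentServiceClient) ([]*api.Experiment, error) {
	var all []*api.Experiment
	token := ""
	for {
		resp, err := client.ListExperiment(ctx, &api.ListExperimentsRequest{
			PageToken: token,
			PageSize:  50, // illustrative page size
		})
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Experiments...)
		if resp.NextPageToken == "" {
			return all, nil
		}
		token = resp.NextPageToken
	}
}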

@@ -1,374 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/experiment.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_ExperimentService_CreateExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreateExperimentRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Experiment); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreateExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ExperimentService_GetExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.GetExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_ExperimentService_ListExperiment_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_ExperimentService_ListExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListExperimentsRequest
var metadata runtime.ServerMetadata
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_ExperimentService_ListExperiment_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ExperimentService_DeleteExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DeleteExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ExperimentService_ArchiveExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ArchiveExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.ArchiveExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ExperimentService_UnarchiveExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq UnarchiveExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.UnarchiveExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterExperimentServiceHandlerFromEndpoint is the same as RegisterExperimentServiceHandler but
// automatically dials "endpoint" and closes the connection when "ctx" is done.
func RegisterExperimentServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterExperimentServiceHandler(ctx, mux, conn)
}
// RegisterExperimentServiceHandler registers the http handlers for service ExperimentService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterExperimentServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterExperimentServiceHandlerClient(ctx, mux, NewExperimentServiceClient(conn))
}
// RegisterExperimentServiceHandlerClient registers the http handlers for service ExperimentService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ExperimentServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ExperimentServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ExperimentServiceClient" to call the correct interceptors.
func RegisterExperimentServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ExperimentServiceClient) error {
mux.Handle("POST", pattern_ExperimentService_CreateExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_CreateExperiment_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_CreateExperiment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_ExperimentService_GetExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_GetExperiment_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_GetExperiment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_ExperimentService_ListExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_ListExperiment_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_ListExperiment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_ExperimentService_DeleteExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_DeleteExperiment_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_DeleteExperiment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_ExperimentService_ArchiveExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_ArchiveExperiment_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_ArchiveExperiment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_ExperimentService_UnarchiveExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_UnarchiveExperiment_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_UnarchiveExperiment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_ExperimentService_CreateExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "experiments"}, ""))
pattern_ExperimentService_GetExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "experiments", "id"}, ""))
pattern_ExperimentService_ListExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "experiments"}, ""))
pattern_ExperimentService_DeleteExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "experiments", "id"}, ""))
pattern_ExperimentService_ArchiveExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "experiments", "id"}, "archive"))
pattern_ExperimentService_UnarchiveExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "experiments", "id"}, "unarchive"))
)
var (
forward_ExperimentService_CreateExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_GetExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_ListExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_DeleteExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_ArchiveExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_UnarchiveExperiment_0 = runtime.ForwardResponseMessage
)
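For context on how these generated pieces fit together: the pattern_* variables define the REST routes, the forward_* variables select the response-forwarding strategy, and the Register* functions wire both onto a gateway mux. Below is a minimal, hypothetical sketch (not part of this diff) of serving the ExperimentService gateway, assuming the package also exports RegisterExperimentServiceHandlerFromEndpoint (symmetric to the Job and Pipeline variants later in this diff) and that the gRPC backend listens on localhost:8887:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	api "github.com/kubeflow/pipelines/backend/api/go_client"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	// Dials the gRPC endpoint and installs the REST routes declared by the
	// pattern_ExperimentService_* variables above (e.g. /apis/v1beta1/experiments).
	if err := api.RegisterExperimentServiceHandlerFromEndpoint(ctx, mux, "localhost:8887", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8888", mux))
}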


@@ -1,670 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/filter.proto
package go_client // import "github.com/kubeflow/pipelines/backend/api/go_client"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Predicate_Op int32
const (
Predicate_UNKNOWN Predicate_Op = 0
Predicate_EQUALS Predicate_Op = 1
Predicate_NOT_EQUALS Predicate_Op = 2
Predicate_GREATER_THAN Predicate_Op = 3
Predicate_GREATER_THAN_EQUALS Predicate_Op = 5
Predicate_LESS_THAN Predicate_Op = 6
Predicate_LESS_THAN_EQUALS Predicate_Op = 7
Predicate_IN Predicate_Op = 8
Predicate_IS_SUBSTRING Predicate_Op = 9
)
var Predicate_Op_name = map[int32]string{
0: "UNKNOWN",
1: "EQUALS",
2: "NOT_EQUALS",
3: "GREATER_THAN",
5: "GREATER_THAN_EQUALS",
6: "LESS_THAN",
7: "LESS_THAN_EQUALS",
8: "IN",
9: "IS_SUBSTRING",
}
var Predicate_Op_value = map[string]int32{
"UNKNOWN": 0,
"EQUALS": 1,
"NOT_EQUALS": 2,
"GREATER_THAN": 3,
"GREATER_THAN_EQUALS": 5,
"LESS_THAN": 6,
"LESS_THAN_EQUALS": 7,
"IN": 8,
"IS_SUBSTRING": 9,
}
func (x Predicate_Op) String() string {
return proto.EnumName(Predicate_Op_name, int32(x))
}
func (Predicate_Op) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_filter_117257288386a875, []int{0, 0}
}
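A small illustration (not part of the generated file) of the enum plumbing above: String() resolves the name through Predicate_Op_name, and Predicate_Op_value inverts the mapping:

package main

import (
	"fmt"

	api "github.com/kubeflow/pipelines/backend/api/go_client"
)

func main() {
	op := api.Predicate_GREATER_THAN
	fmt.Println(op.String())                  // "GREATER_THAN", via Predicate_Op_name
	fmt.Println(api.Predicate_Op_value["IN"]) // 8
}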
type Predicate struct {
Op Predicate_Op `protobuf:"varint,1,opt,name=op,proto3,enum=api.Predicate_Op" json:"op,omitempty"`
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
// Types that are valid to be assigned to Value:
// *Predicate_IntValue
// *Predicate_LongValue
// *Predicate_StringValue
// *Predicate_TimestampValue
// *Predicate_IntValues
// *Predicate_LongValues
// *Predicate_StringValues
Value isPredicate_Value `protobuf_oneof:"value"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Predicate) Reset() { *m = Predicate{} }
func (m *Predicate) String() string { return proto.CompactTextString(m) }
func (*Predicate) ProtoMessage() {}
func (*Predicate) Descriptor() ([]byte, []int) {
return fileDescriptor_filter_117257288386a875, []int{0}
}
func (m *Predicate) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Predicate.Unmarshal(m, b)
}
func (m *Predicate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Predicate.Marshal(b, m, deterministic)
}
func (dst *Predicate) XXX_Merge(src proto.Message) {
xxx_messageInfo_Predicate.Merge(dst, src)
}
func (m *Predicate) XXX_Size() int {
return xxx_messageInfo_Predicate.Size(m)
}
func (m *Predicate) XXX_DiscardUnknown() {
xxx_messageInfo_Predicate.DiscardUnknown(m)
}
var xxx_messageInfo_Predicate proto.InternalMessageInfo
func (m *Predicate) GetOp() Predicate_Op {
if m != nil {
return m.Op
}
return Predicate_UNKNOWN
}
func (m *Predicate) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
type isPredicate_Value interface {
isPredicate_Value()
}
type Predicate_IntValue struct {
IntValue int32 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"`
}
type Predicate_LongValue struct {
LongValue int64 `protobuf:"varint,4,opt,name=long_value,json=longValue,proto3,oneof"`
}
type Predicate_StringValue struct {
StringValue string `protobuf:"bytes,5,opt,name=string_value,json=stringValue,proto3,oneof"`
}
type Predicate_TimestampValue struct {
TimestampValue *timestamp.Timestamp `protobuf:"bytes,6,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
}
type Predicate_IntValues struct {
IntValues *IntValues `protobuf:"bytes,7,opt,name=int_values,json=intValues,proto3,oneof"`
}
type Predicate_LongValues struct {
LongValues *LongValues `protobuf:"bytes,8,opt,name=long_values,json=longValues,proto3,oneof"`
}
type Predicate_StringValues struct {
StringValues *StringValues `protobuf:"bytes,9,opt,name=string_values,json=stringValues,proto3,oneof"`
}
func (*Predicate_IntValue) isPredicate_Value() {}
func (*Predicate_LongValue) isPredicate_Value() {}
func (*Predicate_StringValue) isPredicate_Value() {}
func (*Predicate_TimestampValue) isPredicate_Value() {}
func (*Predicate_IntValues) isPredicate_Value() {}
func (*Predicate_LongValues) isPredicate_Value() {}
func (*Predicate_StringValues) isPredicate_Value() {}
func (m *Predicate) GetValue() isPredicate_Value {
if m != nil {
return m.Value
}
return nil
}
func (m *Predicate) GetIntValue() int32 {
if x, ok := m.GetValue().(*Predicate_IntValue); ok {
return x.IntValue
}
return 0
}
func (m *Predicate) GetLongValue() int64 {
if x, ok := m.GetValue().(*Predicate_LongValue); ok {
return x.LongValue
}
return 0
}
func (m *Predicate) GetStringValue() string {
if x, ok := m.GetValue().(*Predicate_StringValue); ok {
return x.StringValue
}
return ""
}
func (m *Predicate) GetTimestampValue() *timestamp.Timestamp {
if x, ok := m.GetValue().(*Predicate_TimestampValue); ok {
return x.TimestampValue
}
return nil
}
func (m *Predicate) GetIntValues() *IntValues {
if x, ok := m.GetValue().(*Predicate_IntValues); ok {
return x.IntValues
}
return nil
}
func (m *Predicate) GetLongValues() *LongValues {
if x, ok := m.GetValue().(*Predicate_LongValues); ok {
return x.LongValues
}
return nil
}
func (m *Predicate) GetStringValues() *StringValues {
if x, ok := m.GetValue().(*Predicate_StringValues); ok {
return x.StringValues
}
return nil
}
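The oneof accessors above are the intended way to read Value; the field is set by assigning one of the Predicate_* wrapper structs. A minimal sketch (not part of this diff; the key and experiment name are hypothetical):

package main

import (
	"fmt"

	api "github.com/kubeflow/pipelines/backend/api/go_client"
)

func main() {
	p := &api.Predicate{
		Op:    api.Predicate_EQUALS,
		Key:   "name",
		Value: &api.Predicate_StringValue{StringValue: "my-experiment"},
	}
	fmt.Println(p.GetStringValue()) // "my-experiment"
	fmt.Println(p.GetIntValue())    // 0, since a different oneof member is set
}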
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Predicate) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Predicate_OneofMarshaler, _Predicate_OneofUnmarshaler, _Predicate_OneofSizer, []interface{}{
(*Predicate_IntValue)(nil),
(*Predicate_LongValue)(nil),
(*Predicate_StringValue)(nil),
(*Predicate_TimestampValue)(nil),
(*Predicate_IntValues)(nil),
(*Predicate_LongValues)(nil),
(*Predicate_StringValues)(nil),
}
}
func _Predicate_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Predicate)
// value
switch x := m.Value.(type) {
case *Predicate_IntValue:
b.EncodeVarint(3<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.IntValue))
case *Predicate_LongValue:
b.EncodeVarint(4<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.LongValue))
case *Predicate_StringValue:
b.EncodeVarint(5<<3 | proto.WireBytes)
b.EncodeStringBytes(x.StringValue)
case *Predicate_TimestampValue:
b.EncodeVarint(6<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.TimestampValue); err != nil {
return err
}
case *Predicate_IntValues:
b.EncodeVarint(7<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.IntValues); err != nil {
return err
}
case *Predicate_LongValues:
b.EncodeVarint(8<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.LongValues); err != nil {
return err
}
case *Predicate_StringValues:
b.EncodeVarint(9<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.StringValues); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Predicate.Value has unexpected type %T", x)
}
return nil
}
func _Predicate_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Predicate)
switch tag {
case 3: // value.int_value
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &Predicate_IntValue{int32(x)}
return true, err
case 4: // value.long_value
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Value = &Predicate_LongValue{int64(x)}
return true, err
case 5: // value.string_value
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Value = &Predicate_StringValue{x}
return true, err
case 6: // value.timestamp_value
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(timestamp.Timestamp)
err := b.DecodeMessage(msg)
m.Value = &Predicate_TimestampValue{msg}
return true, err
case 7: // value.int_values
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(IntValues)
err := b.DecodeMessage(msg)
m.Value = &Predicate_IntValues{msg}
return true, err
case 8: // value.long_values
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(LongValues)
err := b.DecodeMessage(msg)
m.Value = &Predicate_LongValues{msg}
return true, err
case 9: // value.string_values
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(StringValues)
err := b.DecodeMessage(msg)
m.Value = &Predicate_StringValues{msg}
return true, err
default:
return false, nil
}
}
func _Predicate_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Predicate)
// value
switch x := m.Value.(type) {
case *Predicate_IntValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(x.IntValue))
case *Predicate_LongValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(x.LongValue))
case *Predicate_StringValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(len(x.StringValue)))
n += len(x.StringValue)
case *Predicate_TimestampValue:
s := proto.Size(x.TimestampValue)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *Predicate_IntValues:
s := proto.Size(x.IntValues)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *Predicate_LongValues:
s := proto.Size(x.LongValues)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *Predicate_StringValues:
s := proto.Size(x.StringValues)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type IntValues struct {
Values []int32 `protobuf:"varint,1,rep,packed,name=values,proto3" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *IntValues) Reset() { *m = IntValues{} }
func (m *IntValues) String() string { return proto.CompactTextString(m) }
func (*IntValues) ProtoMessage() {}
func (*IntValues) Descriptor() ([]byte, []int) {
return fileDescriptor_filter_117257288386a875, []int{1}
}
func (m *IntValues) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IntValues.Unmarshal(m, b)
}
func (m *IntValues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IntValues.Marshal(b, m, deterministic)
}
func (dst *IntValues) XXX_Merge(src proto.Message) {
xxx_messageInfo_IntValues.Merge(dst, src)
}
func (m *IntValues) XXX_Size() int {
return xxx_messageInfo_IntValues.Size(m)
}
func (m *IntValues) XXX_DiscardUnknown() {
xxx_messageInfo_IntValues.DiscardUnknown(m)
}
var xxx_messageInfo_IntValues proto.InternalMessageInfo
func (m *IntValues) GetValues() []int32 {
if m != nil {
return m.Values
}
return nil
}
type StringValues struct {
Values []string `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StringValues) Reset() { *m = StringValues{} }
func (m *StringValues) String() string { return proto.CompactTextString(m) }
func (*StringValues) ProtoMessage() {}
func (*StringValues) Descriptor() ([]byte, []int) {
return fileDescriptor_filter_117257288386a875, []int{2}
}
func (m *StringValues) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StringValues.Unmarshal(m, b)
}
func (m *StringValues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StringValues.Marshal(b, m, deterministic)
}
func (dst *StringValues) XXX_Merge(src proto.Message) {
xxx_messageInfo_StringValues.Merge(dst, src)
}
func (m *StringValues) XXX_Size() int {
return xxx_messageInfo_StringValues.Size(m)
}
func (m *StringValues) XXX_DiscardUnknown() {
xxx_messageInfo_StringValues.DiscardUnknown(m)
}
var xxx_messageInfo_StringValues proto.InternalMessageInfo
func (m *StringValues) GetValues() []string {
if m != nil {
return m.Values
}
return nil
}
type LongValues struct {
Values []int64 `protobuf:"varint,3,rep,packed,name=values,proto3" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *LongValues) Reset() { *m = LongValues{} }
func (m *LongValues) String() string { return proto.CompactTextString(m) }
func (*LongValues) ProtoMessage() {}
func (*LongValues) Descriptor() ([]byte, []int) {
return fileDescriptor_filter_117257288386a875, []int{3}
}
func (m *LongValues) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_LongValues.Unmarshal(m, b)
}
func (m *LongValues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_LongValues.Marshal(b, m, deterministic)
}
func (dst *LongValues) XXX_Merge(src proto.Message) {
xxx_messageInfo_LongValues.Merge(dst, src)
}
func (m *LongValues) XXX_Size() int {
return xxx_messageInfo_LongValues.Size(m)
}
func (m *LongValues) XXX_DiscardUnknown() {
xxx_messageInfo_LongValues.DiscardUnknown(m)
}
var xxx_messageInfo_LongValues proto.InternalMessageInfo
func (m *LongValues) GetValues() []int64 {
if m != nil {
return m.Values
}
return nil
}
type Filter struct {
Predicates []*Predicate `protobuf:"bytes,1,rep,name=predicates,proto3" json:"predicates,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Filter) Reset() { *m = Filter{} }
func (m *Filter) String() string { return proto.CompactTextString(m) }
func (*Filter) ProtoMessage() {}
func (*Filter) Descriptor() ([]byte, []int) {
return fileDescriptor_filter_117257288386a875, []int{4}
}
func (m *Filter) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Filter.Unmarshal(m, b)
}
func (m *Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Filter.Marshal(b, m, deterministic)
}
func (dst *Filter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Filter.Merge(dst, src)
}
func (m *Filter) XXX_Size() int {
return xxx_messageInfo_Filter.Size(m)
}
func (m *Filter) XXX_DiscardUnknown() {
xxx_messageInfo_Filter.DiscardUnknown(m)
}
var xxx_messageInfo_Filter proto.InternalMessageInfo
func (m *Filter) GetPredicates() []*Predicate {
if m != nil {
return m.Predicates
}
return nil
}
func init() {
proto.RegisterType((*Predicate)(nil), "api.Predicate")
proto.RegisterType((*IntValues)(nil), "api.IntValues")
proto.RegisterType((*StringValues)(nil), "api.StringValues")
proto.RegisterType((*LongValues)(nil), "api.LongValues")
proto.RegisterType((*Filter)(nil), "api.Filter")
proto.RegisterEnum("api.Predicate_Op", Predicate_Op_name, Predicate_Op_value)
}
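Putting the message types together: a Filter is just a list of Predicates, and it marshals with the standard proto API. A minimal sketch (not part of this diff; the keys and values are hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	api "github.com/kubeflow/pipelines/backend/api/go_client"
)

func main() {
	f := &api.Filter{
		Predicates: []*api.Predicate{
			{Op: api.Predicate_EQUALS, Key: "status", Value: &api.Predicate_StringValue{StringValue: "Succeeded"}},
			{Op: api.Predicate_IN, Key: "id", Value: &api.Predicate_IntValues{IntValues: &api.IntValues{Values: []int32{1, 2, 3}}}},
		},
	}
	b, err := proto.Marshal(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("filter encodes to %d bytes\n", len(b))
}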
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// DummyFilterServiceClient is the client API for DummyFilterService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type DummyFilterServiceClient interface {
GetFilter(ctx context.Context, in *Filter, opts ...grpc.CallOption) (*Filter, error)
}
type dummyFilterServiceClient struct {
cc *grpc.ClientConn
}
func NewDummyFilterServiceClient(cc *grpc.ClientConn) DummyFilterServiceClient {
return &dummyFilterServiceClient{cc}
}
func (c *dummyFilterServiceClient) GetFilter(ctx context.Context, in *Filter, opts ...grpc.CallOption) (*Filter, error) {
out := new(Filter)
err := c.cc.Invoke(ctx, "/api.DummyFilterService/GetFilter", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
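Calling the generated client follows the usual gRPC pattern; a minimal sketch (not part of this diff), assuming a server for this service listens on localhost:9090:

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	api "github.com/kubeflow/pipelines/backend/api/go_client"
)

func main() {
	conn, err := grpc.Dial("localhost:9090", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := api.NewDummyFilterServiceClient(conn)
	f, err := client.GetFilter(context.Background(), &api.Filter{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(f.GetPredicates()))
}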
// DummyFilterServiceServer is the server API for DummyFilterService service.
type DummyFilterServiceServer interface {
GetFilter(context.Context, *Filter) (*Filter, error)
}
func RegisterDummyFilterServiceServer(s *grpc.Server, srv DummyFilterServiceServer) {
s.RegisterService(&_DummyFilterService_serviceDesc, srv)
}
func _DummyFilterService_GetFilter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Filter)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DummyFilterServiceServer).GetFilter(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.DummyFilterService/GetFilter",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DummyFilterServiceServer).GetFilter(ctx, req.(*Filter))
}
return interceptor(ctx, in, info, handler)
}
var _DummyFilterService_serviceDesc = grpc.ServiceDesc{
ServiceName: "api.DummyFilterService",
HandlerType: (*DummyFilterServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetFilter",
Handler: _DummyFilterService_GetFilter_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/filter.proto",
}
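On the server side, implementing the one-method DummyFilterServiceServer interface and registering it against the service descriptor above is all that is needed; a minimal sketch (not part of this diff; the echo behavior and port are hypothetical):

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	api "github.com/kubeflow/pipelines/backend/api/go_client"
)

// echoFilterServer is a hypothetical implementation that returns the
// received Filter unchanged.
type echoFilterServer struct{}

func (echoFilterServer) GetFilter(ctx context.Context, f *api.Filter) (*api.Filter, error) {
	return f, nil
}

func main() {
	lis, err := net.Listen("tcp", ":9090")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	api.RegisterDummyFilterServiceServer(s, echoFilterServer{})
	log.Fatal(s.Serve(lis))
}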
func init() { proto.RegisterFile("backend/api/filter.proto", fileDescriptor_filter_117257288386a875) }
var fileDescriptor_filter_117257288386a875 = []byte{
// 553 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x93, 0xdf, 0x8f, 0xd2, 0x40,
0x10, 0xc7, 0xfb, 0xe3, 0x28, 0xd7, 0x81, 0xe3, 0xea, 0x6a, 0xb4, 0x21, 0x9a, 0xab, 0x9c, 0xd1,
0x3e, 0xb5, 0x09, 0x17, 0x93, 0x7b, 0xf1, 0x01, 0x22, 0x02, 0x91, 0x14, 0x6d, 0x41, 0x13, 0x5f,
0x48, 0xe1, 0x16, 0xdc, 0x50, 0xba, 0x1b, 0xba, 0x9c, 0xb9, 0xbf, 0xc4, 0xff, 0xc2, 0xbf, 0xd1,
0xb4, 0xdb, 0x2e, 0x7d, 0xeb, 0xcc, 0x7c, 0xbe, 0xd3, 0xf9, 0x4e, 0xa7, 0x60, 0xaf, 0xe3, 0xcd,
0x1e, 0xa7, 0x0f, 0x7e, 0xcc, 0x88, 0xbf, 0x25, 0x09, 0xc7, 0x47, 0x8f, 0x1d, 0x29, 0xa7, 0x48,
0x8f, 0x19, 0xe9, 0xbe, 0xde, 0x51, 0xba, 0x4b, 0x70, 0x51, 0x8d, 0xd3, 0x94, 0xf2, 0x98, 0x13,
0x9a, 0x66, 0x02, 0xe9, 0xde, 0x94, 0xd5, 0x22, 0x5a, 0x9f, 0xb6, 0x3e, 0x27, 0x07, 0x9c, 0xf1,
0xf8, 0xc0, 0x04, 0xd0, 0xfb, 0x77, 0x01, 0xe6, 0xb7, 0x23, 0x7e, 0x20, 0x9b, 0x98, 0x63, 0xf4,
0x16, 0x34, 0xca, 0x6c, 0xd5, 0x51, 0xdd, 0x4e, 0xff, 0x99, 0x17, 0x33, 0xe2, 0xc9, 0x9a, 0x37,
0x67, 0xa1, 0x46, 0x19, 0xb2, 0x40, 0xdf, 0xe3, 0x27, 0x5b, 0x73, 0x54, 0xd7, 0x0c, 0xf3, 0x47,
0xf4, 0x06, 0x4c, 0x92, 0xf2, 0xd5, 0x63, 0x9c, 0x9c, 0xb0, 0xad, 0x3b, 0xaa, 0xdb, 0x98, 0x28,
0xe1, 0x25, 0x49, 0xf9, 0x8f, 0x3c, 0x83, 0x6e, 0x00, 0x12, 0x9a, 0xee, 0xca, 0xfa, 0x85, 0xa3,
0xba, 0xfa, 0x44, 0x09, 0xcd, 0x3c, 0x27, 0x80, 0x5b, 0x68, 0x67, 0xfc, 0x48, 0x24, 0xd2, 0xc8,
0x5b, 0x4f, 0x94, 0xb0, 0x25, 0xb2, 0x02, 0x1a, 0xc1, 0xb5, 0x1c, 0xbd, 0xe4, 0x0c, 0x47, 0x75,
0x5b, 0xfd, 0xae, 0x27, 0x2c, 0x7a, 0x95, 0x45, 0x6f, 0x51, 0x71, 0x13, 0x25, 0xec, 0x48, 0x91,
0x68, 0xe3, 0x03, 0xc8, 0x59, 0x33, 0xbb, 0x59, 0x74, 0xe8, 0x14, 0x46, 0xa7, 0xe5, 0xbc, 0x59,
0x3e, 0x5c, 0x35, 0x7c, 0x86, 0xfa, 0xd0, 0x3a, 0x4f, 0x9f, 0xd9, 0x97, 0x85, 0xe2, 0xba, 0x50,
0xcc, 0x2a, 0x07, 0xb9, 0x04, 0xa4, 0x9f, 0x0c, 0xdd, 0xc3, 0x55, 0xdd, 0x50, 0x66, 0x9b, 0x85,
0x4a, 0x2c, 0x34, 0x3a, 0x9b, 0xca, 0x75, 0xed, 0x9a, 0xc9, 0xac, 0xf7, 0x57, 0x05, 0x6d, 0xce,
0x50, 0x0b, 0x9a, 0xcb, 0xe0, 0x6b, 0x30, 0xff, 0x19, 0x58, 0x0a, 0x02, 0x30, 0x46, 0xdf, 0x97,
0x83, 0x59, 0x64, 0xa9, 0xa8, 0x03, 0x10, 0xcc, 0x17, 0xab, 0x32, 0xd6, 0x90, 0x05, 0xed, 0x71,
0x38, 0x1a, 0x2c, 0x46, 0xe1, 0x6a, 0x31, 0x19, 0x04, 0x96, 0x8e, 0x5e, 0xc1, 0xf3, 0x7a, 0xa6,
0x42, 0x1b, 0xe8, 0x0a, 0xcc, 0xd9, 0x28, 0x8a, 0x04, 0x67, 0xa0, 0x17, 0x60, 0xc9, 0xb0, 0x82,
0x9a, 0xc8, 0x00, 0x6d, 0x1a, 0x58, 0x97, 0x79, 0xdf, 0x69, 0xb4, 0x8a, 0x96, 0xc3, 0x68, 0x11,
0x4e, 0x83, 0xb1, 0x65, 0x0e, 0x9b, 0xd0, 0x28, 0xcc, 0xf4, 0x6e, 0xc1, 0x94, 0xab, 0x42, 0x2f,
0xc1, 0x28, 0x2d, 0xaa, 0x8e, 0xee, 0x36, 0xc2, 0x32, 0xea, 0xbd, 0x87, 0x76, 0xdd, 0x67, 0x8d,
0xd3, 0x1c, 0xdd, 0x35, 0x25, 0xf7, 0x0e, 0xe0, 0xbc, 0xc5, 0x1a, 0xa5, 0x3b, 0xba, 0xab, 0x4b,
0xea, 0x1e, 0x8c, 0x2f, 0xc5, 0xdd, 0x23, 0x0f, 0x80, 0x55, 0x07, 0x29, 0xde, 0x59, 0x7d, 0x3e,
0x79, 0xa7, 0x61, 0x8d, 0xe8, 0x7f, 0x02, 0xf4, 0xf9, 0x74, 0x38, 0x3c, 0x09, 0x79, 0x84, 0x8f,
0x8f, 0x64, 0x83, 0xd1, 0x07, 0x30, 0xc7, 0x98, 0x97, 0x2d, 0x5b, 0x85, 0x5c, 0x04, 0xdd, 0x7a,
0xd0, 0x53, 0x86, 0x1f, 0x7f, 0xdd, 0xed, 0x08, 0xff, 0x7d, 0x5a, 0x7b, 0x1b, 0x7a, 0xf0, 0xf7,
0xa7, 0x35, 0xde, 0x26, 0xf4, 0x8f, 0xcf, 0x08, 0xc3, 0x09, 0x49, 0x71, 0xe6, 0xd7, 0x7f, 0xcd,
0x1d, 0x5d, 0x6d, 0x12, 0x82, 0x53, 0xbe, 0x36, 0x8a, 0x53, 0xbc, 0xfb, 0x1f, 0x00, 0x00, 0xff,
0xff, 0xf8, 0x38, 0x00, 0xb0, 0xba, 0x03, 0x00, 0x00,
}


@@ -1,17 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build ignore
package ignore

File diff suppressed because it is too large


@@ -1,374 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/job.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_JobService_CreateJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreateJobRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Job); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreateJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_JobService_GetJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.GetJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_JobService_ListJobs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_JobService_ListJobs_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListJobsRequest
var metadata runtime.ServerMetadata
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_JobService_ListJobs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListJobs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_JobService_EnableJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq EnableJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.EnableJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_JobService_DisableJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DisableJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DisableJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_JobService_DeleteJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DeleteJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterJobServiceHandlerFromEndpoint is the same as RegisterJobServiceHandler but
// automatically dials "endpoint" and closes the connection when "ctx" is done.
func RegisterJobServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterJobServiceHandler(ctx, mux, conn)
}
// RegisterJobServiceHandler registers the http handlers for service JobService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterJobServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterJobServiceHandlerClient(ctx, mux, NewJobServiceClient(conn))
}
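When several generated gateways share one backend, dialing once and passing the same *grpc.ClientConn to each Register*Handler avoids the per-service dial that the *FromEndpoint variant performs. A minimal sketch (not part of this diff; the addresses are hypothetical):

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	api "github.com/kubeflow/pipelines/backend/api/go_client"
)

func main() {
	conn, err := grpc.Dial("localhost:8887", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	mux := runtime.NewServeMux()
	// One connection can back several of the generated gateways.
	if err := api.RegisterJobServiceHandler(context.Background(), mux, conn); err != nil {
		log.Fatal(err)
	}
	if err := api.RegisterPipelineServiceHandler(context.Background(), mux, conn); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8888", mux))
}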
// RegisterJobServiceHandlerClient registers the http handlers for service JobService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "JobServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed-in "JobServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client, etc.), then it is up to the passed-in
// "JobServiceClient" to call the correct interceptors.
func RegisterJobServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client JobServiceClient) error {
mux.Handle("POST", pattern_JobService_CreateJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_CreateJob_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_CreateJob_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_JobService_GetJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_GetJob_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_GetJob_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_JobService_ListJobs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_ListJobs_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_ListJobs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_JobService_EnableJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_EnableJob_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_EnableJob_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_JobService_DisableJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_DisableJob_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_DisableJob_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_JobService_DeleteJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_DeleteJob_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_DeleteJob_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_JobService_CreateJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "jobs"}, ""))
pattern_JobService_GetJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "jobs", "id"}, ""))
pattern_JobService_ListJobs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "jobs"}, ""))
pattern_JobService_EnableJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1beta1", "jobs", "id", "enable"}, ""))
pattern_JobService_DisableJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1beta1", "jobs", "id", "disable"}, ""))
pattern_JobService_DeleteJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "jobs", "id"}, ""))
)
var (
forward_JobService_CreateJob_0 = runtime.ForwardResponseMessage
forward_JobService_GetJob_0 = runtime.ForwardResponseMessage
forward_JobService_ListJobs_0 = runtime.ForwardResponseMessage
forward_JobService_EnableJob_0 = runtime.ForwardResponseMessage
forward_JobService_DisableJob_0 = runtime.ForwardResponseMessage
forward_JobService_DeleteJob_0 = runtime.ForwardResponseMessage
)
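Read together, the pattern_JobService_* variables above define the REST surface: POST and GET on /apis/v1beta1/jobs, GET and DELETE on /apis/v1beta1/jobs/{id}, and POST on /apis/v1beta1/jobs/{id}/enable and .../disable. A minimal sketch of exercising one route (not part of this diff; the gateway address and job id are hypothetical):

package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Routed by pattern_JobService_EnableJob_0 to JobService.EnableJob.
	url := "http://localhost:8888/apis/v1beta1/jobs/123/enable"
	resp, err := http.Post(url, "application/json", strings.NewReader("{}"))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}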


@@ -1,101 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/parameter.proto
package go_client // import "github.com/kubeflow/pipelines/backend/api/go_client"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Parameter struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Parameter) Reset() { *m = Parameter{} }
func (m *Parameter) String() string { return proto.CompactTextString(m) }
func (*Parameter) ProtoMessage() {}
func (*Parameter) Descriptor() ([]byte, []int) {
return fileDescriptor_parameter_096b3b61dd8773be, []int{0}
}
func (m *Parameter) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Parameter.Unmarshal(m, b)
}
func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Parameter.Marshal(b, m, deterministic)
}
func (dst *Parameter) XXX_Merge(src proto.Message) {
xxx_messageInfo_Parameter.Merge(dst, src)
}
func (m *Parameter) XXX_Size() int {
return xxx_messageInfo_Parameter.Size(m)
}
func (m *Parameter) XXX_DiscardUnknown() {
xxx_messageInfo_Parameter.DiscardUnknown(m)
}
var xxx_messageInfo_Parameter proto.InternalMessageInfo
func (m *Parameter) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Parameter) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
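As with the other messages in this package, the Parameter getters are nil-safe: they return zero values on a nil receiver. A small illustration (not part of this diff; the parameter name and value are hypothetical):

package main

import (
	"fmt"

	api "github.com/kubeflow/pipelines/backend/api/go_client"
)

func main() {
	var p *api.Parameter // nil receivers are safe with generated getters
	fmt.Printf("%q %q\n", p.GetName(), p.GetValue()) // "" ""

	p = &api.Parameter{Name: "learning_rate", Value: "0.01"}
	fmt.Printf("%q %q\n", p.GetName(), p.GetValue()) // "learning_rate" "0.01"
}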
func init() {
proto.RegisterType((*Parameter)(nil), "api.Parameter")
}
func init() {
proto.RegisterFile("backend/api/parameter.proto", fileDescriptor_parameter_096b3b61dd8773be)
}
var fileDescriptor_parameter_096b3b61dd8773be = []byte{
// 147 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x4a, 0x4c, 0xce,
0x4e, 0xcd, 0x4b, 0xd1, 0x4f, 0x2c, 0xc8, 0xd4, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x4d, 0x2d, 0x49,
0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4e, 0x2c, 0xc8, 0x54, 0x32, 0xe5, 0xe2,
0x0c, 0x80, 0x89, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a,
0x70, 0x06, 0x81, 0xd9, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, 0x4c, 0x60,
0x41, 0x08, 0xc7, 0xc9, 0x34, 0xca, 0x38, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f,
0x57, 0x3f, 0xbb, 0x34, 0x29, 0x35, 0x2d, 0x27, 0xbf, 0x5c, 0xbf, 0x20, 0xb3, 0x20, 0x35, 0x27,
0x33, 0x2f, 0xb5, 0x58, 0x1f, 0xd9, 0xe2, 0xf4, 0xfc, 0xf8, 0xe4, 0x9c, 0xcc, 0xd4, 0xbc, 0x92,
0x24, 0x36, 0xb0, 0xcd, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x47, 0x98, 0xfd, 0x98,
0x00, 0x00, 0x00,
}


@@ -1,17 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build ignore
package ignore

File diff suppressed because it is too large


@@ -1,558 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/pipeline.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_PipelineService_CreatePipeline_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreatePipelineRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Pipeline); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreatePipeline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
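Note that the decoder above targets protoReq.Pipeline rather than the CreatePipelineRequest wrapper, so the HTTP body is the bare Pipeline JSON. A minimal sketch (not part of this diff; the gateway address and the "name" field are assumptions, since pipeline.pb.go is suppressed in this diff):

package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// The JSON body decodes straight into the Pipeline message.
	body := strings.NewReader(`{"name": "demo-pipeline"}`)
	resp, err := http.Post("http://localhost:8888/apis/v1beta1/pipelines", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}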
func request_PipelineService_GetPipeline_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetPipelineRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.GetPipeline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_PipelineService_ListPipelines_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_PipelineService_ListPipelines_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListPipelinesRequest
var metadata runtime.ServerMetadata
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_PipelineService_ListPipelines_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListPipelines(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_DeletePipeline_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeletePipelineRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DeletePipeline(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_GetTemplate_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetTemplateRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.GetTemplate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_CreatePipelineVersion_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreatePipelineVersionRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Version); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreatePipelineVersion(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_GetPipelineVersion_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetPipelineVersionRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["version_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "version_id")
}
protoReq.VersionId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "version_id", err)
}
msg, err := client.GetPipelineVersion(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_PipelineService_ListPipelineVersions_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_PipelineService_ListPipelineVersions_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListPipelineVersionsRequest
var metadata runtime.ServerMetadata
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_PipelineService_ListPipelineVersions_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListPipelineVersions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_DeletePipelineVersion_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeletePipelineVersionRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["version_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "version_id")
}
protoReq.VersionId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "version_id", err)
}
msg, err := client.DeletePipelineVersion(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_PipelineService_GetPipelineVersionTemplate_0(ctx context.Context, marshaler runtime.Marshaler, client PipelineServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetPipelineVersionTemplateRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["version_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "version_id")
}
protoReq.VersionId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "version_id", err)
}
msg, err := client.GetPipelineVersionTemplate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterPipelineServiceHandlerFromEndpoint is the same as RegisterPipelineServiceHandler but
// automatically dials "endpoint" and closes the connection when "ctx" is done.
func RegisterPipelineServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterPipelineServiceHandler(ctx, mux, conn)
}
// RegisterPipelineServiceHandler registers the http handlers for service PipelineService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterPipelineServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterPipelineServiceHandlerClient(ctx, mux, NewPipelineServiceClient(conn))
}
// RegisterPipelineServiceHandlerClient registers the http handlers for service PipelineService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "PipelineServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed-in "PipelineServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client, etc.), then it is up to the passed-in
// "PipelineServiceClient" to call the correct interceptors.
func RegisterPipelineServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client PipelineServiceClient) error {
mux.Handle("POST", pattern_PipelineService_CreatePipeline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_CreatePipeline_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_CreatePipeline_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_PipelineService_GetPipeline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_GetPipeline_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_GetPipeline_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_PipelineService_ListPipelines_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_ListPipelines_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_ListPipelines_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_PipelineService_DeletePipeline_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_DeletePipeline_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_DeletePipeline_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_PipelineService_GetTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_GetTemplate_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_GetTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_PipelineService_CreatePipelineVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_CreatePipelineVersion_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_CreatePipelineVersion_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_PipelineService_GetPipelineVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_GetPipelineVersion_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_GetPipelineVersion_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_PipelineService_ListPipelineVersions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_ListPipelineVersions_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_ListPipelineVersions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_PipelineService_DeletePipelineVersion_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_DeletePipelineVersion_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_DeletePipelineVersion_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_PipelineService_GetPipelineVersionTemplate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_PipelineService_GetPipelineVersionTemplate_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_PipelineService_GetPipelineVersionTemplate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_PipelineService_CreatePipeline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "pipelines"}, ""))
pattern_PipelineService_GetPipeline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "pipelines", "id"}, ""))
pattern_PipelineService_ListPipelines_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "pipelines"}, ""))
pattern_PipelineService_DeletePipeline_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "pipelines", "id"}, ""))
pattern_PipelineService_GetTemplate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1beta1", "pipelines", "id", "templates"}, ""))
pattern_PipelineService_CreatePipelineVersion_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "pipeline_versions"}, ""))
pattern_PipelineService_GetPipelineVersion_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "pipeline_versions", "version_id"}, ""))
pattern_PipelineService_ListPipelineVersions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "pipeline_versions"}, ""))
pattern_PipelineService_DeletePipelineVersion_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "pipeline_versions", "version_id"}, ""))
pattern_PipelineService_GetPipelineVersionTemplate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1beta1", "pipeline_versions", "version_id", "templates"}, ""))
)
var (
forward_PipelineService_CreatePipeline_0 = runtime.ForwardResponseMessage
forward_PipelineService_GetPipeline_0 = runtime.ForwardResponseMessage
forward_PipelineService_ListPipelines_0 = runtime.ForwardResponseMessage
forward_PipelineService_DeletePipeline_0 = runtime.ForwardResponseMessage
forward_PipelineService_GetTemplate_0 = runtime.ForwardResponseMessage
forward_PipelineService_CreatePipelineVersion_0 = runtime.ForwardResponseMessage
forward_PipelineService_GetPipelineVersion_0 = runtime.ForwardResponseMessage
forward_PipelineService_ListPipelineVersions_0 = runtime.ForwardResponseMessage
forward_PipelineService_DeletePipelineVersion_0 = runtime.ForwardResponseMessage
forward_PipelineService_GetPipelineVersionTemplate_0 = runtime.ForwardResponseMessage
)
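
The handlers above follow the standard grpc-gateway shape: each route is matched by a pattern_* matcher, translated into a gRPC call by a request_* helper, and the response is streamed back through forward_* (runtime.ForwardResponseMessage). As a minimal sketch of how this generated gateway might be mounted, assuming a RegisterPipelineServiceHandlerFromEndpoint variant analogous to the ReportService and RunService ones shown later in this diff, and with placeholder addresses:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	gw "github.com/kubeflow/pipelines/backend/api/go_client"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Mount the generated REST routes on a gateway mux.
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()} // sketch only: no TLS
	// "localhost:8887" is a placeholder gRPC endpoint, not a value from this diff.
	if err := gw.RegisterPipelineServiceHandlerFromEndpoint(ctx, mux, "localhost:8887", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8888", mux))
}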


@@ -1,130 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/pipeline_spec.proto
package go_client // import "github.com/kubeflow/pipelines/backend/api/go_client"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type PipelineSpec struct {
PipelineId string `protobuf:"bytes,1,opt,name=pipeline_id,json=pipelineId,proto3" json:"pipeline_id,omitempty"`
PipelineName string `protobuf:"bytes,5,opt,name=pipeline_name,json=pipelineName,proto3" json:"pipeline_name,omitempty"`
WorkflowManifest string `protobuf:"bytes,2,opt,name=workflow_manifest,json=workflowManifest,proto3" json:"workflow_manifest,omitempty"`
PipelineManifest string `protobuf:"bytes,3,opt,name=pipeline_manifest,json=pipelineManifest,proto3" json:"pipeline_manifest,omitempty"`
Parameters []*Parameter `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PipelineSpec) Reset() { *m = PipelineSpec{} }
func (m *PipelineSpec) String() string { return proto.CompactTextString(m) }
func (*PipelineSpec) ProtoMessage() {}
func (*PipelineSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_pipeline_spec_ab99afe9ca6994cc, []int{0}
}
func (m *PipelineSpec) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PipelineSpec.Unmarshal(m, b)
}
func (m *PipelineSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PipelineSpec.Marshal(b, m, deterministic)
}
func (dst *PipelineSpec) XXX_Merge(src proto.Message) {
xxx_messageInfo_PipelineSpec.Merge(dst, src)
}
func (m *PipelineSpec) XXX_Size() int {
return xxx_messageInfo_PipelineSpec.Size(m)
}
func (m *PipelineSpec) XXX_DiscardUnknown() {
xxx_messageInfo_PipelineSpec.DiscardUnknown(m)
}
var xxx_messageInfo_PipelineSpec proto.InternalMessageInfo
func (m *PipelineSpec) GetPipelineId() string {
if m != nil {
return m.PipelineId
}
return ""
}
func (m *PipelineSpec) GetPipelineName() string {
if m != nil {
return m.PipelineName
}
return ""
}
func (m *PipelineSpec) GetWorkflowManifest() string {
if m != nil {
return m.WorkflowManifest
}
return ""
}
func (m *PipelineSpec) GetPipelineManifest() string {
if m != nil {
return m.PipelineManifest
}
return ""
}
func (m *PipelineSpec) GetParameters() []*Parameter {
if m != nil {
return m.Parameters
}
return nil
}
func init() {
proto.RegisterType((*PipelineSpec)(nil), "api.PipelineSpec")
}
func init() {
proto.RegisterFile("backend/api/pipeline_spec.proto", fileDescriptor_pipeline_spec_ab99afe9ca6994cc)
}
var fileDescriptor_pipeline_spec_ab99afe9ca6994cc = []byte{
// 236 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0xd0, 0xc1, 0x4a, 0xc3, 0x40,
0x10, 0x06, 0x60, 0x62, 0x54, 0x70, 0x5a, 0x45, 0x73, 0x0a, 0x7a, 0x68, 0xd1, 0x4b, 0x41, 0xd8,
0x80, 0xc5, 0x17, 0xf0, 0xe6, 0x41, 0x29, 0xf5, 0xe6, 0x25, 0x4c, 0x36, 0xd3, 0x3a, 0x24, 0xbb,
0x3b, 0x24, 0x5b, 0xfa, 0xb6, 0x3e, 0x8b, 0x34, 0xed, 0x2e, 0xf1, 0xfa, 0xcf, 0xb7, 0xb3, 0xcc,
0x0f, 0xb3, 0x0a, 0x75, 0x43, 0xb6, 0x2e, 0x50, 0xb8, 0x10, 0x16, 0x6a, 0xd9, 0x52, 0xd9, 0x0b,
0x69, 0x25, 0x9d, 0xf3, 0x2e, 0x4b, 0x51, 0xf8, 0xfe, 0xe1, 0x9f, 0xc2, 0x0e, 0x0d, 0x79, 0xea,
0x8e, 0xe2, 0xf1, 0x37, 0x81, 0xe9, 0xea, 0xf4, 0xf2, 0x4b, 0x48, 0x67, 0x33, 0x98, 0xc4, 0x4d,
0x5c, 0xe7, 0xc9, 0x3c, 0x59, 0x5c, 0xad, 0x21, 0x44, 0xef, 0x75, 0xf6, 0x04, 0xd7, 0x11, 0x58,
0x34, 0x94, 0x5f, 0x0c, 0x64, 0x1a, 0xc2, 0x4f, 0x34, 0x94, 0x3d, 0xc3, 0xdd, 0xde, 0x75, 0xcd,
0xa6, 0x75, 0xfb, 0xd2, 0xa0, 0xe5, 0x0d, 0xf5, 0x3e, 0x3f, 0x1b, 0xe0, 0x6d, 0x18, 0x7c, 0x9c,
0xf2, 0x03, 0x8e, 0x1b, 0x23, 0x4e, 0x8f, 0x38, 0x0c, 0x22, 0x56, 0x00, 0xf1, 0x86, 0x3e, 0x3f,
0x9f, 0xa7, 0x8b, 0xc9, 0xcb, 0x8d, 0x42, 0x61, 0xb5, 0x0a, 0xf1, 0x7a, 0x24, 0xde, 0x5e, 0xbf,
0x97, 0x5b, 0xf6, 0x3f, 0xbb, 0x4a, 0x69, 0x67, 0x8a, 0x66, 0x57, 0xd1, 0xe1, 0xef, 0xd8, 0x56,
0x5f, 0x8c, 0xdb, 0xd9, 0xba, 0x52, 0xb7, 0x4c, 0xd6, 0x57, 0x97, 0x43, 0x3d, 0xcb, 0xbf, 0x00,
0x00, 0x00, 0xff, 0xff, 0xeb, 0x6c, 0x41, 0xaf, 0x63, 0x01, 0x00, 0x00,
}
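
A note on the getters above: each checks its receiver for nil before dereferencing, so chained access never needs a guard. An illustrative snippet (not part of the generated file; the values are placeholders, and it assumes the same go_client package with fmt imported):

spec := &PipelineSpec{
	PipelineId:   "pipeline-123", // placeholder ID
	PipelineName: "demo",
}
fmt.Println(spec.GetPipelineName()) // "demo"

var missing *PipelineSpec // a nil receiver is safe
fmt.Println(missing.GetPipelineId() == "") // true: the getter returns the zero value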


@@ -1,17 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build ignore
package ignore


@@ -1,252 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/report.proto
package go_client // import "github.com/kubeflow/pipelines/backend/api/go_client"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import empty "github.com/golang/protobuf/ptypes/empty"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ReportWorkflowRequest struct {
Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReportWorkflowRequest) Reset() { *m = ReportWorkflowRequest{} }
func (m *ReportWorkflowRequest) String() string { return proto.CompactTextString(m) }
func (*ReportWorkflowRequest) ProtoMessage() {}
func (*ReportWorkflowRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_report_cafa40ca8b98916d, []int{0}
}
func (m *ReportWorkflowRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ReportWorkflowRequest.Unmarshal(m, b)
}
func (m *ReportWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ReportWorkflowRequest.Marshal(b, m, deterministic)
}
func (dst *ReportWorkflowRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReportWorkflowRequest.Merge(dst, src)
}
func (m *ReportWorkflowRequest) XXX_Size() int {
return xxx_messageInfo_ReportWorkflowRequest.Size(m)
}
func (m *ReportWorkflowRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ReportWorkflowRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ReportWorkflowRequest proto.InternalMessageInfo
func (m *ReportWorkflowRequest) GetWorkflow() string {
if m != nil {
return m.Workflow
}
return ""
}
type ReportScheduledWorkflowRequest struct {
ScheduledWorkflow string `protobuf:"bytes,1,opt,name=scheduled_workflow,json=scheduledWorkflow,proto3" json:"scheduled_workflow,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ReportScheduledWorkflowRequest) Reset() { *m = ReportScheduledWorkflowRequest{} }
func (m *ReportScheduledWorkflowRequest) String() string { return proto.CompactTextString(m) }
func (*ReportScheduledWorkflowRequest) ProtoMessage() {}
func (*ReportScheduledWorkflowRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_report_cafa40ca8b98916d, []int{1}
}
func (m *ReportScheduledWorkflowRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ReportScheduledWorkflowRequest.Unmarshal(m, b)
}
func (m *ReportScheduledWorkflowRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ReportScheduledWorkflowRequest.Marshal(b, m, deterministic)
}
func (dst *ReportScheduledWorkflowRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ReportScheduledWorkflowRequest.Merge(dst, src)
}
func (m *ReportScheduledWorkflowRequest) XXX_Size() int {
return xxx_messageInfo_ReportScheduledWorkflowRequest.Size(m)
}
func (m *ReportScheduledWorkflowRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ReportScheduledWorkflowRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ReportScheduledWorkflowRequest proto.InternalMessageInfo
func (m *ReportScheduledWorkflowRequest) GetScheduledWorkflow() string {
if m != nil {
return m.ScheduledWorkflow
}
return ""
}
func init() {
proto.RegisterType((*ReportWorkflowRequest)(nil), "api.ReportWorkflowRequest")
proto.RegisterType((*ReportScheduledWorkflowRequest)(nil), "api.ReportScheduledWorkflowRequest")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// ReportServiceClient is the client API for ReportService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ReportServiceClient interface {
ReportWorkflow(ctx context.Context, in *ReportWorkflowRequest, opts ...grpc.CallOption) (*empty.Empty, error)
ReportScheduledWorkflow(ctx context.Context, in *ReportScheduledWorkflowRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}
type reportServiceClient struct {
cc *grpc.ClientConn
}
func NewReportServiceClient(cc *grpc.ClientConn) ReportServiceClient {
return &reportServiceClient{cc}
}
func (c *reportServiceClient) ReportWorkflow(ctx context.Context, in *ReportWorkflowRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/api.ReportService/ReportWorkflow", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *reportServiceClient) ReportScheduledWorkflow(ctx context.Context, in *ReportScheduledWorkflowRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
out := new(empty.Empty)
err := c.cc.Invoke(ctx, "/api.ReportService/ReportScheduledWorkflow", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ReportServiceServer is the server API for ReportService service.
type ReportServiceServer interface {
ReportWorkflow(context.Context, *ReportWorkflowRequest) (*empty.Empty, error)
ReportScheduledWorkflow(context.Context, *ReportScheduledWorkflowRequest) (*empty.Empty, error)
}
func RegisterReportServiceServer(s *grpc.Server, srv ReportServiceServer) {
s.RegisterService(&_ReportService_serviceDesc, srv)
}
func _ReportService_ReportWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportWorkflowRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportServiceServer).ReportWorkflow(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ReportService/ReportWorkflow",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportServiceServer).ReportWorkflow(ctx, req.(*ReportWorkflowRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ReportService_ReportScheduledWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportScheduledWorkflowRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportServiceServer).ReportScheduledWorkflow(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.ReportService/ReportScheduledWorkflow",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportServiceServer).ReportScheduledWorkflow(ctx, req.(*ReportScheduledWorkflowRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ReportService_serviceDesc = grpc.ServiceDesc{
ServiceName: "api.ReportService",
HandlerType: (*ReportServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ReportWorkflow",
Handler: _ReportService_ReportWorkflow_Handler,
},
{
MethodName: "ReportScheduledWorkflow",
Handler: _ReportService_ReportScheduledWorkflow_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/report.proto",
}
func init() { proto.RegisterFile("backend/api/report.proto", fileDescriptor_report_cafa40ca8b98916d) }
var fileDescriptor_report_cafa40ca8b98916d = []byte{
// 310 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x4a, 0xc3, 0x40,
0x10, 0x87, 0x69, 0x05, 0xd1, 0x05, 0x05, 0x17, 0xb4, 0x25, 0x8a, 0x94, 0xf4, 0xa2, 0x07, 0x77,
0xa9, 0x45, 0x0f, 0xe2, 0x49, 0xf0, 0x2c, 0xd4, 0x83, 0xe0, 0xa5, 0xec, 0xa6, 0xd3, 0x74, 0x69,
0xba, 0xb3, 0x66, 0x37, 0x2d, 0x5e, 0x7d, 0x05, 0x05, 0x1f, 0xcc, 0x57, 0xf0, 0x41, 0x24, 0x7f,
0x6d, 0x43, 0x73, 0x9c, 0xf9, 0x65, 0x26, 0xdf, 0x37, 0x2c, 0xe9, 0x4a, 0x11, 0xcc, 0x41, 0x4f,
0xb8, 0x30, 0x8a, 0xc7, 0x60, 0x30, 0x76, 0xcc, 0xc4, 0xe8, 0x90, 0xee, 0x08, 0xa3, 0xbc, 0xb3,
0x10, 0x31, 0x8c, 0x20, 0x4b, 0x85, 0xd6, 0xe8, 0x84, 0x53, 0xa8, 0x6d, 0xfe, 0x89, 0x77, 0x5a,
0xa4, 0x59, 0x25, 0x93, 0x29, 0x87, 0x85, 0x71, 0xef, 0x79, 0xe8, 0x0f, 0xc9, 0xf1, 0x28, 0xdb,
0xf7, 0x82, 0xf1, 0x7c, 0x1a, 0xe1, 0x6a, 0x04, 0x6f, 0x09, 0x58, 0x47, 0x3d, 0xb2, 0xb7, 0x2a,
0x5a, 0xdd, 0x56, 0xaf, 0x75, 0xb1, 0x3f, 0xaa, 0x6a, 0xff, 0x89, 0x9c, 0xe7, 0x43, 0xcf, 0xc1,
0x0c, 0x26, 0x49, 0x04, 0x93, 0xfa, 0xf4, 0x15, 0xa1, 0xb6, 0xcc, 0xc6, 0xb5, 0x3d, 0x47, 0xb6,
0x3e, 0x75, 0xfd, 0xdd, 0x26, 0x07, 0xc5, 0x46, 0x88, 0x97, 0x2a, 0x00, 0x8a, 0xe4, 0x70, 0x93,
0x8b, 0x7a, 0x4c, 0x18, 0xc5, 0xb6, 0xc2, 0x7a, 0x27, 0x2c, 0x77, 0x64, 0xa5, 0x23, 0x7b, 0x4c,
0x1d, 0xfd, 0xcb, 0x8f, 0x9f, 0xdf, 0xcf, 0x76, 0xdf, 0xef, 0xa4, 0xa7, 0xb1, 0x7c, 0x39, 0x90,
0xe0, 0xc4, 0x80, 0x97, 0x40, 0xf6, 0xae, 0x72, 0xa2, 0x5f, 0x2d, 0xd2, 0x69, 0x90, 0xa2, 0xfd,
0xb5, 0x5f, 0x37, 0x29, 0x37, 0x32, 0xdc, 0x67, 0x0c, 0xb7, 0x7e, 0x6f, 0x93, 0xa1, 0x3a, 0xc2,
0x3f, 0xcc, 0x96, 0x93, 0x3d, 0xdc, 0xbc, 0x0e, 0x43, 0xe5, 0x66, 0x89, 0x64, 0x01, 0x2e, 0xf8,
0x3c, 0x91, 0x90, 0xb6, 0xb9, 0x51, 0x06, 0x22, 0xa5, 0xc1, 0xf2, 0xf5, 0x97, 0x11, 0xe2, 0x38,
0x88, 0x14, 0x68, 0x27, 0x77, 0x33, 0x88, 0xe1, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, 0xe5,
0x5d, 0x89, 0x39, 0x02, 0x00, 0x00,
}
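
For context on how the client interface above is consumed, here is a hedged sketch of a unary ReportWorkflow call; the endpoint and payload are placeholders, not values from this diff:

func reportExample(workflowJSON string) error {
	// Placeholder address; the real API server endpoint is deployment-specific.
	conn, err := grpc.Dial("localhost:8887", grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()

	client := NewReportServiceClient(conn)
	// Per the message definition above, Workflow carries the serialized
	// workflow resource as a plain string.
	_, err = client.ReportWorkflow(context.Background(), &ReportWorkflowRequest{
		Workflow: workflowJSON,
	})
	return err
}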


@@ -1,170 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/report.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_ReportService_ReportWorkflow_0(ctx context.Context, marshaler runtime.Marshaler, client ReportServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReportWorkflowRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ReportWorkflow(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_ReportService_ReportScheduledWorkflow_0(ctx context.Context, marshaler runtime.Marshaler, client ReportServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReportScheduledWorkflowRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.ScheduledWorkflow); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ReportScheduledWorkflow(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterReportServiceHandlerFromEndpoint is same as RegisterReportServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterReportServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterReportServiceHandler(ctx, mux, conn)
}
// RegisterReportServiceHandler registers the http handlers for service ReportService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterReportServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterReportServiceHandlerClient(ctx, mux, NewReportServiceClient(conn))
}
// RegisterReportServiceHandlerClient registers the http handlers for service ReportService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ReportServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ReportServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ReportServiceClient" to call the correct interceptors.
func RegisterReportServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ReportServiceClient) error {
mux.Handle("POST", pattern_ReportService_ReportWorkflow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ReportService_ReportWorkflow_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ReportService_ReportWorkflow_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_ReportService_ReportScheduledWorkflow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ReportService_ReportScheduledWorkflow_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_ReportService_ReportScheduledWorkflow_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_ReportService_ReportWorkflow_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "workflows"}, ""))
pattern_ReportService_ReportScheduledWorkflow_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "scheduledworkflows"}, ""))
)
var (
forward_ReportService_ReportWorkflow_0 = runtime.ForwardResponseMessage
forward_ReportService_ReportScheduledWorkflow_0 = runtime.ForwardResponseMessage
)
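
One subtlety worth calling out: request_ReportService_ReportWorkflow_0 decodes the entire request body into the Workflow string field, so the POST body for /apis/v1beta1/workflows is a JSON-encoded string (the serialized workflow), not a JSON object. A client-side sketch under that reading, with the gateway URL as a placeholder (assumes bytes, encoding/json, and net/http are imported):

func postWorkflow(gatewayURL, workflowManifest string) error {
	// Encode the manifest as a JSON string to match the string-field decode above.
	body, err := json.Marshal(workflowManifest)
	if err != nil {
		return err
	}
	resp, err := http.Post(gatewayURL+"/apis/v1beta1/workflows", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	return resp.Body.Close()
}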


@@ -1,232 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/resource_reference.proto
package go_client // import "github.com/kubeflow/pipelines/backend/api/go_client"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type ResourceType int32
const (
ResourceType_UNKNOWN_RESOURCE_TYPE ResourceType = 0
ResourceType_EXPERIMENT ResourceType = 1
ResourceType_JOB ResourceType = 2
ResourceType_PIPELINE ResourceType = 3
ResourceType_PIPELINE_VERSION ResourceType = 4
ResourceType_NAMESPACE ResourceType = 5
)
var ResourceType_name = map[int32]string{
0: "UNKNOWN_RESOURCE_TYPE",
1: "EXPERIMENT",
2: "JOB",
3: "PIPELINE",
4: "PIPELINE_VERSION",
5: "NAMESPACE",
}
var ResourceType_value = map[string]int32{
"UNKNOWN_RESOURCE_TYPE": 0,
"EXPERIMENT": 1,
"JOB": 2,
"PIPELINE": 3,
"PIPELINE_VERSION": 4,
"NAMESPACE": 5,
}
func (x ResourceType) String() string {
return proto.EnumName(ResourceType_name, int32(x))
}
func (ResourceType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_resource_reference_876ea904b7b7aed8, []int{0}
}
type Relationship int32
const (
Relationship_UNKNOWN_RELATIONSHIP Relationship = 0
Relationship_OWNER Relationship = 1
Relationship_CREATOR Relationship = 2
)
var Relationship_name = map[int32]string{
0: "UNKNOWN_RELATIONSHIP",
1: "OWNER",
2: "CREATOR",
}
var Relationship_value = map[string]int32{
"UNKNOWN_RELATIONSHIP": 0,
"OWNER": 1,
"CREATOR": 2,
}
func (x Relationship) String() string {
return proto.EnumName(Relationship_name, int32(x))
}
func (Relationship) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_resource_reference_876ea904b7b7aed8, []int{1}
}
type ResourceKey struct {
Type ResourceType `protobuf:"varint,1,opt,name=type,proto3,enum=api.ResourceType" json:"type,omitempty"`
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ResourceKey) Reset() { *m = ResourceKey{} }
func (m *ResourceKey) String() string { return proto.CompactTextString(m) }
func (*ResourceKey) ProtoMessage() {}
func (*ResourceKey) Descriptor() ([]byte, []int) {
return fileDescriptor_resource_reference_876ea904b7b7aed8, []int{0}
}
func (m *ResourceKey) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ResourceKey.Unmarshal(m, b)
}
func (m *ResourceKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ResourceKey.Marshal(b, m, deterministic)
}
func (dst *ResourceKey) XXX_Merge(src proto.Message) {
xxx_messageInfo_ResourceKey.Merge(dst, src)
}
func (m *ResourceKey) XXX_Size() int {
return xxx_messageInfo_ResourceKey.Size(m)
}
func (m *ResourceKey) XXX_DiscardUnknown() {
xxx_messageInfo_ResourceKey.DiscardUnknown(m)
}
var xxx_messageInfo_ResourceKey proto.InternalMessageInfo
func (m *ResourceKey) GetType() ResourceType {
if m != nil {
return m.Type
}
return ResourceType_UNKNOWN_RESOURCE_TYPE
}
func (m *ResourceKey) GetId() string {
if m != nil {
return m.Id
}
return ""
}
type ResourceReference struct {
Key *ResourceKey `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
Relationship Relationship `protobuf:"varint,2,opt,name=relationship,proto3,enum=api.Relationship" json:"relationship,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ResourceReference) Reset() { *m = ResourceReference{} }
func (m *ResourceReference) String() string { return proto.CompactTextString(m) }
func (*ResourceReference) ProtoMessage() {}
func (*ResourceReference) Descriptor() ([]byte, []int) {
return fileDescriptor_resource_reference_876ea904b7b7aed8, []int{1}
}
func (m *ResourceReference) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ResourceReference.Unmarshal(m, b)
}
func (m *ResourceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ResourceReference.Marshal(b, m, deterministic)
}
func (dst *ResourceReference) XXX_Merge(src proto.Message) {
xxx_messageInfo_ResourceReference.Merge(dst, src)
}
func (m *ResourceReference) XXX_Size() int {
return xxx_messageInfo_ResourceReference.Size(m)
}
func (m *ResourceReference) XXX_DiscardUnknown() {
xxx_messageInfo_ResourceReference.DiscardUnknown(m)
}
var xxx_messageInfo_ResourceReference proto.InternalMessageInfo
func (m *ResourceReference) GetKey() *ResourceKey {
if m != nil {
return m.Key
}
return nil
}
func (m *ResourceReference) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *ResourceReference) GetRelationship() Relationship {
if m != nil {
return m.Relationship
}
return Relationship_UNKNOWN_RELATIONSHIP
}
func init() {
proto.RegisterType((*ResourceKey)(nil), "api.ResourceKey")
proto.RegisterType((*ResourceReference)(nil), "api.ResourceReference")
proto.RegisterEnum("api.ResourceType", ResourceType_name, ResourceType_value)
proto.RegisterEnum("api.Relationship", Relationship_name, Relationship_value)
}
func init() {
proto.RegisterFile("backend/api/resource_reference.proto", fileDescriptor_resource_reference_876ea904b7b7aed8)
}
var fileDescriptor_resource_reference_876ea904b7b7aed8 = []byte{
// 366 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0xc1, 0x6b, 0x9c, 0x40,
0x14, 0xc6, 0xa3, 0x6e, 0x9a, 0xee, 0xdb, 0xed, 0x32, 0x79, 0xa4, 0x60, 0x6f, 0x61, 0x69, 0x21,
0xe4, 0xa0, 0x90, 0x90, 0x7b, 0xcd, 0x76, 0xa0, 0x76, 0x93, 0x51, 0x46, 0xd3, 0xb4, 0xbd, 0x88,
0xba, 0x2f, 0xc9, 0xb0, 0x46, 0x07, 0x57, 0x29, 0x5e, 0xfb, 0x97, 0x97, 0x48, 0xc4, 0xec, 0x6d,
0x86, 0xdf, 0xc7, 0xf7, 0xfb, 0xe0, 0xc1, 0xe7, 0x2c, 0xcd, 0xb7, 0x54, 0x6e, 0xdc, 0x54, 0x2b,
0xb7, 0xa6, 0x5d, 0xd5, 0xd6, 0x39, 0x25, 0x35, 0x3d, 0x50, 0x4d, 0x65, 0x4e, 0x8e, 0xae, 0xab,
0xa6, 0x42, 0x2b, 0xd5, 0x6a, 0xf9, 0x0d, 0x66, 0xf2, 0x35, 0xb0, 0xa6, 0x0e, 0xbf, 0xc0, 0xa4,
0xe9, 0x34, 0xd9, 0xc6, 0xa9, 0x71, 0xb6, 0xb8, 0x38, 0x76, 0x52, 0xad, 0x9c, 0x81, 0xc7, 0x9d,
0x26, 0xd9, 0x63, 0x5c, 0x80, 0xa9, 0x36, 0xb6, 0x79, 0x6a, 0x9c, 0x4d, 0xa5, 0xa9, 0x36, 0xcb,
0x7f, 0x06, 0x1c, 0x0f, 0x31, 0x39, 0x68, 0x70, 0x09, 0xd6, 0x96, 0xba, 0xbe, 0x6b, 0x76, 0xc1,
0xf6, 0xba, 0xd6, 0xd4, 0xc9, 0x17, 0x88, 0x08, 0x93, 0x32, 0x7d, 0x26, 0xdb, 0xea, 0xbb, 0xfa,
0x37, 0x5e, 0xc1, 0xbc, 0xa6, 0x22, 0x6d, 0x54, 0x55, 0xee, 0x9e, 0x94, 0xee, 0x3d, 0xe3, 0x98,
0x11, 0xc8, 0xbd, 0xd8, 0x79, 0x0b, 0xf3, 0xb7, 0x53, 0xf1, 0x13, 0x7c, 0xbc, 0x13, 0x6b, 0x11,
0xdc, 0x8b, 0x44, 0xf2, 0x28, 0xb8, 0x93, 0x2b, 0x9e, 0xc4, 0xbf, 0x43, 0xce, 0x0e, 0x70, 0x01,
0xc0, 0x7f, 0x85, 0x5c, 0xfa, 0xb7, 0x5c, 0xc4, 0xcc, 0xc0, 0x23, 0xb0, 0x7e, 0x04, 0xd7, 0xcc,
0xc4, 0x39, 0xbc, 0x0f, 0xfd, 0x90, 0xdf, 0xf8, 0x82, 0x33, 0x0b, 0x4f, 0x80, 0x0d, 0xbf, 0xe4,
0x27, 0x97, 0x91, 0x1f, 0x08, 0x36, 0xc1, 0x0f, 0x30, 0x15, 0xde, 0x2d, 0x8f, 0x42, 0x6f, 0xc5,
0xd9, 0xe1, 0xf9, 0xd7, 0x17, 0xed, 0x38, 0x03, 0x6d, 0x38, 0x19, 0xb5, 0x37, 0x5e, 0xec, 0x07,
0x22, 0xfa, 0xee, 0x87, 0xec, 0x00, 0xa7, 0x70, 0x18, 0xdc, 0x0b, 0x2e, 0x99, 0x81, 0x33, 0x38,
0x5a, 0x49, 0xee, 0xc5, 0x81, 0x64, 0xe6, 0xf5, 0xd5, 0x9f, 0xcb, 0x47, 0xd5, 0x3c, 0xb5, 0x99,
0x93, 0x57, 0xcf, 0xee, 0xb6, 0xcd, 0xe8, 0xa1, 0xa8, 0xfe, 0xba, 0x5a, 0x69, 0x2a, 0x54, 0x49,
0x3b, 0xf7, 0xed, 0x39, 0x1f, 0xab, 0x24, 0x2f, 0x14, 0x95, 0x4d, 0xf6, 0xae, 0x3f, 0xe3, 0xe5,
0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x58, 0x92, 0x1b, 0xee, 0x01, 0x00, 0x00,
}
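
Tying the enums and messages above together, a reference marking an experiment as the owner of a resource could be built as follows (the ID is a placeholder; the snippet assumes the same go_client package with fmt imported):

ref := &ResourceReference{
	Key: &ResourceKey{
		Type: ResourceType_EXPERIMENT,
		Id:   "experiment-123", // placeholder ID
	},
	Relationship: Relationship_OWNER,
}
// Getters are nil-safe, so the chained access needs no guards.
fmt.Println(ref.GetKey().GetType()) // EXPERIMENT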


@@ -1,17 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build ignore
package ignore

File diff suppressed because it is too large


@@ -1,608 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/run.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_RunService_CreateRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreateRunRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Run); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreateRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_GetRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetRunRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["run_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "run_id")
}
protoReq.RunId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "run_id", err)
}
msg, err := client.GetRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
var (
filter_RunService_ListRuns_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_RunService_ListRuns_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListRunsRequest
var metadata runtime.ServerMetadata
if err := runtime.PopulateQueryParameters(&protoReq, req.URL.Query(), filter_RunService_ListRuns_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListRuns(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_ArchiveRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ArchiveRunRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.ArchiveRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_UnarchiveRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq UnarchiveRunRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.UnarchiveRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_DeleteRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteRunRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DeleteRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_ReportRunMetrics_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReportRunMetricsRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["run_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "run_id")
}
protoReq.RunId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "run_id", err)
}
msg, err := client.ReportRunMetrics(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_ReadArtifact_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReadArtifactRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["run_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "run_id")
}
protoReq.RunId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "run_id", err)
}
val, ok = pathParams["node_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "node_id")
}
protoReq.NodeId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "node_id", err)
}
val, ok = pathParams["artifact_name"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "artifact_name")
}
protoReq.ArtifactName, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "artifact_name", err)
}
msg, err := client.ReadArtifact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_TerminateRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq TerminateRunRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["run_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "run_id")
}
protoReq.RunId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "run_id", err)
}
msg, err := client.TerminateRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func request_RunService_RetryRun_0(ctx context.Context, marshaler runtime.Marshaler, client RunServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq RetryRunRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["run_id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "run_id")
}
protoReq.RunId, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "run_id", err)
}
msg, err := client.RetryRun(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterRunServiceHandlerFromEndpoint is same as RegisterRunServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterRunServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterRunServiceHandler(ctx, mux, conn)
}
// RegisterRunServiceHandler registers the http handlers for service RunService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterRunServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterRunServiceHandlerClient(ctx, mux, NewRunServiceClient(conn))
}
// RegisterRunServiceHandlerClient registers the http handlers for service RunService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "RunServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "RunServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "RunServiceClient" to call the correct interceptors.
func RegisterRunServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client RunServiceClient) error {
mux.Handle("POST", pattern_RunService_CreateRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_CreateRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_CreateRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_RunService_GetRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_GetRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_GetRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_RunService_ListRuns_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_ListRuns_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_ListRuns_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_RunService_ArchiveRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_ArchiveRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_ArchiveRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_RunService_UnarchiveRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_UnarchiveRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_UnarchiveRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_RunService_DeleteRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_DeleteRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_DeleteRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_RunService_ReportRunMetrics_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_ReportRunMetrics_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_ReportRunMetrics_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_RunService_ReadArtifact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_ReadArtifact_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_ReadArtifact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_RunService_TerminateRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_TerminateRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_TerminateRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_RunService_RetryRun_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_RunService_RetryRun_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_RunService_RetryRun_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_RunService_CreateRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "runs"}, ""))
pattern_RunService_GetRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "runs", "run_id"}, ""))
pattern_RunService_ListRuns_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1beta1", "runs"}, ""))
pattern_RunService_ArchiveRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "runs", "id"}, "archive"))
pattern_RunService_UnarchiveRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "runs", "id"}, "unarchive"))
pattern_RunService_DeleteRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "runs", "id"}, ""))
pattern_RunService_ReportRunMetrics_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "runs", "run_id"}, "reportMetrics"))
pattern_RunService_ReadArtifact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6, 1, 0, 4, 1, 5, 7}, []string{"apis", "v1beta1", "runs", "run_id", "nodes", "node_id", "artifacts", "artifact_name"}, "read"))
pattern_RunService_TerminateRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1beta1", "runs", "run_id", "terminate"}, ""))
pattern_RunService_RetryRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1beta1", "runs", "run_id", "retry"}, ""))
)
var (
forward_RunService_CreateRun_0 = runtime.ForwardResponseMessage
forward_RunService_GetRun_0 = runtime.ForwardResponseMessage
forward_RunService_ListRuns_0 = runtime.ForwardResponseMessage
forward_RunService_ArchiveRun_0 = runtime.ForwardResponseMessage
forward_RunService_UnarchiveRun_0 = runtime.ForwardResponseMessage
forward_RunService_DeleteRun_0 = runtime.ForwardResponseMessage
forward_RunService_ReportRunMetrics_0 = runtime.ForwardResponseMessage
forward_RunService_ReadArtifact_0 = runtime.ForwardResponseMessage
forward_RunService_TerminateRun_0 = runtime.ForwardResponseMessage
forward_RunService_RetryRun_0 = runtime.ForwardResponseMessage
)
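As an aside for readers of this diff: below is a minimal sketch of how these generated registration helpers are typically mounted behind an HTTP server. The gRPC endpoint address and listen port are placeholder assumptions, not values from this repository.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	gw "github.com/kubeflow/pipelines/backend/api/go_client"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	// "localhost:8887" is a placeholder for the gRPC API server address.
	if err := gw.RegisterRunServiceHandlerFromEndpoint(ctx, mux, "localhost:8887", opts); err != nil {
		log.Fatal(err)
	}
	// Each mux.Handle pattern above now serves its REST route, e.g.
	// POST /apis/v1beta1/runs/{run_id}/terminate -> RunService.TerminateRun.
	log.Fatal(http.ListenAndServe(":8888", mux))
}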


@@ -1,305 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: backend/api/visualization.proto
package go_client // import "github.com/kubeflow/pipelines/backend/api/go_client"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"
import _ "google.golang.org/genproto/googleapis/api/annotations"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Visualization_Type int32
const (
Visualization_ROC_CURVE Visualization_Type = 0
Visualization_TFDV Visualization_Type = 1
Visualization_TFMA Visualization_Type = 2
Visualization_TABLE Visualization_Type = 3
Visualization_CUSTOM Visualization_Type = 4
)
var Visualization_Type_name = map[int32]string{
0: "ROC_CURVE",
1: "TFDV",
2: "TFMA",
3: "TABLE",
4: "CUSTOM",
}
var Visualization_Type_value = map[string]int32{
"ROC_CURVE": 0,
"TFDV": 1,
"TFMA": 2,
"TABLE": 3,
"CUSTOM": 4,
}
func (x Visualization_Type) String() string {
return proto.EnumName(Visualization_Type_name, int32(x))
}
func (Visualization_Type) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_visualization_5c1ff36cbdec23a0, []int{1, 0}
}
type CreateVisualizationRequest struct {
Visualization *Visualization `protobuf:"bytes,1,opt,name=visualization,proto3" json:"visualization,omitempty"`
Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CreateVisualizationRequest) Reset() { *m = CreateVisualizationRequest{} }
func (m *CreateVisualizationRequest) String() string { return proto.CompactTextString(m) }
func (*CreateVisualizationRequest) ProtoMessage() {}
func (*CreateVisualizationRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_visualization_5c1ff36cbdec23a0, []int{0}
}
func (m *CreateVisualizationRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CreateVisualizationRequest.Unmarshal(m, b)
}
func (m *CreateVisualizationRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CreateVisualizationRequest.Marshal(b, m, deterministic)
}
func (dst *CreateVisualizationRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CreateVisualizationRequest.Merge(dst, src)
}
func (m *CreateVisualizationRequest) XXX_Size() int {
return xxx_messageInfo_CreateVisualizationRequest.Size(m)
}
func (m *CreateVisualizationRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CreateVisualizationRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CreateVisualizationRequest proto.InternalMessageInfo
func (m *CreateVisualizationRequest) GetVisualization() *Visualization {
if m != nil {
return m.Visualization
}
return nil
}
func (m *CreateVisualizationRequest) GetNamespace() string {
if m != nil {
return m.Namespace
}
return ""
}
type Visualization struct {
Type Visualization_Type `protobuf:"varint,1,opt,name=type,proto3,enum=api.Visualization_Type" json:"type,omitempty"`
Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
Arguments string `protobuf:"bytes,3,opt,name=arguments,proto3" json:"arguments,omitempty"`
Html string `protobuf:"bytes,4,opt,name=html,proto3" json:"html,omitempty"`
Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Visualization) Reset() { *m = Visualization{} }
func (m *Visualization) String() string { return proto.CompactTextString(m) }
func (*Visualization) ProtoMessage() {}
func (*Visualization) Descriptor() ([]byte, []int) {
return fileDescriptor_visualization_5c1ff36cbdec23a0, []int{1}
}
func (m *Visualization) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Visualization.Unmarshal(m, b)
}
func (m *Visualization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Visualization.Marshal(b, m, deterministic)
}
func (dst *Visualization) XXX_Merge(src proto.Message) {
xxx_messageInfo_Visualization.Merge(dst, src)
}
func (m *Visualization) XXX_Size() int {
return xxx_messageInfo_Visualization.Size(m)
}
func (m *Visualization) XXX_DiscardUnknown() {
xxx_messageInfo_Visualization.DiscardUnknown(m)
}
var xxx_messageInfo_Visualization proto.InternalMessageInfo
func (m *Visualization) GetType() Visualization_Type {
if m != nil {
return m.Type
}
return Visualization_ROC_CURVE
}
func (m *Visualization) GetSource() string {
if m != nil {
return m.Source
}
return ""
}
func (m *Visualization) GetArguments() string {
if m != nil {
return m.Arguments
}
return ""
}
func (m *Visualization) GetHtml() string {
if m != nil {
return m.Html
}
return ""
}
func (m *Visualization) GetError() string {
if m != nil {
return m.Error
}
return ""
}
func init() {
proto.RegisterType((*CreateVisualizationRequest)(nil), "api.CreateVisualizationRequest")
proto.RegisterType((*Visualization)(nil), "api.Visualization")
proto.RegisterEnum("api.Visualization_Type", Visualization_Type_name, Visualization_Type_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// VisualizationServiceClient is the client API for VisualizationService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type VisualizationServiceClient interface {
CreateVisualization(ctx context.Context, in *CreateVisualizationRequest, opts ...grpc.CallOption) (*Visualization, error)
}
type visualizationServiceClient struct {
cc *grpc.ClientConn
}
func NewVisualizationServiceClient(cc *grpc.ClientConn) VisualizationServiceClient {
return &visualizationServiceClient{cc}
}
func (c *visualizationServiceClient) CreateVisualization(ctx context.Context, in *CreateVisualizationRequest, opts ...grpc.CallOption) (*Visualization, error) {
out := new(Visualization)
err := c.cc.Invoke(ctx, "/api.VisualizationService/CreateVisualization", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// VisualizationServiceServer is the server API for VisualizationService service.
type VisualizationServiceServer interface {
CreateVisualization(context.Context, *CreateVisualizationRequest) (*Visualization, error)
}
func RegisterVisualizationServiceServer(s *grpc.Server, srv VisualizationServiceServer) {
s.RegisterService(&_VisualizationService_serviceDesc, srv)
}
func _VisualizationService_CreateVisualization_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CreateVisualizationRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(VisualizationServiceServer).CreateVisualization(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/api.VisualizationService/CreateVisualization",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(VisualizationServiceServer).CreateVisualization(ctx, req.(*CreateVisualizationRequest))
}
return interceptor(ctx, in, info, handler)
}
var _VisualizationService_serviceDesc = grpc.ServiceDesc{
ServiceName: "api.VisualizationService",
HandlerType: (*VisualizationServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CreateVisualization",
Handler: _VisualizationService_CreateVisualization_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/visualization.proto",
}
func init() {
proto.RegisterFile("backend/api/visualization.proto", fileDescriptor_visualization_5c1ff36cbdec23a0)
}
var fileDescriptor_visualization_5c1ff36cbdec23a0 = []byte{
// 482 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0xc1, 0x6e, 0xd3, 0x40,
0x10, 0xad, 0x13, 0x27, 0x34, 0x53, 0x02, 0x61, 0x5b, 0x51, 0xcb, 0x0a, 0x4a, 0xe4, 0x53, 0x24,
0xa8, 0xad, 0xa6, 0x17, 0xe0, 0x02, 0x49, 0x28, 0x27, 0xa2, 0x4a, 0x4e, 0x9a, 0x43, 0x2f, 0xd5,
0xda, 0x9d, 0x3a, 0xab, 0x3a, 0xde, 0x65, 0x77, 0x9d, 0xaa, 0x20, 0x2e, 0x48, 0x9c, 0xb8, 0xc1,
0x2f, 0xf0, 0x47, 0xdc, 0x39, 0xf1, 0x21, 0x28, 0x9b, 0x28, 0xc4, 0x6a, 0x7b, 0xf2, 0xee, 0xec,
0x9b, 0xf7, 0xc6, 0x6f, 0x1e, 0xb4, 0x22, 0x1a, 0x5f, 0x61, 0x76, 0x11, 0x50, 0xc1, 0x82, 0x39,
0x53, 0x39, 0x4d, 0xd9, 0x27, 0xaa, 0x19, 0xcf, 0x7c, 0x21, 0xb9, 0xe6, 0xa4, 0x4c, 0x05, 0x73,
0x9b, 0x09, 0xe7, 0x49, 0x8a, 0x06, 0x44, 0xb3, 0x8c, 0x6b, 0x83, 0x50, 0x4b, 0x88, 0xbb, 0xbf,
0xc9, 0x81, 0x52, 0x72, 0xb9, 0x7a, 0x78, 0x61, 0x3e, 0xf1, 0x41, 0x82, 0xd9, 0x81, 0xba, 0xa6,
0x49, 0x82, 0x32, 0xe0, 0xc2, 0xb4, 0xde, 0xa6, 0xf1, 0x34, 0xb8, 0x03, 0x89, 0x54, 0xe3, 0x64,
0x73, 0x8c, 0x10, 0x3f, 0xe6, 0xa8, 0x34, 0x79, 0x09, 0xf5, 0xc2, 0x78, 0x8e, 0xd5, 0xb6, 0x3a,
0x3b, 0x5d, 0xe2, 0x53, 0xc1, 0xfc, 0x62, 0x47, 0x11, 0x48, 0x9a, 0x50, 0xcb, 0xe8, 0x0c, 0x95,
0xa0, 0x31, 0x3a, 0xa5, 0xb6, 0xd5, 0xa9, 0x85, 0xff, 0x0b, 0xde, 0x1f, 0x0b, 0xea, 0x85, 0x76,
0xf2, 0x1c, 0x6c, 0x7d, 0x23, 0xd0, 0x08, 0x3c, 0xea, 0xee, 0xdf, 0x16, 0xf0, 0xc7, 0x37, 0x02,
0x43, 0x03, 0x22, 0x4f, 0xa1, 0xaa, 0x78, 0x2e, 0xd7, 0xcc, 0xab, 0xdb, 0x42, 0x94, 0xca, 0x24,
0x9f, 0x61, 0xa6, 0x95, 0x53, 0x5e, 0x8a, 0xae, 0x0b, 0x84, 0x80, 0x3d, 0xd5, 0xb3, 0xd4, 0xb1,
0xcd, 0x83, 0x39, 0x93, 0x3d, 0xa8, 0x18, 0xef, 0x9c, 0x8a, 0x29, 0x2e, 0x2f, 0xde, 0x5b, 0xb0,
0x17, 0x6a, 0xa4, 0x0e, 0xb5, 0xf0, 0x64, 0x70, 0x3e, 0x38, 0x0d, 0x27, 0xc7, 0x8d, 0x2d, 0xb2,
0x0d, 0xf6, 0xf8, 0xfd, 0xbb, 0x49, 0xc3, 0x5a, 0x9e, 0x86, 0xbd, 0x46, 0x89, 0xd4, 0xa0, 0x32,
0xee, 0xf5, 0x3f, 0x1c, 0x37, 0xca, 0x04, 0xa0, 0x3a, 0x38, 0x1d, 0x8d, 0x4f, 0x86, 0x0d, 0xbb,
0xfb, 0xcb, 0x82, 0xbd, 0xc2, 0xf8, 0x23, 0x94, 0x73, 0x16, 0x23, 0xf9, 0x6e, 0xc1, 0xee, 0x1d,
0x86, 0x93, 0x96, 0xf9, 0xe3, 0xfb, 0x57, 0xe1, 0xde, 0xe1, 0xb9, 0xf7, 0xe6, 0xeb, 0xef, 0xbf,
0x3f, 0x4b, 0xaf, 0xbc, 0xce, 0x22, 0x04, 0x2a, 0x98, 0x1f, 0x46, 0xa8, 0xe9, 0x61, 0x31, 0x51,
0x2a, 0xf8, 0xbc, 0xf6, 0xfd, 0xcb, 0xeb, 0xe2, 0x96, 0xfa, 0xdf, 0xac, 0x1f, 0xbd, 0x61, 0xd8,
0x84, 0x07, 0x17, 0x78, 0x49, 0xf3, 0x54, 0x93, 0x27, 0xe4, 0x31, 0xd4, 0xdd, 0x1d, 0xa3, 0x35,
0xd2, 0x54, 0xe7, 0xea, 0xac, 0x05, 0xcf, 0xa0, 0xda, 0x47, 0x2a, 0x51, 0x92, 0xdd, 0xed, 0x92,
0x5b, 0xa7, 0xb9, 0x9e, 0x72, 0xb9, 0xa2, 0x68, 0x97, 0xa2, 0x87, 0x00, 0x6b, 0xc0, 0xd6, 0xd9,
0x51, 0xc2, 0xf4, 0x34, 0x8f, 0xfc, 0x98, 0xcf, 0x82, 0xab, 0x3c, 0xc2, 0xcb, 0x94, 0x5f, 0x07,
0x82, 0x09, 0x4c, 0x59, 0x86, 0x2a, 0xd8, 0x4c, 0x6d, 0xc2, 0xcf, 0xe3, 0x94, 0x61, 0xa6, 0xa3,
0xaa, 0x09, 0xe3, 0xd1, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x50, 0x9c, 0xbc, 0x19, 0x03,
0x00, 0x00,
}
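A short illustrative sketch of using the generated message types from outside the package; the source path, arguments, and namespace are made-up values, and it assumes imports of fmt and this go_client package.

viz := &go_client.Visualization{
	Type:      go_client.Visualization_ROC_CURVE,
	Source:    "gs://bucket/data.csv",      // made-up source
	Arguments: `{"is_generated": "False"}`, // made-up arguments
}
req := &go_client.CreateVisualizationRequest{Visualization: viz, Namespace: "kubeflow"}
// The generated Get* accessors are nil-safe, so chained reads never panic:
fmt.Println(req.GetVisualization().GetType()) // ROC_CURVE

The nil-safe accessors are the reason generated code reads m.GetX() rather than touching fields directly.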


@@ -1,147 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/visualization.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/golang/protobuf/proto"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/grpc-ecosystem/grpc-gateway/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
func request_VisualizationService_CreateVisualization_0(ctx context.Context, marshaler runtime.Marshaler, client VisualizationServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreateVisualizationRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Visualization); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["namespace"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "namespace")
}
protoReq.Namespace, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "namespace", err)
}
msg, err := client.CreateVisualization(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// RegisterVisualizationServiceHandlerFromEndpoint is same as RegisterVisualizationServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterVisualizationServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterVisualizationServiceHandler(ctx, mux, conn)
}
// RegisterVisualizationServiceHandler registers the http handlers for service VisualizationService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterVisualizationServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterVisualizationServiceHandlerClient(ctx, mux, NewVisualizationServiceClient(conn))
}
// RegisterVisualizationServiceHandlerClient registers the http handlers for service VisualizationService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "VisualizationServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "VisualizationServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "VisualizationServiceClient" to call the correct interceptors.
func RegisterVisualizationServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client VisualizationServiceClient) error {
mux.Handle("POST", pattern_VisualizationService_CreateVisualization_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_VisualizationService_CreateVisualization_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_VisualizationService_CreateVisualization_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_VisualizationService_CreateVisualization_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1beta1", "visualizations", "namespace"}, ""))
)
var (
forward_VisualizationService_CreateVisualization_0 = runtime.ForwardResponseMessage
)
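Once this gateway is mounted, pattern_VisualizationService_CreateVisualization_0 maps POST /apis/v1beta1/visualizations/{namespace} onto the gRPC CreateVisualization call. A hypothetical plain-HTTP invocation follows, assuming imports of net/http, strings, and log, with a made-up host and payload.

body := strings.NewReader(`{"type": "ROC_CURVE", "source": "gs://bucket/data.csv"}`)
resp, err := http.Post("http://localhost:8888/apis/v1beta1/visualizations/kubeflow", "application/json", body)
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()
// The JSON body is decoded into protoReq.Visualization by the request
// handler above; the {namespace} path segment fills protoReq.Namespace.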


@@ -1,14 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["experiment_client.go"],
importpath = "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_client",
visibility = ["//visibility:public"],
deps = [
"//backend/api/go_http_client/experiment_client/experiment_service:go_default_library",
"@com_github_go_openapi_runtime//:go_default_library",
"@com_github_go_openapi_runtime//client:go_default_library",
"@com_github_go_openapi_strfmt//:go_default_library",
],
)


@@ -1,30 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"archive_experiment_parameters.go",
"archive_experiment_responses.go",
"create_experiment_parameters.go",
"create_experiment_responses.go",
"delete_experiment_parameters.go",
"delete_experiment_responses.go",
"experiment_service_client.go",
"get_experiment_parameters.go",
"get_experiment_responses.go",
"list_experiment_parameters.go",
"list_experiment_responses.go",
"unarchive_experiment_parameters.go",
"unarchive_experiment_responses.go",
],
importpath = "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_client/experiment_service",
visibility = ["//visibility:public"],
deps = [
"//backend/api/go_http_client/experiment_model:go_default_library",
"@com_github_go_openapi_errors//:go_default_library",
"@com_github_go_openapi_runtime//:go_default_library",
"@com_github_go_openapi_runtime//client:go_default_library",
"@com_github_go_openapi_strfmt//:go_default_library",
"@com_github_go_openapi_swag//:go_default_library",
],
)


@@ -1,150 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewArchiveExperimentParams creates a new ArchiveExperimentParams object
// with the default values initialized.
func NewArchiveExperimentParams() *ArchiveExperimentParams {
var ()
return &ArchiveExperimentParams{
timeout: cr.DefaultTimeout,
}
}
// NewArchiveExperimentParamsWithTimeout creates a new ArchiveExperimentParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewArchiveExperimentParamsWithTimeout(timeout time.Duration) *ArchiveExperimentParams {
var ()
return &ArchiveExperimentParams{
timeout: timeout,
}
}
// NewArchiveExperimentParamsWithContext creates a new ArchiveExperimentParams object
// with the default values initialized, and the ability to set a context for a request
func NewArchiveExperimentParamsWithContext(ctx context.Context) *ArchiveExperimentParams {
var ()
return &ArchiveExperimentParams{
Context: ctx,
}
}
// NewArchiveExperimentParamsWithHTTPClient creates a new ArchiveExperimentParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewArchiveExperimentParamsWithHTTPClient(client *http.Client) *ArchiveExperimentParams {
var ()
return &ArchiveExperimentParams{
HTTPClient: client,
}
}
/*ArchiveExperimentParams contains all the parameters to send to the API endpoint
for the archive experiment operation; typically these are written to an http.Request.
*/
type ArchiveExperimentParams struct {
/*ID
The ID of the experiment to be archived.
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the archive experiment params
func (o *ArchiveExperimentParams) WithTimeout(timeout time.Duration) *ArchiveExperimentParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the archive experiment params
func (o *ArchiveExperimentParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the archive experiment params
func (o *ArchiveExperimentParams) WithContext(ctx context.Context) *ArchiveExperimentParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the archive experiment params
func (o *ArchiveExperimentParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the archive experiment params
func (o *ArchiveExperimentParams) WithHTTPClient(client *http.Client) *ArchiveExperimentParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the archive experiment params
func (o *ArchiveExperimentParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the archive experiment params
func (o *ArchiveExperimentParams) WithID(id string) *ArchiveExperimentParams {
o.SetID(id)
return o
}
// SetID adds the id to the archive experiment params
func (o *ArchiveExperimentParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *ArchiveExperimentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
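The generated With* setters allow fluent construction. A brief sketch, assuming imports of time and this experiment_service package, with a placeholder experiment ID:

params := experiment_service.NewArchiveExperimentParams().
	WithID("my-experiment-id").   // placeholder ID
	WithTimeout(30 * time.Second) // overrides cr.DefaultTimeout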


@@ -1,124 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// ArchiveExperimentReader is a Reader for the ArchiveExperiment structure.
type ArchiveExperimentReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *ArchiveExperimentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewArchiveExperimentOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewArchiveExperimentDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewArchiveExperimentOK creates a ArchiveExperimentOK with default headers values
func NewArchiveExperimentOK() *ArchiveExperimentOK {
return &ArchiveExperimentOK{}
}
/*ArchiveExperimentOK handles this case with default header values.
A successful response.
*/
type ArchiveExperimentOK struct {
Payload interface{}
}
func (o *ArchiveExperimentOK) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/experiments/{id}:archive][%d] archiveExperimentOK %+v", 200, o.Payload)
}
func (o *ArchiveExperimentOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewArchiveExperimentDefault creates a ArchiveExperimentDefault with default headers values
func NewArchiveExperimentDefault(code int) *ArchiveExperimentDefault {
return &ArchiveExperimentDefault{
_statusCode: code,
}
}
/*ArchiveExperimentDefault handles this case with default header values.
ArchiveExperimentDefault archive experiment default
*/
type ArchiveExperimentDefault struct {
_statusCode int
Payload *experiment_model.APIStatus
}
// Code gets the status code for the archive experiment default response
func (o *ArchiveExperimentDefault) Code() int {
return o._statusCode
}
func (o *ArchiveExperimentDefault) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/experiments/{id}:archive][%d] ArchiveExperiment default %+v", o._statusCode, o.Payload)
}
func (o *ArchiveExperimentDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
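In practice the Reader above means a caller sees *ArchiveExperimentOK on 2xx, while non-2xx codes come back as an *ArchiveExperimentDefault error value. A minimal handling sketch, assuming a log import and a client from the experiment service client file later in this diff:

func archiveAndReport(c *experiment_service.Client, params *experiment_service.ArchiveExperimentParams) error {
	ok, err := c.ArchiveExperiment(params, nil) // nil auth writer is an assumption
	if err != nil {
		// Non-2xx responses arrive as the Default type, carrying an APIStatus payload.
		if apiErr, isDefault := err.(*experiment_service.ArchiveExperimentDefault); isDefault {
			log.Printf("server status %d: %+v", apiErr.Code(), apiErr.Payload)
		}
		return err
	}
	log.Printf("archived: %+v", ok.Payload)
	return nil
}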


@@ -1,153 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// NewCreateExperimentParams creates a new CreateExperimentParams object
// with the default values initialized.
func NewCreateExperimentParams() *CreateExperimentParams {
var ()
return &CreateExperimentParams{
timeout: cr.DefaultTimeout,
}
}
// NewCreateExperimentParamsWithTimeout creates a new CreateExperimentParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewCreateExperimentParamsWithTimeout(timeout time.Duration) *CreateExperimentParams {
var ()
return &CreateExperimentParams{
timeout: timeout,
}
}
// NewCreateExperimentParamsWithContext creates a new CreateExperimentParams object
// with the default values initialized, and the ability to set a context for a request
func NewCreateExperimentParamsWithContext(ctx context.Context) *CreateExperimentParams {
var ()
return &CreateExperimentParams{
Context: ctx,
}
}
// NewCreateExperimentParamsWithHTTPClient creates a new CreateExperimentParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewCreateExperimentParamsWithHTTPClient(client *http.Client) *CreateExperimentParams {
var ()
return &CreateExperimentParams{
HTTPClient: client,
}
}
/*CreateExperimentParams contains all the parameters to send to the API endpoint
for the create experiment operation; typically these are written to an http.Request.
*/
type CreateExperimentParams struct {
/*Body
The experiment to be created.
*/
Body *experiment_model.APIExperiment
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the create experiment params
func (o *CreateExperimentParams) WithTimeout(timeout time.Duration) *CreateExperimentParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the create experiment params
func (o *CreateExperimentParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the create experiment params
func (o *CreateExperimentParams) WithContext(ctx context.Context) *CreateExperimentParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the create experiment params
func (o *CreateExperimentParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the create experiment params
func (o *CreateExperimentParams) WithHTTPClient(client *http.Client) *CreateExperimentParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the create experiment params
func (o *CreateExperimentParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithBody adds the body to the create experiment params
func (o *CreateExperimentParams) WithBody(body *experiment_model.APIExperiment) *CreateExperimentParams {
o.SetBody(body)
return o
}
// SetBody adds the body to the create experiment params
func (o *CreateExperimentParams) SetBody(body *experiment_model.APIExperiment) {
o.Body = body
}
// WriteToRequest writes these params to a swagger request
func (o *CreateExperimentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if o.Body != nil {
if err := r.SetBodyParam(o.Body); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
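Unlike the archive params, this request carries a body rather than a path ID. A one-line sketch, assuming the experiment model exposes a Name field and using a made-up name:

exp := &experiment_model.APIExperiment{Name: "demo-experiment"} // made-up name
params := experiment_service.NewCreateExperimentParams().WithBody(exp)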


@@ -1,126 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// CreateExperimentReader is a Reader for the CreateExperiment structure.
type CreateExperimentReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *CreateExperimentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewCreateExperimentOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewCreateExperimentDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewCreateExperimentOK creates a CreateExperimentOK with default headers values
func NewCreateExperimentOK() *CreateExperimentOK {
return &CreateExperimentOK{}
}
/*CreateExperimentOK handles this case with default header values.
A successful response.
*/
type CreateExperimentOK struct {
Payload *experiment_model.APIExperiment
}
func (o *CreateExperimentOK) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/experiments][%d] createExperimentOK %+v", 200, o.Payload)
}
func (o *CreateExperimentOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIExperiment)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewCreateExperimentDefault creates a CreateExperimentDefault with default headers values
func NewCreateExperimentDefault(code int) *CreateExperimentDefault {
return &CreateExperimentDefault{
_statusCode: code,
}
}
/*CreateExperimentDefault handles this case with default header values.
CreateExperimentDefault create experiment default
*/
type CreateExperimentDefault struct {
_statusCode int
Payload *experiment_model.APIStatus
}
// Code gets the status code for the create experiment default response
func (o *CreateExperimentDefault) Code() int {
return o._statusCode
}
func (o *CreateExperimentDefault) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/experiments][%d] CreateExperiment default %+v", o._statusCode, o.Payload)
}
func (o *CreateExperimentDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}


@@ -1,150 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewDeleteExperimentParams creates a new DeleteExperimentParams object
// with the default values initialized.
func NewDeleteExperimentParams() *DeleteExperimentParams {
var ()
return &DeleteExperimentParams{
timeout: cr.DefaultTimeout,
}
}
// NewDeleteExperimentParamsWithTimeout creates a new DeleteExperimentParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewDeleteExperimentParamsWithTimeout(timeout time.Duration) *DeleteExperimentParams {
var ()
return &DeleteExperimentParams{
timeout: timeout,
}
}
// NewDeleteExperimentParamsWithContext creates a new DeleteExperimentParams object
// with the default values initialized, and the ability to set a context for a request
func NewDeleteExperimentParamsWithContext(ctx context.Context) *DeleteExperimentParams {
var ()
return &DeleteExperimentParams{
Context: ctx,
}
}
// NewDeleteExperimentParamsWithHTTPClient creates a new DeleteExperimentParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewDeleteExperimentParamsWithHTTPClient(client *http.Client) *DeleteExperimentParams {
var ()
return &DeleteExperimentParams{
HTTPClient: client,
}
}
/*DeleteExperimentParams contains all the parameters to send to the API endpoint
for the delete experiment operation; typically these are written to an http.Request.
*/
type DeleteExperimentParams struct {
/*ID
The ID of the experiment to be deleted.
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the delete experiment params
func (o *DeleteExperimentParams) WithTimeout(timeout time.Duration) *DeleteExperimentParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the delete experiment params
func (o *DeleteExperimentParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the delete experiment params
func (o *DeleteExperimentParams) WithContext(ctx context.Context) *DeleteExperimentParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the delete experiment params
func (o *DeleteExperimentParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the delete experiment params
func (o *DeleteExperimentParams) WithHTTPClient(client *http.Client) *DeleteExperimentParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the delete experiment params
func (o *DeleteExperimentParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the delete experiment params
func (o *DeleteExperimentParams) WithID(id string) *DeleteExperimentParams {
o.SetID(id)
return o
}
// SetID adds the id to the delete experiment params
func (o *DeleteExperimentParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *DeleteExperimentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}


@@ -1,124 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// DeleteExperimentReader is a Reader for the DeleteExperiment structure.
type DeleteExperimentReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *DeleteExperimentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewDeleteExperimentOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewDeleteExperimentDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewDeleteExperimentOK creates a DeleteExperimentOK with default headers values
func NewDeleteExperimentOK() *DeleteExperimentOK {
return &DeleteExperimentOK{}
}
/*DeleteExperimentOK handles this case with default header values.
A successful response.
*/
type DeleteExperimentOK struct {
Payload interface{}
}
func (o *DeleteExperimentOK) Error() string {
return fmt.Sprintf("[DELETE /apis/v1beta1/experiments/{id}][%d] deleteExperimentOK %+v", 200, o.Payload)
}
func (o *DeleteExperimentOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewDeleteExperimentDefault creates a DeleteExperimentDefault with default headers values
func NewDeleteExperimentDefault(code int) *DeleteExperimentDefault {
return &DeleteExperimentDefault{
_statusCode: code,
}
}
/*DeleteExperimentDefault handles this case with default header values.
DeleteExperimentDefault delete experiment default
*/
type DeleteExperimentDefault struct {
_statusCode int
Payload *experiment_model.APIStatus
}
// Code gets the status code for the delete experiment default response
func (o *DeleteExperimentDefault) Code() int {
return o._statusCode
}
func (o *DeleteExperimentDefault) Error() string {
return fmt.Sprintf("[DELETE /apis/v1beta1/experiments/{id}][%d] DeleteExperiment default %+v", o._statusCode, o.Payload)
}
func (o *DeleteExperimentDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}


@@ -1,218 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
)
// New creates a new experiment service API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client {
return &Client{transport: transport, formats: formats}
}
/*
Client for experiment service API
*/
type Client struct {
transport runtime.ClientTransport
formats strfmt.Registry
}
/*
ArchiveExperiment archives an experiment and the experiment's runs and jobs.
*/
func (a *Client) ArchiveExperiment(params *ArchiveExperimentParams, authInfo runtime.ClientAuthInfoWriter) (*ArchiveExperimentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewArchiveExperimentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "ArchiveExperiment",
Method: "POST",
PathPattern: "/apis/v1beta1/experiments/{id}:archive",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &ArchiveExperimentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*ArchiveExperimentOK), nil
}
/*
CreateExperiment creates a new experiment
*/
func (a *Client) CreateExperiment(params *CreateExperimentParams, authInfo runtime.ClientAuthInfoWriter) (*CreateExperimentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewCreateExperimentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "CreateExperiment",
Method: "POST",
PathPattern: "/apis/v1beta1/experiments",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &CreateExperimentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*CreateExperimentOK), nil
}
/*
DeleteExperiment deletes an experiment without deleting the experiment's runs and jobs. To avoid unexpected behaviors, delete an experiment's runs and jobs before deleting the experiment.
*/
func (a *Client) DeleteExperiment(params *DeleteExperimentParams, authInfo runtime.ClientAuthInfoWriter) (*DeleteExperimentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewDeleteExperimentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "DeleteExperiment",
Method: "DELETE",
PathPattern: "/apis/v1beta1/experiments/{id}",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &DeleteExperimentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*DeleteExperimentOK), nil
}
/*
GetExperiment finds a specific experiment by ID
*/
func (a *Client) GetExperiment(params *GetExperimentParams, authInfo runtime.ClientAuthInfoWriter) (*GetExperimentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewGetExperimentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "GetExperiment",
Method: "GET",
PathPattern: "/apis/v1beta1/experiments/{id}",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &GetExperimentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*GetExperimentOK), nil
}
/*
ListExperiment finds all experiments. Supports pagination and sorting on certain fields.
*/
func (a *Client) ListExperiment(params *ListExperimentParams, authInfo runtime.ClientAuthInfoWriter) (*ListExperimentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewListExperimentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "ListExperiment",
Method: "GET",
PathPattern: "/apis/v1beta1/experiments",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &ListExperimentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*ListExperimentOK), nil
}
/*
UnarchiveExperiment restores an archived experiment. The experiment's archived runs and jobs will stay archived.
*/
func (a *Client) UnarchiveExperiment(params *UnarchiveExperimentParams, authInfo runtime.ClientAuthInfoWriter) (*UnarchiveExperimentOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewUnarchiveExperimentParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "UnarchiveExperiment",
Method: "POST",
PathPattern: "/apis/v1beta1/experiments/{id}:unarchive",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"http", "https"},
Params: params,
Reader: &UnarchiveExperimentReader{formats: a.formats},
AuthInfo: authInfo,
Context: params.Context,
Client: params.HTTPClient,
})
if err != nil {
return nil, err
}
return result.(*UnarchiveExperimentOK), nil
}
// SetTransport changes the transport on the client
func (a *Client) SetTransport(transport runtime.ClientTransport) {
a.transport = transport
}
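Pulling the pieces together, here is a minimal sketch of dialing this client with the go-openapi HTTP transport; the host, scheme, and base path are placeholder assumptions.

package main

import (
	"fmt"
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_client/experiment_service"
)

func main() {
	// Placeholder host and base path; adjust to wherever the API is served.
	transport := httptransport.New("localhost:8888", "/", []string{"http"})
	api := experiment_service.New(transport, strfmt.Default)
	resp, err := api.ListExperiment(experiment_service.NewListExperimentParams(), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("experiments: %+v\n", resp.Payload) // Payload holds the list response
}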


@@ -1,150 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewGetExperimentParams creates a new GetExperimentParams object
// with the default values initialized.
func NewGetExperimentParams() *GetExperimentParams {
var ()
return &GetExperimentParams{
timeout: cr.DefaultTimeout,
}
}
// NewGetExperimentParamsWithTimeout creates a new GetExperimentParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewGetExperimentParamsWithTimeout(timeout time.Duration) *GetExperimentParams {
var ()
return &GetExperimentParams{
timeout: timeout,
}
}
// NewGetExperimentParamsWithContext creates a new GetExperimentParams object
// with the default values initialized, and the ability to set a context for a request
func NewGetExperimentParamsWithContext(ctx context.Context) *GetExperimentParams {
var ()
return &GetExperimentParams{
Context: ctx,
}
}
// NewGetExperimentParamsWithHTTPClient creates a new GetExperimentParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewGetExperimentParamsWithHTTPClient(client *http.Client) *GetExperimentParams {
var ()
return &GetExperimentParams{
HTTPClient: client,
}
}
/*GetExperimentParams contains all the parameters to send to the API endpoint
for the get experiment operation; typically these are written to an http.Request.
*/
type GetExperimentParams struct {
/*ID
The ID of the experiment to be retrieved.
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the get experiment params
func (o *GetExperimentParams) WithTimeout(timeout time.Duration) *GetExperimentParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the get experiment params
func (o *GetExperimentParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the get experiment params
func (o *GetExperimentParams) WithContext(ctx context.Context) *GetExperimentParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the get experiment params
func (o *GetExperimentParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the get experiment params
func (o *GetExperimentParams) WithHTTPClient(client *http.Client) *GetExperimentParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the get experiment params
func (o *GetExperimentParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the get experiment params
func (o *GetExperimentParams) WithID(id string) *GetExperimentParams {
o.SetID(id)
return o
}
// SetID adds the id to the get experiment params
func (o *GetExperimentParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *GetExperimentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}


@@ -1,126 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// GetExperimentReader is a Reader for the GetExperiment structure.
type GetExperimentReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GetExperimentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetExperimentOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewGetExperimentDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewGetExperimentOK creates a GetExperimentOK with default headers values
func NewGetExperimentOK() *GetExperimentOK {
return &GetExperimentOK{}
}
/*GetExperimentOK handles this case with default header values.
A successful response.
*/
type GetExperimentOK struct {
Payload *experiment_model.APIExperiment
}
func (o *GetExperimentOK) Error() string {
return fmt.Sprintf("[GET /apis/v1beta1/experiments/{id}][%d] getExperimentOK %+v", 200, o.Payload)
}
func (o *GetExperimentOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIExperiment)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetExperimentDefault creates a GetExperimentDefault with default headers values
func NewGetExperimentDefault(code int) *GetExperimentDefault {
return &GetExperimentDefault{
_statusCode: code,
}
}
/*GetExperimentDefault handles this case with default header values.
GetExperimentDefault get experiment default
*/
type GetExperimentDefault struct {
_statusCode int
Payload *experiment_model.APIStatus
}
// Code gets the status code for the get experiment default response
func (o *GetExperimentDefault) Code() int {
return o._statusCode
}
func (o *GetExperimentDefault) Error() string {
return fmt.Sprintf("[GET /apis/v1beta1/experiments/{id}][%d] GetExperiment default %+v", o._statusCode, o.Payload)
}
func (o *GetExperimentDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
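One consequence of the ReadResponse switch above is worth spelling out: for any non-2xx status the reader returns the *GetExperimentDefault value as the error, so callers can type-assert the error to recover both the HTTP code and the server's APIStatus payload. A hedged sketch, assuming only what this file and the experiment_model package define:

package main

import (
	"fmt"

	"github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_client/experiment_service"
	experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)

// describeGetExperimentErr unwraps the error shape produced by
// GetExperimentReader.ReadResponse for non-2xx responses.
func describeGetExperimentErr(err error) string {
	if apiErr, ok := err.(*experiment_service.GetExperimentDefault); ok {
		msg := "(no status payload)"
		if apiErr.Payload != nil {
			msg = apiErr.Payload.Error // server-side APIStatus error message
		}
		// Code() returns the HTTP status recorded by the reader.
		return fmt.Sprintf("get experiment failed with HTTP %d: %s", apiErr.Code(), msg)
	}
	return err.Error()
}

func main() {
	// Simulate what the reader would hand back for a 404.
	apiErr := experiment_service.NewGetExperimentDefault(404)
	apiErr.Payload = &experiment_model.APIStatus{Error: "experiment not found"}
	fmt.Println(describeGetExperimentErr(apiErr))
}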

@@ -1,341 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
"github.com/go-openapi/swag"
strfmt "github.com/go-openapi/strfmt"
)
// NewListExperimentParams creates a new ListExperimentParams object
// with the default values initialized.
func NewListExperimentParams() *ListExperimentParams {
var (
resourceReferenceKeyTypeDefault = string("UNKNOWN_RESOURCE_TYPE")
)
return &ListExperimentParams{
ResourceReferenceKeyType: &resourceReferenceKeyTypeDefault,
timeout: cr.DefaultTimeout,
}
}
// NewListExperimentParamsWithTimeout creates a new ListExperimentParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewListExperimentParamsWithTimeout(timeout time.Duration) *ListExperimentParams {
var (
resourceReferenceKeyTypeDefault = string("UNKNOWN_RESOURCE_TYPE")
)
return &ListExperimentParams{
ResourceReferenceKeyType: &resourceReferenceKeyTypeDefault,
timeout: timeout,
}
}
// NewListExperimentParamsWithContext creates a new ListExperimentParams object
// with the default values initialized, and the ability to set a context for a request
func NewListExperimentParamsWithContext(ctx context.Context) *ListExperimentParams {
var (
resourceReferenceKeyTypeDefault = string("UNKNOWN_RESOURCE_TYPE")
)
return &ListExperimentParams{
ResourceReferenceKeyType: &resourceReferenceKeyTypeDefault,
Context: ctx,
}
}
// NewListExperimentParamsWithHTTPClient creates a new ListExperimentParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewListExperimentParamsWithHTTPClient(client *http.Client) *ListExperimentParams {
var (
resourceReferenceKeyTypeDefault = string("UNKNOWN_RESOURCE_TYPE")
)
return &ListExperimentParams{
ResourceReferenceKeyType: &resourceReferenceKeyTypeDefault,
HTTPClient: client,
}
}
/*ListExperimentParams contains all the parameters to send to the API endpoint
for the list experiment operation. Typically these are written to an http.Request
*/
type ListExperimentParams struct {
/*Filter
A url-encoded, JSON-serialized Filter protocol buffer (see
[filter.proto](https://github.com/kubeflow/pipelines/
blob/master/backend/api/filter.proto)).
*/
Filter *string
/*PageSize
The number of experiments to be listed per page. If there are more
experiments than this number, the response message will contain a
nextPageToken field you can use to fetch the next page.
*/
PageSize *int32
/*PageToken
A page token to request the next page of results. The token is acquired
from the nextPageToken field of the response from the previous
ListExperiment call or can be omitted when fetching the first page.
*/
PageToken *string
/*ResourceReferenceKeyID
The ID of the resource that is referred to.
*/
ResourceReferenceKeyID *string
/*ResourceReferenceKeyType
The type of the resource that is referred to.
*/
ResourceReferenceKeyType *string
/*SortBy
Can be in the format "field_name", "field_name asc" or "field_name desc".
Ascending by default.
*/
SortBy *string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the list experiment params
func (o *ListExperimentParams) WithTimeout(timeout time.Duration) *ListExperimentParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the list experiment params
func (o *ListExperimentParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the list experiment params
func (o *ListExperimentParams) WithContext(ctx context.Context) *ListExperimentParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the list experiment params
func (o *ListExperimentParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the list experiment params
func (o *ListExperimentParams) WithHTTPClient(client *http.Client) *ListExperimentParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the list experiment params
func (o *ListExperimentParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithFilter adds the filter to the list experiment params
func (o *ListExperimentParams) WithFilter(filter *string) *ListExperimentParams {
o.SetFilter(filter)
return o
}
// SetFilter adds the filter to the list experiment params
func (o *ListExperimentParams) SetFilter(filter *string) {
o.Filter = filter
}
// WithPageSize adds the pageSize to the list experiment params
func (o *ListExperimentParams) WithPageSize(pageSize *int32) *ListExperimentParams {
o.SetPageSize(pageSize)
return o
}
// SetPageSize adds the pageSize to the list experiment params
func (o *ListExperimentParams) SetPageSize(pageSize *int32) {
o.PageSize = pageSize
}
// WithPageToken adds the pageToken to the list experiment params
func (o *ListExperimentParams) WithPageToken(pageToken *string) *ListExperimentParams {
o.SetPageToken(pageToken)
return o
}
// SetPageToken adds the pageToken to the list experiment params
func (o *ListExperimentParams) SetPageToken(pageToken *string) {
o.PageToken = pageToken
}
// WithResourceReferenceKeyID adds the resourceReferenceKeyID to the list experiment params
func (o *ListExperimentParams) WithResourceReferenceKeyID(resourceReferenceKeyID *string) *ListExperimentParams {
o.SetResourceReferenceKeyID(resourceReferenceKeyID)
return o
}
// SetResourceReferenceKeyID adds the resourceReferenceKeyId to the list experiment params
func (o *ListExperimentParams) SetResourceReferenceKeyID(resourceReferenceKeyID *string) {
o.ResourceReferenceKeyID = resourceReferenceKeyID
}
// WithResourceReferenceKeyType adds the resourceReferenceKeyType to the list experiment params
func (o *ListExperimentParams) WithResourceReferenceKeyType(resourceReferenceKeyType *string) *ListExperimentParams {
o.SetResourceReferenceKeyType(resourceReferenceKeyType)
return o
}
// SetResourceReferenceKeyType adds the resourceReferenceKeyType to the list experiment params
func (o *ListExperimentParams) SetResourceReferenceKeyType(resourceReferenceKeyType *string) {
o.ResourceReferenceKeyType = resourceReferenceKeyType
}
// WithSortBy adds the sortBy to the list experiment params
func (o *ListExperimentParams) WithSortBy(sortBy *string) *ListExperimentParams {
o.SetSortBy(sortBy)
return o
}
// SetSortBy adds the sortBy to the list experiment params
func (o *ListExperimentParams) SetSortBy(sortBy *string) {
o.SortBy = sortBy
}
// WriteToRequest writes these params to a swagger request
func (o *ListExperimentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if o.Filter != nil {
// query param filter
var qrFilter string
if o.Filter != nil {
qrFilter = *o.Filter
}
qFilter := qrFilter
if qFilter != "" {
if err := r.SetQueryParam("filter", qFilter); err != nil {
return err
}
}
}
if o.PageSize != nil {
// query param page_size
var qrPageSize int32
if o.PageSize != nil {
qrPageSize = *o.PageSize
}
qPageSize := swag.FormatInt32(qrPageSize)
if qPageSize != "" {
if err := r.SetQueryParam("page_size", qPageSize); err != nil {
return err
}
}
}
if o.PageToken != nil {
// query param page_token
var qrPageToken string
if o.PageToken != nil {
qrPageToken = *o.PageToken
}
qPageToken := qrPageToken
if qPageToken != "" {
if err := r.SetQueryParam("page_token", qPageToken); err != nil {
return err
}
}
}
if o.ResourceReferenceKeyID != nil {
// query param resource_reference_key.id
var qrResourceReferenceKeyID string
if o.ResourceReferenceKeyID != nil {
qrResourceReferenceKeyID = *o.ResourceReferenceKeyID
}
qResourceReferenceKeyID := qrResourceReferenceKeyID
if qResourceReferenceKeyID != "" {
if err := r.SetQueryParam("resource_reference_key.id", qResourceReferenceKeyID); err != nil {
return err
}
}
}
if o.ResourceReferenceKeyType != nil {
// query param resource_reference_key.type
var qrResourceReferenceKeyType string
if o.ResourceReferenceKeyType != nil {
qrResourceReferenceKeyType = *o.ResourceReferenceKeyType
}
qResourceReferenceKeyType := qrResourceReferenceKeyType
if qResourceReferenceKeyType != "" {
if err := r.SetQueryParam("resource_reference_key.type", qResourceReferenceKeyType); err != nil {
return err
}
}
}
if o.SortBy != nil {
// query param sort_by
var qrSortBy string
if o.SortBy != nil {
qrSortBy = *o.SortBy
}
qSortBy := qrSortBy
if qSortBy != "" {
if err := r.SetQueryParam("sort_by", qSortBy); err != nil {
return err
}
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
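The three page-related parameters above combine into the standard list/next-page-token loop. A minimal pagination sketch follows; the fetch callback is an assumption standing in for the generated service client's ListExperiment call (the client itself is not in this file), while swag.Int32 and swag.String are the go-openapi pointer helpers already imported above.

package example

import (
	"github.com/go-openapi/swag"

	"github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_client/experiment_service"
	experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)

// fetchPage stands in for the generated client's ListExperiment call;
// its exact signature is an assumption for this sketch.
type fetchPage func(*experiment_service.ListExperimentParams) (*experiment_model.APIListExperimentsResponse, error)

// listAllExperiments walks nextPageToken until the server returns no more pages.
func listAllExperiments(fetch fetchPage) ([]*experiment_model.APIExperiment, error) {
	var all []*experiment_model.APIExperiment
	token := "" // omitted (empty) on the first page, per the PageToken doc above
	for {
		params := experiment_service.NewListExperimentParams().
			WithPageSize(swag.Int32(50)).
			WithPageToken(swag.String(token)) // skipped by WriteToRequest when empty
		resp, err := fetch(params)
		if err != nil {
			return nil, err
		}
		all = append(all, resp.Experiments...)
		if resp.NextPageToken == "" {
			return all, nil // last page reached
		}
		token = resp.NextPageToken
	}
}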

@@ -1,126 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// ListExperimentReader is a Reader for the ListExperiment structure.
type ListExperimentReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *ListExperimentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewListExperimentOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewListExperimentDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewListExperimentOK creates a ListExperimentOK with default headers values
func NewListExperimentOK() *ListExperimentOK {
return &ListExperimentOK{}
}
/*ListExperimentOK handles this case with default header values.
A successful response.
*/
type ListExperimentOK struct {
Payload *experiment_model.APIListExperimentsResponse
}
func (o *ListExperimentOK) Error() string {
return fmt.Sprintf("[GET /apis/v1beta1/experiments][%d] listExperimentOK %+v", 200, o.Payload)
}
func (o *ListExperimentOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIListExperimentsResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewListExperimentDefault creates a ListExperimentDefault with default headers values
func NewListExperimentDefault(code int) *ListExperimentDefault {
return &ListExperimentDefault{
_statusCode: code,
}
}
/*ListExperimentDefault handles this case with default header values.
ListExperimentDefault list experiment default
*/
type ListExperimentDefault struct {
_statusCode int
Payload *experiment_model.APIStatus
}
// Code gets the status code for the list experiment default response
func (o *ListExperimentDefault) Code() int {
return o._statusCode
}
func (o *ListExperimentDefault) Error() string {
return fmt.Sprintf("[GET /apis/v1beta1/experiments][%d] ListExperiment default %+v", o._statusCode, o.Payload)
}
func (o *ListExperimentDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}

@@ -1,150 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewUnarchiveExperimentParams creates a new UnarchiveExperimentParams object
// with the default values initialized.
func NewUnarchiveExperimentParams() *UnarchiveExperimentParams {
var ()
return &UnarchiveExperimentParams{
timeout: cr.DefaultTimeout,
}
}
// NewUnarchiveExperimentParamsWithTimeout creates a new UnarchiveExperimentParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewUnarchiveExperimentParamsWithTimeout(timeout time.Duration) *UnarchiveExperimentParams {
var ()
return &UnarchiveExperimentParams{
timeout: timeout,
}
}
// NewUnarchiveExperimentParamsWithContext creates a new UnarchiveExperimentParams object
// with the default values initialized, and the ability to set a context for a request
func NewUnarchiveExperimentParamsWithContext(ctx context.Context) *UnarchiveExperimentParams {
var ()
return &UnarchiveExperimentParams{
Context: ctx,
}
}
// NewUnarchiveExperimentParamsWithHTTPClient creates a new UnarchiveExperimentParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewUnarchiveExperimentParamsWithHTTPClient(client *http.Client) *UnarchiveExperimentParams {
var ()
return &UnarchiveExperimentParams{
HTTPClient: client,
}
}
/*UnarchiveExperimentParams contains all the parameters to send to the API endpoint
for the unarchive experiment operation. Typically these are written to an http.Request
*/
type UnarchiveExperimentParams struct {
/*ID
The ID of the experiment to be restored.
*/
ID string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the unarchive experiment params
func (o *UnarchiveExperimentParams) WithTimeout(timeout time.Duration) *UnarchiveExperimentParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the unarchive experiment params
func (o *UnarchiveExperimentParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the unarchive experiment params
func (o *UnarchiveExperimentParams) WithContext(ctx context.Context) *UnarchiveExperimentParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the unarchive experiment params
func (o *UnarchiveExperimentParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the unarchive experiment params
func (o *UnarchiveExperimentParams) WithHTTPClient(client *http.Client) *UnarchiveExperimentParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the unarchive experiment params
func (o *UnarchiveExperimentParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithID adds the id to the unarchive experiment params
func (o *UnarchiveExperimentParams) WithID(id string) *UnarchiveExperimentParams {
o.SetID(id)
return o
}
// SetID adds the id to the unarchive experiment params
func (o *UnarchiveExperimentParams) SetID(id string) {
o.ID = id
}
// WriteToRequest writes these params to a swagger request
func (o *UnarchiveExperimentParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param id
if err := r.SetPathParam("id", o.ID); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}

@@ -1,124 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)
// UnarchiveExperimentReader is a Reader for the UnarchiveExperiment structure.
type UnarchiveExperimentReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *UnarchiveExperimentReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewUnarchiveExperimentOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewUnarchiveExperimentDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewUnarchiveExperimentOK creates a UnarchiveExperimentOK with default headers values
func NewUnarchiveExperimentOK() *UnarchiveExperimentOK {
return &UnarchiveExperimentOK{}
}
/*UnarchiveExperimentOK handles this case with default header values.
A successful response.
*/
type UnarchiveExperimentOK struct {
Payload interface{}
}
func (o *UnarchiveExperimentOK) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/experiments/{id}:unarchive][%d] unarchiveExperimentOK %+v", 200, o.Payload)
}
func (o *UnarchiveExperimentOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewUnarchiveExperimentDefault creates a UnarchiveExperimentDefault with default headers values
func NewUnarchiveExperimentDefault(code int) *UnarchiveExperimentDefault {
return &UnarchiveExperimentDefault{
_statusCode: code,
}
}
/*UnarchiveExperimentDefault handles this case with default header values.
UnarchiveExperimentDefault unarchive experiment default
*/
type UnarchiveExperimentDefault struct {
_statusCode int
Payload *experiment_model.APIStatus
}
// Code gets the status code for the unarchive experiment default response
func (o *UnarchiveExperimentDefault) Code() int {
return o._statusCode
}
func (o *UnarchiveExperimentDefault) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/experiments/{id}:unarchive][%d] UnarchiveExperiment default %+v", o._statusCode, o.Payload)
}
func (o *UnarchiveExperimentDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(experiment_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}

@@ -1,24 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"api_experiment.go",
"api_list_experiments_response.go",
"api_relationship.go",
"api_resource_key.go",
"api_resource_reference.go",
"api_resource_type.go",
"api_status.go",
"experiment_storage_state.go",
"protobuf_any.go",
],
importpath = "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model",
visibility = ["//visibility:public"],
deps = [
"@com_github_go_openapi_errors//:go_default_library",
"@com_github_go_openapi_strfmt//:go_default_library",
"@com_github_go_openapi_swag//:go_default_library",
"@com_github_go_openapi_validate//:go_default_library",
],
)

@@ -1,149 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// APIExperiment api experiment
// swagger:model apiExperiment
type APIExperiment struct {
// Output. The time that the experiment was created.
// Format: date-time
CreatedAt strfmt.DateTime `json:"created_at,omitempty"`
// Optional input field describing the purpose of the experiment.
Description string `json:"description,omitempty"`
// Output. Unique experiment ID. Generated by API server.
ID string `json:"id,omitempty"`
// Required input field. Unique experiment name provided by user.
Name string `json:"name,omitempty"`
// Optional input field. Specify which resource this experiment belongs to.
// For Experiment, the only valid resource reference is a single Namespace.
ResourceReferences []*APIResourceReference `json:"resource_references"`
// Output. Specifies whether this experiment is in archived or available state.
StorageState ExperimentStorageState `json:"storage_state,omitempty"`
}
// Validate validates this api experiment
func (m *APIExperiment) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCreatedAt(formats); err != nil {
res = append(res, err)
}
if err := m.validateResourceReferences(formats); err != nil {
res = append(res, err)
}
if err := m.validateStorageState(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *APIExperiment) validateCreatedAt(formats strfmt.Registry) error {
if swag.IsZero(m.CreatedAt) { // not required
return nil
}
if err := validate.FormatOf("created_at", "body", "date-time", m.CreatedAt.String(), formats); err != nil {
return err
}
return nil
}
func (m *APIExperiment) validateResourceReferences(formats strfmt.Registry) error {
if swag.IsZero(m.ResourceReferences) { // not required
return nil
}
for i := 0; i < len(m.ResourceReferences); i++ {
if swag.IsZero(m.ResourceReferences[i]) { // not required
continue
}
if m.ResourceReferences[i] != nil {
if err := m.ResourceReferences[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("resource_references" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
func (m *APIExperiment) validateStorageState(formats strfmt.Registry) error {
if swag.IsZero(m.StorageState) { // not required
return nil
}
if err := m.StorageState.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("storage_state")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *APIExperiment) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *APIExperiment) UnmarshalBinary(b []byte) error {
var res APIExperiment
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
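As a quick check of the model's surface: Validate only inspects the three fields with format, nested, or enum constraints (zero values are skipped via swag.IsZero), and MarshalBinary/UnmarshalBinary are a JSON round trip through swag. A small sketch using nothing beyond this file plus strfmt.Default, the format registry go-openapi ships; the experiment name and description are made-up example values.

package main

import (
	"fmt"
	"log"

	strfmt "github.com/go-openapi/strfmt"

	experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)

func main() {
	exp := &experiment_model.APIExperiment{
		Name:        "mnist-tuning",         // required input field
		Description: "hyperparameter sweep", // optional input field
	}
	// CreatedAt, ResourceReferences, and StorageState are zero here,
	// so their validators short-circuit and Validate passes.
	if err := exp.Validate(strfmt.Default); err != nil {
		log.Fatal(err)
	}
	raw, err := exp.MarshalBinary() // JSON bytes via swag.WriteJSON
	if err != nil {
		log.Fatal(err)
	}
	var round experiment_model.APIExperiment
	if err := round.UnmarshalBinary(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Println(round.Name == exp.Name) // true
}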

@@ -1,100 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// APIListExperimentsResponse api list experiments response
// swagger:model apiListExperimentsResponse
type APIListExperimentsResponse struct {
// A list of experiments returned.
Experiments []*APIExperiment `json:"experiments"`
// The token to list the next page of experiments.
NextPageToken string `json:"next_page_token,omitempty"`
// The total number of experiments for the given query.
TotalSize int32 `json:"total_size,omitempty"`
}
// Validate validates this api list experiments response
func (m *APIListExperimentsResponse) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateExperiments(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *APIListExperimentsResponse) validateExperiments(formats strfmt.Registry) error {
if swag.IsZero(m.Experiments) { // not required
return nil
}
for i := 0; i < len(m.Experiments); i++ {
if swag.IsZero(m.Experiments[i]) { // not required
continue
}
if m.Experiments[i] != nil {
if err := m.Experiments[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("experiments" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *APIListExperimentsResponse) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *APIListExperimentsResponse) UnmarshalBinary(b []byte) error {
var res APIListExperimentsResponse
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}

@@ -1,80 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/validate"
)
// APIRelationship api relationship
// swagger:model apiRelationship
type APIRelationship string
const (
// APIRelationshipUNKNOWNRELATIONSHIP captures enum value "UNKNOWN_RELATIONSHIP"
APIRelationshipUNKNOWNRELATIONSHIP APIRelationship = "UNKNOWN_RELATIONSHIP"
// APIRelationshipOWNER captures enum value "OWNER"
APIRelationshipOWNER APIRelationship = "OWNER"
// APIRelationshipCREATOR captures enum value "CREATOR"
APIRelationshipCREATOR APIRelationship = "CREATOR"
)
// for schema
var apiRelationshipEnum []interface{}
func init() {
var res []APIRelationship
if err := json.Unmarshal([]byte(`["UNKNOWN_RELATIONSHIP","OWNER","CREATOR"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
apiRelationshipEnum = append(apiRelationshipEnum, v)
}
}
func (m APIRelationship) validateAPIRelationshipEnum(path, location string, value APIRelationship) error {
if err := validate.Enum(path, location, value, apiRelationshipEnum); err != nil {
return err
}
return nil
}
// Validate validates this api relationship
func (m APIRelationship) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateAPIRelationshipEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
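The init block above materializes the enum set exactly once, and Validate then rejects any string outside it. A hedged sketch of both outcomes, using only this file's API and strfmt.Default:

package main

import (
	"fmt"

	strfmt "github.com/go-openapi/strfmt"

	experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)

func main() {
	ok := experiment_model.APIRelationshipOWNER
	fmt.Println(ok.Validate(strfmt.Default)) // <nil>: "OWNER" is in the enum

	// Any value outside the generated set fails the enum check.
	bad := experiment_model.APIRelationship("MAINTAINER")
	fmt.Println(bad.Validate(strfmt.Default) != nil) // true
}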

@@ -1,86 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// APIResourceKey api resource key
// swagger:model apiResourceKey
type APIResourceKey struct {
// The ID of the resource that is referred to.
ID string `json:"id,omitempty"`
// The type of the resource that is referred to.
Type APIResourceType `json:"type,omitempty"`
}
// Validate validates this api resource key
func (m *APIResourceKey) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateType(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *APIResourceKey) validateType(formats strfmt.Registry) error {
if swag.IsZero(m.Type) { // not required
return nil
}
if err := m.Type.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("type")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *APIResourceKey) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *APIResourceKey) UnmarshalBinary(b []byte) error {
var res APIResourceKey
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}

@@ -1,111 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// APIResourceReference api resource reference
// swagger:model apiResourceReference
type APIResourceReference struct {
// key
Key *APIResourceKey `json:"key,omitempty"`
// The name of the resource that is referred to.
Name string `json:"name,omitempty"`
// Required field. The relationship from the referred resource to the object.
Relationship APIRelationship `json:"relationship,omitempty"`
}
// Validate validates this api resource reference
func (m *APIResourceReference) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateKey(formats); err != nil {
res = append(res, err)
}
if err := m.validateRelationship(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *APIResourceReference) validateKey(formats strfmt.Registry) error {
if swag.IsZero(m.Key) { // not required
return nil
}
if m.Key != nil {
if err := m.Key.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("key")
}
return err
}
}
return nil
}
func (m *APIResourceReference) validateRelationship(formats strfmt.Registry) error {
if swag.IsZero(m.Relationship) { // not required
return nil
}
if err := m.Relationship.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("relationship")
}
return err
}
return nil
}
// MarshalBinary interface implementation
func (m *APIResourceReference) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *APIResourceReference) UnmarshalBinary(b []byte) error {
var res APIResourceReference
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}

@@ -1,89 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/validate"
)
// APIResourceType api resource type
// swagger:model apiResourceType
type APIResourceType string
const (
// APIResourceTypeUNKNOWNRESOURCETYPE captures enum value "UNKNOWN_RESOURCE_TYPE"
APIResourceTypeUNKNOWNRESOURCETYPE APIResourceType = "UNKNOWN_RESOURCE_TYPE"
// APIResourceTypeEXPERIMENT captures enum value "EXPERIMENT"
APIResourceTypeEXPERIMENT APIResourceType = "EXPERIMENT"
// APIResourceTypeJOB captures enum value "JOB"
APIResourceTypeJOB APIResourceType = "JOB"
// APIResourceTypePIPELINE captures enum value "PIPELINE"
APIResourceTypePIPELINE APIResourceType = "PIPELINE"
// APIResourceTypePIPELINEVERSION captures enum value "PIPELINE_VERSION"
APIResourceTypePIPELINEVERSION APIResourceType = "PIPELINE_VERSION"
// APIResourceTypeNAMESPACE captures enum value "NAMESPACE"
APIResourceTypeNAMESPACE APIResourceType = "NAMESPACE"
)
// for schema
var apiResourceTypeEnum []interface{}
func init() {
var res []APIResourceType
if err := json.Unmarshal([]byte(`["UNKNOWN_RESOURCE_TYPE","EXPERIMENT","JOB","PIPELINE","PIPELINE_VERSION","NAMESPACE"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
apiResourceTypeEnum = append(apiResourceTypeEnum, v)
}
}
func (m APIResourceType) validateAPIResourceTypeEnum(path, location string, value APIResourceType) error {
if err := validate.Enum(path, location, value, apiResourceTypeEnum); err != nil {
return err
}
return nil
}
// Validate validates this api resource type
func (m APIResourceType) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateAPIResourceTypeEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}

@@ -1,100 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// APIStatus api status
// swagger:model apiStatus
type APIStatus struct {
// code
Code int32 `json:"code,omitempty"`
// details
Details []*ProtobufAny `json:"details"`
// error
Error string `json:"error,omitempty"`
}
// Validate validates this api status
func (m *APIStatus) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateDetails(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *APIStatus) validateDetails(formats strfmt.Registry) error {
if swag.IsZero(m.Details) { // not required
return nil
}
for i := 0; i < len(m.Details); i++ {
if swag.IsZero(m.Details[i]) { // not required
continue
}
if m.Details[i] != nil {
if err := m.Details[i].Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("details" + "." + strconv.Itoa(i))
}
return err
}
}
}
return nil
}
// MarshalBinary interface implementation
func (m *APIStatus) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *APIStatus) UnmarshalBinary(b []byte) error {
var res APIStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}

@@ -1,80 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/validate"
)
// ExperimentStorageState experiment storage state
// swagger:model ExperimentStorageState
type ExperimentStorageState string
const (
// ExperimentStorageStateSTORAGESTATEUNSPECIFIED captures enum value "STORAGESTATE_UNSPECIFIED"
ExperimentStorageStateSTORAGESTATEUNSPECIFIED ExperimentStorageState = "STORAGESTATE_UNSPECIFIED"
// ExperimentStorageStateSTORAGESTATEAVAILABLE captures enum value "STORAGESTATE_AVAILABLE"
ExperimentStorageStateSTORAGESTATEAVAILABLE ExperimentStorageState = "STORAGESTATE_AVAILABLE"
// ExperimentStorageStateSTORAGESTATEARCHIVED captures enum value "STORAGESTATE_ARCHIVED"
ExperimentStorageStateSTORAGESTATEARCHIVED ExperimentStorageState = "STORAGESTATE_ARCHIVED"
)
// for schema
var experimentStorageStateEnum []interface{}
func init() {
var res []ExperimentStorageState
if err := json.Unmarshal([]byte(`["STORAGESTATE_UNSPECIFIED","STORAGESTATE_AVAILABLE","STORAGESTATE_ARCHIVED"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
experimentStorageStateEnum = append(experimentStorageStateEnum, v)
}
}
func (m ExperimentStorageState) validateExperimentStorageStateEnum(path, location string, value ExperimentStorageState) error {
if err := validate.Enum(path, location, value, experimentStorageStateEnum); err != nil {
return err
}
return nil
}
// Validate validates this experiment storage state
func (m ExperimentStorageState) Validate(formats strfmt.Registry) error {
var res []error
// value enum
if err := m.validateExperimentStorageStateEnum("", "body", m); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}

@@ -1,185 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package experiment_model
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// ProtobufAny `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
// swagger:model protobufAny
type ProtobufAny struct {
// A URL/resource name that uniquely identifies the type of the serialized
// protocol buffer message. The last segment of the URL's path must represent
// the fully qualified name of the type (as in
// `path/google.protobuf.Duration`). The name should be in a canonical form
// (e.g., leading "." is not accepted).
//
// In practice, teams usually precompile into the binary all types that they
// expect it to use in the context of Any. However, for URLs which use the
// scheme `http`, `https`, or no scheme, one can optionally set up a type
// server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Note: this functionality is not currently available in the official
// protobuf release, and it is not used for type URLs beginning with
// type.googleapis.com.
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
TypeURL string `json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
// Format: byte
Value strfmt.Base64 `json:"value,omitempty"`
}
// Validate validates this protobuf any
func (m *ProtobufAny) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateValue(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
func (m *ProtobufAny) validateValue(formats strfmt.Registry) error {
if swag.IsZero(m.Value) { // not required
return nil
}
// Format "byte" (base64 string) is already validated when unmarshalled
return nil
}
// MarshalBinary interface implementation
func (m *ProtobufAny) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ProtobufAny) UnmarshalBinary(b []byte) error {
var res ProtobufAny
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
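To ground the long Any discussion above in this model's terms: TypeURL names the packed message type and Value carries its serialized bytes (strfmt.Base64 is just a []byte that marshals as base64 in JSON). A minimal sketch; the payload bytes below are placeholders, not a real serialized Duration message.

package main

import (
	"fmt"
	"log"

	strfmt "github.com/go-openapi/strfmt"

	experiment_model "github.com/kubeflow/pipelines/backend/api/go_http_client/experiment_model"
)

func main() {
	anyMsg := &experiment_model.ProtobufAny{
		// The last path segment must be the fully qualified type name.
		TypeURL: "type.googleapis.com/google.protobuf.Duration",
		Value:   strfmt.Base64("placeholder-bytes"), // would be real proto bytes
	}
	if err := anyMsg.Validate(strfmt.Default); err != nil {
		log.Fatal(err)
	}
	raw, _ := anyMsg.MarshalBinary()
	fmt.Println(string(raw)) // {"type_url":"...","value":"cGxhY2Vob2xkZXItYnl0ZXM="}
}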

@@ -1,14 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["job_client.go"],
importpath = "github.com/kubeflow/pipelines/backend/api/go_http_client/job_client",
visibility = ["//visibility:public"],
deps = [
"//backend/api/go_http_client/job_client/job_service:go_default_library",
"@com_github_go_openapi_runtime//:go_default_library",
"@com_github_go_openapi_runtime//client:go_default_library",
"@com_github_go_openapi_strfmt//:go_default_library",
],
)

@@ -1,30 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"create_job_parameters.go",
"create_job_responses.go",
"delete_job_parameters.go",
"delete_job_responses.go",
"disable_job_parameters.go",
"disable_job_responses.go",
"enable_job_parameters.go",
"enable_job_responses.go",
"get_job_parameters.go",
"get_job_responses.go",
"job_service_client.go",
"list_jobs_parameters.go",
"list_jobs_responses.go",
],
importpath = "github.com/kubeflow/pipelines/backend/api/go_http_client/job_client/job_service",
visibility = ["//visibility:public"],
deps = [
"//backend/api/go_http_client/job_model:go_default_library",
"@com_github_go_openapi_errors//:go_default_library",
"@com_github_go_openapi_runtime//:go_default_library",
"@com_github_go_openapi_runtime//client:go_default_library",
"@com_github_go_openapi_strfmt//:go_default_library",
"@com_github_go_openapi_swag//:go_default_library",
],
)

@@ -1,153 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
job_model "github.com/kubeflow/pipelines/backend/api/go_http_client/job_model"
)
// NewCreateJobParams creates a new CreateJobParams object
// with the default values initialized.
func NewCreateJobParams() *CreateJobParams {
var ()
return &CreateJobParams{
timeout: cr.DefaultTimeout,
}
}
// NewCreateJobParamsWithTimeout creates a new CreateJobParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewCreateJobParamsWithTimeout(timeout time.Duration) *CreateJobParams {
var ()
return &CreateJobParams{
timeout: timeout,
}
}
// NewCreateJobParamsWithContext creates a new CreateJobParams object
// with the default values initialized, and the ability to set a context for a request
func NewCreateJobParamsWithContext(ctx context.Context) *CreateJobParams {
var ()
return &CreateJobParams{
Context: ctx,
}
}
// NewCreateJobParamsWithHTTPClient creates a new CreateJobParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewCreateJobParamsWithHTTPClient(client *http.Client) *CreateJobParams {
var ()
return &CreateJobParams{
HTTPClient: client,
}
}
/*CreateJobParams contains all the parameters to send to the API endpoint
for the create job operation. Typically these are written to an http.Request
*/
type CreateJobParams struct {
/*Body
The job to be created
*/
Body *job_model.APIJob
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the create job params
func (o *CreateJobParams) WithTimeout(timeout time.Duration) *CreateJobParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the create job params
func (o *CreateJobParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the create job params
func (o *CreateJobParams) WithContext(ctx context.Context) *CreateJobParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the create job params
func (o *CreateJobParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the create job params
func (o *CreateJobParams) WithHTTPClient(client *http.Client) *CreateJobParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the create job params
func (o *CreateJobParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithBody adds the body to the create job params
func (o *CreateJobParams) WithBody(body *job_model.APIJob) *CreateJobParams {
o.SetBody(body)
return o
}
// SetBody adds the body to the create job params
func (o *CreateJobParams) SetBody(body *job_model.APIJob) {
o.Body = body
}
// WriteToRequest writes these params to a swagger request
func (o *CreateJobParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if o.Body != nil {
if err := r.SetBodyParam(o.Body); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
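CreateJobParams is the body-parameter variant of the same pattern: instead of a path or query param, WriteToRequest serializes Body with SetBodyParam. A usage sketch follows; the APIJob fields and the client call live outside this file, so the job is left empty here and the import paths are taken from the BUILD files shown above.

package main

import (
	"time"

	"github.com/kubeflow/pipelines/backend/api/go_http_client/job_client/job_service"
	job_model "github.com/kubeflow/pipelines/backend/api/go_http_client/job_model"
)

func main() {
	// Fields omitted: APIJob is defined in the job_model package, which this
	// diff lists in the BUILD deps but does not show.
	job := &job_model.APIJob{}

	params := job_service.NewCreateJobParams().
		WithTimeout(time.Minute).
		WithBody(job) // serialized via SetBodyParam in WriteToRequest

	_ = params // normally passed to the generated job service client's CreateJob
}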

@@ -1,126 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by go-swagger; DO NOT EDIT.
package job_service
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
job_model "github.com/kubeflow/pipelines/backend/api/go_http_client/job_model"
)
// CreateJobReader is a Reader for the CreateJob structure.
type CreateJobReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *CreateJobReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewCreateJobOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
result := NewCreateJobDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewCreateJobOK creates a CreateJobOK with default headers values
func NewCreateJobOK() *CreateJobOK {
return &CreateJobOK{}
}
/*CreateJobOK handles this case with default header values.
A successful response.
*/
type CreateJobOK struct {
Payload *job_model.APIJob
}
func (o *CreateJobOK) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/jobs][%d] createJobOK %+v", 200, o.Payload)
}
func (o *CreateJobOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(job_model.APIJob)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewCreateJobDefault creates a CreateJobDefault with default headers values
func NewCreateJobDefault(code int) *CreateJobDefault {
return &CreateJobDefault{
_statusCode: code,
}
}
/*CreateJobDefault handles this case with default header values.
CreateJobDefault create job default
*/
type CreateJobDefault struct {
_statusCode int
Payload *job_model.APIStatus
}
// Code gets the status code for the create job default response
func (o *CreateJobDefault) Code() int {
return o._statusCode
}
func (o *CreateJobDefault) Error() string {
return fmt.Sprintf("[POST /apis/v1beta1/jobs][%d] CreateJob default %+v", o._statusCode, o.Payload)
}
func (o *CreateJobDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(job_model.APIStatus)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}

Some files were not shown because too many files have changed in this diff.