Compare commits

...

No commits in common. "v2.0.1" and "master" have entirely different histories.

5,674 changed files with 421,870 additions and 788,575 deletions

@@ -1,272 +0,0 @@
# Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Test before submit:
# gcloud builds submit --config=.cloudbuild.yaml --substitutions=COMMIT_SHA="$(git rev-parse HEAD)" --project=ml-pipeline-test
steps:
# # Build the Python SDK
# - name: 'python:3-alpine'
# entrypoint: '/bin/sh'
# args: ['-c', 'cd /workspace/sdk/python/; python3 setup.py sdist --format=gztar; cp dist/*.tar.gz /workspace/kfp.tar.gz']
# id: 'preparePythonSDK'
# waitFor: ["-"]
# - name: 'gcr.io/cloud-builders/gsutil'
# args: ['cp', '/workspace/kfp.tar.gz', 'gs://$PROJECT_ID/builds/$COMMIT_SHA/kfp.tar.gz']
# id: 'copyPythonSDK'
# waitFor: ['preparePythonSDK']
# - name: 'gcr.io/cloud-builders/gsutil'
# args: ['cp', '/workspace/kfp.tar.gz', 'gs://$PROJECT_ID/builds/latest/kfp.tar.gz']
# id: 'copyPythonSDKToLatest'
# waitFor: ['preparePythonSDK']
# Build the pipeline system images
- name: 'gcr.io/cloud-builders/docker'
entrypoint: /bin/bash
args:
- -ceux
- |
sed -i -e "s/ARG DATE/ENV DATE \"$(date -u)\"/" /workspace/frontend/Dockerfile
docker build -t gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA \
--build-arg COMMIT_HASH=$COMMIT_SHA \
--build-arg TAG_NAME="$(cat /workspace/VERSION)" \
-f /workspace/frontend/Dockerfile \
/workspace
id: 'buildFrontend'
waitFor: ['-']
- name: 'gcr.io/cloud-builders/docker'
entrypoint: /bin/bash
args:
- -ceux
- |
docker build -t gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA \
--build-arg COMMIT_SHA=$COMMIT_SHA \
--build-arg TAG_NAME="$(cat /workspace/VERSION)" \
-f /workspace/backend/Dockerfile /workspace
id: 'buildApiServer'
waitFor: ['-']
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.scheduledworkflow', '/workspace']
id: 'buildScheduledWorkflow'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.viewercontroller', '/workspace']
id: 'buildViewerCrdController'
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.persistenceagent', '/workspace']
id: 'buildPersistenceAgent'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', '-f',
'/workspace/proxy/Dockerfile', '/workspace/proxy']
id: 'buildInverseProxyAgent'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.visualization', '/workspace']
id: 'buildVisualizationServer'
waitFor: ["-"]
- id: 'buildMetadataWriter'
name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/backend/metadata_writer/Dockerfile', '/workspace']
waitFor: ["-"]
- id: 'buildCacheServer'
name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/backend/Dockerfile.cacheserver', '/workspace']
waitFor: ["-"]
- id: 'buildCacheDeployer'
name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/backend/src/cache/deployer/Dockerfile', '/workspace']
waitFor: ["-"]
# Build marketplace deployer
- id: 'buildMarketplaceDeployer'
name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA',
'--build-arg', 'COMMIT_HASH=$COMMIT_SHA', '-f',
'/workspace/manifests/gcp_marketplace/deployer/Dockerfile', '/workspace/manifests/gcp_marketplace']
waitFor: ["-"]
# Build the Kubeflow-based pipeline component images
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-deployer:$COMMIT_SHA',
'/workspace/components/kubeflow/deployer']
id: 'buildDeployer'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/kubeflow/launcher && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildTFJobLauncher'
waitFor: ["-"]
- id: 'buildCpuTrainer'
name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/kubeflow/dnntrainer && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA -l ml-pipeline-kubeflow-tf-trainer -b 2.3.0']
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/kubeflow/dnntrainer && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA -l ml-pipeline-kubeflow-tf-trainer-gpu -b 2.3.0-gpu']
id: 'buildGpuTrainer'
waitFor: ["-"]
# Build the local pipeline component images
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/local/confusion_matrix && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildConfusionMatrix'
waitFor: ["-"]
- name: 'gcr.io/cloud-builders/docker'
entrypoint: '/bin/bash'
args: ['-c', 'cd /workspace/components/local/roc && ./build_image.sh -p $PROJECT_ID -t $COMMIT_SHA']
id: 'buildROC'
waitFor: ["-"]
# Build third_party images
- name: 'gcr.io/cloud-builders/docker'
args: ['build', '-t', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', '-f',
'/workspace/third_party/metadata_envoy/Dockerfile', '/workspace']
id: 'buildMetadataEnvoy'
# Pull third_party images
# ! Sync to the same MLMD version:
# * backend/metadata_writer/requirements.in and requirements.txt
# * @kubeflow/frontend/src/mlmd/generated
# * .cloudbuild.yaml and .release.cloudbuild.yaml
# * manifests/kustomize/base/metadata/base/metadata-grpc-deployment.yaml
# * test/tag_for_hosted.sh
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0']
id: 'pullMetadataServer'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance']
id: 'pullMinio'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/mysql:8.0.26']
id: 'pullMysql'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/cloudsql-docker/gce-proxy:1.25.0']
id: 'pullCloudsqlProxy'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/argoexec:v3.3.10-license-compliance']
id: 'pullArgoExecutor'
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance']
id: 'pullArgoWorkflowController'
# V2 related images
# Prerequisite: Make sure the ko image is available in the same project by running the following:
# git clone https://github.com/GoogleCloudPlatform/cloud-builders-community.git
# cd cloud-builders-community/ko
# gcloud builds submit . --config=cloudbuild.yaml --project=$PROJECT_ID
# Reference: https://dev.to/amammay/effective-go-on-gcp-lean-containers-with-ko-on-cloud-build-51ek
# Temporarily disable the v2 image build to unblock kubeflow-pipeline-mkp-test
# We aren't building v2 images for MKP at this moment anyway.
#
# - name: 'gcr.io/$PROJECT_ID/ko'
# entrypoint: /bin/sh
# args:
# - -c
# - |
# cd /workspace/backend/src/v2/
# /ko publish --bare ./cmd/launcher-v2 -t $COMMIT_SHA
# env:
# - 'KO_DOCKER_REPO=gcr.io/$PROJECT_ID/kfp-launcher'
# id: 'buildLauncher'
# waitFor: ["-"]
# - name: 'gcr.io/$PROJECT_ID/ko'
# entrypoint: /bin/sh
# args:
# - -c
# - |
# cd /workspace/backend/src/v2/
# /ko publish --bare ./cmd/driver -t $COMMIT_SHA
# env:
# - 'KO_DOCKER_REPO=gcr.io/$PROJECT_ID/kfp-driver'
# id: 'buildDriver'
# waitFor: ["-"]
# Tag for Hosted - SemVersion to Major.Minor parsing
- id: "parseMajorMinorVersion"
waitFor: ["-"]
name: gcr.io/cloud-builders/docker
entrypoint: /bin/bash
args:
- -ceux
- |
# Parse major minor version and save to a file for reusing in other steps.
# e.g. 1.0.0-rc.1 and 1.0.1 are parsed as 1.0
cat /workspace/VERSION | sed -e "s#\([0-9]\+[.][0-9]\+\)[.].*#\1#" > /workspace/mm.ver
# Tag for Hosted - Tag to hosted folder with MKP friendly name
- id: 'tagForHosted'
waitFor: ['parseMajorMinorVersion', 'buildFrontend', 'buildApiServer', 'buildScheduledWorkflow',
'buildViewerCrdController', 'buildPersistenceAgent', 'buildInverseProxyAgent', 'buildVisualizationServer',
'buildMetadataWriter', 'buildCacheServer', 'buildCacheDeployer', 'buildMetadataEnvoy',
'buildMarketplaceDeployer', 'pullMetadataServer', 'pullMinio', 'pullMysql', 'pullCloudsqlProxy',
'pullArgoExecutor', 'pullArgoWorkflowController']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
/workspace/test/tag_for_hosted.sh $PROJECT_ID $COMMIT_SHA $(cat /workspace/VERSION) $(cat /workspace/mm.ver)
images:
# Images for the pipeline system itself
- 'gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA'
# Images for Marketplace
- 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA'
# Images for the Kubeflow-based pipeline components
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-deployer:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tf-trainer:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tf-trainer-gpu:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-kubeflow-tfjob:$COMMIT_SHA'
# Images for the local components
- 'gcr.io/$PROJECT_ID/ml-pipeline-local-confusion-matrix:$COMMIT_SHA'
- 'gcr.io/$PROJECT_ID/ml-pipeline-local-roc:$COMMIT_SHA'
# Images for the third_party components
- 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA'
timeout: '3600s'
options:
diskSizeGb: 300
machineType: 'N1_HIGHCPU_8'
tags:
- build-each-commit
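
The `parseMajorMinorVersion` step above trims the contents of /workspace/VERSION down to its major.minor prefix with a single sed substitution. A quick way to sanity-check that expression locally (a sketch, assuming GNU sed; the `\+` repetition operator is a GNU extension):

for v in 1.0.0-rc.1 1.0.1 2.0.0-alpha.7; do
  echo "$v" | sed -e "s#\([0-9]\+[.][0-9]\+\)[.].*#\1#"   # prints 1.0, 1.0, 2.0
done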

@@ -4,4 +4,3 @@ dist
**/node_modules
backend/build
v2/build
.venv

@@ -1,38 +0,0 @@
---
name: ⚠️ Report Backend Bug
about: Report a Backend bug on Kubeflow Pipelines
title: "[backend] <Bug Name>"
labels: kind/bug, area/backend
---
### Environment
* How did you deploy Kubeflow Pipelines (KFP)?
<!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. -->
* KFP version:
<!-- Specify the version of Kubeflow Pipelines that you are using. The version number is shown at the bottom of the left-side navigation of the KFP UI. -->
* KFP SDK version:
<!-- Specify the output of the following shell command: $pip list | grep kfp -->
### Steps to reproduce
<!--
Specify how to reproduce the problem.
This may include information such as: a description of the process, code snippets, log output, or screenshots.
-->
### Expected result
<!-- What should the correct behavior be? -->
### Materials and Reference
<!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. -->
---
<!-- Don't delete message below to encourage users to support your issue! -->
Impacted by this bug? Give it a 👍.

@@ -1,36 +0,0 @@
---
name: ⚠️ Report Frontend Bug
about: Report a Frontend bug on Kubeflow Pipelines
title: "[frontend] <Bug Name>"
labels: kind/bug, area/frontend
---
### Environment
* How did you deploy Kubeflow Pipelines (KFP)?
<!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. -->
* KFP version:
<!-- Specify the version of Kubeflow Pipelines that you are using. The version number is shown at the bottom of the left-side navigation of the KFP UI. -->
### Steps to reproduce
<!--
Specify how to reproduce the problem.
This may include information such as: a description of the process, code snippets, log output, or screenshots.
-->
### Expected result
<!-- What should the correct behavior be? -->
### Materials and Reference
<!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. -->
---
<!-- Don't delete message below to encourage users to support your issue! -->
Impacted by this bug? Give it a 👍.

@@ -1,48 +0,0 @@
---
name: Bug Report
title: "[bug] <Bug Name>"
about: Report any other issue encountered with Kubeflow Pipelines
labels: kind/bug
---
### Environment
<!-- Please fill in those that seem relevant. -->
* How do you deploy Kubeflow Pipelines (KFP)?
<!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. -->
* KFP version:
<!-- Specify the version of Kubeflow Pipelines that you are using. The version number is shown at the bottom of the left-side navigation of the KFP UI. -->
* KFP SDK version:
<!-- Specify the output of the following shell command: $pip list | grep kfp -->
### Steps to reproduce
<!--
Specify how to reproduce the problem.
This may include information such as: a description of the process, code snippets, log output, or screenshots.
-->
### Expected result
<!-- What should the correct behavior be? -->
### Materials and reference
<!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. -->
### Labels
<!-- Please include labels below by uncommenting them to help us better triage issues -->
<!-- /area frontend -->
<!-- /area backend -->
<!-- /area sdk -->
<!-- /area testing -->
<!-- /area samples -->
<!-- /area components -->
---
<!-- Don't delete message below to encourage users to support your issue! -->
Impacted by this bug? Give it a 👍.

@@ -1,38 +0,0 @@
---
name: ⚠️ Report SDK Bug
about: Report an SDK bug on Kubeflow Pipelines
title: "[sdk] <Bug Name>"
labels: kind/bug, area/sdk
---
### Environment
* KFP version:
<!-- For more information, see an overview of KFP installation options: https://www.kubeflow.org/docs/pipelines/installation/overview/. -->
* KFP SDK version:
<!-- Specify the version of Kubeflow Pipelines that you are using. The version number is shown at the bottom of the left-side navigation of the KFP UI. -->
* All dependencies version:
<!-- Specify the output of the following shell command: $pip list | grep kfp -->
### Steps to reproduce
<!--
Specify how to reproduce the problem.
This may include information such as: a description of the process, code snippets, log output, or screenshots.
-->
### Expected result
<!-- What should the correct behavior be? -->
### Materials and Reference
<!-- Help us debug this issue by providing resources such as: sample code, background context, or links to references. -->
---
<!-- Don't delete message below to encourage users to support your issue! -->
Impacted by this bug? Give it a 👍.

@@ -1,34 +0,0 @@
---
name: 💡 Feature Request
about: An idea to improve Kubeflow Pipelines
title: "[feature] <Description>"
labels: kind/feature
---
### Feature Area
<!-- Uncomment the labels below which are relevant to this feature: -->
<!-- /area frontend -->
<!-- /area backend -->
<!-- /area sdk -->
<!-- /area samples -->
<!-- /area components -->
### What feature would you like to see?
<!-- Provide a description of this feature and the user experience. -->
### What is the use case or pain point?
<!-- It helps us understand the benefit of this feature for your use case. -->
### Is there a workaround currently?
<!-- Without this feature, how do you accomplish your task today? -->
---
<!-- Don't delete message below to encourage users to support your feature request! -->
Love this idea? Give it a 👍.

.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,26 @@
---
name: Bug report
about: Tell us about a problem you are experiencing
---
/kind bug
**What steps did you take and what happened:**
[A clear and concise description of what the bug is.]
**What did you expect to happen:**
**Additional information:**
[Miscellaneous information that will assist in solving the issue.]
**Environment:**
* Python Version (use `python --version`):
* SDK Version:
* Tekton Version (use `tkn version`):
* Kubernetes Version (use `kubectl version`):
* OS (e.g. from `/etc/os-release`):
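
The environment checklist above maps onto a handful of shell commands; a hypothetical one-shot helper to capture them all (the kfp-tekton SDK package name is an assumption, everything else comes from the template):

{
  python --version 2>&1
  pip show kfp-tekton 2>/dev/null | grep '^Version'   # SDK version (assumed package name)
  tkn version                                         # Tekton versions
  kubectl version 2>/dev/null                         # Kubernetes versions
  grep PRETTY_NAME /etc/os-release                    # OS
} > environment.txt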

@@ -0,0 +1,14 @@
---
name: Feature enhancement request
about: Suggest an idea for this project
---
/kind feature
**Description:**
[A clear and concise description of your proposal. What problem does it solve?]
**Additional information:**
[Miscellaneous information that will assist in solving the issue.]

.github/ISSUE_TEMPLATE/questions.md
@@ -0,0 +1,10 @@
---
name: Question
about: Ask a question about this project
---
/kind question
**Question:**
[You can ask any question about this project.]

.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,25 @@
**Which issue is resolved by this Pull Request:**
Resolves #
**Description of your changes:**
**Environment tested:**
* Python Version (use `python --version`):
* Tekton Version (use `tkn version`):
* Kubernetes Version (use `kubectl version`):
* OS (e.g. from `/etc/os-release`):
**Checklist:**
- [ ] The title for your pull request (PR) should follow our title convention. [Learn more about the pull request title convention used in this repository](https://github.com/kubeflow/pipelines/blob/master/CONTRIBUTING.md#pull-request-title-convention).
<!--
PR titles examples:
* `fix(frontend): fixes empty page. Fixes #1234`
Use `fix` to indicate that this PR fixes a bug.
* `feat(backend): configurable service account. Fixes #1234, fixes #1235`
Use `feat` to indicate that this PR adds a new feature.
* `chore: set up changelog generation tools`
Use `chore` to indicate that this PR makes some changes that users don't need to know about.
* `test: fix CI failure. Part of #1234`
Use `part of` to indicate that a PR is working on an issue, but shouldn't close the issue when merged.
-->

.github/issue_label_bot.yaml
@@ -0,0 +1,6 @@
# for https://mlbot.net
label-alias:
bug: 'kind/bug'
feature_request: 'kind/feature'
feature: 'kind/feature'
question: 'kind/question'

@@ -1,16 +0,0 @@
**Description of your changes:**
**Checklist:**
- [ ] The title for your pull request (PR) should follow our title convention. [Learn more about the pull request title convention used in this repository](https://github.com/kubeflow/pipelines/blob/master/CONTRIBUTING.md#pull-request-title-convention).
<!--
PR titles examples:
* `fix(frontend): fixes empty page. Fixes #1234`
Use `fix` to indicate that this PR fixes a bug.
* `feat(backend): configurable service account. Fixes #1234, fixes #1235`
Use `feat` to indicate that this PR adds a new feature.
* `chore: set up changelog generation tools`
Use `chore` to indicate that this PR makes some changes that users don't need to know about.
* `test: fix CI failure. Part of #1234`
Use `part of` to indicate that a PR is working on an issue, but shouldn't close the issue when merged.
-->

@@ -1,5 +1,4 @@
{
"enabled": false,
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:base",

@@ -1,83 +0,0 @@
name: KFP Tekton backend unit tests
on:
push:
branches: [v2-integration]
# Run tests for any PRs which change the backend code
pull_request:
paths:
- 'go.mod'
- 'backend/**'
- 'scripts/deploy/github/**'
- 'manifests/kustomize/**'
env:
GITHUB_ACTION: "true"
SETUPTOOLS_USE_DISTUTILS: "stdlib"
jobs:
run-go-unittests:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v3
- name: "run go unit tests"
run: go test -v -cover ./backend/...
backend-integration:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Create k8s Kind Cluster
uses: container-tools/kind-action@v2
with:
cluster_name: kfp-tekton
kubectl_version: v1.26.3
version: v0.17.0
node_image: kindest/node:v1.26.3
- name: build images
run: ./scripts/deploy/github/build-images.sh
- name: Set up Python 3.10
uses: actions/setup-python@v2
with:
python-version: '3.10'
- name: "deploy kfp-tekton"
run: ./scripts/deploy/github/deploy-kfp.sh
- name: Install sdk
run: |
python3 -m venv .venv
. .venv/bin/activate
pip install -e sdk/python
- name: "flip coin test"
run: |
. .venv/bin/activate
TEST_SCRIPT="test-flip-coin.sh" ./scripts/deploy/github/e2e-test.sh
- name: "static loop test"
run: |
. .venv/bin/activate
TEST_SCRIPT="test-static-loop.sh" ./scripts/deploy/github/e2e-test.sh
- name: "dynamic loop test"
run: |
. .venv/bin/activate
TEST_SCRIPT="test-dynamic-loop.sh" ./scripts/deploy/github/e2e-test.sh
- name: "secret as env"
run: |
. .venv/bin/activate
TEST_SCRIPT="test-secret-as-env.sh" ./scripts/deploy/github/e2e-test.sh
- name: "secret as volume"
run: |
. .venv/bin/activate
TEST_SCRIPT="test-secret-as-volume.sh" ./scripts/deploy/github/e2e-test.sh
- name: "use env"
run: |
. .venv/bin/activate
TEST_SCRIPT="test-env.sh" ./scripts/deploy/github/e2e-test.sh
- name: "use volume"
run: |
. .venv/bin/activate
TEST_SCRIPT="test-volume.sh" ./scripts/deploy/github/e2e-test.sh

@@ -14,7 +14,7 @@ name: "CodeQL"
on:
schedule:
# Every Friday at 19:39
- cron: '39 19 * * 5'
- cron: '39 19 * * 5'
jobs:
analyze:

@@ -0,0 +1,182 @@
name: KFP Tekton Unit Tests
on:
push:
branches: [master]
# Run tests for any PRs.
pull_request:
env:
GITHUB_ACTION: "true"
jobs:
python-unittest:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: python -m pip install -e sdk/python
- name: Unit Tests
run: VENV=$VIRTUAL_ENV make ci_unit_test
validate-testdata:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: 1.19.x
- uses: actions/checkout@v4
- name: Set up Python 3.8
uses: actions/setup-python@v4
with:
python-version: 3.8
- name: Install dependencies
run: python -m pip install -e sdk/python
- name: "Generate testdata yaml files."
run: VENV=$VIRTUAL_ENV make unit_test GENERATE_GOLDEN_YAML=True
- name: "Test only required noninled yaml files are generated."
run: make validate-generated-test-yamls
- name: "Tekton validation for testdata."
run: make validate-testdata
- name: "Validation for examples data."
run: make validate-pipelineloop-examples
progress-report:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.8
uses: actions/setup-python@v4
with:
python-version: 3.8
- name: Install dependencies
run: python -m pip install -e sdk/python
- name: "Progress report on compiling KFP DSL test scripts"
run: VENV=$VIRTUAL_ENV make report
python-lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.8
uses: actions/setup-python@v4
with:
python-version: 3.8
- name: "Lint Python code with flake8"
run: VENV=$VIRTUAL_ENV make lint
check-license:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: "Verify source files contain the license header"
run: make check_license
check-mdtoc:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: "Verify Markdown files have current table of contents"
run: make check_mdtoc
check-doc-links:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: "Verify Markdown files have valid links"
run: make check_doc_links
run-go-unittests:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v4
- name: "run go unit tests"
run: make run-go-unittests
- name: "Verify apiserver, agent, and workflow build"
run: make build-backend
run-pipelineloop-unittests:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v4
with:
go-version: 1.19.x
- name: Checkout code
uses: actions/checkout@v4
- name: "run go pipelineLoop unit tests"
run: cd tekton-catalog/pipeline-loops && make test-all
run-v2-custom-controller-image-builds:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: changes
uses: jitterbit/get-changed-files@v1
with:
format: 'json'
- name: backend changes
id: backend-changes
run: |
readarray -t backend_files <<<"$(jq -r '.[]|select(startswith("tekton-catalog"))|select(endswith("README.md")|not)' <<<'${{ steps.changes.outputs.all }}')"
echo "::set-output name=all::"
if [[ ${#backend_files[@]} -gt 0 ]]; then
if [[ -n "${backend_files[0]}" ]]; then
echo "::set-output name=all::yes"
fi
fi
- name: build images
if: ${{ steps.backend-changes.outputs.all }}
run: make build-v2-custom-controller-images
backend-integration:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- id: changes
uses: jitterbit/get-changed-files@v1
with:
format: 'json'
- name: backend changes
id: backend-changes
run: |
readarray -t backend_files <<<"$(jq -r '.[]|select(startswith("backend") or startswith("go.mod"))|select(endswith("README.md")|not)' <<<'${{ steps.changes.outputs.all }}')"
echo "::set-output name=all::"
if [[ ${#backend_files[@]} -gt 0 ]]; then
if [[ -n "${backend_files[0]}" ]]; then
echo "::set-output name=all::yes"
fi
fi
- name: Create k8s Kind Cluster
if: ${{ steps.backend-changes.outputs.all }}
uses: container-tools/kind-action@v2
with:
cluster_name: kfp-tekton
kubectl_version: v1.26.1
version: v0.17.0
node_image: kindest/node:v1.26.0
- name: build images
if: ${{ steps.backend-changes.outputs.all }}
run: ./scripts/deploy/github/build-images.sh
- name: Set up Python 3.9
if: ${{ steps.backend-changes.outputs.all }}
uses: actions/setup-python@v4
with:
python-version: 3.9
- name: "deploy kfp-tekton"
if: ${{ steps.backend-changes.outputs.all }}
run: ./scripts/deploy/github/deploy-kfp.sh
- name: Install sdk
if: ${{ steps.backend-changes.outputs.all }}
run: python -m pip install -e sdk/python
- name: "flip coin test"
if: ${{ steps.backend-changes.outputs.all }}
run: ./scripts/deploy/github/e2e-test.sh
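
The two "backend changes" steps gate the expensive jobs on whether any non-README file under the watched paths changed. The jq filter at their core can be exercised in isolation against a made-up changed-files list:

changed='["backend/api/main.go","backend/README.md","docs/index.md","go.mod"]'
readarray -t backend_files <<<"$(jq -r \
  '.[]|select(startswith("backend") or startswith("go.mod"))|select(endswith("README.md")|not)' \
  <<<"$changed")"
printf '%s\n' "${backend_files[@]}"   # backend/api/main.go and go.mod; the README is filtered out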

@@ -1,30 +0,0 @@
name: KFP Tekton pipelineloop unit tests
on:
push:
branches: [v2-integration]
# Run tests for any PRs.
pull_request:
paths:
- 'tekton-catalog/pipeline-loops/go.mod'
- 'tekton-catalog/pipeline-loops/cmd/**'
- 'tekton-catalog/pipeline-loops/pkg/**'
- 'tekton-catalog/pipeline-loops/test/**'
env:
GITHUB_ACTION: "true"
SETUPTOOLS_USE_DISTUTILS: "stdlib"
jobs:
run-pipelineloop-unittests:
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.20.x
- name: Checkout code
uses: actions/checkout@v2
- name: "run go pipelineLoop unit tests"
run: cd tekton-catalog/pipeline-loops && make test-all

.gitignore
@@ -1,6 +1,30 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
*.pyc
bin
*.tar.gz
*.ipynb_checkpoints
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.*
# Python dev
.mypy_cache
.venv/
*.egg-info/
# Logs
logs
*.log
npm-debug.log*
# JS Sourcemaps
@@ -33,19 +57,19 @@ frontend/test/ui/visual-regression/screenshots/screen
.idea/
.ijwb/
*.iml
*.swp
*.swo
*~
# Merge files
*.orig
*.pyc
# Operating system generated files
.DS_Store
build
.ipynb_checkpoints
*.egg-info
# go vendor
vendor
tekton-catalog/pipeline-loops/go.sum
# Go module cache
backend/pkg/mod/cache
@@ -56,15 +80,21 @@ bazel-*
# VSCode
.vscode
# Local or temporary build files
build/
sdk/samples/*.yaml
temp/
# test yaml
sdk/python/tests/compiler/pipeline.yaml
sdk/python/tests/compiler/testdata/testpackage/pipeline.yaml
# Test temporary files
_artifacts
.failed_tests
# Generated Python SDK documentation
_build
docs/_build
# sed backups
*.bak

@@ -1,27 +0,0 @@
run:
timeout: 30m
skip-files:
- "api\\*.go$"
- "backend\\api\\*.go"
issues:
max-same-issues: 0
linters:
disable-all: true
enable: # please keep this alphabetized
- gocritic
- gosimple
- govet
- ineffassign
- misspell
- staticcheck
- stylecheck
- unused
linters-settings: # please keep this alphabetized
misspell:
locale: US
staticcheck:
checks:
- "all"

@@ -1,2 +0,0 @@
[settings]
profile=google

.kfp-rebase-version
@@ -0,0 +1 @@
1.8.4

@@ -1,61 +0,0 @@
repos:
# other fast helpful checks
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.3.0
hooks:
- id: check-yaml
args: [--allow-multiple-documents]
- id: check-json
- id: end-of-file-fixer
- id: trailing-whitespace
exclude: ^manifests/gcp_marketplace/test/*
- id: debug-statements
- id: check-merge-conflict
- id: name-tests-test
- id: double-quote-string-fixer
- id: no-commit-to-branch
args: [--branch, master]
# required formatting jobs (run these last)
# add comment "noqa" to ignore an import that should not be removed
# (e.g., for an import with desired side-effects)
- repo: https://github.com/hadialqattan/pycln
rev: v2.1.1
hooks:
- id: pycln
name: pycln
language: python
entry: pycln --all
- repo: https://github.com/pycqa/isort
rev: 5.11.5
hooks:
- id: isort
name: isort
entry: isort --profile google
- repo: https://github.com/pre-commit/mirrors-yapf
rev: "v0.32.0"
hooks:
- id: yapf
- repo: https://github.com/pycqa/docformatter
rev: v1.4
hooks:
- id: docformatter
name: docformatter
language: python
entry: docformatter -i -r
# docformatter messes up certain sphinx link formatting.
# The kubernetes_platform/python docstrings are heavy on links, so ignore.
exclude: (sdk/python/kfp/compiler/compiler_test.py|kubernetes_platform/python/)
# Golang pre-submit hooks
- repo: https://github.com/golangci/golangci-lint
rev: v1.52.2
hooks:
- id: golangci-lint
name: golangci-lint
description: Fast linters runner for Go.
entry: golangci-lint run --new-from-rev HEAD --fix
types: [go]
language: golang
require_serial: true
pass_filenames: false
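
With this pre-commit configuration deleted, none of these hooks run on v2.0.1. On branches that retain the file, the hooks are registered once per clone and can also be run across the whole tree on demand (standard pre-commit usage):

pip install pre-commit
pre-commit install            # registers the git hook so checks run on every commit
pre-commit run --all-files    # one-off run of every configured hook across the repo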

@@ -66,7 +66,7 @@ confidence=
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
# TODO(numerology): enable missing-module-docstring after finish the effort.
disable=missing-module-docstring, unspecified-encoding, missing-function-docstring
disable=missing-module-docstring
[REPORTS]
@@ -99,6 +99,10 @@ evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / stateme
[BASIC]
# Good variable names which should always be accepted, separated by a comma
# s3 is whitelisted for its special meaning.
good-names=i,j,k,ex,Run,_,s3
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
@@ -113,6 +117,9 @@ include-naming-hint=no
# to this list to register other decorators that produce valid properties.
property-classes=abc.abstractproperty
# Regular expression matching correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for variable names
variable-name-hint=[a-z_][a-z0-9_]{2,30}$
@@ -178,7 +185,6 @@ no-docstring-rgx=^test_
# ones are exempt.
docstring-min-length=-1
disable=invalid-name
[ELIF]
@@ -231,6 +237,7 @@ logging-modules=logging
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO
[SIMILARITIES]
# Minimum lines number of a similarity.
@@ -290,7 +297,6 @@ generated-members=set_shape,np.float32
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
disable=redefined-builtin
[VARIABLES]
@@ -313,8 +319,6 @@ callbacks=cb_,_cb
# builtins.
redefining-builtins-modules=six.moves,future.builtins
disable=unused-argument
[CLASSES]
@@ -329,7 +333,7 @@ valid-metaclass-classmethod-first-arg=mcs
# List of member names, which should be excluded from the protected access
# warning.
disable=protected-access
exclude-protected=_asdict,_fields,_replace,_source,_make
[DESIGN]

@@ -2,7 +2,7 @@
version: 2
sphinx:
configuration: docs/conf.py
python:
version: 3.7
install:
- requirements: docs/requirements.txt
build:
os: "ubuntu-22.04"
tools:
python: "3.10"

@@ -1,652 +0,0 @@
# Copyright 2018 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
steps:
# Marketplace Major.Minor parsing
- id: "parseMajorMinorVersion"
name: gcr.io/cloud-builders/docker
entrypoint: /bin/bash
args:
- -ceux
- |
# Parse major minor version and save to a file for reusing in other steps.
# e.g. 1.0.0-rc.1 and 1.0.1 are parsed as 1.0
echo $TAG_NAME | sed -e "s#\([0-9]\+[.][0-9]\+\)[.].*#\1#" > /workspace/mm.ver
# Pull and retag images for pipeline components
- id: 'retagComponentImages'
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
waitFor: ['-']
args:
- -ceux
- |
images=(
"ml-pipeline-kubeflow-deployer"
"ml-pipeline-kubeflow-tf-trainer"
"ml-pipeline-kubeflow-tf-trainer-gpu"
"ml-pipeline-kubeflow-tfjob"
"ml-pipeline-local-confusion-matrix"
"ml-pipeline-local-roc"
)
for image in "${images[@]}"
do
from_image="gcr.io/$PROJECT_ID/$image:$COMMIT_SHA"
target_image="gcr.io/ml-pipeline/$image:$TAG_NAME"
docker pull $from_image
docker tag $from_image $target_image
docker push $target_image
done
# Pull and retag the images for the pipeline system
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA']
id: 'pullFrontend'
- id: 'tagFrontendForMarketplaceMajorMin'
waitFor: ['pullFrontend', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/frontend:$TAG_NAME
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/frontend:$COMMIT_SHA
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/frontend:$TAG_NAME
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/frontend:$TAG_NAME
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/frontend:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/frontend:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/frontend:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/frontend:$TAG_NAME
docker push gcr.io/ml-pipeline/frontend:$COMMIT_SHA
docker push gcr.io/ml-pipeline/google/pipelines/frontend:$TAG_NAME
docker push gcr.io/ml-pipeline/google/pipelines-test/frontend:$TAG_NAME
docker push gcr.io/ml-pipeline/google/pipelines/frontend:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/frontend:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA']
id: 'pullAPIServer'
- id: 'tagAPIServerForMarketplaceMajorMinor'
waitFor: ['pullAPIServer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA 'gcr.io/ml-pipeline/api-server:$TAG_NAME'
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA 'gcr.io/ml-pipeline/api-server:$COMMIT_SHA'
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA 'gcr.io/ml-pipeline/google/pipelines/apiserver:$TAG_NAME'
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA 'gcr.io/ml-pipeline/google/pipelines-test/apiserver:$TAG_NAME'
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/apiserver:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/api-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/apiserver:$(cat /workspace/mm.ver)
docker push 'gcr.io/ml-pipeline/api-server:$TAG_NAME'
docker push 'gcr.io/ml-pipeline/api-server:$COMMIT_SHA'
docker push 'gcr.io/ml-pipeline/google/pipelines/apiserver:$TAG_NAME'
docker push 'gcr.io/ml-pipeline/google/pipelines-test/apiserver:$TAG_NAME'
docker push gcr.io/ml-pipeline/google/pipelines/apiserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/apiserver:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA']
id: 'pullScheduledworkflow'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/scheduledworkflow:$TAG_NAME']
id: 'tagScheduledworkflowVersionNumber'
waitFor: ['pullScheduledworkflow']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/scheduledworkflow:$COMMIT_SHA']
id: 'tagScheduledworkflowCommitSHA'
waitFor: ['pullScheduledworkflow']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$TAG_NAME']
id: 'tagScheduledworkflowForMarketplace'
waitFor: ['pullScheduledworkflow']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$TAG_NAME']
id: 'tagScheduledworkflowForMarketplaceTest'
waitFor: ['pullScheduledworkflow']
- id: 'tagScheduledworkflowForMarketplaceMajorMinor'
waitFor: ['pullScheduledworkflow', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/scheduledworkflow:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA']
id: 'pullViewerCrdController'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/viewer-crd-controller:$TAG_NAME']
id: 'tagViewerCrdControllerVersionNumber'
waitFor: ['pullViewerCrdController']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/viewer-crd-controller:$COMMIT_SHA']
id: 'tagViewerCrdControllerCommitSHA'
waitFor: ['pullViewerCrdController']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/viewercrd:$TAG_NAME']
id: 'tagViewerCrdControllerForMarketplace'
waitFor: ['pullViewerCrdController']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$TAG_NAME']
id: 'tagViewerCrdControllerForMarketplaceTest'
waitFor: ['pullViewerCrdController']
- id: 'tagViewerCrdControllerForMarketplaceMajorMinor'
waitFor: ['pullViewerCrdController', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/viewercrd:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/viewer-crd-controller:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/viewercrd:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA']
id: 'pullPersistenceagent'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/persistenceagent:$TAG_NAME']
id: 'tagPersistenceagentVersionNumber'
waitFor: ['pullPersistenceagent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/persistenceagent:$COMMIT_SHA']
id: 'tagPersistenceagentCommitSHA'
waitFor: ['pullPersistenceagent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/persistenceagent:$TAG_NAME']
id: 'tagPersistenceagentForMarketplace'
waitFor: ['pullPersistenceagent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$TAG_NAME']
id: 'tagPersistenceagentForMarketplaceTest'
waitFor: ['pullPersistenceagent']
- id: 'tagPersistenceagentForMarketplaceMajorMinor'
waitFor: ['pullPersistenceagent', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/persistenceagent:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/persistenceagent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/persistenceagent:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA']
id: 'pullInverseProxyAgent'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/inverse-proxy-agent:$TAG_NAME']
id: 'tagInverseProxyAgentVersionNumber'
waitFor: ['pullInverseProxyAgent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/inverse-proxy-agent:$COMMIT_SHA']
id: 'tagInverseProxyAgentCommitSHA'
waitFor: ['pullInverseProxyAgent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/proxyagent:$TAG_NAME']
id: 'tagInverseProxyAgentForMarketplace'
waitFor: ['pullInverseProxyAgent']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$TAG_NAME']
id: 'tagInverseProxyAgentForMarketplaceTest'
waitFor: ['pullInverseProxyAgent']
- id: 'tagInverseProxyAgentForMarketplaceMajorMinor'
waitFor: ['pullInverseProxyAgent', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/proxyagent:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/inverse-proxy-agent:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/proxyagent:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA']
id: 'pullVisualizationServer'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/visualization-server:$TAG_NAME']
id: 'tagVisualizationServerVersionNumber'
waitFor: ['pullVisualizationServer']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/visualization-server:$COMMIT_SHA']
id: 'tagVisualizationServerCommitSHA'
waitFor: ['pullVisualizationServer']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/visualizationserver:$TAG_NAME']
id: 'tagVisualizationServerForMarketplace'
waitFor: ['pullVisualizationServer']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$TAG_NAME']
id: 'tagVisualizationServerForMarketplaceTest'
waitFor: ['pullVisualizationServer']
- id: 'tagVisualizationServerForMarketplaceMajorMinor'
waitFor: ['pullVisualizationServer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/visualizationserver:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/visualization-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/visualizationserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$(cat /workspace/mm.ver)
# ! Sync to the same MLMD version:
# * backend/metadata_writer/requirements.in and requirements.txt
# * @kubeflow/frontend/src/mlmd/generated
# * .cloudbuild.yaml and .release.cloudbuild.yaml
# * manifests/kustomize/base/metadata/base/metadata-grpc-deployment.yaml
# * test/tag_for_hosted.sh
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0']
id: 'pullMetadataServer'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0', 'gcr.io/ml-pipeline/google/pipelines/metadataserver:$TAG_NAME']
id: 'tagMetadataServerForMarketplace'
waitFor: ['pullMetadataServer']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0', 'gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$TAG_NAME']
id: 'tagMetadataServerForMarketplaceTest'
waitFor: ['pullMetadataServer']
- id: 'tagMetadataServerForMarketplaceMajorMinor'
waitFor: ['pullMetadataServer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 gcr.io/ml-pipeline/google/pipelines/metadataserver:$(cat /workspace/mm.ver)
docker tag gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/metadataserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$(cat /workspace/mm.ver)
- id: 'pullMetadataWriter'
name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA']
waitFor: ['-']
- id: 'tagMetadataWriterVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-writer:$TAG_NAME']
waitFor: ['pullMetadataWriter']
- id: 'tagMetadataWriterCommitSHA'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-writer:$COMMIT_SHA']
waitFor: ['pullMetadataWriter']
- id: 'tagMetadataWriterForMarketplace'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/metadatawriter:$TAG_NAME']
waitFor: ['pullMetadataWriter']
- id: 'tagMetadataWriterForMarketplaceTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$TAG_NAME']
waitFor: ['pullMetadataWriter']
- id: 'tagMetadataWriterForMarketplaceMajorMinor'
waitFor: ['pullMetadataWriter', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/metadatawriter:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/metadata-writer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/metadatawriter:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$(cat /workspace/mm.ver)
- id: 'pullCacheServer'
name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA']
waitFor: ['-']
- id: 'tagCacheServerVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/cache-server:$TAG_NAME']
waitFor: ['pullCacheServer']
- id: 'tagCacheServerCommitSHA'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/cache-server:$COMMIT_SHA']
waitFor: ['pullCacheServer']
- id: 'tagCacheServerForMarketplace'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/cacheserver:$TAG_NAME']
waitFor: ['pullCacheServer']
- id: 'tagCacheServerForMarketplaceTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/cacheserver:$TAG_NAME']
waitFor: ['pullCacheServer']
- id: 'tagCacheServerForMarketplaceMajorMinor'
waitFor: ['pullCacheServer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/cacheserver:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/cache-server:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/cacheserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/cacheserver:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/cacheserver:$(cat /workspace/mm.ver)
- id: 'pullCacheDeployer'
name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA']
waitFor: ['-']
- id: 'tagCacheDeployerVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/cache-deployer:$TAG_NAME']
waitFor: ['pullCacheDeployer']
- id: 'tagCacheDeployerCommitSHA'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/cache-deployer:$COMMIT_SHA']
waitFor: ['pullCacheDeployer']
- id: 'tagCacheDeployerForMarketplace'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/cachedeployer:$TAG_NAME']
waitFor: ['pullCacheDeployer']
- id: 'tagCacheDeployerForMarketplaceTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/cachedeployer:$TAG_NAME']
waitFor: ['pullCacheDeployer']
- id: 'tagCacheDeployerForMarketplaceMajorMinor'
waitFor: ['pullCacheDeployer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/cachedeployer:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/cache-deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/cachedeployer:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/cachedeployer:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/cachedeployer:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA']
id: 'pullMetadataEnvoy'
- id: 'tagMetadataEnvoyVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-envoy:$TAG_NAME']
waitFor: ['pullMetadataEnvoy']
- id: 'tagMetadataEnvoyCommitSHA'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/metadata-envoy:$COMMIT_SHA']
waitFor: ['pullMetadataEnvoy']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$TAG_NAME']
id: 'tagMetadataEnvoyForMarketplace'
waitFor: ['pullMetadataEnvoy']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$TAG_NAME']
id: 'tagMetadataEnvoyForMarketplaceTest'
waitFor: ['pullMetadataEnvoy']
- id: 'tagMetadataEnvoyForMarketplaceMajorMinor'
waitFor: ['pullMetadataEnvoy', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/metadata-envoy:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance']
id: 'pullMinio'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance', 'gcr.io/ml-pipeline/google/pipelines/minio:$TAG_NAME']
id: 'tagMinioForMarketplace'
waitFor: ['pullMinio']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance', 'gcr.io/ml-pipeline/google/pipelines-test/minio:$TAG_NAME']
id: 'tagMinioForMarketplaceTest'
waitFor: ['pullMinio']
- id: 'tagMinioForMarketplaceMajorMinor'
waitFor: ['pullMinio', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance gcr.io/ml-pipeline/google/pipelines/minio:$(cat /workspace/mm.ver)
docker tag gcr.io/ml-pipeline/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance gcr.io/ml-pipeline/google/pipelines-test/minio:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/minio:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/minio:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/mysql:8.0.26']
id: 'pullMysql'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/mysql:8.0.26', 'gcr.io/ml-pipeline/google/pipelines/mysql:$TAG_NAME']
id: 'tagMySqlForMarketplace'
waitFor: ['pullMysql']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/mysql:8.0.26', 'gcr.io/ml-pipeline/google/pipelines-test/mysql:$TAG_NAME']
id: 'tagMySqlForMarketplaceTest'
waitFor: ['pullMysql']
- id: 'tagMySqlForMarketplaceMajorMinor'
waitFor: ['pullMysql', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/ml-pipeline/mysql:8.0.26 gcr.io/ml-pipeline/google/pipelines/mysql:$(cat /workspace/mm.ver)
docker tag gcr.io/ml-pipeline/mysql:8.0.26 gcr.io/ml-pipeline/google/pipelines-test/mysql:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/mysql:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/mysql:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/cloudsql-docker/gce-proxy:1.25.0']
id: 'pullCloudsqlProxy'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/cloudsql-docker/gce-proxy:1.25.0', 'gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$TAG_NAME']
id: 'tagCloudSqlProxyForMarketplace'
waitFor: ['pullCloudsqlProxy']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/cloudsql-docker/gce-proxy:1.25.0', 'gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$TAG_NAME']
id: 'tagCloudSqlProxyForMarketplaceTest'
waitFor: ['pullCloudsqlProxy']
- id: 'tagCloudSqlProxyForMarketplaceMajorMinor'
waitFor: ['pullCloudsqlProxy', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/cloudsql-docker/gce-proxy:1.25.0 gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$(cat /workspace/mm.ver)
docker tag gcr.io/cloudsql-docker/gce-proxy:1.25.0 gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/argoexec:v3.3.10-license-compliance']
id: 'pullArgoExecutor'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/argoexec:v3.3.10-license-compliance', 'gcr.io/ml-pipeline/google/pipelines/argoexecutor:$TAG_NAME']
id: 'tagArgoExecutorForMarketplace'
waitFor: ['pullArgoExecutor']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/argoexec:v3.3.10-license-compliance', 'gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$TAG_NAME']
id: 'tagArgoExecutorForMarketplaceTest'
waitFor: ['pullArgoExecutor']
- id: 'tagArgoExecutorForMarketplaceMajorMinor'
waitFor: ['pullArgoExecutor', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/ml-pipeline/argoexec:v3.3.10-license-compliance gcr.io/ml-pipeline/google/pipelines/argoexecutor:$(cat /workspace/mm.ver)
docker tag gcr.io/ml-pipeline/argoexec:v3.3.10-license-compliance gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/argoexecutor:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$(cat /workspace/mm.ver)
- name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance']
id: 'pullArgoWorkflowController'
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance', 'gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$TAG_NAME']
id: 'tagArgoWorkflowControllerForMarketplace'
waitFor: ['pullArgoWorkflowController']
- name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance', 'gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$TAG_NAME']
id: 'tagArgoWorkflowControllerForMarketplaceTest'
waitFor: ['pullArgoWorkflowController']
- id: 'tagArgoWorkflowControllerForMarketplaceMajorMinor'
waitFor: ['pullArgoWorkflowController', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$(cat /workspace/mm.ver)
docker tag gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$(cat /workspace/mm.ver)
# Marketplace specific deployer and specific primary image
- id: 'pullMarketplaceDeployer'
name: 'gcr.io/cloud-builders/docker'
args: ['pull', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA']
waitFor: ['-']
- id: 'tagMarketplaceDeployerVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines/deployer:$TAG_NAME']
waitFor: ['pullMarketplaceDeployer']
- id: 'tagMarketplaceDeployerVersionNumberTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test/deployer:$TAG_NAME']
waitFor: ['pullMarketplaceDeployer']
- id: 'tagMarketplaceDeployerVersionNumberMajorMinor'
waitFor: ['pullMarketplaceDeployer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines/deployer:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test/deployer:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines/deployer:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test/deployer:$(cat /workspace/mm.ver)
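# The deployer image doubles as the marketplace primary image: the next steps
# also tag it at the repo roots gcr.io/ml-pipeline/google/pipelines and
# gcr.io/ml-pipeline/google/pipelines-test.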
- id: 'tagMarketplacePrimaryVersionNumber'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines:$TAG_NAME']
waitFor: ['pullMarketplaceDeployer']
- id: 'tagMarketplacePrimaryVersionNumberTest'
name: 'gcr.io/cloud-builders/docker'
args: ['tag', 'gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA', 'gcr.io/ml-pipeline/google/pipelines-test:$TAG_NAME']
waitFor: ['pullMarketplaceDeployer']
- id: 'tagMarketplacePrimaryVersionNumberMajorMinor'
waitFor: ['pullMarketplaceDeployer', 'parseMajorMinorVersion']
name: 'gcr.io/cloud-builders/docker'
entrypoint: bash
args:
- -ceux
- |
docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines:$(cat /workspace/mm.ver)
docker tag gcr.io/$PROJECT_ID/deployer:$COMMIT_SHA gcr.io/ml-pipeline/google/pipelines-test:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines:$(cat /workspace/mm.ver)
docker push gcr.io/ml-pipeline/google/pipelines-test:$(cat /workspace/mm.ver)
# # Copy the Python SDK
# - name: 'gcr.io/cloud-builders/gsutil'
# args: ['cp', 'gs://$PROJECT_ID/builds/$COMMIT_SHA/kfp.tar.gz', '/workspace/']
# id: 'copyPythonSDKLocal'
# - name: 'gcr.io/cloud-builders/gsutil'
# args: ['cp', '/workspace/kfp.tar.gz', 'gs://ml-pipeline/release/$TAG_NAME/kfp.tar.gz']
# id: 'copyPythonSDK'
# waitFor: ['copyPythonSDKLocal']
# - name: 'gcr.io/cloud-builders/gsutil'
# args: ['cp', '/workspace/kfp.tar.gz', 'gs://ml-pipeline/release/latest/kfp.tar.gz']
# id: 'copyPythonSDKToLatest'
# waitFor: ['copyPythonSDKLocal']
# # Copy the Python Component SDK
# - name: 'gcr.io/cloud-builders/gsutil'
# args: ['cp', 'gs://$PROJECT_ID/builds/$COMMIT_SHA/kfp-component.tar.gz', '/workspace/']
# id: 'copyPythonComponentSDKLocal'
# - name: 'gcr.io/cloud-builders/gsutil'
# args: ['cp', '/workspace/kfp-component.tar.gz', 'gs://ml-pipeline/release/$TAG_NAME/kfp-component.tar.gz']
# id: 'copyPythonComponentSDK'
# waitFor: ['copyPythonComponentSDKLocal']
# - name: 'gcr.io/cloud-builders/gsutil'
# args: ['cp', '/workspace/kfp-component.tar.gz', 'gs://ml-pipeline/release/latest/kfp-component.tar.gz']
# id: 'copyPythonComponentSDKToLatest'
# waitFor: ['copyPythonComponentSDKLocal']
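# Cloud Build pushes every image listed below after all steps succeed; any
# tag not listed here (such as the MAJOR.MINOR aliases above) must be pushed
# explicitly inside a step.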
images:
- 'gcr.io/ml-pipeline/scheduledworkflow:$TAG_NAME'
- 'gcr.io/ml-pipeline/scheduledworkflow:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/persistenceagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/persistenceagent:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/viewer-crd-controller:$TAG_NAME'
- 'gcr.io/ml-pipeline/viewer-crd-controller:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/inverse-proxy-agent:$TAG_NAME'
- 'gcr.io/ml-pipeline/inverse-proxy-agent:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/visualization-server:$TAG_NAME'
- 'gcr.io/ml-pipeline/visualization-server:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/metadata-envoy:$TAG_NAME'
- 'gcr.io/ml-pipeline/metadata-envoy:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/metadata-writer:$TAG_NAME'
- 'gcr.io/ml-pipeline/metadata-writer:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/cache-server:$TAG_NAME'
- 'gcr.io/ml-pipeline/cache-server:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/cache-deployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/cache-deployer:$COMMIT_SHA'
- 'gcr.io/ml-pipeline/google/pipelines/scheduledworkflow:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/viewercrd:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/persistenceagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/proxyagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/visualizationserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/metadataserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/minio:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/mysql:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/cloudsqlproxy:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/argoexecutor:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/argoworkflowcontroller:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/metadataenvoy:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/metadatawriter:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/deployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/cacheserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines/cachedeployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/scheduledworkflow:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/viewercrd:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/persistenceagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/proxyagent:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/visualizationserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/metadataserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/minio:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/mysql:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/cloudsqlproxy:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/argoexecutor:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/argoworkflowcontroller:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/metadataenvoy:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/metadatawriter:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/cacheserver:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/cachedeployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test/deployer:$TAG_NAME'
- 'gcr.io/ml-pipeline/google/pipelines-test:$TAG_NAME'
timeout: '2000s'
tags:
- release-on-tag

.tekton/listener.yaml Normal file

@ -0,0 +1,186 @@
apiVersion: tekton.dev/v1beta1
kind: TriggerTemplate
metadata:
name: template
spec:
params:
- name: repository
description: The git repo
- name: revision
description: the branch for the git repo
- name: apikey
description: the ibmcloud api key
- name: registry-url
description: ibm container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: region
description: the ibmcloud registry region
default: us-south
- name: org
description: organization
- name: space
description: space
default: dev
- name: resource-group
description: resource group
default: default
- name: docker-username
description: ibm container registry username
default: iamapikey
- name: docker-password
description: iam api key
- name: public-cr-username
description: username for docker hub
- name: public-cr-password
description: password/token for public container registry
- name: public-cr
description: public container registry
default: docker.io
- name: tekton-version
description: tekton version
- name: tekton-ns
description: tekton namespace
default: tekton-pipeline
- name: kubernetes-cluster
description: cluster name
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: slack-webhook
description: webhook URL
default: ""
- name: slack-channel
description: slack channel
default: ""
- name: publish-to-public-cr
description: publish images to public container registry
default: "0"
- name: extra-test-cases
description: run extra test cases
default: "0"
- name: skip-pipeline-loop
    description: skip the pipeline loop test case
default: "0"
- name: image-tag
description: image tag
default: "nightly"
- name: public-cr-namespace
description: namespace for public container registry
default: aipipeline
- name: images
    description: an image list for publishing
default: "api-server persistenceagent metadata-writer scheduledworkflow cache-server frontend pipelineloop-controller pipelineloop-webhook kubeclient"
- name: v2-images
description: a v2 image list for publishing
default: "tekton-kfptask-controller tekton-kfptask-webhook tekton-exithandler-controller tekton-exithandler-webhook tekton-driver"
- name: many-edge-duration
description: duration threshold for many edge pipeline
value: "7"
- name: v2-image-tag
description: v2 tekton catalog image tag
default: "nightly"
resourcetemplates:
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pipelinerun-$(uid)-pvc
spec:
resources:
requests:
storage: 5Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
- apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: pipelinerun-$(uid)
spec:
pipelineRef:
name: pipeline
workspaces:
- name: pipeline-pvc
persistentVolumeClaim:
claimName: pipelinerun-$(uid)-pvc
params:
- name: repository
value: $(params.repository)
- name: revision
value: $(params.revision)
- name: apikey
value: $(params.apikey)
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: region
value: $(params.region)
- name: docker-username
value: $(params.docker-username)
- name: docker-password
value: $(params.docker-password)
- name: public-cr-username
value: $(params.public-cr-username)
- name: public-cr-password
value: $(params.public-cr-password)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: space
value: $(params.space)
- name: tekton-version
value: $(params.tekton-version)
- name: tekton-ns
value: $(params.tekton-ns)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: image-tag
value: $(params.image-tag)
- name: public-cr
value: $(params.public-cr)
- name: public-cr-namespace
value: $(params.public-cr-namespace)
- name: publish-to-public-cr
value: $(params.publish-to-public-cr)
- name: extra-test-cases
value: $(params.extra-test-cases)
- name: skip-pipeline-loop
value: $(params.skip-pipeline-loop)
- name: images
value: $(params.images)
- name: v2-images
value: $(params.v2-images)
- name: many-edge-duration
value: $(params.many-edge-duration)
- name: v2-image-tag
value: $(params.v2-image-tag)
---
apiVersion: tekton.dev/v1beta1
kind: TriggerBinding
metadata:
name: binding
spec:
params:
- name: repository
value: "https://github.com/kubeflow/kfp-tekton"
- name: revision
value: "master"
---
apiVersion: tekton.dev/v1beta1
kind: EventListener
metadata:
name: listener
spec:
triggers:
- binding:
name: binding
template:
name: template

.tekton/pipeline.yaml Normal file

@ -0,0 +1,934 @@
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
name: pipeline
spec:
params:
- name: repository
description: the git repo
- name: revision
description: the revision
default: master
- name: apikey
description: the ibmcloud api key
- name: registry-url
description: ibm container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: public-cr-username
description: username for public container registry
- name: public-cr-password
description: password/token for public container registry
- name: public-cr
description: public container registry
default: docker.io
- name: resource-group
description: resource group
default: default
- name: org
description: organization
- name: region
description: region
- name: space
description: space
- name: tekton-version
description: tekton version
- name: tekton-ns
description: tekton namespace
default: tekton-pipeline
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: kubernetes-cluster
description: cluster name
- name: slack-webhook
description: webhook URL
default: ""
- name: slack-channel
description: slack channel
default: ""
- name: publish-to-public-cr
description: publish images to public container registry
default: "0"
- name: extra-test-cases
description: execute extra test cases
default: "0"
- name: skip-pipeline-loop
    description: skip the pipeline loop test case
default: "0"
- name: image-tag
description: image tag
default: "nightly"
- name: public-cr-namespace
description: namespace for public container registry
default: aipipeline
- name: images
    description: an image list for publishing
default: "api-server persistenceagent metadata-writer scheduledworkflow cache-server frontend pipelineloop-controller pipelineloop-webhook kubeclient"
- name: v2-images
description: a v2 image list for publishing
default: "tekton-kfptask-controller tekton-kfptask-webhook tekton-exithandler-controller tekton-exithandler-webhook"
- name: many-edge-duration
description: duration threshold for many edge pipeline
value: "7"
- name: v2-image-tag
description: v2 tekton catalog image tag
default: "nightly"
workspaces:
- name: pipeline-pvc
tasks:
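  # The test task gates everything downstream: every image build below
  # declares runAfter: [test], so the builds fan out in parallel once the
  # unit tests pass.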
- name: test
taskRef:
name: test
params:
- name: repository
value: $(params.repository)
- name: revision
value: $(params.revision)
- name: apikey
value: $(params.apikey)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: $(params.docker-username)
- name: docker-password
value: $(params.docker-password)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
workspaces:
- name: task-pvc
workspace: pipeline-pvc
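  # run-task: artifact presumably only records the shared build metadata
  # (build.properties) rather than producing an image; the actual api-server
  # image is built by build-images-api-server below with run-task: image.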
- name: build-images-artifact
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: api-server
- name: docker-root
value: .
- name: docker-file
value: ""
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: artifact
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-api-server
retries: 1
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: api-server
- name: docker-root
value: .
- name: docker-file
value: backend/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-persistenceagent
retries: 1
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: persistenceagent
- name: docker-root
value: .
- name: docker-file
value: backend/Dockerfile.persistenceagent
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-metadata-writer
retries: 1
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: metadata-writer
- name: docker-root
value: .
- name: docker-file
value: backend/metadata_writer/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-scheduledworkflow
retries: 1
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: scheduledworkflow
- name: docker-root
value: .
- name: docker-file
value: backend/Dockerfile.scheduledworkflow
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-cacheserver
retries: 1
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: cache-server
- name: docker-root
value: .
- name: docker-file
value: backend/Dockerfile.cacheserver
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-images-frontend
taskRef:
name: build-images-dnd
runAfter:
- test
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: frontend
- name: docker-root
value: .
- name: docker-file
value: frontend/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
- name: build-args
value: "COMMIT_HASH=$(git rev-parse HEAD) DATE=\"$(date -u)\""
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-pipeline-loops-binaries
taskRef:
name: build-binaries
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
params:
- name: build-dir
value: tekton-catalog/pipeline-loops
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-any-sequencer-binaries
taskRef:
name: build-binaries
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
params:
- name: build-dir
value: tekton-catalog/any-sequencer
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: build-kubectl-wrapper-binaries
taskRef:
name: build-binaries
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
params:
- name: build-dir
value: tekton-catalog/kubectl-wrapper
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-pipelineloop-controller
runAfter:
- build-pipeline-loops-binaries
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: pipelineloop-controller
- name: docker-root
value: tekton-catalog/pipeline-loops
- name: docker-file
value: tekton-catalog/pipeline-loops/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: build-args
value: "bin_name=pipelineloop-controller"
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-tekton-kfptask-controller
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: tekton-kfptask-controller
- name: docker-root
value: tekton-catalog/tekton-kfptask
- name: docker-file
value: tekton-catalog/tekton-kfptask/Dockerfile.tekton-kfptask.controller
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-tekton-kfptask-webhook
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: tekton-kfptask-webhook
- name: docker-root
value: tekton-catalog/tekton-kfptask
- name: docker-file
value: tekton-catalog/tekton-kfptask/Dockerfile.tekton-kfptask.webhook
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-tekton-exithandler-controller
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: tekton-exithandler-controller
- name: docker-root
value: tekton-catalog/tekton-exithandler
- name: docker-file
value: tekton-catalog/tekton-exithandler/Dockerfile.tekton-exithandler.controller
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-tekton-exithandler-webhook
runAfter:
- test
- build-images-api-server
- build-images-persistenceagent
- build-images-cacheserver
- build-images-scheduledworkflow
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: tekton-exithandler-webhook
- name: docker-root
value: tekton-catalog/tekton-exithandler
- name: docker-file
value: tekton-catalog/tekton-exithandler/Dockerfile.tekton-exithandler.webhook
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-pipelineloop-webhook
runAfter:
- build-pipeline-loops-binaries
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: pipelineloop-webhook
- name: docker-root
value: tekton-catalog/pipeline-loops
- name: docker-file
value: tekton-catalog/pipeline-loops/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: build-args
value: "bin_name=pipelineloop-webhook"
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-any-sequencer
runAfter:
- build-any-sequencer-binaries
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: any-sequencer
- name: docker-root
value: tekton-catalog/any-sequencer
- name: docker-file
value: tekton-catalog/any-sequencer/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: containerize-kubectl-wrapper
runAfter:
- build-kubectl-wrapper-binaries
taskRef:
name: build-images-dnd
params:
- name: apikey
value: $(params.apikey)
- name: image-name
value: kubeclient
- name: docker-root
value: tekton-catalog/kubectl-wrapper
- name: docker-file
value: tekton-catalog/kubectl-wrapper/Dockerfile
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: run-task
value: image
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: deploy
taskRef:
name: deploy
runAfter:
- build-images-api-server
- build-images-persistenceagent
- build-images-metadata-writer
- build-images-scheduledworkflow
- build-images-cacheserver
- build-images-frontend
params:
- name: apikey
value: $(params.apikey)
- name: docker-username
value: iamapikey
- name: docker-password
value: $(params.docker-password)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: public-cr
value: $(params.public-cr)
# - name: tekton-version
# value: $(params.tekton-version)
# - name: tekton-ns
# value: $(params.tekton-ns)
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: setup-pipeline-loops-deploy
taskRef:
name: setup-pipeline-loops-deploy
runAfter:
- containerize-pipelineloop-controller
- containerize-pipelineloop-webhook
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: setup-kubectl-wrapper-deploy
taskRef:
name: setup-kubectl-wrapper-deploy
runAfter:
- containerize-kubectl-wrapper
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: deploy-pipeline-loops-e2e
retries: 1
taskRef:
name: iks-test
runAfter:
- setup-pipeline-loops-deploy
- deploy
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
- name: test-commands
value: |
export SKIP_PIPELINE_LOOP=$(params.skip-pipeline-loop);
source scripts/deploy/iks/tekton-catalog/deploy-pipeline-loops-e2e.sh;
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: deploy-any-sequencer-e2e
retries: 1
taskRef:
name: iks-test
runAfter:
- deploy
- containerize-any-sequencer
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
- name: test-commands
value: |
export NEW_IMAGE_URL="${REGISTRY_URL}/${REGISTRY_NAMESPACE}/any-sequencer"
export NEW_IMAGE_TAG="${IMAGE_TAG}"
kubectl create clusterrolebinding pipeline-runner-extend --clusterrole=cluster-admin --serviceaccount=default:default || true
source scripts/deploy/iks/tekton-catalog/deploy-any-sequencer-e2e.sh
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: deploy-kubectl-wrapper-e2e
retries: 1
taskRef:
name: iks-test
runAfter:
- deploy
- setup-kubectl-wrapper-deploy
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
- name: test-commands
value: |
export MANIFEST="resourceop_basic.yaml"
source scripts/deploy/iks/tekton-catalog/deploy-kubectl-wrapper-e2e.sh
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-flip-coin
retries: 1
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-flip-coin.sh"
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-cond-dep
retries: 1
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-condition-depend.sh"
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-cache
retries: 1
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-cache.sh"
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-many-edges
retries: 2
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-many-edges.sh"
- name: many-edge-duration
value: $(params.many-edge-duration)
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: e2e-test-trusted-ai
retries: 1
taskRef:
name: e2e-test
runAfter:
- deploy
- deploy-pipeline-loops-e2e
when:
- input: $(params.extra-test-cases)
operator: in
values:
- '1'
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: slack-webhook
value: $(params.slack-webhook)
- name: slack-channel
value: $(params.slack-channel)
- name: test-script
value: "scripts/deploy/iks/test-trusted-ai.sh"
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: publish-images
taskRef:
name: publish-images
runAfter:
- e2e-test-flip-coin
- deploy-any-sequencer-e2e
- deploy-kubectl-wrapper-e2e
when:
- input: $(params.publish-to-public-cr)
operator: in
values:
- '1'
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: registry-url
value: $(params.registry-url)
- name: registry-namespace
value: $(params.registry-namespace)
- name: docker-username
value: $(params.docker-username)
- name: docker-password
value: $(params.docker-password)
- name: public-cr-username
value: $(params.public-cr-username)
- name: public-cr-password
value: $(params.public-cr-password)
- name: image-tag
value: $(params.image-tag)
- name: public-cr-namespace
value: $(params.public-cr-namespace)
- name: public-cr
value: $(params.public-cr)
- name: images
value: $(params.images)
- name: v2-images
value: $(params.v2-images)
- name: v2-image-tag
value: $(params.v2-image-tag)
workspaces:
- name: task-pvc
workspace: pipeline-pvc
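  # finally tasks run whether the pipeline succeeds or fails, so the test
  # fixtures are always cleaned up and KFP is always undeployed.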
finally:
- name: cleanup-pipeline-loops
taskRef:
name: iks-test
params:
- name: apikey
value: $(params.apikey)
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
- name: resource-group
value: $(params.resource-group)
- name: org
value: $(params.org)
- name: region
value: $(params.region)
- name: space
value: $(params.space)
- name: test-commands
value: |
kubectl delete -f tekton-catalog/pipeline-loops/config || true
kubectl delete -f tekton-catalog/pipeline-loops/examples/loop-example-basic.yaml || true
kubectl delete -f sdk/python/tests/compiler/testdata/any_sequencer.yaml || true
kubectl delete -f sdk/python/tests/compiler/testdata/resourceop_basic.yaml || true
kubectl delete job --all -n default || true
workspaces:
- name: task-pvc
workspace: pipeline-pvc
- name: undeploy
taskRef:
name: undeploy
params:
- name: kubernetes-cluster
value: $(params.kubernetes-cluster)
- name: apikey
value: $(params.apikey)
- name: kubeflow-ns
value: $(params.kubeflow-ns)
workspaces:
- name: task-pvc
workspace: pipeline-pvc

774
.tekton/task.yaml Normal file
View File

@ -0,0 +1,774 @@
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: test
spec:
params:
- name: repository
description: the git repo
- name: revision
description: the revision
default: master
- name: apikey
description: the ibmcloud api key
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: resource-group
description: resource group
default: default
- name: org
description: organization
- name: region
description: region
- name: space
description: space
- name: archive-dir
description: archive directory
default: "."
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: run-go-unittests
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: GIT_URL
value: $(params.repository)
- name: REGION
value: $(params.region)
- name: ORG
value: $(params.org)
- name: SPACE
value: $(params.space)
- name: RESOURCE_GROUP
value: $(params.resource-group)
- name: GIT_BRANCH
value: $(params.revision)
- name: REGISTER_NAMESPACE
value: $(params.registry-namespace)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
- name: ARCHIVE_DIR
value: $(params.archive-dir)
command: ["/bin/bash", "-c"]
args:
- set -ex;
rm -r /artifacts/*;
cd /artifacts && git clone -q -b $GIT_BRANCH $GIT_URL .;
GIT_COMMIT=$(git rev-parse HEAD);
source ./scripts/deploy/iks/run-test.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-images
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: image-name
description: image name
- name: docker-root
description: root directory for docker
default: .
- name: docker-file
description: dockerfile
- name: registry-url
description: container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: run-task
description: "execution task: artifact or image"
- name: archive-dir
description: archive directory
default: "."
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-image
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: IMAGE_NAME
value: $(params.image-name)
- name: DOCKER_ROOT
value: $(params.docker-root)
- name: DOCKER_FILE
value: $(params.docker-file)
- name: REGISTRY_URL
value: $(params.registry-url)
- name: REGISTRY_NAMESPACE
value: $(params.registry-namespace)
- name: RUN_TASK
value: $(params.run-task)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
source ./scripts/deploy/iks/build-image.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-images-args
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: image-name
description: image name
- name: docker-root
description: root directory for docker
default: .
- name: docker-file
description: dockerfile
- name: registry-url
description: container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: run-task
description: "execution task: artifact or image"
- name: archive-dir
description: archive directory
default: "."
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-image
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: IMAGE_NAME
value: $(params.image-name)
- name: DOCKER_ROOT
value: $(params.docker-root)
- name: DOCKER_FILE
value: $(params.docker-file)
- name: REGISTRY_URL
value: $(params.registry-url)
- name: REGISTRY_NAMESPACE
value: $(params.registry-namespace)
- name: RUN_TASK
value: $(params.run-task)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
export BUILD_ARG_LIST="COMMIT_HASH=$(git rev-parse HEAD) DATE=\"$(date -u)\"";
source ./scripts/deploy/iks/build-image.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-images-dnd
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: image-name
description: image name
- name: docker-root
description: root directory for docker
default: .
- name: docker-file
description: dockerfile
- name: registry-url
description: container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: run-task
description: "execution task: artifact or image"
- name: archive-dir
description: archive directory
default: "."
- name: build-args
description: docker build args
default: ""
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-image
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: IMAGE_NAME
value: $(params.image-name)
- name: DOCKER_ROOT
value: $(params.docker-root)
- name: DOCKER_FILE
value: $(params.docker-file)
- name: REGISTRY_URL
value: $(params.registry-url)
- name: REGISTRY_NAMESPACE
value: $(params.registry-namespace)
- name: RUN_TASK
value: $(params.run-task)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
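      # Point the docker client in this step at the TLS-secured dind sidecar below.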
- name: DOCKER_HOST
value: "tcp://localhost:2376"
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: "1"
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
export BUILD_ARG_LIST="$(params.build-args)";
source scripts/deploy/iks/build-image-dind.sh;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
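  # The docker daemon runs in a privileged dind sidecar; it writes its TLS
  # client certs to the dind-certs emptyDir shared with the build step above.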
sidecars:
- name: server
image: icr.io/continuous-delivery/pipeline/docker:20.10.22-dind
securityContext:
privileged: true
command: ["sh", "-c"]
env:
# Write generated certs to the path shared with the client.
- name: DOCKER_TLS_CERTDIR
value: /certs
args:
      # Set the MTU to a value that fits within the ibmcloud Calico MTU.
      # References:
      # - https://liejuntao001.medium.com/fix-docker-in-docker-network-issue-in-kubernetes-cc18c229d9e5
      # - https://cloud.ibm.com/docs/containers?topic=containers-kernel#calico-mtu
      #
      # Also work around virtio-fs on Continuous Delivery shared workers by
      # backing /var/lib/docker with an ext4 loopback image.
- if [[ $(df -PT /var/lib/docker | awk 'NR==2 {print $2}') == virtiofs ]]; then
apk add e2fsprogs &&
truncate -s 20G /tmp/disk.img &&
mkfs.ext4 /tmp/disk.img &&
mount /tmp/disk.img /var/lib/docker; fi &&
dockerd-entrypoint.sh --mtu=1400;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
# Wait for the dind daemon to generate the certs it will share with the client.
readinessProbe:
initialDelaySeconds: 2
periodSeconds: 1
exec:
command: ["ls", "/certs/client/ca.pem"]
volumes:
- name: dind-certs
emptyDir: {}
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: deploy
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: kubernetes-cluster
description: cluster name
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
# - name: tekton-version
# description: tekton version
# - name: tekton-ns
# description: tekton namespace
# default: tekton-pipeline
- name: docker-username
description: for ibm container registry
default: iamapikey
- name: docker-password
description: iam api key
- name: public-cr
description: public container registry
default: quay.io
- name: archive-dir
description: archive directory
default: "."
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: deploy-kfp-tekton
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: PUBLIC_CR
value: $(params.public-cr)
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: KUBEFLOW_NS
value: $(params.kubeflow-ns)
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
source ./scripts/deploy/iks/deploy-kfp.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: e2e-test
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: kubernetes-cluster
description: cluster name
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: archive-dir
description: archive directory
default: "."
- name: slack-webhook
description: webhook URL
default: ""
- name: slack-channel
description: slack channel
default: ""
- name: test-script
description: a shell script to run the test case
default: ""
- name: many-edge-duration
description: duration threshold for pipeline
default: "5"
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: run-test
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: KUBEFLOW_NS
value: $(params.kubeflow-ns)
- name: SLACK_WEBHOOK
value: $(params.slack-webhook)
- name: SLACK_CHANNEL
value: $(params.slack-channel)
- name: PIPELINE_URL
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/pipeline-run-url']
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
- name: TEST_SCRIPT
value: $(params.test-script)
- name: MANY_EDGE_DURATION
value: $(params.many-edge-duration)
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
source ./scripts/deploy/iks/e2e-test.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: publish-images
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: kubernetes-cluster
description: cluster name
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: registry-url
description: container registry url
- name: registry-namespace
description: the ibmcloud registry namespace
- name: docker-username
description: ibm container registry username
- name: docker-password
description: ibm container registry password
- name: public-cr-username
description: username for public container registry
- name: public-cr-password
description: password/token for public container registry
- name: public-cr
    description: public container registry URI
default: docker.io
- name: images
description: image list to publish
default: "api-server persistenceagent metadata-writer scheduledworkflow cache-server frontend"
- name: image-tag
description: image tag
default: "nightly"
- name: v2-images
description: v2 image list to publish
default: "tekton-kfptask-controller tekton-kfptask-webhook tekton-exithandler-controller tekton-exithandler-webhook"
- name: v2-image-tag
description: v2 image tag
default: "nightly"
- name: dind-ns
description: dind namespace
default: docker-build
- name: public-cr-namespace
description: namespace for public container registry
default: aipipeline
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: publish-images-to-cr
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: KUBEFLOW_NS
value: $(params.kubeflow-ns)
- name: REGISTRY_URL
value: $(params.registry-url)
- name: REGISTRY_NAMESPACE
value: $(params.registry-namespace)
- name: DOCKER_USERNAME
value: $(params.docker-username)
- name: DOCKER_PASSWORD
value: $(params.docker-password)
- name: PUBLIC_CR_USERNAME
value: $(params.public-cr-username)
- name: PUBLIC_CR_TOKEN
value: $(params.public-cr-password)
- name: PUBLIC_CR_NAMESPACE
value: $(params.public-cr-namespace)
- name: PUBLIC_CR
value: $(params.public-cr)
- name: IMAGES
value: $(params.images)
- name: PUBLISH_TAG
value: $(params.image-tag)
- name: V2_IMAGES
value: $(params.v2-images)
- name: V2_PUBLISH_TAG
value: $(params.v2-image-tag)
- name: DIND_NS
value: $(params.dind-ns)
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
- name: DOCKER_HOST
value: "tcp://localhost:2376"
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: "1"
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
source scripts/deploy/iks/publish-image.sh;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
sidecars:
- image: icr.io/continuous-delivery/pipeline/docker:19.03.15-dind
name: server
securityContext:
privileged: true
command: ["sh", "-c"]
env:
# Write generated certs to the path shared with the client.
- name: DOCKER_TLS_CERTDIR
value: /certs
args:
      # Set the MTU to a value that fits within the ibmcloud Calico MTU.
      # References:
      # - https://liejuntao001.medium.com/fix-docker-in-docker-network-issue-in-kubernetes-cc18c229d9e5
      # - https://cloud.ibm.com/docs/containers?topic=containers-kernel#calico-mtu
      #
      # Also work around virtio-fs on Continuous Delivery shared workers by
      # backing /var/lib/docker with an ext4 loopback image.
- if [[ $(df -PT /var/lib/docker | awk 'NR==2 {print $2}') == virtiofs ]]; then
apk add e2fsprogs &&
truncate -s 20G /tmp/disk.img &&
mkfs.ext4 /tmp/disk.img &&
mount /tmp/disk.img /var/lib/docker; fi &&
dockerd-entrypoint.sh --mtu=1400;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
# Wait for the dind daemon to generate the certs it will share with the client.
readinessProbe:
initialDelaySeconds: 2
periodSeconds: 1
exec:
command: ["ls", "/certs/client/ca.pem"]
volumes:
- name: dind-certs
emptyDir: {}
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: undeploy
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: archive-dir
description: archive directory
default: "."
- name: kubernetes-cluster
description: cluster name
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: undeploy-kfp-tekton
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: KUBEFLOW_NS
value: $(params.kubeflow-ns)
command: ["/bin/bash", "-c"]
args:
- set -ex;
cd /artifacts && source build.properties;
source ./scripts/deploy/iks/undeploy-kfp.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-binaries
spec:
params:
- name: build-dir
description: directory with makefile
default: tekton-catalog/feature
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-binaries
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: BUILD_DIR
value: $(params.build-dir)
command:
- /bin/bash
- '-c'
args:
- set -ex;
cd /artifacts && source build.properties;
source scripts/deploy/iks/tekton-catalog/build-binaries.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-pipeline-loops-deploy
spec:
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: setup-deploy
image: quay.io/aipipeline/pipeline-base-image:1.6
command:
- /bin/bash
- '-c'
args:
- |
set -ex;
cd /artifacts && source build.properties;
export CONTROLLER_IMAGE_URL=${REGISTRY_URL}/${REGISTRY_NAMESPACE}/pipelineloop-controller;
export WEBHOOK_IMAGE_URL=${REGISTRY_URL}/${REGISTRY_NAMESPACE}/pipelineloop-webhook;
source scripts/deploy/iks/tekton-catalog/setup-pipeline-loops-deploy.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-kubectl-wrapper-deploy
spec:
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: setup-deploy
image: quay.io/aipipeline/pipeline-base-image:1.6
command:
- /bin/bash
- '-c'
args:
- >
cd /artifacts && source build.properties;
export KUBECTL_WRAPPER_IMAGE_URL=${REGISTRY_URL}/${REGISTRY_NAMESPACE}/kubeclient;
source scripts/deploy/iks/tekton-catalog/setup-kubectl-wrapper-deploy.sh;
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: iks-test
spec:
params:
- name: apikey
description: the ibmcloud api key
- name: kubernetes-cluster
description: cluster name
- name: kubeflow-ns
description: kubeflow namespace
default: kubeflow
- name: resource-group
description: resource group
default: default
- name: org
description: organization
- name: region
description: region
- name: space
description: space
- name: archive-dir
description: archive directory
default: "."
- name: test-commands
description: testing commands
default: ""
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: iks-script
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: IBM_CLOUD_API_KEY
value: $(params.apikey)
- name: PIPELINE_KUBERNETES_CLUSTER_NAME
value: $(params.kubernetes-cluster)
- name: ARCHIVE_DIR
value: $(params.archive-dir)
- name: KUBEFLOW_NS
value: $(params.kubeflow-ns)
- name: REGION
value: $(params.region)
- name: ORG
value: $(params.org)
- name: SPACE
value: $(params.space)
- name: RESOURCE_GROUP
value: $(params.resource-group)
- name: PIPELINE_URL
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/pipeline-run-url']
- name: BUILD_NUMBER
valueFrom:
fieldRef:
fieldPath: metadata.annotations['devops.cloud.ibm.com/build-number']
command: ["/bin/bash", "-c"]
args:
- |
set -ex;
cd /artifacts && source build.properties
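        # retry <max-attempts> <sleep-seconds> <command ...>: rerun the command
        # until it succeeds, giving up after max-attempts tries.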
retry() {
local max=$1; shift
local interval=$1; shift
until "$@"; do
echo "trying.."
max=$((max-1))
if [[ "$max" -eq 0 ]]; then
return 1
fi
sleep "$interval"
done
}
# Set up kubernetes config
retry 3 3 ibmcloud login --apikey "${IBM_CLOUD_API_KEY}" --no-region
retry 3 3 ibmcloud target -r "$REGION" -g "$RESOURCE_GROUP"
retry 3 3 ibmcloud ks cluster config -c "$PIPELINE_KUBERNETES_CLUSTER_NAME"
$(params.test-commands)


@ -0,0 +1,7 @@
# Tekton Catalog Pipeline Manifests
Directory containing the pipeline manifests to build, test, and publish the [tekton-catalog](../../tekton-catalog) images.
Scripts related to this pipeline run can be found under [scripts/deploy/iks/tekton-catalog](../../scripts/deploy/iks/tekton-catalog).
This pipeline also relies on some of the tasks defined in IBM's Open Toolchain [tekton-catalog](https://github.com/open-toolchain/tekton-catalog).


@ -0,0 +1,110 @@
apiVersion: tekton.dev/v1beta1
kind: TriggerTemplate
metadata:
name: trigger-template
spec:
params:
# Passed in through secure-properties: apikey, dockerhub-token
- name: repository
description: git repository url
default: https://github.com/kubeflow/kfp-tekton
- name: branch
description: repository branch
default: master
- name: directory-name
description: Directory to clone repository into
default: kfp-tekton
- name: registry-region
description: The IBM Cloud region for image registry
- name: registry-namespace
description: Container registry namespace
- name: dev-region
default: ibm:yp:us-south
- name: dev-resource-group
default: default
- name: cluster-name
description: the name of the cluster to target
default: iks-cluster
- name: push-to-dockerhub
description: flag to signal if images should be pushed to dockerhub
default: '1'
- name: dockerhub-username
description: Dockerhub username
default: kfptektonbot
- name: docker-namespace
description: Image namespace
default: aipipeline
- name: image-tag
description: Tag for all images pushed to dockerhub/ibmcloud
default: nightly
- name: pipeline-debug
description: Flag used by tekton-catalog tasks
default: '0'
resourcetemplates:
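    # $(uid) is expanded by Tekton Triggers to a unique value for each firing
    # of the trigger, so every run gets its own PVC and PipelineRun below.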
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: pipelinerun-$(uid)-pvc
spec:
resources:
requests:
storage: 5Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
- apiVersion: tekton.dev/v1beta1
kind: PipelineRun
metadata:
name: pipelinerun-$(uid)
spec:
pipelineRef:
name: tekton-catalog-publish-pipeline
params:
- name: repository
value: $(params.repository)
- name: branch
value: $(params.branch)
- name: directory-name
value: $(params.directory-name)
- name: registry-region
value: $(params.registry-region)
- name: registry-namespace
value: $(params.registry-namespace)
- name: dev-region
value: $(params.dev-region)
- name: dev-resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: push-to-dockerhub
value: $(params.push-to-dockerhub)
- name: dockerhub-username
value: $(params.dockerhub-username)
- name: docker-namespace
value: $(params.docker-namespace)
- name: image-tag
value: $(params.image-tag)
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: pipeline-ws
persistentVolumeClaim:
claimName: pipelinerun-$(uid)-pvc
---
apiVersion: tekton.dev/v1beta1
kind: TriggerBinding
metadata:
name: trigger-binding-manual
spec:
params: null
---
apiVersion: tekton.dev/v1beta1
kind: EventListener
metadata:
name: manual-run
spec:
triggers:
- binding:
name: trigger-binding-manual
template:
name: trigger-template
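# Sketch of firing this listener manually (assumes Tekton Triggers' default
# naming, i.e. the EventListener above is exposed as service el-manual-run
# on port 8080 in its namespace):
#   kubectl port-forward svc/el-manual-run 8080:8080 &
#   curl -s -X POST http://localhost:8080 -H 'Content-Type: application/json' -d '{}'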

View File

@ -0,0 +1,504 @@
apiVersion: tekton.dev/v1beta1
kind: Pipeline
metadata:
name: tekton-catalog-publish-pipeline
timeout: 30m
spec:
params:
- name: repository
description: git repository url
default: https://github.com/kubeflow/kfp-tekton
- name: branch
description: repository branch
default: master
- name: directory-name
description: Directory to clone repository into
default: kfp-tekton
- name: registry-region
description: The IBM Cloud region for image registry
- name: registry-namespace
description: Container registry namespace
- name: dev-region
default: ibm:yp:us-south
- name: dev-resource-group
default: default
- name: cluster-name
description: the name of the cluster to target
default: iks-cluster
- name: push-to-dockerhub
description: flag to signal if images should be pushed to dockerhub
default: '0'
- name: dockerhub-username
description: Dockerhub username
default: kfptektonbot
- name: docker-namespace
description: Image namespace
default: aipipeline
- name: image-tag
description: Tag for all images pushed to dockerhub/ibmcloud
default: nightly
- name: pipeline-debug
description: Flag used by tekton-catalog tasks
default: '0'
# pipeline-loops specific parameters
- name: path-to-pipeline-loops-context
description: Path to pipeline-loops docker build directory.
default: tekton-catalog/pipeline-loops
- name: path-to-pipeline-loops-dockerfile
description: Path to directory with pipeline-loops dockerfile
default: tekton-catalog/pipeline-loops
# any-sequencer specific parameters
- name: path-to-any-sequencer-context
description: >-
Path to any-sequencer docker build directory.
default: tekton-catalog/any-sequencer
- name: path-to-any-sequencer-dockerfile
description: Path to directory with any-sequencer dockerfile
default: tekton-catalog/any-sequencer
# kubectl-wrapper specific parameters
- name: path-to-kubectl-wrapper-context
description: >-
Path to kubectl-wrapper docker build directory.
default: tekton-catalog/kubectl-wrapper
- name: path-to-kubectl-wrapper-dockerfile
description: Path to directory with kubectl-wrapper dockerfile
default: tekton-catalog/kubectl-wrapper
workspaces:
- name: pipeline-ws
tasks:
- name: git-clone
taskRef:
name: git-clone-repo
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: ibmcloud-apikey-secret-key
value: toolchain-apikey
- name: repository
value: $(params.repository)
- name: branch
value: $(params.branch)
- name: directory-name
value: $(params.directory-name)
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: output
workspace: pipeline-ws
# ==========================================================
# Pipeline Loops Tasks
# ==========================================================
- name: build-pipeline-loops-binaries
taskRef:
name: build-binaries
runAfter:
- git-clone
params:
- name: directory-name
value: $(params.directory-name)
- name: build-dir
value: tekton-catalog/pipeline-loops
workspaces:
- name: task-pvc
workspace: pipeline-ws
- name: containerize-pipelineloop-controller
runAfter:
- build-pipeline-loops-binaries
taskRef:
name: icr-containerize
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: container-registry-apikey-secret-key
value: apikey
- name: registry-region
value: $(params.registry-region)
- name: registry-namespace
value: $(params.registry-namespace)
- name: image-name
value: "pipelineloop-controller"
- name: path-to-context
value: $(params.directory-name)/$(params.path-to-pipeline-loops-context)
- name: path-to-dockerfile
value: $(params.directory-name)/$(params.path-to-pipeline-loops-dockerfile)
- name: build-args
value: "bin_name=pipelineloop-controller"
- name: additional-tags-script
value: >
IMAGE_TAG=$(params.image-tag)
echo "$IMAGE_TAG"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: source
workspace: pipeline-ws
- name: containerize-pipelineloop-webhook
runAfter:
- build-pipeline-loops-binaries
taskRef:
name: icr-containerize
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: container-registry-apikey-secret-key
value: apikey
- name: registry-region
value: $(params.registry-region)
- name: registry-namespace
value: $(params.registry-namespace)
- name: image-name
value: "pipelineloop-webhook"
- name: path-to-context
value: $(params.directory-name)/$(params.path-to-pipeline-loops-context)
- name: path-to-dockerfile
value: $(params.directory-name)/$(params.path-to-pipeline-loops-dockerfile)
- name: build-args
value: "bin_name=pipelineloop-webhook"
- name: additional-tags-script
value: >
IMAGE_TAG=$(params.image-tag)
echo "$IMAGE_TAG"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: source
workspace: pipeline-ws
- name: setup-pipeline-loops-deploy
taskRef:
name: setup-pipeline-loops-deploy
runAfter:
- containerize-pipelineloop-controller
- containerize-pipelineloop-webhook
params:
- name: controller-image-url
value: $(tasks.containerize-pipelineloop-controller.results.image-repository)
- name: webhook-image-url
value: $(tasks.containerize-pipelineloop-webhook.results.image-repository)
# Controller and webhook should have been built with same tag
- name: image-tag
value: $(tasks.containerize-pipelineloop-controller.results.image-tags)
workspaces:
- name: task-pvc
workspace: pipeline-ws
- name: deploy-pipeline-loops-e2e
taskRef:
name: iks-deploy-to-kubernetes
runAfter:
- setup-pipeline-loops-deploy
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: script
value: |
#!/bin/bash
set -euxo pipefail
cd $(params.directory-name)
source scripts/deploy/iks/tekton-catalog/deploy-pipeline-loops-e2e.sh
APP_URL="null"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: artifacts
workspace: pipeline-ws
- name: publish-pipeline-loops-images-to-dockerhub
taskRef:
name: publish-images-to-dockerhub
runAfter:
- deploy-pipeline-loops-e2e
when:
- input: $(params.push-to-dockerhub)
operator: in
values:
- '1'
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: dockerhub-token-secret-key
value: dockerhub-token
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: directory-name
value: $(params.directory-name)
- name: dockerhub-username
value: $(params.dockerhub-username)
- name: docker-namespace
value: $(params.docker-namespace)
- name: image-tag
value: $(params.image-tag)
- name: publish-script
value: scripts/deploy/iks/tekton-catalog/publish-pipeline-loops-images.sh
workspaces:
- name: task-pvc
workspace: pipeline-ws
# ==========================================================
# Any Sequencer Tasks
# ==========================================================
- name: build-any-sequencer-binaries
taskRef:
name: build-binaries
runAfter:
- git-clone
params:
- name: directory-name
value: $(params.directory-name)
- name: build-dir
value: tekton-catalog/any-sequencer
workspaces:
- name: task-pvc
workspace: pipeline-ws
- name: containerize-any-sequencer
runAfter:
- build-any-sequencer-binaries
taskRef:
name: icr-containerize
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: container-registry-apikey-secret-key
value: apikey
- name: registry-region
value: $(params.registry-region)
- name: registry-namespace
value: $(params.registry-namespace)
- name: image-name
value: any-sequencer
- name: path-to-context
value: $(params.directory-name)/$(params.path-to-any-sequencer-context)
- name: path-to-dockerfile
value: $(params.directory-name)/$(params.path-to-any-sequencer-dockerfile)
- name: additional-tags-script
value: >
IMAGE_TAG=$(params.image-tag)
echo "$IMAGE_TAG"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: source
workspace: pipeline-ws
- name: deploy-any-sequencer-e2e
taskRef:
name: iks-deploy-to-kubernetes
runAfter:
- build-any-sequencer-binaries
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: script
value: |
cd $(params.directory-name)
export NEW_IMAGE_URL="$(tasks.containerize-any-sequencer.results.image-repository)"
export NEW_IMAGE_TAG=$(tasks.containerize-any-sequencer.results.image-tags)
source scripts/deploy/iks/tekton-catalog/deploy-any-sequencer-e2e.sh
APP_URL="null"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: artifacts
workspace: pipeline-ws
- name: publish-any-sequencer-image-to-dockerhub
taskRef:
name: publish-images-to-dockerhub
runAfter:
- deploy-any-sequencer-e2e
when:
- input: $(params.push-to-dockerhub)
operator: in
values:
- '1'
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: dockerhub-token-secret-key
value: dockerhub-token
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: directory-name
value: $(params.directory-name)
- name: dockerhub-username
value: $(params.dockerhub-username)
- name: docker-namespace
value: $(params.docker-namespace)
- name: image-tag
value: $(params.image-tag)
- name: publish-script
value: scripts/deploy/iks/tekton-catalog/publish-any-sequencer-image.sh
workspaces:
- name: task-pvc
workspace: pipeline-ws
# ==========================================================
# Kubectl Wrapper Tasks
# ==========================================================
- name: build-kubectl-wrapper-binaries
taskRef:
name: build-binaries
runAfter:
- git-clone
params:
- name: directory-name
value: $(params.directory-name)
- name: build-dir
value: tekton-catalog/kubectl-wrapper
workspaces:
- name: task-pvc
workspace: pipeline-ws
- name: containerize-kubectl-wrapper
runAfter:
- build-kubectl-wrapper-binaries
taskRef:
name: icr-containerize
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: container-registry-apikey-secret-key
value: apikey
- name: registry-region
value: $(params.registry-region)
- name: registry-namespace
value: $(params.registry-namespace)
- name: image-name
value: "kubeclient"
- name: path-to-context
value: $(params.directory-name)/$(params.path-to-kubectl-wrapper-context)
- name: path-to-dockerfile
value: $(params.directory-name)/$(params.path-to-kubectl-wrapper-dockerfile)
- name: additional-tags-script
value: >
IMAGE_TAG=$(params.image-tag)
echo "$IMAGE_TAG"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: source
workspace: pipeline-ws
- name: setup-kubectl-wrapper-deploy
taskRef:
name: setup-kubectl-wrapper-deploy
runAfter:
- containerize-kubectl-wrapper
params:
- name: kubectl-wrapper-image-url
value: $(tasks.containerize-kubectl-wrapper.results.image-repository)
- name: image-tag
value: $(tasks.containerize-kubectl-wrapper.results.image-tags)
workspaces:
- name: task-pvc
workspace: pipeline-ws
- name: deploy-kubectl-wrapper-e2e
taskRef:
name: iks-deploy-to-kubernetes
runAfter:
- setup-kubectl-wrapper-deploy
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: script
value: |
#!/bin/bash
set -euxo pipefail
cd $(params.directory-name)
source scripts/deploy/iks/tekton-catalog/deploy-kubectl-wrapper-e2e.sh
APP_URL="null"
- name: pipeline-debug
value: $(params.pipeline-debug)
workspaces:
- name: artifacts
workspace: pipeline-ws
- name: publish-kubectl-wrapper-image-to-dockerhub
taskRef:
name: publish-images-to-dockerhub
runAfter:
- deploy-kubectl-wrapper-e2e
when:
- input: $(params.push-to-dockerhub)
operator: in
values:
- '1'
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: dockerhub-token-secret-key
value: dockerhub-token
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: directory-name
value: $(params.directory-name)
- name: dockerhub-username
value: $(params.dockerhub-username)
- name: docker-namespace
value: $(params.docker-namespace)
- name: image-tag
value: $(params.image-tag)
- name: publish-script
value: scripts/deploy/iks/tekton-catalog/publish-kubectl-wrapper-image.sh
workspaces:
- name: task-pvc
workspace: pipeline-ws
# ==========================================================
# Final Task
# ==========================================================
finally:
- name: cleanup
taskRef:
      name: cleanup
params:
- name: continuous-delivery-context-secret
value: secure-properties
- name: kubernetes-service-apikey-secret-key
value: apikey
- name: cluster-region
value: $(params.dev-region)
- name: resource-group
value: $(params.dev-resource-group)
- name: cluster-name
value: $(params.cluster-name)
- name: directory-name
value: $(params.directory-name)
workspaces:
- name: task-pvc
workspace: pipeline-ws

View File

@ -0,0 +1,357 @@
# ==========================================================
# Shared Tasks
# ==========================================================
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: build-binaries
spec:
params:
- name: directory-name
description: Repository directory
default: kfp-tekton
- name: build-dir
description: directory with makefile
default: tekton-catalog/feature
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: build-binaries
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: BUILD_DIR
value: $(params.build-dir)
command:
- /bin/bash
- '-c'
args:
- >
cd artifacts/$(params.directory-name);
source scripts/deploy/iks/tekton-catalog/build-binaries.sh;
---
# Stolen and modified from https://github.com/open-toolchain/tekton-catalog/blob/master/container-registry/task-execute-in-dind.yaml
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: publish-images-to-dockerhub
spec:
params:
- name: ibmcloud-api
description: the ibmcloud api
default: 'https://cloud.ibm.com'
- name: continuous-delivery-context-secret
description: >-
name of the configmap containing the continuous delivery pipeline
context secrets
default: secure-properties
- name: kubernetes-service-apikey-secret-key
description: >-
field in the secret that contains the api key used to login to
ibmcloud kubernetes service
default: apikey
- name: dockerhub-token-secret-key
description: >-
        field in the secret that contains the token used to log in to
        Dockerhub
default: dockerhub-token
- name: cluster-region
description: >
the ibmcloud region hosting the cluster
default: 'us-south'
- name: resource-group
description: target resource group (name or id) for the ibmcloud login operation
default: "default"
- name: cluster-name
description: >-
name of the cluster
default: 'iks-cluster'
- name: directory-name
description: Repository directory
default: kfp-tekton
- name: dockerhub-username
description: Dockerhub username
default: ''
- name: dockerhub-config-dir
description: Directory to store docker's config.json in
default: /steps
- name: docker-registry-secret-name
description: Name of docker-registry secret
default: registry-dockerconfig-secret
- name: docker-registry
description: Image registry
default: docker.io
- name: docker-namespace
description: Image namespace
default: ''
- name: image-tag
description: Image tag
default: ''
- name: publish-script
description: Script to build/tag/publish/... image
default: run.sh
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: create-docker-registry-secret
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: API_KEY
valueFrom:
secretKeyRef:
name: $(params.continuous-delivery-context-secret)
key: $(params.kubernetes-service-apikey-secret-key)
- name: IBM_CLOUD_API
value: $(params.ibmcloud-api)
- name: IBM_CLOUD_REGION
value: $(params.cluster-region)
- name: IBMCLOUD_RESOURCE_GROUP
value: $(params.resource-group)
- name: CLUSTER_NAME
value: $(params.cluster-name)
- name: DOCKERHUB_USERNAME
value: $(params.dockerhub-username)
- name: DOCKERHUB_TOKEN
valueFrom:
secretKeyRef:
name: $(params.continuous-delivery-context-secret)
key: $(params.dockerhub-token-secret-key)
- name: DOCKER_CONFIG_DIR
value: $(params.dockerhub-config-dir)
- name: SECRET_NAME
value: $(params.docker-registry-secret-name)
command:
- /bin/bash
- '-c'
args:
- >
cd artifacts/$(params.directory-name);
source scripts/deploy/iks/tekton-catalog/iks-authenticate.sh;
source scripts/deploy/iks/tekton-catalog/create-dockerconfig-secret.sh;
volumeMounts:
- mountPath: /steps
name: steps-volume
- name: run-docker-commands
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: DOCKER_HOST
value: "tcp://localhost:2376"
# Verify TLS.
- name: DOCKER_TLS_VERIFY
value: "1"
# Use the certs generated by the sidecar daemon.
- name: DOCKER_CERT_PATH
value: /certs/client
# The location of the client configuration files.
- name: DOCKER_CONFIG
value: /steps
- name: DOCKER_REGISTRY
value: $(params.docker-registry)
- name: DOCKER_NAMESPACE
value: $(params.docker-namespace)
- name: IMAGE_TAG
value: $(params.image-tag)
command:
- "/bin/bash"
- "-c"
args:
- |
cd artifacts/$(params.directory-name);
source $(params.publish-script)
volumeMounts:
- mountPath: /steps
name: steps-volume
- mountPath: /certs/client
name: dind-certs
sidecars:
- image: docker:dind
name: server
securityContext:
privileged: true
command: ["sh", "-c"]
env:
# Write generated certs to the path shared with the client.
- name: DOCKER_TLS_CERTDIR
value: /certs
args:
        # Set the MTU to a value that fits within the ibmcloud Calico MTU
# References:
# - https://liejuntao001.medium.com/fix-docker-in-docker-network-issue-in-kubernetes-cc18c229d9e5
# - https://cloud.ibm.com/docs/containers?topic=containers-kernel#calico-mtu
#
        # Use a workaround to bypass virtio-fs for Continuous Delivery shared workers
- if [[ $(df -PT /var/lib/docker | awk 'NR==2 {print $2}') == virtiofs ]]; then
apk add e2fsprogs &&
truncate -s 20G /tmp/disk.img &&
mkfs.ext4 /tmp/disk.img &&
mount /tmp/disk.img /var/lib/docker; fi &&
dockerd-entrypoint.sh --mtu=1400;
volumeMounts:
- mountPath: /certs/client
name: dind-certs
# Wait for the dind daemon to generate the certs it will share with the client.
readinessProbe:
initialDelaySeconds: 2
periodSeconds: 1
exec:
command: ["ls", "/certs/client/ca.pem"]
volumes:
- name: steps-volume
emptyDir: {}
- name: dind-certs
emptyDir: {}
# ==========================================================
# Final Tasks
# ==========================================================
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: cleanup
spec:
params:
- name: ibmcloud-api
description: the ibmcloud api
default: 'https://cloud.ibm.com'
- name: continuous-delivery-context-secret
description: >-
name of the configmap containing the continuous delivery pipeline
context secrets
default: secure-properties
- name: kubernetes-service-apikey-secret-key
description: >-
field in the secret that contains the api key used to login to
ibmcloud kubernetes service
default: apikey
- name: cluster-region
description: >
the ibmcloud region hosting the cluster
default: 'us-south'
- name: resource-group
description: target resource group (name or id) for the ibmcloud login operation
default: "default"
- name: cluster-name
description: >-
name of the cluster
default: 'iks-cluster'
- name: directory-name
description: Repository directory
default: kfp-tekton
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: cleanup
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: API_KEY
valueFrom:
secretKeyRef:
name: $(params.continuous-delivery-context-secret)
key: $(params.kubernetes-service-apikey-secret-key)
- name: IBM_CLOUD_API
value: $(params.ibmcloud-api)
- name: IBM_CLOUD_REGION
value: $(params.cluster-region)
- name: IBMCLOUD_RESOURCE_GROUP
value: $(params.resource-group)
- name: CLUSTER_NAME
value: $(params.cluster-name)
command:
- /bin/bash
- '-c'
args:
- >
cd artifacts/$(params.directory-name);
source scripts/deploy/iks/tekton-catalog/iks-authenticate.sh;
source scripts/deploy/iks/tekton-catalog/cleanup.sh;
# ==========================================================
# Pipeline Loops Tasks
# ==========================================================
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-pipeline-loops-deploy
spec:
params:
- name: directory-name
description: Repository directory
default: kfp-tekton
- name: controller-image-url
      description: Controller image URL of the form registry/repository/image
default: us.icr.io/kfp-tekton/pipelineloop-controller
- name: webhook-image-url
      description: Webhook image URL of the form registry/repository/image
default: us.icr.io/kfp-tekton/pipelineloop-webhook
- name: image-tag
description: Image tag SHARED by controller and webhook
default: nightly
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: setup-deploy
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: CONTROLLER_IMAGE_URL
value: $(params.controller-image-url)
- name: WEBHOOK_IMAGE_URL
value: $(params.webhook-image-url)
- name: IMAGE_TAG
value: $(params.image-tag)
command:
- /bin/bash
- '-c'
args:
- >
cd artifacts/$(params.directory-name);
source scripts/deploy/iks/tekton-catalog/setup-pipeline-loops-deploy.sh;
# ==========================================================
# Any Sequencer Tasks
# ==========================================================
# ==========================================================
# Kubectl Wrapper Tasks
# ==========================================================
---
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
name: setup-kubectl-wrapper-deploy
spec:
params:
- name: directory-name
description: Repository directory
default: kfp-tekton
- name: kubectl-wrapper-image-url
      description: Kubectl wrapper image URL of the form registry/repository/image
default: us.icr.io/kfp-tekton/kubeclient
- name: image-tag
description: Image tag of kubectl-wrapper image
default: nightly
workspaces:
- name: task-pvc
mountPath: /artifacts
steps:
- name: setup-deploy
image: quay.io/aipipeline/pipeline-base-image:1.6
env:
- name: KUBECTL_WRAPPER_IMAGE_URL
value: $(params.kubectl-wrapper-image-url)
- name: IMAGE_TAG
value: $(params.image-tag)
command:
- /bin/bash
- '-c'
args:
- >
cd artifacts/$(params.directory-name);
source scripts/deploy/iks/tekton-catalog/setup-kubectl-wrapper-deploy.sh;
# ==========================================================
# Imported Tasks - https://github.com/open-toolchain/tekton-catalog
# ==========================================================
# git-clone
# icr-containerize
# iks-deploy-to-kubernetes

View File

@ -1,7 +0,0 @@
# This is the list of Kubeflow Pipelines' significant contributors.
#
# This does not necessarily list everyone who has contributed code,
# especially since many employees of one corporation may be contributing.
# To see the full list of contributors, see the revision history in
# source control.
Google LLC

File diff suppressed because it is too large

View File

@ -3,91 +3,54 @@
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement
Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution;
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.
## Table of Contents
You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.
<!-- START of ToC generated by running ./tools/mdtoc.sh CONTRIBUTING.md -->
## Contribution Guidelines
- [Project Structure](#project-structure)
- [Legal](#legal)
- [Coding Style](#coding-style)
- [Unit Testing Best Practices](#unit-testing-best-practices)
- [Golang](#golang)
- [Code Reviews](#code-reviews)
- [Pull Requests](#pull-requests)
- [Pull Request Title Convention](#pull-request-title-convention)
- [PR Title Structure](#pr-title-structure)
- [PR Type](#pr-type)
- [PR Scope](#pr-scope)
- [Get Involved](#get-involved)
To propose a new feature or a change that alters some existing user experience
or creates a new user experience, follow these steps:
### Step 1: Establish Context
Search on KFP GitHub issues list to see if the same or similar proposal has been
made in the past. The historical context can help you draft a better
proposal. Sometimes you will find a very similar proposal was already presented,
discussed thoroughly, and that it is either awaiting contribution (in active
development) or was rejected (often due to timing or conflicting scope with
other plans). To avoid confusion and conflicts, where possible, please
contribute to existing issues before creating new ones.
### Step 2: Create Feature Request
Create a new issue using the “Feature Request” template if no existing issue is
found. Fill in answers to the template questions. To avoid delays, provide as
much information as needed for initial review. Keep in mind that new features
should comply with backward-compatibility and platform-portability requirements.
### Step 3: Initial Team Triage
Wait for a member from the Kubeflow Pipelines team (under
orgs/kubeflow/teams/pipelines/ in
[org.yaml](https://raw.githubusercontent.com/kubeflow/internal-acls/master/github-orgs/kubeflow/org.yaml))
to comment on the issue. The team aims to triage new issues weekly, but
cannot at this time guarantee when your issue will be
reviewed. The team will work with you to determine if your change is trivial and
can proceed or whether it is nontrivial and needs a more detailed design
document and review.
### Step 4: Design Review
If the team agrees with the overall proposal, you will be asked to write a
design document explaining why you want to make the change, what changes you
are proposing, and how you plan to implement it. The design review process is
required by default unless the team agrees that the change is trivial. It is recommended that you use this [Google doc template](https://docs.google.com/document/d/1VrfuMo8ZeMmV75a-rUq9SO-E6KotBodAf-P0WZeFDZA/edit?usp=sharing&resourcekey=0-BklOgu8ivhdLCplZuPDZZg) (you need to join the [kubeflow-discuss](https://groups.google.com/g/kubeflow-discuss) Google group to get access)
for your design, and share it with kubeflow-discuss@googlegroups.com for
commenting. After sharing the design documentation, you could optionally join a
session of the bi-weekly Kubeflow Pipelines community meetings
[[agenda](http://bit.ly/kfp-meeting-notes)] to present or further discuss your
proposal. A proposal may still get rejected at this stage if it comes with
unresolved drawbacks or if it does not align with the long term plans for the
project.
### Step 5: Implementation
After you get formal approval from a Kubeflow Pipelines team member, you can
implement your design and send a pull request. Make sure existing tests are all
passing and new tests are added when applicable. Remember to link to the feature
request issue to help reviewers catch up on the context.
<!-- END of ToC generated by running ./tools/mdtoc.sh CONTRIBUTING.md -->
## Project Structure
Kubeflow Pipelines consists of multiple components. Before you begin, learn how to [build the Kubeflow Pipelines component container images](./developer_guide.md#build-image). To get started, see the development guides:
Kubeflow Pipelines consists of multiple components. Before you begin, learn how
to [build the Kubeflow Pipelines component container images](./guides/developer_guide.md#development-building-from-source-code).
To get started, see the development guides:
* [Frontend development guide](./frontend/README.md)
* [Backend development guide](./backend/README.md)
* [SDK development guide](./sdk/python/README.md)
## Coding style
## Legal
### SDK
See the [SDK-specific Contribution Guidelines](sdk/CONTRIBUTING.md) for contributing to the `kfp` SDK.
Kubeflow uses Developer Certificate of Origin ([DCO](https://github.com/apps/dco/)).
### Frontend
Please see https://github.com/kubeflow/community/tree/master/dco-signoff-hook#signing-off-commits to learn how to sign off your commits.
The frontend part of the project uses [prettier](https://prettier.io/) for formatting, read [frontend/README.md#code-style](frontend/README.md#code-style) for more details.
## Coding Style
### Backend
Use [gofmt](https://pkg.go.dev/cmd/gofmt) package to format your .go source files. Read [backend/README.md#code-style](backend/README.md#code-style) for more details.
The Python part of the project will follow [Google Python style guide](http://google.github.io/styleguide/pyguide.html).
We provide a [yapf](https://github.com/google/yapf) configuration file to help
contributors auto-format their code to adopt the Google Python style. Also, it
is encouraged to lint python docstrings by [docformatter](https://github.com/myint/docformatter).
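
For example, a minimal sketch (assuming the tools are installed and the repo's yapf configuration is picked up automatically; the file path below is a placeholder):

```bash
pip install yapf docformatter
yapf --in-place --recursive sdk/python          # auto-format to the Google Python style
docformatter --in-place path/to/changed_file.py  # format docstrings (placeholder path)
```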
The frontend part of the project uses [prettier](https://prettier.io/) for
formatting, read [frontend/README.md#code-style](frontend/README.md#code-style)
for more details.
## Unit Testing Best Practices
@ -95,33 +58,46 @@ Use [gofmt](https://pkg.go.dev/cmd/gofmt) package to format your .go source file
### Golang
* Put your tests in a different package: Moving your test code out of the package allows you to write tests as though you were a real user of the package. You cannot fiddle around with the internals,
instead you focus on the exposed interface and are always thinking about any noise that you might be adding to your API. Usually the test code will be put under the same folder
but with a package suffix of `_test`. https://golang.org/src/go/ast/example_test.go (example)
* Internal tests go in a different file: If you do need to unit test some internals, create another file with `_internal_test.go`
as the suffix.
* Write table driven tests: https://github.com/golang/go/wiki/TableDrivenTests (example)
* Put your tests in a different package: Moving your test code out of the package
allows you to write tests as though you were a real user of the package. You
cannot fiddle around with the internals, instead you focus on the exposed
interface and are always thinking about any noise that you might be adding to
your API. Usually the test code will be put under the same folder but with a
package suffix of `_test`. https://golang.org/src/go/ast/example_test.go (example)
* Internal tests go in a different file: If you do need to unit test some internals,
create another file with `_internal_test.go` as the suffix.
* Write table-driven tests: https://github.com/golang/go/wiki/TableDrivenTests (example)
## Code reviews
## Code Reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.
## Pull Requests
The following should be viewed as _Best Practices_ unless you know better ones
(please submit a guidelines PR).
| Practice | Rationale |
| -------- | --------- |
| Keep the code clean | The health of the codebase is imperative to the success of the project. Files should be under 500 lines long in most cases, which may mean a refactor is necessary before adding changes. |
| Limit your scope | No one wants to review a 1000 line PR. Try to keep your changes focused to ease reviewability. This may mean separating a large feature into several smaller milestones. |
| Refine commit messages | Your commit messages should be in the imperative tense and clearly describe your feature upon first glance. See [this article](https://chris.beams.io/posts/git-commit/) for guidelines. |
| Reference an issue | Issues are a great way to gather design feedback from the community. To save yourself time on a controversial PR, first cut an issue for any major feature work. |
## Pull Request Title Convention
We enforce a pull request (PR) title convention to quickly indicate the type and scope of a PR.
PR titles become commit messages when PRs are merged. We also parse PR titles to generate the changelog.
PR titles should:
* Provide a user-friendly description of the change.
* Follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/).
* Specify the issue(s) fixed or worked on at the end of the title.
Examples:
* `fix(ui): fixes empty page. Fixes #1234`
* `feat(backend): configurable service account. Fixes #1234, fixes #1235`
* `chore: refactor some files`
@ -132,22 +108,20 @@ The following sections describe the details of the PR title convention.
### PR Title Structure
PR titles should use the following structure.
```
<type>[optional scope]: <description>[ Fixes #<issue-number>]
```
Replace the following:
* **`<type>`**: The PR type describes the reason for the change, such as `fix` to indicate that the PR fixes a bug. More information about PR types is available in the next section.
* **`[optional scope]`**: (Optional.) The PR scope describes the part of Kubeflow Pipelines that this PR changes, such as `frontend` to indicate that the change affects the user interface. Choose a scope according to [PR Scope section](#pr-scope).
* **`<description>`**: A user-friendly description of this change.
* **`[ Fixes #<issue-number>]`**: (Optional.) Specifies the issues fixed by this PR.
### PR Type
Type can be one of the following:
* **feat**: A new feature.
* **fix**: A bug fix. However, a PR that fixes test infrastructure is not user facing, so it should use the test type instead.
* **docs**: Documentation changes.
@ -167,7 +141,6 @@ use `chore` as the fallback.
### PR Scope
Scope is optional; it can be one of the following:
* **frontend**: user interface or frontend server related, folder `frontend`, `frontend/server`
* **backend**: Backend, folder `backend`
* **sdk**: `kfp` python package, folder `sdk`
@ -187,7 +160,8 @@ usually have different reviewers.
If you are not sure, or the PR doesn't fit into the above scopes, you can
either omit the scope because it's optional, or propose an additional scope here.
## Community Guidelines
## Get Involved
This project follows
[Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
* [Slack](http://kubeflow.slack.com/)
* [Twitter](http://twitter.com/kubeflow)
* [Mailing List](https://groups.google.com/forum/#!forum/kubeflow-discuss)

Makefile
View File

@ -0,0 +1,283 @@
# Copyright 2020-2021 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Acknowledgements:
# - The help target was derived from https://stackoverflow.com/a/35730328/5601796
VENV ?= .venv
KFP_TEKTON_RELEASE ?= v1.9.2
export VIRTUAL_ENV := $(abspath ${VENV})
export PATH := ${VIRTUAL_ENV}/bin:${PATH}
DOCKER_REGISTRY ?= aipipeline
GITHUB_ACTION ?= false
.PHONY: help
help: ## Display the Make targets
@grep -E '^[0-9a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-25s\033[0m %s\n", $$1, $$2}'
.PHONY: venv
venv: $(VENV)/bin/activate ## Create and activate virtual environment
$(VENV)/bin/activate: sdk/python/setup.py
# create/update the VENV when there was a change to setup.py
# check if kfp-tekton is already installed (Travis/CI did during install step)
# use pip from the specified VENV as opposed to any pip available in the shell
@echo "VENV=$(VENV)"
@test -d $(VENV) || python3 -m venv $(VENV)
@$(VENV)/bin/pip show kfp-tekton >/dev/null 2>&1 || $(VENV)/bin/pip install -e sdk/python
@if [ "$(GITHUB_ACTION)" = "false" ]; then touch $(VENV)/bin/activate; fi
.PHONY: install
install: venv ## Install the kfp_tekton compiler in a virtual environment
@echo "Run 'source $(VENV)/bin/activate' to activate the virtual environment."
.PHONY: validate-generated-test-yamls
validate-generated-test-yamls:
@echo "=================================================================="
@echo "Reporting files with same non-inlined and inlined generated yamls in testdata:"
@find sdk/python/tests/compiler/testdata \
\( -type f -name "*yaml" -name "*noninlined.yaml" \) | sort -z >/tmp/validate-generated-test-yamls_total
@find sdk/python/tests/compiler/testdata \
\( -type f -name "*yaml" -name "*noninlined.yaml" \) | \
sed -n -e 's/\(.*\)_noninlined.yaml/\1/p' | \
xargs -n1 -I '{}' diff -q '{}.yaml' '{}_noninlined.yaml' | cut -f4 -d' ' | \
sort -z >/tmp/validate-generated-test-yamls_valid
@echo "=================================================================="
@echo "Noninlined and inlined testdata yamls, having same content."
@diff -a /tmp/validate-generated-test-yamls_total /tmp/validate-generated-test-yamls_valid
@echo "$@: OK"
.PHONY: validate-testdata
validate-testdata:
@cd tekton-catalog/pipeline-loops/ && make validate-testdata-python-sdk
@echo "$@: OK"
.PHONY: validate-pipelineloop-examples
validate-pipelineloop-examples:
@cd tekton-catalog/pipeline-loops/ && make validate-examples
@echo "$@: OK"
.PHONY: unit_test
unit_test: venv ## Run compiler unit tests
@echo "=================================================================="
@echo "Optional environment variables to configure $@, examples:"
@sed -n -e 's/# *\(make $@ .*\)/ \1/p' sdk/python/tests/compiler/compiler_tests.py
@echo "=================================================================="
@pip show pytest > /dev/null 2>&1 || pip install pytest
@sdk/python/tests/run_tests.sh
@echo "$@: OK"
.PHONY: ci_unit_test
ci_unit_test: unit_test
.PHONY: e2e_test
e2e_test: venv ## Run compiler end-to-end tests (requires kubectl and tkn CLI)
@echo "=================================================================="
@echo "Optional environment variables to configure $@, examples:"
@sed -n -e 's/# *\(make $@ .*\)/ \1/p' sdk/python/tests/compiler/compiler_tests_e2e.py
@echo "=================================================================="
@which kubectl > /dev/null || (echo "Missing kubectl CLI" && exit 1)
@test -z "${KUBECONFIG}" && echo "KUBECONFIG not set" || echo "KUBECONFIG=${KUBECONFIG}"
@kubectl version --short || (echo "Failed to access kubernetes cluster" && exit 1)
@which tkn > /dev/null || (echo "Missing tkn CLI" && exit 1)
@sdk/python/tests/run_e2e_tests.sh
@echo "$@: OK"
.PHONY: test
test: unit_test e2e_test ## Run compiler unit tests and end-to-end tests
@echo "$@: OK"
.PHONY: report
report: ## Report compilation status of KFP testdata DSL scripts
@sdk/python/tests/test_kfp_samples.sh
@echo "$@: OK"
.PHONY: lint
lint: venv ## Check Python code style compliance
@which flake8 > /dev/null || pip install flake8
@flake8 sdk/python --show-source --statistics \
--select=E9,E2,E3,E5,F63,F7,F82,F4,F841,W291,W292 \
--per-file-ignores sdk/python/tests/compiler/testdata/*:F841,F821 \
--max-line-length=140
@echo "$@: OK"
.PHONY: check_license
check_license: ## Check for license header in source files
@find ./sdk/python -type f \( -name '*.py' -o -name '*.yaml' \) -exec \
grep -H -E -o -c 'Copyright 20.* kubeflow.org' {} \; | \
grep -E ':0$$' | sed 's/..$$//' | \
grep . && echo "The files listed above are missing the license header" && exit 1 || \
echo "$@: OK"
.PHONY: check_mdtoc
check_mdtoc: ## Check Markdown files for a valid Table of Contents
@find guides samples sdk *.md -type f -name '*.md' -exec \
grep -l -i 'Table of Contents' {} \; | sort | \
while read -r md_file; do \
grep -oE '^ *[-+*] \[[^]]+\]\(#[^)]+\)' "$${md_file}" | sed -e 's/[-+*] /- /g' > md_file_toc; \
./tools/mdtoc.sh "$${md_file}" > generated_toc; \
diff -w md_file_toc generated_toc || echo "$${md_file}"; \
rm -f md_file_toc generated_toc; \
done | grep . && echo "Run './tools/mdtoc.sh <md-file>' to update the 'Table of Contents' in the Markdown files reported above." && exit 1 || \
echo "$@: OK"
.PHONY: check_doc_links
check_doc_links: ## Check Markdown files for valid links
@pip3 show requests > /dev/null || pip install requests
@python3 tools/python/verify_doc_links.py
@echo "$@: OK"
.PHONY: verify
verify: check_license check_mdtoc check_doc_links lint unit_test report ## Run all verification targets: check_license, check_mdtoc, check_doc_links, lint, unit_test, report
@echo "$@: OK"
.PHONY: distribution
distribution: venv ## Create a distribution and upload to test.PyPi.org
@echo "NOTE: Using test.PyPi.org -- edit Makefile to target real PyPi index"
@twine --version > /dev/null 2>&1 || pip install twine
@cd sdk/python && \
rm -rf dist/ && \
python3 setup.py sdist && \
twine check dist/* && \
twine upload --repository testpypi dist/*
.PHONY: build
build: ## Create GO vendor directories with all dependencies
go mod vendor
# Extract go licenses into a single file. This assumes licext is installed globally through
# npm install -g license-extractor
# See https://github.com/arei/license-extractor
licext --mode merge --source vendor/ --target third_party/license.txt --overwrite
# Delete vendor directory
rm -rf vendor
.PHONY: build-release-template
build-release-template: ## Build KFP Tekton release deployment templates
@mkdir -p install/$(KFP_TEKTON_RELEASE)
@kustomize build manifests/kustomize/env/kfp-template -o install/$(KFP_TEKTON_RELEASE)/kfp-tekton.yaml
.PHONY: build-backend
build-backend: build-apiserver build-agent build-workflow build-cacheserver ## Build apiserver, agent, workflow, and cacheserver
@echo "$@: OK"
.PHONY: build-apiserver
build-apiserver: ## Build apiserver
go build -o apiserver ./backend/src/apiserver
.PHONY: build-agent
build-agent: ## Build agent
go build -o agent ./backend/src/agent/persistence
.PHONY: build-workflow
build-workflow: ## Build workflow
go build -o workflow ./backend/src/crd/controller/scheduledworkflow/*.go
.PHONY: build-cacheserver
build-cacheserver: ## Build cache
go build -o cache ./backend/src/cache/*.go
.PHONY: build-backend-images
build-backend-images: \
build-api-server-image \
build-persistenceagent-image \
build-metadata-writer-image \
build-scheduledworkflow-image \
build-cacheserver-image \
## Build backend docker images
@echo "$@: OK"
.PHONY: build-v2-custom-controller-images
build-v2-custom-controller-images: \
build-tekton-exithandler-controller-image \
build-tekton-exithandler-webhook-image \
build-tekton-kfptask-controller-image \
build-tekton-kfptask-webhook-image \
## Build V2 Tekton custom task controller images
@echo "$@: OK"
.PHONY: build-api-server-image
build-api-server-image: ## Build api-server docker image
docker build -t ${DOCKER_REGISTRY}/api-server -f backend/Dockerfile .
.PHONY: build-persistenceagent-image
build-persistenceagent-image: ## Build persistenceagent docker image
docker build -t ${DOCKER_REGISTRY}/persistenceagent -f backend/Dockerfile.persistenceagent .
.PHONY: build-metadata-writer-image
build-metadata-writer-image: ## Build metadata-writer docker image
docker build -t ${DOCKER_REGISTRY}/metadata-writer -f backend/metadata_writer/Dockerfile .
.PHONY: build-scheduledworkflow-image
build-scheduledworkflow-image: ## Build scheduledworkflow docker image
docker build -t ${DOCKER_REGISTRY}/scheduledworkflow -f backend/Dockerfile.scheduledworkflow .
.PHONY: build-cacheserver-image
build-cacheserver-image: ## Build cacheserver docker image
docker build -t ${DOCKER_REGISTRY}/cache-server -f backend/Dockerfile.cacheserver .
.PHONY: build-tekton-exithandler-controller-image
build-tekton-exithandler-controller-image: ## Build tekton-exithandler-controller docker image
@cd tekton-catalog/tekton-exithandler/ && docker build -t ${DOCKER_REGISTRY}/tekton-exithandler-controller -f Dockerfile.tekton-exithandler.controller .
.PHONY: build-tekton-exithandler-webhook-image
build-tekton-exithandler-webhook-image: ## Build tekton-exithandler-webhook docker image
@cd tekton-catalog/tekton-exithandler/ && docker build -t ${DOCKER_REGISTRY}/tekton-exithandler-webhook -f Dockerfile.tekton-exithandler.webhook .
.PHONY: build-tekton-kfptask-controller-image
build-tekton-kfptask-controller-image: ## Build tekton-kfptask-controller docker image
@cd tekton-catalog/tekton-kfptask/ && docker build -t ${DOCKER_REGISTRY}/tekton-kfptask-controller -f Dockerfile.tekton-kfptask.controller .
.PHONY: build-tekton-kfptask-webhook-image
build-tekton-kfptask-webhook-image: ## Build tekton-kfptask-webhook docker image
@cd tekton-catalog/tekton-kfptask/ && docker build -t ${DOCKER_REGISTRY}/tekton-kfptask-webhook -f Dockerfile.tekton-kfptask.webhook .
.PHONY: build-tekton-driver-image
build-tekton-driver-image: ## Build tekton-driver docker image
@cd tekton-catalog/tekton-driver/ && docker build -t ${DOCKER_REGISTRY}/tekton-driver -f Dockerfile.tektondriver .
.PHONY: run-go-unittests
run-go-unittests: \
run-apiserver-unittests \
run-common-unittests \
run-crd-unittests \
run-persistenceagent-unittests \
run-cacheserver-unittests \
run-tekton-exithandler-unittests \
run-tekton-kfptask-unittests \
## Verify go backend unit tests
@echo "$@: OK"
run-apiserver-unittests: # apiserver golang unit tests
go test -v -cover ./backend/src/apiserver/...
run-common-unittests: # common golang unit tests
go test -v -cover ./backend/src/common/...
run-crd-unittests: # crd golang unit tests
go test -v -cover ./backend/src/crd/...
run-persistenceagent-unittests: # persistence agent golang unit tests
go test -v -cover ./backend/src/agent/...
run-cacheserver-unittests: # cache golang unit tests
go test -v -cover ./backend/src/cache/...
run-tekton-exithandler-unittests: # tekton-exithandler golang unit tests
@cd tekton-catalog/tekton-exithandler/ && go test -v -cover ./...
run-tekton-kfptask-unittests: # tekton-kfptask golang unit tests
@cd tekton-catalog/tekton-kfptask/ && go test -v -cover ./...
run-tekton-driver-unittests: # tekton-driver golang unit tests
@cd tekton-catalog/tekton-driver/ && go test -v -cover ./...

OWNERS
View File

@ -1,8 +1,17 @@
approvers:
- chensun
- IronPan
- james-jwu
- zijianjoy
- animeshsingh
- ckadner
- Tomcli
- fenglixa
- pugangxa
- scrapcodes
- yhwang
- rafalbigaj
reviewers:
- chensun
- zijianjoy
- ckadner
- Tomcli
- fenglixa
- pugangxa
- jinchihe
- scrapcodes
- yhwang

README.md
View File

@ -1,81 +1,68 @@
[![Coverage Status](https://coveralls.io/repos/github/kubeflow/pipelines/badge.svg?branch=master)](https://coveralls.io/github/kubeflow/pipelines?branch=master)
[![SDK Documentation Status](https://readthedocs.org/projects/kubeflow-pipelines/badge/?version=latest)](https://kubeflow-pipelines.readthedocs.io/en/stable/?badge=latest)
[![SDK Package version](https://img.shields.io/pypi/v/kfp?color=%2334D058&label=pypi%20package)](https://pypi.org/project/kfp)
[![SDK Supported Python versions](https://img.shields.io/pypi/pyversions/kfp.svg?color=%2334D058)](https://pypi.org/project/kfp)
# Kubeflow Pipelines on Tekton
# Kubeflow Pipelines on Tekton (KFP-Tekton)
Project bringing Kubeflow Pipelines and Tekton together. The current code allows you to run Kubeflow Pipelines with a Tekton backend end to end.
You can use the [Kubeflow Pipelines SDK v2](https://www.kubeflow.org/docs/components/pipelines/v2/introduction/) to compose an ML pipeline,
generate the Intermediate Representation (IR), and run it on KFP-Tekton.
Project bringing Kubeflow Pipelines and Tekton together. The project is driven
according to this [design doc](http://bit.ly/kfp-tekton). The current code allows you to run Kubeflow Pipelines with a Tekton backend end to end.
To install the KFP-Tekton v2 on any Kubernetes cluster, please follow the instructions below:
```bash
cd manifests/kustomize
KFP_ENV=platform-agnostic-tekton
kubectl apply -k cluster-scoped-resources/
kubectl wait crd/applications.app.k8s.io --for condition=established --timeout=60s
kubectl apply -k "env/${KFP_ENV}/"
kubectl wait pods -l application-crd-id=kubeflow-pipelines -n kubeflow --for condition=Ready --timeout=1800s
kubectl port-forward -n kubeflow svc/ml-pipeline-ui 8080:80
```
Now you can access Kubeflow Pipelines UI in your browser by <http://localhost:8080>.
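
As a quick sketch of the SDK flow described above (assuming the v2 `kfp` SDK is installed and `my_pipeline.py` defines a pipeline; both names here are hypothetical):

```bash
pip install kfp                      # Kubeflow Pipelines SDK v2
# Compile the pipeline to its intermediate representation (IR) YAML
kfp dsl compile --py my_pipeline.py --output my_pipeline.yaml
# Then upload and run my_pipeline.yaml through the KFP UI at http://localhost:8080
```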
* Create your Pipeline using Kubeflow Pipelines DSL, and compile it to Tekton
YAML.
* Upload the compiled Tekton YAML to the KFP engine (API and UI), and run end to end
with logging and artifact tracking enabled.
* In KFP-Tekton V2, the SDK compiler generates the same intermediate representation as the main Kubeflow Pipelines SDK. All the Tekton-related implementations are embedded in the V2 backend API service.
For more details about the project, please follow this detailed [blog post](https://developer.ibm.com/blogs/awb-tekton-optimizations-for-kubeflow-pipelines-2-0). For the latest KFP-Tekton V2 implementation and [supported offerings](https://developer.ibm.com/articles/advance-machine-learning-workflows-with-ibm-watson-pipelines/), please follow our latest [Kubecon Talk](https://www.youtube.com/watch?v=ecx-yp4g7YU) and [slides](https://docs.google.com/presentation/d/1Su42ApXzZvVwhNSYRAk3bd0heHOtrdEX/edit?usp=sharing&ouid=103716780892927252554&rtpof=true&sd=true). For information on the KFP-Tekton V1 implementation, see these [slides](https://www.slideshare.net/AnimeshSingh/kubeflow-pipelines-with-tekton-236769976) as well as this [deep dive presentation](https://www.youtube.com/watch?v=AYIeNtXLT_k) for demos.
## Architecture
We are currently using [Kubeflow Pipelines 1.8.4](https://github.com/kubeflow/pipelines/releases/tag/1.8.4) and
[Tekton >= 0.53.2](https://github.com/tektoncd/pipeline/releases/tag/v0.53.2)
in the master branch for this project.
For [Kubeflow Pipelines 2.0.5](https://github.com/kubeflow/pipelines/releases/tag/2.0.5) and
[Tekton >= 0.53.2](https://github.com/tektoncd/pipeline/releases/tag/v0.53.2)
integration, please check out the [kfp-tekton v2-integration](https://github.com/kubeflow/kfp-tekton/tree/v2-integration) branch and [KFP-Tekton V2 deployment](/guides/kfp_tekton_install.md#standalone-kubeflow-pipelines-v2-with-tekton-backend-deployment) instead.
![kfp-tekton](images/kfp-tekton.png)
Everything below this point is the original documentation from Kubeflow Pipelines.
## Overview of the Kubeflow pipelines service
Kubeflow Pipelines is a platform for building and deploying portable, scalable machine learning (ML) workflows. More architectural details about the Kubeflow Pipelines can be found on the [Kubeflow website](https://www.kubeflow.org/docs/components/pipelines/overview/).
[Kubeflow](https://www.kubeflow.org/) is a machine learning (ML) toolkit that is dedicated to making deployments of ML workflows on Kubernetes simple, portable, and scalable.
The Tekton Pipelines project provides Kubernetes-style resources for declaring
CI/CD-style pipelines. Tekton introduces several [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) (CRDs), including Task, Pipeline, TaskRun, and PipelineRun. A PipelineRun represents a single running instance of a Pipeline and is responsible for creating a Pod for each of its Tasks and as many containers within each Pod as it has Steps. Please see the [Tekton repo](https://github.com/tektoncd/pipeline) for more details.
**Kubeflow pipelines** are reusable end-to-end ML workflows built using the Kubeflow Pipelines SDK.
### Get Started using Kubeflow Pipelines on Tekton
The Kubeflow pipelines service has the following goals:
[Install Kubeflow Pipelines with Tekton backend](/guides/kfp_tekton_install.md)
* End to end orchestration: enabling and simplifying the orchestration of end to end machine learning pipelines
* Easy experimentation: making it easy for you to try numerous ideas and techniques, and manage your various trials/experiments.
* Easy re-use: enabling you to re-use components and pipelines to quickly cobble together end to end solutions, without having to re-build each time.
[KFP Tekton Pipelines User Guide](/guides/kfp-user-guide/README.md)
## Installation
[Use KFP Tekton SDK](/sdk/README.md)
* Install Kubeflow Pipelines from choices described in [Installation Options for Kubeflow Pipelines](https://www.kubeflow.org/docs/pipelines/installation/overview/).
[Run Samples](/samples/README.md)
* The Docker container runtime has been deprecated on Kubernetes 1.20+. Kubeflow Pipelines has switched to use [Emissary Executor](https://www.kubeflow.org/docs/components/pipelines/installation/choose-executor/#emissary-executor) by default from Kubeflow Pipelines 1.8. Emissary executor is Container runtime agnostic, meaning you are able to run Kubeflow Pipelines on Kubernetes cluster with any [Container runtimes](https://kubernetes.io/docs/setup/production-environment/container-runtimes/).
[Available KFP DSL Features](/sdk/FEATURES.md)
## Documentation
[Tekton Specific Features](/guides/advanced_user_guide.md)
Get started with your first pipeline and read further information in the [Kubeflow Pipelines overview](https://www.kubeflow.org/docs/components/pipelines/introduction/).
### Development Guides
See the various ways you can [use the Kubeflow Pipelines SDK](https://www.kubeflow.org/docs/pipelines/sdk/sdk-overview/).
[Backend Developer Guide](/guides/developer_guide.md)
See the Kubeflow [Pipelines API doc](https://www.kubeflow.org/docs/pipelines/reference/api/kubeflow-pipeline-api-spec/) for API specification.
[SDK Developer Guide](/sdk/python/README.md)
Consult the [Python SDK reference docs](https://kubeflow-pipelines.readthedocs.io/en/stable/) when writing pipelines using the Python SDK.
[Compilation Tests Status Report](/sdk/python/tests/README.md)
Refer to the [versioning policy](./docs/release/versioning-policy.md) and [feature stages](./docs/release/feature-stages.md) documentation for more information about how we manage versions and feature stages (such as Alpha, Beta, and Stable).
### Design Guides
## Contributing to Kubeflow Pipelines
[Design Doc](http://bit.ly/kfp-tekton)
Before you start contributing to Kubeflow Pipelines, read the guidelines in [How to Contribute](./CONTRIBUTING.md). To learn how to build and deploy Kubeflow Pipelines from source code, read the [developer guide](./developer_guide.md).
[KFP, Argo and Tekton Features Comparison](https://docs.google.com/spreadsheets/d/1LFUy86MhVrU2cRhXNsDU-OBzB4BlkT9C0ASD3hoXqpo/edit#gid=979402121)
### Community
## Kubeflow Pipelines Community Meeting
[Kubeflow Slack](https://join.slack.com/t/kubeflow/shared_invite/zt-cpr020z4-PfcAue_2nw67~iIDy7maAQ)
The meeting is happening every other Wed 10-11AM (PST)
[Calendar Invite](https://calendar.google.com/event?action=TEMPLATE&tmeid=NTdoNG5uMDBtcnJlYmdlOWt1c2lkY25jdmlfMjAxOTExMTNUMTgwMDAwWiBqZXNzaWV6aHVAZ29vZ2xlLmNvbQ&tmsrc=jessiezhu%40google.com&scp=ALL) or [Join Meeting Directly](https://meet.google.com/phd-ixfj-kcr/)
### References
[Meeting notes](http://bit.ly/kfp-meeting-notes)
[Kubeflow and TFX Pipelines](/samples/kfp-tfx)
## Kubeflow Pipelines Slack Channel
[#kubeflow-pipelines](https://kubeflow.slack.com)
## Blog posts
* [Getting started with Kubeflow Pipelines](https://cloud.google.com/blog/products/ai-machine-learning/getting-started-kubeflow-pipelines) (By Amy Unruh)
* How to create and deploy a Kubeflow Machine Learning Pipeline (By Lak Lakshmanan)
* [Part 1: How to create and deploy a Kubeflow Machine Learning Pipeline](https://towardsdatascience.com/how-to-create-and-deploy-a-kubeflow-machine-learning-pipeline-part-1-efea7a4b650f)
* [Part 2: How to deploy Jupyter notebooks as components of a Kubeflow ML pipeline](https://towardsdatascience.com/how-to-deploy-jupyter-notebooks-as-components-of-a-kubeflow-ml-pipeline-part-2-b1df77f4e5b3)
* [Part 3: How to carry out CI/CD in Machine Learning (“MLOps”) using Kubeflow ML pipelines](https://medium.com/google-cloud/how-to-carry-out-ci-cd-in-machine-learning-mlops-using-kubeflow-ml-pipelines-part-3-bdaf68082112)
* [Kubeflow Pipelines meets Tekton](https://developer.ibm.com/blogs/kubeflow-pipelines-with-tekton-and-watson/) (By Animesh Singh)
## Acknowledgments
Kubeflow pipelines uses [Argo Workflows](https://github.com/argoproj/argo-workflows) by default under the hood to orchestrate Kubernetes resources. The Argo community has been very supportive and we are very grateful. Additionally there is Tekton backend available as well. To access it, please refer to [Kubeflow Pipelines with Tekton repository](https://github.com/kubeflow/kfp-tekton).
[Kubeflow and TFX Pipelines talk at Tensorflow World](https://www.slideshare.net/AnimeshSingh/hybrid-cloud-kubeflow-and-tensorflow-extended-tfx)

View File

@ -1,328 +0,0 @@
# Kubeflow Pipelines Release Process
<!-- This TOC is auto generated by "markdown all in one" VS Code plugin -->
- [Kubeflow Pipelines Release Process](#kubeflow-pipelines-release-process)
- [Schedule](#schedule)
- [Release Tags and Branches](#release-tags-and-branches)
- [Contributor Instructions](#contributor-instructions)
- [Cherry picking pull requests to release branch](#cherry-picking-pull-requests-to-release-branch)
- [Option - (Recommended) cherrypick-approved label](#option---recommended-cherrypick-approved-label)
- [Option - git cherry-pick](#option---git-cherry-pick)
- [Release Manager Instructions](#release-manager-instructions)
- [Common Prerequisites](#common-prerequisites)
- [Cutting a release branch (Optional)](#cutting-a-release-branch-optional)
- [Before release](#before-release)
- [Releasing from release branch](#releasing-from-release-branch)
- [Release Process Development](#release-process-development)
## Schedule
Kubeflow Pipelines has weekly patch releases and monthly minor releases.
Patch releases contain only bug fixes, while minor releases additionally
include new features.
## Release Tags and Branches
Releases are tagged with tags like `X.Y.Z`, e.g. `1.0.2`. A special format like
`1.0.0-rc.2` is a pre-release. It is the second release candidate before
releasing the final `1.0.0`.
A release branch has the name structure `release-X.Y` where `X.Y` stands for the
minor version. Releases like `X.Y.Z` will all be released from the branch
`release-X.Y`.
For example, `1.0.2` release should be on `release-1.0` branch.
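For example, you can sanity check which release tags live on a given release branch (branch and tag names below are illustrative):
```bash
# List all 1.0.* tags reachable from the release-1.0 branch.
git tag --merged release-1.0 | grep '^1\.0\.'
```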
## Contributor Instructions
The following section targets contributors. No need to read further if you only
want to use Kubeflow Pipelines.
### Cherry picking pull requests to release branch
After the `release-X.Y` release branch is cut, pull requests (PRs) merged to
master will only be released in the next minor release `X.(Y+1).0`.
If you want your PR released earlier in a patch release `X.Y.(Z+1)`:
- The PR must already be merged to the master branch.
- The PR should be a bug fix.
- The PR should be cherry picked to the corresponding release branch `release-X.Y`.
Choose one of the following options for cherry picking your PR to release branch.
#### Option - (Recommended) cherrypick-approved label
Contributors should ask the OWNERS who approved the PR to add a `cherrypick-approved`
label if they want the PR cherry picked to the release branch.
The release manager will periodically, or right before a release, search for all merged PRs with
`cherrypick-approved` and cherry pick them into the current release branch.
#### Option - git cherry-pick
- Find the commit you want to cherry pick on master; call it `$COMMIT_SHA`.
- Find the active release branch name `$BRANCH`, e.g. `release-1.0`.
- Cherry pick a commit:
```bash
git checkout $BRANCH
git checkout -b <cherry-pick-pr-branch-name>
git cherry-pick $COMMIT_SHA
```
- Resolve merge conflicts if any
- `git push origin HEAD`
- Create a PR and remember to update the PR's destination branch to `release-$MINOR_VERSION`
- Ask the same OWNERS that would normally need to approve this PR
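Putting the steps above together, a complete cherry pick of a hypothetical commit `abc1234` into `release-1.0` would look like this:
```bash
git fetch upstream
git checkout release-1.0
git checkout -b cherry-pick-abc1234   # illustrative branch name
git cherry-pick abc1234               # resolve conflicts if prompted
git push origin HEAD                  # then open a PR targeting release-1.0
```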
## Release Manager Instructions
The following sections target release managers. You don't need to read further
if you only want to use or contribute to this repo.
### Common Prerequisites
- OS: Linux or MacOS
- Permissions needed
- Can create a branch in github.com/kubeflow/pipelines.
- (Before [#4840](https://github.com/kubeflow/pipelines/issues/4840) is resolved) one needs admin access to the kubeflow/pipelines repo.
- Can trigger cloudbuild jobs in `google.com/ml-pipeline-test` GCP project.
- Tools that should be in your `$PATH`
- docker
- python3
- Preparations
1. Clone github.com/kubeflow/pipelines repo into `$KFP_REPO`.
2. `cd $KFP_REPO`
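For example, a minimal setup consistent with the steps above and with the `origin`/`upstream` remotes used by later commands (replace `<your-github-user>` with the owner of your fork):
```bash
# Clone your fork as "origin" and track the main repo as "upstream".
git clone https://github.com/<your-github-user>/pipelines.git "$KFP_REPO"
cd "$KFP_REPO"
git remote add upstream https://github.com/kubeflow/pipelines.git
git fetch upstream
```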
### Cutting a release branch (Optional)
1. Choose a good commit on the master branch and note its hash as `$COMMIT_SHA`.
1. Choose the next release branch's `$MINOR_VERSION` in the format `x.y`, e.g. `1.0`, `1.1`...
1. Make a release branch of the format `release-$MINOR_VERSION`, e.g. `release-1.0`, `release-1.1`. Branch from the commit and push to the kubeflow/pipelines upstream repo.
```bash
git checkout $COMMIT_SHA
BRANCH=release-$MINOR_VERSION
git checkout -b $BRANCH
git push upstream HEAD
```
### Before release
Do the following things before a release:
1. **(Do this step only when releasing from a NON-master release branch)**
Note: Instead of following this step to cherry pick all PRs, you can also manually cherry pick commits from the master branch to the release branch if the number of PRs to cherry pick is small. Command for a manual cherry pick:
```bash
git cherry-pick <commit-id>
```
If you want to use script to cherry pick all merged PRs with `cherrypick-approved` label:
- Search all merged PRs with `cherrypick-approved`
label, but no `cherrypicked` label using
[this link](https://github.com/kubeflow/pipelines/pulls?q=is%3Apr+label%3Acherrypick-approved+-label%3Acherrypicked+is%3Aclosed+sort%3Aupdated-asc)
- Use the git cherry-pick option to pick these PR commits into the release
branch one by one in a batch and add `cherrypicked` label to these PRs.
NOTE: if there are merge conflicts for a PR, ask the PR author or area OWNER
to create a cherry pick PR themselves following the other two options.
- `git push upstream $BRANCH` directly to the release branch.
There's an automated script that can help you do the above:
```bash
# Prepare your env
cd ~/kubeflow/pipelines
git fetch upstream
git checkout release-1.0
git pull
git checkout -b <your-cherry-pick-branch-name>
# The following command shows usage info
./hack/cherry-pick.sh
# The following command cherry picks PRs #123 #456 #789 for you.
# It runs git cherry-pick for each merged commit, then adds `cherrypicked`
# label on the PR.
#
# If there's a merge conflict in the middle, it will stop there waiting for
# you to resolve it. You need to add the `cherrypicked` label yourself in
# this case. After the issue is resolved, you can rerun the same command and
# PRs already cherry picked (with the label `cherrypicked`) will be skipped.
./hack/cherry-pick.sh 123 456 789
# After the cherry picks are done, they are still in your local repo. Push
# them to your remote branch to create a PR.
git push origin HEAD
```
You can get the list of PRs waiting to be cherrypicked by:
1. Open [cherrypick-approved PRs that haven't been cherrypicked sorted by updated order](https://github.com/kubeflow/pipelines/pulls?q=is%3Apr+label%3Acherrypick-approved+-label%3Acherrypicked+is%3Amerged+sort%3Aupdated-asc+).
1. Open browser console (usually by pressing F12).
1. Paste the following command into the console.
```javascript
console.log(Array.from(document.querySelectorAll('[id^="issue_"][id*="_link"]')).map(el => /issue_(.*)_link/.exec(el.id)[1]).join(' '))
```
1. Verify that cloudbuild and postsubmit tests are passing: visit <https://github.com/kubeflow/pipelines/commits/master> for the master branch.
![How to verify cloudbuild and postsubmit status](release-status-check.png)
If not, contact the KFP team to determine whether the failure(s) would block the release. You can also retry a failed job by opening the detail page of the prow job and clicking the refresh button next to the job title.
### Releasing from release branch
Note: when releasing from master, all mentions of "release branch" below mean the master branch.
1. Choose the release's complete `$VERSION` following semantic versioning, e.g.
- `1.0.0-rc.1`
- `1.0.0-rc.2`
- `1.0.0`
- `1.0.1`
- `1.1.0`
- ...
Set the version by using `VERSION=<version-value>`. Contact @chensun if you are not sure what the next version should be.
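For example, cutting the first release candidate of a hypothetical `1.8.0` release from its release branch:
```bash
VERSION=1.8.0-rc.1   # the full semantic version being released
BRANCH=release-1.8   # the release branch it is cut from
```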
1. Update all version refs in the release branch by
```bash
cd ./test/release && TAG=$VERSION BRANCH=$BRANCH make release
```
It will prompt you whether to push to the release branch. Press `y` and hit `Enter`.
Note: the script clones the kubeflow/pipelines repo into a temporary location on your computer, makes those changes there, and attempts to push to upstream, so it won't interfere with your current git repo.
If you see the error "docker.sock: connect: permission error", you need to [allow managing Docker as a non-root user](https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user).
1. View the related cloudbuild jobs' statuses by clicking the latest commit's status icon
in the release branch. Refer to the screenshot below to find the page.
1. Wait and make sure the `build-each-commit` cloudbuild job that builds all images
in gcr.io/ml-pipeline-test has succeeded. If it fails, click "View more details
on Google Cloud Build" and then "Retry".
NOTE: you can find your latest release commit by visiting <https://github.com/kubeflow/pipelines/commits/master> and selecting your release branch.
![How to verify cloudbuild and postsubmit status](release-status-check.png)
1. Select the `release-on-tag` cloudbuild job that copies built images and artifacts to
the public image registry and GCS bucket. This job should have already failed because
the artifacts hadn't been built yet. Now click "View more details on Google Cloud Build"
and then "Retry"; after the previous step finishes, the artifacts are ready.
NOTE: **DO NOT** click the "Re-run" button from GitHub Actions status page.
It will create a build with "Branch: $BRANCH" instead of "TAG: $VERSION".
Open "View more details on Google Cloud Build", and rerun from there.
![Retry release-on-tag from the Google Cloud Build details page](retry-release-on-tag.png)
![Verify that you're retrying the right build](verify-retry-the-right-build.png)
TODO: we should have an automated KFP cluster, so that waiting and submitting the
`release-on-tag` cloudbuild task happen automatically.
NOTE: postsubmit tests will most likely fail for the release commit; this is expected. Postsubmit
tests start right after the commit lands in the GitHub repo, but some artifacts they depend on are still
being built by the processes in these two steps.
1. Search "PyPI" in Google internal release doc for getting password of kubeflow-pipelines user.
1. Release `kfp-server-api` python packages to PyPI.
```bash
git checkout $BRANCH
git pull upstream $BRANCH
cd backend/api/v2beta1/python_http_client
rm -r dist
python3 setup.py --quiet sdist
python3 -m twine upload --username kubeflow-pipelines dist/*
```
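To sanity check the upload, you can try installing the just-published version (assuming PyPI has finished indexing it):
```bash
python3 -m pip install kfp-server-api==$VERSION --upgrade
```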
1. Release the `kfp` python packages to PyPI. (Note: please skip this step for a backend release; it will be handled by the SDK release.)
```bash
pip3 install twine --user
gsutil cp gs://ml-pipeline/release/$VERSION/kfp.tar.gz kfp-$VERSION.tar.gz
python3 -m twine upload --username kubeflow-pipelines kfp-$VERSION.tar.gz
```
!!! The file name must contain the version. See <https://github.com/kubeflow/pipelines/issues/1292>
1. Create a GitHub release using the `$VERSION` git tag and the title `Version $VERSION`,
and fill in the description. Detailed steps:
1. [Draft a new release](https://github.com/kubeflow/pipelines/releases/new).
1. Type in the version tag field to search for and select the "$VERSION" tag published in the release instructions above.
Its format is like `X.Y.Z` or `X.Y.Z-rc.N`.
1. Use this template for public releases and replace `$VERSION` with the real value.
<pre>
To deploy Kubeflow Pipelines in an existing cluster, follow the instructions [here](https://www.kubeflow.org/docs/pipelines/standalone-deployment-gcp/) or use the UI [here](https://console.cloud.google.com/ai-platform/pipelines)
Install the python SDK (python 3.7 or above) by running:
```bash
python3 -m pip install kfp kfp-server-api --upgrade
```
See the [Change Log](https://github.com/kubeflow/pipelines/blob/$VERSION/CHANGELOG.md)
</pre>
Use this template for prereleases (release candidates) and **PLEASE CHECK** the
***This is a prerelease*** checkbox in the GitHub release UI.
<pre>
To deploy Kubeflow Pipelines in an existing cluster, follow the instructions [here](https://www.kubeflow.org/docs/pipelines/standalone-deployment-gcp/).
Install the kfp-server-api package (python 3.7 or above) by running:
```bash
python3 -m pip install kfp-server-api==$VERSION --upgrade
```
Refer to:
* [Upgrade Notes with notices and breaking changes](https://www.kubeflow.org/docs/components/pipelines/installation/upgrade/)
* [Change Log](https://github.com/kubeflow/pipelines/blob/$VERSION/CHANGELOG.md)
NOTE: the kfp python SDK is **NOT** included; it is released separately.
</pre>
1. **(Do this step only when releasing from a NON-master release branch)**
Update the master branch to the same version and include the latest changelog:
```bash
git checkout master
git pull
git checkout -b <your-branch-name>
# This avoids a trailing line break at the end of the file.
echo -n $VERSION > VERSION
# This takes a while.
pushd test/release
make release-in-place
popd
git checkout $VERSION -- CHANGELOG.md
git add -A
git commit -m "chore(release): bump version to $VERSION on master branch"
```
1. If the current release is not a prerelease, create a PR to update the version in the kubeflow documentation website:
<https://github.com/kubeflow/website/blob/master/layouts/shortcodes/pipelines/latest-version.html>
Note: there **MUST NOT** be a line ending in the file. Editing on GitHub always adds a line ending
for you, so you cannot create this PR in the GitHub UI.
Instead, check out the repo locally and
```bash
echo -n 1.0.0 > layouts/shortcodes/pipelines/latest-version.html
```
and create a PR to update the version, e.g. <https://github.com/kubeflow/website/pull/1942>.
1. Follow the [Upgrade KFP](https://github.com/kubeflow/testing/tree/master/test-infra/kfp) instructions to upgrade KFP manifests in test-infra.
## Release Process Development
Please refer to [./test/release](./test/release).

View File

@ -1,177 +0,0 @@
# Kubeflow Pipelines Roadmap
## Kubeflow Pipelines 2022 Roadmap
### KFP v2
Design: [bit.ly/kfp-v2](https://bit.ly/kfp-v2)
#### KFP v2 Goals
* Enable v2 authoring Critical User Journeys
* Containerized Python component
* Bring-your-own container
* DAG component
* Finalize IR and component packaging format to address feature gaps with v1
#### Post-v2 Goals (tentative)
* Support local development
* Further improve DAG visualization on UI
* Offer advanced control flow features
## Kubeflow Pipelines 2021 Roadmap (major themes)
### KFP v2 compatible
Quick links:
* Design: [bit.ly/kfp-v2-compatible](https://bit.ly/kfp-v2-compatible)
* [Tracker Project](https://github.com/kubeflow/pipelines/projects/13)
* [Documentation](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility/)
#### Goals - v2 compatible
* Enables v2 DSL core features for early user feedback, while retaining backward compatibility with most v1 features.
* Stepping stone for the v1 to v2 migration.
* Validate the v2 system design against performance & scalability requirements.
#### Timeline - v2 compatible
* First beta release late May
* Feature complete mid July
#### New Features in v2 compatible
* Improved Artifact Passing
Improvements/changes below make KFP artifacts easier to integrate with other systems:
* In components, support consuming input artifacts by URI. This is useful for components that launch external jobs using artifact URIs, but do not need to access the data directly by themselves.
* A new intermediate artifact repository feature is designed -- pipeline root. It is configurable at:
* Cluster default
* Namespace defaults
* Authoring pipelines
* Submitting a pipeline
* Pipeline root supports MinIO, S3, GCS natively using Go CDK.
* Artifacts are no longer compressed by default.
* Artifacts with metadata
* Support for components that can consume MLMD metadata.
* Support for components that can produce/update MLMD-based metadata.
* Visualizations
* [#5668](https://github.com/kubeflow/pipelines/issues/5668) Visualize v2 metrics -- components can output metrics artifacts that are rendered in UI. [sample pipeline](https://github.com/kubeflow/pipelines/blob/307e91aaae5e9c71dde1fddaffa10ffd751a40e8/samples/test/metrics_visualization_v2.py#L103)
* [#3970](https://github.com/kubeflow/pipelines/issues/3970) Easier visualizations: HTML, Markdown, etc using artifact type + metadata.
* v2 python components.
* A convenient component authoring method designed to support new features above natively. (v1 components do not completely support all the features mentioned above)
* [samples/test/lightweight_python_functions_v2_with_outputs.py](https://github.com/kubeflow/pipelines/blob/master/samples/test/lightweight_python_functions_v2_with_outputs.py)
* [samples/test/lightweight_python_functions_v2_pipeline.py](https://github.com/kubeflow/pipelines/blob/master/samples/test/lightweight_python_functions_v2_pipeline.py)
* [#5669](https://github.com/kubeflow/pipelines/issues/5669) KFP semantics in MLMD
* MLMD state with exact KFP semantics (e.g. parameter / artifact / task names are the same as in the DSL). This will enable use-cases like "querying a result artifact from a pipeline run using the MLMD API and then using the result in another system or another pipeline".
[Example pipeline](https://github.com/kubeflow/pipelines/blob/master/samples/test/lightweight_python_functions_v2_pipeline.py) and [corresponding MLMD state](https://github.com/kubeflow/pipelines/blob/master/samples/test/lightweight_python_functions_v2_pipeline_test.py).
* [#5670](https://github.com/kubeflow/pipelines/issues/5670) Revamp KFP UI to show inputs & outputs in KFP semantics for v2 compatible pipelines
* [#5667](https://github.com/kubeflow/pipelines/issues/5667) KFP data model based caching using MLMD.
### KFP v2
Design: [bit.ly/kfp-v2](https://bit.ly/kfp-v2)
#### KFP v2 Goals
* Data Management:
* build first class support for metadata -- recording, presentation and orchestration.
* making it easy to keep track of all the data produced by machine learning pipelines and how it was computed.
* KFP native (and argo agnostic) spec and status: define a clear interface for KFP, so that other systems can understand KFP pipeline spec and status in KFP semantics.
* Gaining more control over KFP runtime behavior, so that it sets up a solid foundation for us to add new features to KFP. This wasn't a goal initially coming from use cases; however, the more we innovate on Data Management and a KFP-native spec/status, the clearer it becomes that other workflow systems limit how we may implement new KFP features on our own. Therefore, re-architecting KFP to gain more control over runtime behavior is ideal for achieving KFP's long-term goals.
* Be backward compatible with KFP v1: for existing features, we want to keep them as backward compatible as possible to ease upgrade.
#### Timeline - v2
* Start work after v2 compatible is feature complete
* Alpha release in October
* Beta/Stable release timelines TBD
#### Planned Features in KFP v2
* KFP v2 DSL (TBD)
* Use [the pipeline spec](https://github.com/kubeflow/pipelines/blob/master/api/v2alpha1/pipeline_spec.proto) as pipeline description data structure. The new spec is argo workflow agnostic and can be a shared common format for different underlying engines.
* Design and implement a pipeline run status API (also argo agnostic).
* KFP v2 DAG UI
* KFP semantics.
* Convenient features: panning, zooming, etc.
* Control flow features
* Reusable subgraph component.
* Subgraph component supports return value and aggregating from parallel for.
* Caching improvement: skipped tasks will not execute at all (in both v1 and v2 compatible, skipped tasks will still run a Pod which does not do anything).
* TFX on KFP v2.
### Other Items
* [#3857](https://github.com/kubeflow/pipelines/issues/3857) Set up a Vulnerability Scanning Process.
## Kubeflow Pipelines 2019 Roadmap
### 2019 Overview
This document outlines the main directions of the Kubeflow Pipelines (KFP) project in 2019.
### Production Readiness
We will continue developing capabilities for better reliability, scaling, and maintenance of production ML systems built with Kubeflow Pipelines.
* Ability to easily upgrade KFP system components to new versions and apply fixes to a live cluster without losing state
* Ability to externalize the critical metadata state to a data store outside of the cluster lifetime
* Ability to configure a standard cluster-wide persistent storage that all pipelines can share, connected to any cloud or on-prem storage system
* Easy deployment of KFP system services
### Connector Components
To make it easy to use KFP within an ecosystem of other cloud services, and to take advantage of the scale and other capabilities of job scheduling and data processing services, KFP will build a framework for reliable connections to other services. Google will extend the framework and contribute a few specific connector components:
* Connectors to DataProc (Spark), DataFlow (Beam), BigQuery, Cloud ML Engine
### Metadata Store and API
As a foundational layer in the ML system, KFP will introduce an extensible and scalable metadata store for tracking versioning, dependencies, and provenance of artifacts and executables. The metadata store will be usable from any other KF component to help users easily connect artifacts to their origins, metrics, and effects and consumption points.
* Metadata Store and API
* Automatic tracking of pipelines, pipeline steps, parameters, and artifacts
* Extensible Type System and Standard Types for most common ML artifacts (models, datasets, metrics, visualizations)
### Shareable Components and Pipelines Model
To make it easy for users to share and consume KFP components within and outside of an organization, KFP will improve the sharing capabilities in the KFP SDK:
* Component configuration for easy sharing of components through file sharing and source control
* Ability to represent a pipeline as a component for use in other pipelines
### Enhanced UI and Notebooks
KFP UI will continue to improve so that operating KFP clusters and managing KFP resources is more intuitive:
* Metadata UI to provide an exploration and search experience over artifacts and types
* Ability to use Notebooks outside of the K8S cluster to build and control pipeline execution
* Controls for viewing pipeline topology and execution results within Notebooks
### Pipeline execution and debugging
To make it more efficient to run ML experiments, KFP will add features for faster iteration over experiments, better control, and transparency of the execution engine:
* Support for caching of pipeline artifacts and the ability to use the artifacts cache to accelerate pipeline re-execution. This will allow steps that have already executed to be skipped on subsequent runs.
* Ability to stop/restart pipeline jobs
* Ability to track pipeline dependencies and resources created/used by a pipeline job
### Data and Event driven scheduling
Many ML workflows are more naturally triggered by data availability or external events than by manual scheduling. KFP will have native support for data-driven and event-driven workflows. KFP will provide the ability to configure pipeline execution upon the appearance of certain entries in the metadata store, making it easy to create complex CI pipelines orchestrated around key artifacts, such as models.
### Enhanced DSL workflow control
The KFP SDK for defining the pipeline topologies and component dependencies will add more advanced control operators for organizing workflow loops, parallel for-each, and enhanced conditions support.
<EOD>

View File

@ -1,9 +1,17 @@
# Private Security Vulnerability Reporting

When reporting a vulnerability, please include a description of the issue, the steps you took to create the issue, affected versions, and, if known, mitigations for the issue. If the issue is confirmed as a vulnerability, we will open a Security Advisory. This project follows a 90 day disclosure timeline.

To report a security issue, please choose one of the options below:

- Use [https://g.co/vulnz](https://g.co/vulnz). We use `g.co/vulnz` for our intake, and do coordination and disclosure here on GitHub (including using GitHub Security Advisory). The Google Security Team will respond within 5 working days of your report on `g.co/vulnz`.
- Report a security vulnerability privately via GitHub's built-in function by following [these instructions](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability).

# Security Policy

## Supported Versions

Below is the list of KFP-Tekton versions supported with security and bug fixes.

| Version | Supported          |
| ------- | ------------------ |
| 2.0.x   | :white_check_mark: |
| 1.9.x   | :white_check_mark: |
| 1.8.x   | :white_check_mark: |
| 1.7.x   | :white_check_mark: |
| < 1.7   | :x:                |

## Reporting a Vulnerability

Please submit [an issue](https://github.com/kubeflow/kfp-tekton/issues) in the KFP-Tekton repo for any vulnerability you find in this project. If the vulnerability should not be exposed in open source, then please submit the vulnerability to the Kubeflow Slack channel `#external-kfp-tekton` or direct message `Tommy Li`.

View File

@ -1 +1 @@
2.0.1
1.9.2

api/.gitignore vendored
View File

@ -1,3 +0,0 @@
# Dependencies need to be downloaded via the Makefile.
v2alpha1/google
v2alpha1/python/kfp/pipeline_spec/*pb2.py

View File

@ -1,85 +0,0 @@
# Copyright 2022 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Contact one of Bobgy, capri-xiyue or zijianjoy if this remote image needs an update.
PREBUILT_REMOTE_IMAGE=gcr.io/ml-pipeline-test/api-generator:latest
.PHONY: all
all: golang python
.PHONY: clean
clean: clean-go clean-python
# Generate proto packages using a pre-built api-generator image.
.PHONY: golang
golang: v2alpha1/*.proto
docker run --interactive --rm \
--user $$(id -u):$$(id -g) \
--mount type=bind,source="$$(pwd)/..",target=/go/src/github.com/kubeflow/pipelines \
$(PREBUILT_REMOTE_IMAGE) \
sh -c 'cd /go/src/github.com/kubeflow/pipelines/api && make generate'
# Delete all generated proto go packages.
.PHONY: clean-go
clean-go:
rm -rf v2alpha1/go
rm -f v2alpha1/google/rpc/status.proto
# Generate Python package.
.PHONY: python
python: v2alpha1/pipeline_spec.proto v2alpha1/google/rpc/status.proto
python3 v2alpha1/python/generate_proto.py && cd v2alpha1/python && python3 setup.py bdist_wheel
# Delete all generated Python packages
.PHONY: clean-python
clean-python:
rm -rf v2alpha1/python/build
rm -rf v2alpha1/python/dist
rm -rf v2alpha1/python/kfp_pipeline_spec.egg-info
rm -f v2alpha1/python/kfp/pipeline_spec/pipeline_spec_pb2.py
rm -f v2alpha1/google/rpc/status.proto
##########################
# The following are IMPLEMENTATION DETAILS.
##########################
# Generates proto packages locally, this should only be called:
# * during development
# * inside the prebuilt docker container
.PHONY: generate
generate: go_pipelinespec go_cachekey
go_pipelinespec: v2alpha1/pipeline_spec.proto v2alpha1/google/rpc/status.proto
mkdir -p v2alpha1/go/pipelinespec
cd v2alpha1 && protoc -I=. \
--go_out=go/pipelinespec \
--go_opt=paths=source_relative \
pipeline_spec.proto
go_cachekey: v2alpha1/pipeline_spec.proto v2alpha1/cache_key.proto
mkdir -p v2alpha1/go/cachekey
cd v2alpha1 && protoc -I=. \
--go_out=go/cachekey \
--go_opt=paths=source_relative \
cache_key.proto
# Fetch dependency proto
v2alpha1/google/rpc/status.proto:
mkdir -p v2alpha1/google/rpc
wget -O v2alpha1/google/rpc/status.proto https://raw.githubusercontent.com/googleapis/googleapis/047d3a8ac7f75383855df0166144f891d7af08d9/google/rpc/status.proto
# protoc-gen-go is already installed in api-generator image
.PHONY: protoc-gen-go
protoc-gen-go:
go install google.golang.org/protobuf/cmd/protoc-gen-go

View File

@ -1,7 +0,0 @@
approvers:
- chensun
- connor-mccarthy
- neuromage
reviewers:
- chensun
- connor-mccarthy

View File

@ -1,31 +0,0 @@
# Pipeline Spec
## Generate golang proto code
Generate golang proto code:
```bash
make clean-go golang
```
## Generate Python proto package
Generate kfp-pipeline-spec:
Update `VERSION` in [v2alpha1/python/setup.py](https://github.com/kubeflow/pipelines/blob/master/api/v2alpha1/python/setup.py) if applicable.
```bash
make clean-python python
```
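The built wheel ends up in `v2alpha1/python/dist/`; a quick local install for testing might look like this (the exact wheel file name is an assumption based on the package name):
```bash
pip3 install v2alpha1/python/dist/kfp_pipeline_spec-*.whl
```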
## Generate both Python and golang proto code
Generate both Python and golang proto:
```bash
make clean all
```
Note that there are no prerequisites: the generation uses a prebuilt Docker image with all the necessary tools.
Documentation: <https://developers.google.com/protocol-buffers/docs/reference/go-generated>

View File

@ -1,8 +0,0 @@
module github.com/kubeflow/pipelines/api
go 1.16
require (
google.golang.org/genproto v0.0.0-20211026145609-4688e4c4e024
google.golang.org/protobuf v1.27.1
)

api/go.sum generated
View File

@ -1,122 +0,0 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20211026145609-4688e4c4e024 h1:aePO4E0x+Urj9V5NQHjqOpaNG4oMeHQq0l2ob05z5tI=
google.golang.org/genproto v0.0.0-20211026145609-4688e4c4e024/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -1,39 +0,0 @@
// Copyright 2021 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
option go_package = "github.com/kubeflow/pipelines/api/v2alpha1/go/cachekey";
package ml_pipelines;
import "google/protobuf/struct.proto";
import "pipeline_spec.proto";
// CacheKey captures the inputs and output specs that identify a cached task execution.
message CacheKey {
  map<string, ArtifactNameList> inputArtifactNames = 1;
  map<string, Value> inputParameters = 2 [deprecated = true];
  map<string, RuntimeArtifact> outputArtifactsSpec = 3;
  map<string, string> outputParametersSpec = 4;
  ContainerSpec containerSpec = 5;
  map<string, google.protobuf.Value> input_parameter_values = 6;
}

// ContainerSpec identifies the container image and command line used by a task.
message ContainerSpec {
  string image = 1;
  repeated string cmdArgs = 2;
}

// ArtifactNameList is a list of artifact names for one input key.
message ArtifactNameList {
  repeated string artifactNames = 1;
}

View File

@ -1,422 +0,0 @@
// Copyright 2021 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: cache_key.proto
package cachekey
import (
pipelinespec "github.com/kubeflow/pipelines/api/v2alpha1/go/pipelinespec"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
structpb "google.golang.org/protobuf/types/known/structpb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type CacheKey struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
InputArtifactNames map[string]*ArtifactNameList `protobuf:"bytes,1,rep,name=inputArtifactNames,proto3" json:"inputArtifactNames,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Deprecated: Do not use.
InputParameters map[string]*pipelinespec.Value `protobuf:"bytes,2,rep,name=inputParameters,proto3" json:"inputParameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
OutputArtifactsSpec map[string]*pipelinespec.RuntimeArtifact `protobuf:"bytes,3,rep,name=outputArtifactsSpec,proto3" json:"outputArtifactsSpec,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
OutputParametersSpec map[string]string `protobuf:"bytes,4,rep,name=outputParametersSpec,proto3" json:"outputParametersSpec,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
ContainerSpec *ContainerSpec `protobuf:"bytes,5,opt,name=containerSpec,proto3" json:"containerSpec,omitempty"`
InputParameterValues map[string]*structpb.Value `protobuf:"bytes,6,rep,name=input_parameter_values,json=inputParameterValues,proto3" json:"input_parameter_values,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *CacheKey) Reset() {
*x = CacheKey{}
if protoimpl.UnsafeEnabled {
mi := &file_cache_key_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CacheKey) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CacheKey) ProtoMessage() {}
func (x *CacheKey) ProtoReflect() protoreflect.Message {
mi := &file_cache_key_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use CacheKey.ProtoReflect.Descriptor instead.
func (*CacheKey) Descriptor() ([]byte, []int) {
return file_cache_key_proto_rawDescGZIP(), []int{0}
}
func (x *CacheKey) GetInputArtifactNames() map[string]*ArtifactNameList {
if x != nil {
return x.InputArtifactNames
}
return nil
}
// Deprecated: Do not use.
func (x *CacheKey) GetInputParameters() map[string]*pipelinespec.Value {
if x != nil {
return x.InputParameters
}
return nil
}
func (x *CacheKey) GetOutputArtifactsSpec() map[string]*pipelinespec.RuntimeArtifact {
if x != nil {
return x.OutputArtifactsSpec
}
return nil
}
func (x *CacheKey) GetOutputParametersSpec() map[string]string {
if x != nil {
return x.OutputParametersSpec
}
return nil
}
func (x *CacheKey) GetContainerSpec() *ContainerSpec {
if x != nil {
return x.ContainerSpec
}
return nil
}
func (x *CacheKey) GetInputParameterValues() map[string]*structpb.Value {
if x != nil {
return x.InputParameterValues
}
return nil
}
type ContainerSpec struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"`
CmdArgs []string `protobuf:"bytes,2,rep,name=cmdArgs,proto3" json:"cmdArgs,omitempty"`
}
func (x *ContainerSpec) Reset() {
*x = ContainerSpec{}
if protoimpl.UnsafeEnabled {
mi := &file_cache_key_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ContainerSpec) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ContainerSpec) ProtoMessage() {}
func (x *ContainerSpec) ProtoReflect() protoreflect.Message {
mi := &file_cache_key_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ContainerSpec.ProtoReflect.Descriptor instead.
func (*ContainerSpec) Descriptor() ([]byte, []int) {
return file_cache_key_proto_rawDescGZIP(), []int{1}
}
func (x *ContainerSpec) GetImage() string {
if x != nil {
return x.Image
}
return ""
}
func (x *ContainerSpec) GetCmdArgs() []string {
if x != nil {
return x.CmdArgs
}
return nil
}
type ArtifactNameList struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ArtifactNames []string `protobuf:"bytes,1,rep,name=artifactNames,proto3" json:"artifactNames,omitempty"`
}
func (x *ArtifactNameList) Reset() {
*x = ArtifactNameList{}
if protoimpl.UnsafeEnabled {
mi := &file_cache_key_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ArtifactNameList) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ArtifactNameList) ProtoMessage() {}
func (x *ArtifactNameList) ProtoReflect() protoreflect.Message {
mi := &file_cache_key_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ArtifactNameList.ProtoReflect.Descriptor instead.
func (*ArtifactNameList) Descriptor() ([]byte, []int) {
return file_cache_key_proto_rawDescGZIP(), []int{2}
}
func (x *ArtifactNameList) GetArtifactNames() []string {
if x != nil {
return x.ArtifactNames
}
return nil
}
var File_cache_key_proto protoreflect.FileDescriptor
var file_cache_key_proto_rawDesc = []byte{
0x0a, 0x0f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x0c, 0x6d, 0x6c, 0x5f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x1a,
0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x13, 0x70,
0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x22, 0x8a, 0x08, 0x0a, 0x08, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4b, 0x65, 0x79, 0x12,
0x5e, 0x0a, 0x12, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74,
0x4e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x6d, 0x6c,
0x5f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65,
0x4b, 0x65, 0x79, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63,
0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x12, 0x69, 0x6e, 0x70,
0x75, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12,
0x59, 0x0a, 0x0f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6d, 0x6c, 0x5f, 0x70, 0x69,
0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4b, 0x65, 0x79,
0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0f, 0x69, 0x6e, 0x70, 0x75, 0x74,
0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x61, 0x0a, 0x13, 0x6f, 0x75,
0x74, 0x70, 0x75, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x53, 0x70, 0x65,
0x63, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6d, 0x6c, 0x5f, 0x70, 0x69, 0x70,
0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4b, 0x65, 0x79, 0x2e,
0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x53,
0x70, 0x65, 0x63, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74,
0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x53, 0x70, 0x65, 0x63, 0x12, 0x64, 0x0a,
0x14, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72,
0x73, 0x53, 0x70, 0x65, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x6c,
0x5f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65,
0x4b, 0x65, 0x79, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
0x74, 0x65, 0x72, 0x73, 0x53, 0x70, 0x65, 0x63, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x6f,
0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x53,
0x70, 0x65, 0x63, 0x12, 0x41, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
0x53, 0x70, 0x65, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x6c, 0x5f,
0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69,
0x6e, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x12, 0x66, 0x0a, 0x16, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f,
0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73,
0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x6c, 0x5f, 0x70, 0x69, 0x70, 0x65,
0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4b, 0x65, 0x79, 0x2e, 0x49,
0x6e, 0x70, 0x75, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c,
0x75, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x50,
0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x1a, 0x65,
0x0a, 0x17, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x4e,
0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x6c, 0x5f,
0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61,
0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x14, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x50, 0x61,
0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
0x2e, 0x6d, 0x6c, 0x5f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2e, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x65,
0x0a, 0x18, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74,
0x73, 0x53, 0x70, 0x65, 0x63, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x6c,
0x5f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69,
0x6d, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x47, 0x0a, 0x19, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50,
0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x53, 0x70, 0x65, 0x63, 0x45, 0x6e, 0x74,
0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5f,
0x0a, 0x19, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
0x3f, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63,
0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6d, 0x64, 0x41, 0x72, 0x67,
0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6d, 0x64, 0x41, 0x72, 0x67, 0x73,
0x22, 0x38, 0x0a, 0x10, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65,
0x4c, 0x69, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74,
0x4e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x72, 0x74,
0x69, 0x66, 0x61, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f,
0x77, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f,
0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x67, 0x6f, 0x2f, 0x63, 0x61, 0x63, 0x68,
0x65, 0x6b, 0x65, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
    file_cache_key_proto_rawDescOnce sync.Once
    file_cache_key_proto_rawDescData = file_cache_key_proto_rawDesc
)

func file_cache_key_proto_rawDescGZIP() []byte {
    file_cache_key_proto_rawDescOnce.Do(func() {
        file_cache_key_proto_rawDescData = protoimpl.X.CompressGZIP(file_cache_key_proto_rawDescData)
    })
    return file_cache_key_proto_rawDescData
}

var file_cache_key_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_cache_key_proto_goTypes = []interface{}{
    (*CacheKey)(nil),                     // 0: ml_pipelines.CacheKey
    (*ContainerSpec)(nil),                // 1: ml_pipelines.ContainerSpec
    (*ArtifactNameList)(nil),             // 2: ml_pipelines.ArtifactNameList
    nil,                                  // 3: ml_pipelines.CacheKey.InputArtifactNamesEntry
    nil,                                  // 4: ml_pipelines.CacheKey.InputParametersEntry
    nil,                                  // 5: ml_pipelines.CacheKey.OutputArtifactsSpecEntry
    nil,                                  // 6: ml_pipelines.CacheKey.OutputParametersSpecEntry
    nil,                                  // 7: ml_pipelines.CacheKey.InputParameterValuesEntry
    (*pipelinespec.Value)(nil),           // 8: ml_pipelines.Value
    (*pipelinespec.RuntimeArtifact)(nil), // 9: ml_pipelines.RuntimeArtifact
    (*structpb.Value)(nil),               // 10: google.protobuf.Value
}
var file_cache_key_proto_depIdxs = []int32{
    3,  // 0: ml_pipelines.CacheKey.inputArtifactNames:type_name -> ml_pipelines.CacheKey.InputArtifactNamesEntry
    4,  // 1: ml_pipelines.CacheKey.inputParameters:type_name -> ml_pipelines.CacheKey.InputParametersEntry
    5,  // 2: ml_pipelines.CacheKey.outputArtifactsSpec:type_name -> ml_pipelines.CacheKey.OutputArtifactsSpecEntry
    6,  // 3: ml_pipelines.CacheKey.outputParametersSpec:type_name -> ml_pipelines.CacheKey.OutputParametersSpecEntry
    1,  // 4: ml_pipelines.CacheKey.containerSpec:type_name -> ml_pipelines.ContainerSpec
    7,  // 5: ml_pipelines.CacheKey.input_parameter_values:type_name -> ml_pipelines.CacheKey.InputParameterValuesEntry
    2,  // 6: ml_pipelines.CacheKey.InputArtifactNamesEntry.value:type_name -> ml_pipelines.ArtifactNameList
    8,  // 7: ml_pipelines.CacheKey.InputParametersEntry.value:type_name -> ml_pipelines.Value
    9,  // 8: ml_pipelines.CacheKey.OutputArtifactsSpecEntry.value:type_name -> ml_pipelines.RuntimeArtifact
    10, // 9: ml_pipelines.CacheKey.InputParameterValuesEntry.value:type_name -> google.protobuf.Value
    10, // [10:10] is the sub-list for method output_type
    10, // [10:10] is the sub-list for method input_type
    10, // [10:10] is the sub-list for extension type_name
    10, // [10:10] is the sub-list for extension extendee
    0,  // [0:10] is the sub-list for field type_name
}

func init() { file_cache_key_proto_init() }
func file_cache_key_proto_init() {
    if File_cache_key_proto != nil {
        return
    }
    if !protoimpl.UnsafeEnabled {
        file_cache_key_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*CacheKey); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_cache_key_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*ContainerSpec); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
        file_cache_key_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*ArtifactNameList); i {
            case 0:
                return &v.state
            case 1:
                return &v.sizeCache
            case 2:
                return &v.unknownFields
            default:
                return nil
            }
        }
    }
    type x struct{}
    out := protoimpl.TypeBuilder{
        File: protoimpl.DescBuilder{
            GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
            RawDescriptor: file_cache_key_proto_rawDesc,
            NumEnums:      0,
            NumMessages:   8,
            NumExtensions: 0,
            NumServices:   0,
        },
        GoTypes:           file_cache_key_proto_goTypes,
        DependencyIndexes: file_cache_key_proto_depIdxs,
        MessageInfos:      file_cache_key_proto_msgTypes,
    }.Build()
    File_cache_key_proto = out.File
    file_cache_key_proto_rawDesc = nil
    file_cache_key_proto_goTypes = nil
    file_cache_key_proto_depIdxs = nil
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,74 +0,0 @@
# Copyright 2022 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys

try:
    from distutils.spawn import find_executable
except ImportError:
    from shutil import which as find_executable

PROTO_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), os.pardir))
PKG_DIR = os.path.realpath(
    os.path.join(os.path.dirname(__file__), "kfp", "pipeline_spec"))

# Find the Protocol Compiler. (Taken from protobuf/python/setup.py)
if "PROTOC" in os.environ and os.path.exists(os.environ["PROTOC"]):
    PROTOC = os.environ["PROTOC"]
else:
    PROTOC = find_executable("protoc")


def generate_proto(source):
    """Generate a _pb2.py from a .proto file.

    Invokes the Protocol Compiler to generate a _pb2.py from the given
    .proto file. Does nothing if the output already exists and is newer than
    the input.

    Args:
        source: The source proto file that needs to be compiled.
    """
    output = source.replace(".proto", "_pb2.py")

    if not os.path.exists(output) or (
            os.path.exists(source) and
            os.path.getmtime(source) > os.path.getmtime(output)):
        print("Generating %s..." % output)

        if not os.path.exists(source):
            sys.stderr.write("Can't find required file: %s\n" % source)
            sys.exit(-1)

        if PROTOC is None:
            sys.stderr.write("protoc is not found. Please compile it "
                             "or install the binary package.\n")
            sys.exit(-1)

        protoc_command = [
            PROTOC,
            "-I%s" % PROTO_DIR,
            "--python_out=%s" % PKG_DIR, source
        ]
        if subprocess.call(protoc_command) != 0:
            sys.exit(-1)


if __name__ == '__main__':
    # Generate the protobuf files that we depend on.
    generate_proto(os.path.join(PROTO_DIR, "pipeline_spec.proto"))
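Assuming this script is saved as, say, `generate_proto.py` next to the `kfp/` package directory (the file name here is illustrative), regeneration honors the `PROTOC` environment override shown above:

```bash
# Use a specific protoc binary instead of the one on PATH (the path is a placeholder).
PROTOC=/usr/local/bin/protoc python3 generate_proto.py
```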

View File

@ -1,13 +0,0 @@
# Copyright 2020 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -1,32 +0,0 @@
# Copyright 2022 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools

NAME = 'kfp-pipeline-spec'
VERSION = '0.2.2'

setuptools.setup(
    name=NAME,
    version=VERSION,
    description='Kubeflow Pipelines pipeline spec',
    author='google',
    author_email='kubeflow-pipelines@google.com',
    url='https://github.com/kubeflow/pipelines',
    packages=setuptools.find_namespace_packages(include=['kfp.*']),
    python_requires='>=3.7.0',
    install_requires=['protobuf>=3.13.0,<4'],
    include_package_data=True,
    license='Apache 2.0',
)
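For a quick local install of this spec package, the usual packaging flow applies (a sketch, not a project-mandated command):

```bash
# Install kfp-pipeline-spec straight from the directory containing setup.py.
python3 -m pip install .
```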

View File

@ -1,4 +1,7 @@
# Copyright 2021-2022 The Kubeflow Authors
### Updated base image to golang in order to build with go modules
### Bazel build cannot work with the Tekton library because the current
### KFP Bazel does not support go.mod "replace" on key dependencies.
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -13,7 +16,7 @@
# limitations under the License.
# 1. Build api server application
FROM golang:1.20.4-buster as builder
FROM golang:1.19.3-buster as builder
RUN apt-get update && apt-get install -y cmake clang musl-dev openssl
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
@ -23,22 +26,20 @@ RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/apiserver
RUN go-licenses csv ./backend/src/apiserver > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/apiserver.csv && \
go-licenses save ./backend/src/apiserver --save_path /tmp/NOTICES
# 2. Compile preloaded pipeline samples
FROM python:3.7 as compiler
FROM python:3.8 as compiler
RUN apt-get update -y && apt-get install --no-install-recommends -y -q default-jdk python3-setuptools python3-dev jq
RUN wget https://bootstrap.pypa.io/get-pip.py && python3 get-pip.py
COPY backend/requirements.txt .
COPY sdk/python/requirements.txt .
RUN python3 -m pip install -r requirements.txt --no-cache-dir
# Downloading Argo CLI so that the samples are validated
ENV ARGO_VERSION v3.3.10
RUN curl -sLO https://github.com/argoproj/argo-workflows/releases/download/${ARGO_VERSION}/argo-linux-amd64.gz && \
gunzip argo-linux-amd64.gz && \
chmod +x argo-linux-amd64 && \
mv ./argo-linux-amd64 /usr/local/bin/argo
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY sdk sdk
WORKDIR /go/src/github.com/kubeflow/pipelines/sdk/python
RUN python3 setup.py install
WORKDIR /
COPY ./samples /samples
@ -48,10 +49,15 @@ COPY backend/src/apiserver/config/sample_config.json /samples/
# The default image is replaced with the GCR-hosted python image.
RUN set -e; \
< /samples/sample_config.json jq .[].file --raw-output | while read pipeline_yaml; do \
pipeline_py="${pipeline_yaml%.yaml}"; \
python3 "$pipeline_py"; \
pipeline_py="${pipeline_yaml%.yaml}.py"; \
mode=`< /samples/sample_config.json jq ".[] | select(.file == \"${pipeline_yaml}\") | (if .mode == null then \"V1\" else .mode end)" --raw-output`; \
mv "$pipeline_py" "${pipeline_py}.tmp"; \
echo 'import kfp; kfp.components.default_base_image_or_builder="gcr.io/google-appengine/python:2020-03-31-141326"' | cat - "${pipeline_py}.tmp" > "$pipeline_py"; \
dsl-compile-tekton --py "$pipeline_py" --output "$pipeline_yaml" || python3 "$pipeline_py"; \
done
# 3. Start api web server
FROM debian:stable
@ -75,8 +81,9 @@ RUN chmod +x /bin/apiserver
RUN apt-get update && apt-get install -y ca-certificates wget
# Pin sample doc links to the commit that built the backend image
RUN sed -E "s#/(blob|tree)/master/#/\1/${COMMIT_SHA}/#g" -i /config/sample_config.json && \
sed -E "s/%252Fmaster/%252F${COMMIT_SHA}/#g" -i /config/sample_config.json
# Commented out due to no commit sha for non-release build
# RUN sed -E "s#/(blob|tree)/master/#/\1/${COMMIT_SHA}/#g" -i /config/sample_config.json && \
# sed -E "s/%252Fmaster/%252F${COMMIT_SHA}/#g" -i /config/sample_config.json
# Expose apiserver port
EXPOSE 8888

View File

@ -13,7 +13,7 @@
# limitations under the License.
# Dockerfile for building the source code of cache_server
FROM golang:1.20.4-alpine3.17 as builder
FROM golang:1.19.3-alpine3.15 as builder
RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev
@ -31,11 +31,7 @@ RUN go-licenses csv ./backend/src/cache > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/cache_server.csv && \
go-licenses save ./backend/src/cache --save_path /tmp/NOTICES
FROM alpine:3.17
RUN adduser -S appuser
USER appuser
FROM alpine:3.8
WORKDIR /bin
COPY --from=builder /bin/cache_server /bin/cache_server

View File

@ -1,43 +0,0 @@
# Copyright 2022 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Dockerfile for building the source code of conformance tests
FROM golang:1.20.4-alpine3.17 as builder
RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
# Compile the test
RUN GO111MODULE=on go test -c -o /test/integration/api-test backend/test/integration/*.go
# Add test resources
ADD backend/test/resources /test/resources
# Add test script.
COPY backend/conformance/run.sh /test/integration
RUN chmod +x /test/integration/run.sh
# Create a tar ball for all the test assets, to be copied into the final image.
RUN tar -czvf /test.tar.gz /test
FROM alpine:3.8
COPY --from=builder /test.tar.gz /
RUN tar -xzvf /test.tar.gz
WORKDIR /test/integration
ENTRYPOINT [ "./run.sh" ]

View File

@ -1,42 +0,0 @@
# Copyright 2023 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.4-alpine3.17 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -tags netgo -ldflags '-extldflags "-static"' -o /bin/driver ./backend/src/v2/cmd/driver/*.go
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/v2/cmd/driver
RUN go-licenses csv ./backend/src/v2/cmd/driver > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/driver.csv && \
go-licenses save ./backend/src/v2/cmd/driver --save_path /tmp/NOTICES
FROM alpine:3.17
RUN adduser -S appuser
USER appuser
WORKDIR /bin
COPY --from=builder /bin/driver /bin/driver
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
ENTRYPOINT [ "/bin/driver" ]

View File

@ -1,42 +0,0 @@
# Copyright 2023 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.4-alpine3.17 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
RUN GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -tags netgo -ldflags '-extldflags "-static"' -o /bin/launcher-v2 ./backend/src/v2/cmd/launcher-v2/*.go
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/v2/cmd/launcher-v2
RUN go-licenses csv ./backend/src/v2/cmd/launcher-v2 > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/launcher.csv && \
go-licenses save ./backend/src/v2/cmd/launcher-v2 --save_path /tmp/NOTICES
FROM alpine:3.17
RUN adduser -S appuser
USER appuser
WORKDIR /bin
COPY --from=builder /bin/launcher-v2 /bin/launcher-v2
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
ENTRYPOINT [ "/bin/launcher-v2" ]

View File

@ -1,3 +1,5 @@
# Updated golang image to 1.13 since Tekton 0.13 onwards
# requires golang 1.13 in order to build certain packages.
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -12,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.4-alpine3.17 as builder
FROM golang:1.19.3-alpine3.15 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
@ -30,13 +32,10 @@ RUN go-licenses csv ./backend/src/agent/persistence > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/persistence_agent.csv && \
go-licenses save ./backend/src/agent/persistence --save_path /tmp/NOTICES
FROM alpine:3.17
RUN adduser -S appuser
USER appuser
FROM alpine:3.11
WORKDIR /bin
COPY backend/src/apiserver/config/ /config
COPY --from=builder /bin/persistence_agent /bin/persistence_agent
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
@ -50,6 +49,10 @@ ENV TTL_SECONDS_AFTER_WORKFLOW_FINISH 86400
# NUM_WORKERS indicates how many worker goroutines
ENV NUM_WORKERS 2
ENV EXECUTIONTYPE Workflow
# CHILDREFERENCES_KINDS indicates the kinds of runs to search for the childReferences
ENV CHILDREFERENCES_KINDS ""
CMD persistence_agent --logtostderr=true --namespace=${NAMESPACE} --ttlSecondsAfterWorkflowFinish=${TTL_SECONDS_AFTER_WORKFLOW_FINISH} --numWorker ${NUM_WORKERS} --executionType ${EXECUTIONTYPE}
# LEGACY_STATUS_UPDATE enables the legacy status update method that passes updates via the apiserver
ENV LEGACY_STATUS_UPDATE "false"
CMD persistence_agent --logtostderr=true --namespace=${NAMESPACE} --ttlSecondsAfterWorkflowFinish=${TTL_SECONDS_AFTER_WORKFLOW_FINISH} --numWorker=${NUM_WORKERS} --childReferencesKinds=${CHILDREFERENCES_KINDS} --legacyStatusUpdate=${LEGACY_STATUS_UPDATE} --config=/config

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.4-alpine3.17 as builder
FROM golang:1.19.3-alpine3.15 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
@ -30,21 +30,16 @@ RUN go-licenses csv ./backend/src/crd/controller/scheduledworkflow > /tmp/licens
diff /tmp/licenses.csv backend/third_party_licenses/swf.csv && \
go-licenses save ./backend/src/crd/controller/scheduledworkflow --save_path /tmp/NOTICES
FROM alpine:3.17
RUN apk --no-cache add tzdata
RUN adduser -S appuser
USER appuser
FROM alpine:3.11
WORKDIR /bin
COPY --from=builder /bin/controller /bin/controller
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
RUN chmod +x /bin/controller
RUN apk --no-cache add tzdata
ENV NAMESPACE ""
ENV EXECUTIONTYPE Workflow
CMD /bin/controller --logtostderr=true --namespace=${NAMESPACE} --executionType ${EXECUTIONTYPE}
CMD /bin/controller --logtostderr=true --namespace=${NAMESPACE}

View File

@ -1,41 +0,0 @@
# Copyright 2023 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.4-alpine3.17 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
# Needed musl-dev for github.com/mattn/go-sqlite3
RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev patch
RUN CGO_ENABLED=0 GO111MODULE=on go build -o /bin/controller ./backend/src/v2/cmd/tekton-exithandler/controller/*.go
RUN go install github.com/google/go-licenses@d483853
RUN go-licenses check ./backend/src/v2/cmd/tekton-exithandler/controller/
RUN go-licenses csv ./backend/src/v2/cmd/tekton-exithandler/controller > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/tekton-exithandler-controller.csv && \
go-licenses save ./backend/src/v2/cmd/tekton-exithandler/controller --save_path /tmp/NOTICES
FROM alpine:3.17
WORKDIR /bin
COPY --from=builder /bin/controller /bin/controller
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
RUN chmod +x /bin/controller
RUN apk --no-cache add tzdata
CMD /bin/controller

View File

@ -1,41 +0,0 @@
# Copyright 2023 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.4-alpine3.17 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
# Needed musl-dev for github.com/mattn/go-sqlite3
RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev patch
RUN CGO_ENABLED=0 GO111MODULE=on go build -o /bin/webhook ./backend/src/v2/cmd/tekton-exithandler/webhook/*.go
RUN go install github.com/google/go-licenses@d483853
RUN go-licenses check ./backend/src/v2/cmd/tekton-exithandler/webhook/
RUN go-licenses csv ./backend/src/v2/cmd/tekton-exithandler/webhook > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/tekton-exithandler-webhook.csv && \
go-licenses save ./backend/src/v2/cmd/tekton-exithandler/webhook --save_path /tmp/NOTICES
FROM alpine:3.17
WORKDIR /bin
COPY --from=builder /bin/webhook /bin/webhook
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
RUN chmod +x /bin/webhook
RUN apk --no-cache add tzdata
CMD /bin/webhook

View File

@ -1,41 +0,0 @@
# Copyright 2023 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.4-alpine3.17 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
# Needed musl-dev for github.com/mattn/go-sqlite3
RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev patch
RUN CGO_ENABLED=0 GO111MODULE=on go build -o /bin/controller ./backend/src/v2/cmd/tekton-kfptask/controller/*.go
RUN go install github.com/google/go-licenses@d483853
RUN go-licenses check ./backend/src/v2/cmd/tekton-kfptask/controller/
RUN go-licenses csv ./backend/src/v2/cmd/tekton-kfptask/controller > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/tekton-kfptask-controller.csv && \
go-licenses save ./backend/src/v2/cmd/tekton-kfptask/controller --save_path /tmp/NOTICES
FROM alpine:3.17
WORKDIR /bin
COPY --from=builder /bin/controller /bin/controller
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
RUN chmod +x /bin/controller
RUN apk --no-cache add tzdata
CMD /bin/controller

View File

@ -1,41 +0,0 @@
# Copyright 2023 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.4-alpine3.17 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
# Needed musl-dev for github.com/mattn/go-sqlite3
RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev patch
RUN CGO_ENABLED=0 GO111MODULE=on go build -o /bin/webhook ./backend/src/v2/cmd/tekton-kfptask/webhook/*.go
RUN go install github.com/google/go-licenses@d483853
RUN go-licenses check ./backend/src/v2/cmd/tekton-kfptask/webhook/
RUN go-licenses csv ./backend/src/v2/cmd/tekton-kfptask/webhook > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/tekton-kfptask-webhook.csv && \
go-licenses save ./backend/src/v2/cmd/tekton-kfptask/webhook --save_path /tmp/NOTICES
FROM alpine:3.17
WORKDIR /bin
COPY --from=builder /bin/webhook /bin/webhook
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
RUN chmod +x /bin/webhook
RUN apk --no-cache add tzdata
CMD /bin/webhook

View File

@ -1,44 +0,0 @@
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.4-alpine3.17 as builder
WORKDIR /go/src/github.com/kubeflow/pipelines
COPY . .
# Needed musl-dev for github.com/mattn/go-sqlite3
RUN apk update && apk upgrade && \
apk add --no-cache bash git openssh gcc musl-dev patch
RUN go mod vendor && patch -u vendor/k8s.io/klog/v2/klog.go backend/src/v2/controller/klog.patch
RUN CGO_ENABLED=0 GO111MODULE=on go build -mod=vendor -o /bin/controller backend/src/v2/cmd/controller/*.go && rm -rf vendor
# Check licenses and comply with license terms.
RUN ./hack/install-go-licenses.sh
# First, make sure there's no forbidden license.
RUN go-licenses check ./backend/src/v2/cmd/controller/
RUN go-licenses csv ./backend/src/v2/cmd/controller > /tmp/licenses.csv && \
diff /tmp/licenses.csv backend/third_party_licenses/tekton-driver.csv && \
go-licenses save ./backend/src/v2/cmd/controller --save_path /tmp/NOTICES
FROM alpine:3.17
WORKDIR /bin
COPY --from=builder /bin/controller /bin/controller
# Copy licenses and notices.
COPY --from=builder /tmp/licenses.csv /third_party/licenses.csv
COPY --from=builder /tmp/NOTICES /third_party/NOTICES
RUN chmod +x /bin/controller
RUN apk --no-cache add tzdata
CMD /bin/controller

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM golang:1.20.4-alpine3.17 as builder
FROM golang:1.17.6-alpine3.15 as builder
RUN apk update && apk upgrade
RUN apk add --no-cache git gcc musl-dev

View File

@ -1,40 +0,0 @@
# Copyright 2019-2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This Dockerfile starts server.py (located at src/apiserver/visualization),
# which accepts a POST request that resolves to HTML depicting a specified
# visualization. More details about this process can be found in the server.py
# and exporter.py files in the directory specified above.
# This image should be kept in sync with the image in backend/src/apiserver/visualization/update_requirements.sh.
FROM tensorflow/tensorflow:2.5.1
RUN apt-get update \
&& apt-get install -y wget curl tar openssl
RUN curl https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.tar.gz > /tmp/google-cloud-sdk.tar.gz
RUN mkdir -p /usr/local/gcloud
RUN tar -C /usr/local/gcloud -xf /tmp/google-cloud-sdk.tar.gz
RUN /usr/local/gcloud/google-cloud-sdk/install.sh
ENV PATH $PATH:/usr/local/gcloud/google-cloud-sdk/bin
WORKDIR /src
COPY backend/src/apiserver/visualization/requirements.txt /src
RUN python3 -m pip install -r requirements.txt --no-cache-dir
COPY backend/src/apiserver/visualization /src
ENTRYPOINT [ "python3", "server.py" ]

View File

@ -58,6 +58,3 @@ image_swf:
.PHONY: image_viewer
image_viewer:
cd $(MOD_ROOT) && docker build -t viewercontroller -f backend/Dockerfile.viewercontroller .
.PHONY: image_visualization
image_visualization:
cd $(MOD_ROOT) && docker build -t visualization -f backend/Dockerfile.visualization .

View File

@ -1,8 +0,0 @@
approvers:
- chensun
- gkcalat
- Linchin
reviewers:
- chensun
- gkcalat
- Linchin

View File

@ -3,25 +3,34 @@ Pipelines backend.
## Building & Testing
To run all unittests for backend:
To run all unit tests for backend:
```
go test -v -cover ./backend/...
```
To run the API server unit tests:
```
go test -v -cover ./backend/src/apiserver/...
```
The API server itself can be built using:
```
go build -o /tmp/apiserver backend/src/apiserver/*.go
```
## Code Style
## Building APIServer Image using Remote Build Execution
The backend codebase follows [Google's Go Style Guide](https://google.github.io/styleguide/go/). Please take time to get familiar with the [best practices](https://google.github.io/styleguide/go/best-practices). It is not intended to be exhaustive, but it often helps minimize guesswork among developers and keeps the codebase uniform and consistent.
If you are a dev in the Kubeflow Pipelines team, you can use
[Remote Build Execution Service](https://cloud.google.com/sdk/gcloud/reference/alpha/remote-build-execution/)
to build the API Server image using Bazel, with a shared cache to speed up
the build. To do so, execute the following command:
We use the [golangci-lint](https://golangci-lint.run/) tool to catch common mistakes locally (see the detailed configuration [here](https://github.com/kubeflow/pipelines/blob/master/.golangci.yaml)). It can be [conveniently integrated](https://golangci-lint.run/usage/integrations/) with popular IDEs such as VS Code or Vim.
Finally, it is advisable to install [pre-commit](https://pre-commit.com/) to automate linter checks (see the configuration [here](https://github.com/kubeflow/pipelines/blob/master/.pre-commit-config.yaml)); a minimal local setup is sketched below.
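The hooks themselves come from the repository's `.pre-commit-config.yaml`; the commands below only install and wire up the framework:

```bash
# Install the pre-commit framework into the current Python environment.
python3 -m pip install pre-commit
# Register the git hook so the configured linters run on every commit.
pre-commit install
# Optionally, lint the whole tree once up front.
pre-commit run --all-files
```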
```
./build_api_server.sh -i gcr.io/cloud-ml-pipelines-test/api-server:dev
```
## Building APIServer image locally
@ -30,14 +39,6 @@ The API server image can be built from the root folder of the repo using:
export API_SERVER_IMAGE=api_server
docker build -f backend/Dockerfile . --tag $API_SERVER_IMAGE
```
## Deploy APIServer with the image you built
Run
```
kubectl edit deployment.v1.apps/ml-pipeline -n kubeflow
```
You'll see a field that references the API server Docker image.
Change it to point to your own build; after you save and close the file, the API server will restart with your change. Alternatively, you can patch the image non-interactively, as sketched below.
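A minimal sketch, assuming a default Kubeflow install (verify the deployment and container names in your cluster first, e.g. with `kubectl -n kubeflow describe deployment ml-pipeline`; the image reference is a placeholder):

```bash
# Point the ml-pipeline deployment at your own API server build.
# "ml-pipeline-api-server" is the container name in a typical install.
kubectl -n kubeflow set image deployment/ml-pipeline \
  ml-pipeline-api-server=gcr.io/<your-project>/api_server:dev
```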
## Building client library and swagger files
@ -46,8 +47,11 @@ need to be regenerated and checked-in. Refer to [backend/api](./api/README.md) f
## Updating licenses info
1. [Install go-licenses tool](../hack/install-go-licenses.sh) and refer to [its documentation](https://github.com/google/go-licenses) for how to use it.
1. Install go-licenses tool and refer to [its documentation](https://github.com/google/go-licenses) for how to use it.
```bash
go install github.com/google/go-licenses@d483853
```
2. Run the tool to update all licenses:
@ -55,19 +59,10 @@ need to be regenerated and checked-in. Refer to [backend/api](./api/README.md) f
make all
```
# Visualization Server Instructions
## Updating python dependencies
[pip-tools](https://github.com/jazzband/pip-tools) is used to manage python
dependencies. To update dependencies, edit [requirements.in](requirements.in)
and run `./update_requirements.sh` to update and pin the transitive
dependencies.
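As a rough sketch of what the script wraps (pip-tools' standard flow; `./update_requirements.sh` itself remains the source of truth):

```bash
# Install pip-tools, then re-resolve requirements.in and pin every
# transitive dependency into requirements.txt.
python3 -m pip install pip-tools
pip-compile requirements.in --output-file requirements.txt
```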
## Building conformance tests (WIP)
Run
```
docker build . -f backend/Dockerfile.conformance -t <tag>
```
and pin the transitive dependencies.

View File

@ -14,10 +14,10 @@
# Generate client code (go & json) from API protocol buffers
FROM golang:1.15.10 as generator
ENV GRPC_GATEWAY_VERSION v1.9.6
ENV GO_SWAGGER_VERSION v0.18.0
ENV GOLANG_PROTOBUF_VERSION v1.5.1
ENV GRPC_VERSION v1.23.0
ENV GRPC_GATEWAY_VERSION v2.11.3
ENV GO_SWAGGER_VERSION v0.30.4
ENV GOLANG_PROTOBUF_VERSION v1.5.2
ENV GRPC_VERSION v1.48.0
ENV PROTOC_VERSION 3.17.3
ENV GOBIN=/go/bin
@ -38,9 +38,10 @@ RUN mkdir grpc && git clone --depth 1 --branch $GRPC_VERSION https://github.com/
# Install protoc-gen-rpc-gateway && protoc-gen-swagger.
RUN cd grpc-ecosystem/grpc-gateway && GO111MODULE=on go mod vendor
RUN go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
RUN go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
RUN go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-openapiv2
# Download go-swagger binary.
# swagger doesn't exist for openapiv2 yet
RUN curl -LO "https://github.com/go-swagger/go-swagger/releases/download/${GO_SWAGGER_VERSION}/swagger_linux_amd64"
RUN chmod +x swagger_linux_amd64 && mv swagger_linux_amd64 /usr/bin/swagger

View File

@ -17,28 +17,19 @@
IMAGE_TAG=kfp-api-generator
# Contact one of Bobgy, or zijianjoy if this remote image needs an update.
REMOTE_IMAGE=gcr.io/ml-pipeline-test/api-generator
# Image generated by https://github.com/kubeflow/pipelines/pull/7788.
# Keep in sync with the version used in test/release/Dockerfile.release
PREBUILT_REMOTE_IMAGE=gcr.io/ml-pipeline-test/api-generator@sha256:431635b564a8716e0814df4b8803594d64a517e02d72c6950e936e4b5cce60e3
PREBUILT_REMOTE_IMAGE=aipipeline/api-generator:openapiv2.11.3
# PREBUILT_REMOTE_IMAGE=aipipeline/api-generator:test
API_VERSION=v1
# Generate clients using a pre-built api-generator image.
.PHONY: generate
generate: fetch-dependencies hack/generator.sh $(API_VERSION)/*.proto
generate: hack/generator.sh $(API_VERSION)/*.proto
docker run --interactive --rm \
-e API_VERSION=$(API_VERSION) \
--user $$(id -u):$$(id -g) \
--mount type=bind,source="$$(pwd)/../..",target=/go/src/github.com/kubeflow/pipelines \
$(PREBUILT_REMOTE_IMAGE) /go/src/github.com/kubeflow/pipelines/backend/api/hack/generator.sh
# Fetch dependency proto
.PHONY: fetch-dependencies
fetch-dependencies: v2beta1/google/rpc/status.proto
# TODO(gkcalat): add this as a submodule?
v2beta1/google/rpc/status.proto:
mkdir -p v2beta1/google/rpc
wget -O v2beta1/google/rpc/status.proto https://raw.githubusercontent.com/googleapis/googleapis/047d3a8ac7f75383855df0166144f891d7af08d9/google/rpc/status.proto
# Generate clients starting by building api-generator image locally.
# Note, this should only be used for local development purposes. Once any change is made to the Dockerfile,
# we should push the new image remotely to ensure everyone is using the same tools.

View File

@ -1,87 +1,55 @@
# Kubeflow Pipelines backend API
# Kubeflow Pipelines API
## Before you begin
## Before You Start
Tools needed:
* [docker](https://docs.docker.com/get-docker/)
* [make](https://www.gnu.org/software/make/)
* [java](https://www.java.com/en/download/)
* [python3](https://www.python.org/downloads/)
* Docker
* Make
Set the environment variable `API_VERSION` to the version that you want to generate. We use `v1beta1` as an example here.
```bash
export API_VERSION="v1beta1"
```
## Compiling `.proto` files to Go client and swagger definitions
## Auto-generation of Go client and swagger definitions
Use `make generate` command to generate clients using a pre-built api-generator image:
```bash
make generate
```
Go client library will be placed into:
```bash
make generate
```
Code will be generated in:
* `./${API_VERSION}/go_client`
* `./${API_VERSION}/go_http_client`
* `./${API_VERSION}/swagger`
> **Note**
> `./${API_VERSION}/swagger/pipeline.upload.swagger.json` is manually created, while the rest of `./${API_VERSION}/swagger/*.swagger.json` are compiled from `./${API_VERSION}/*.proto` files.
## Auto-generation of Python client
## Compiling Python client
To generate the Python client, run the following bash script (requires `java` and `python3`).
This will generate the Python client for the API version specified in the environment variable.
```bash
./build_kfp_server_api_python_package.sh
```
Python client will be placed into `./${API_VERSION}/python_http_client`.
Code will be generated in `./${API_VERSION}/python_http_client`.
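If you want to try the generated client locally, it can typically be installed straight from that output directory, since the generator emits a standard `setup.py` (a hedged example; the path assumes `API_VERSION=v1beta1`):

```bash
python3 -m pip install ./v1beta1/python_http_client
```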
## Updating of API reference documentation
## Auto-generation of API reference documentation
> **Note**
> Whenever the API definition changes (i.e., the file `kfp_api_single_file.swagger.json` changes), the API reference documentation needs to be updated.
This directory contains API definitions. They are used to generate [the API reference on kubeflow.org](https://www.kubeflow.org/docs/pipelines/reference/api/kubeflow-pipeline-api-spec/).
API definitions in this folder are used to generate [`v1beta1`](https://www.kubeflow.org/docs/components/pipelines/v1/reference/api/kubeflow-pipeline-api-spec/) and [`v2beta1`](https://www.kubeflow.org/docs/components/pipelines/v2/reference/api/kubeflow-pipeline-api-spec/) API reference documentation on kubeflow.org. Follow the steps below to update the documentation:
* Use the tools [bootprint-openapi](https://github.com/bootprint/bootprint-monorepo/tree/master/packages/bootprint-openapi) and [html-inline](https://github.com/substack/html-inline) to generate the API reference from [kfp_api_single_file.swagger.json](https://github.com/kubeflow/pipelines/blob/master/backend/api/${API_VERSION}/swagger/kfp_api_single_file.swagger.json). These [instructions](https://github.com/bootprint/bootprint-monorepo/tree/master/packages/bootprint-openapi#bootprint-openapi) show how to generate *a single self-contained html file*, which is the API reference, from a json file.
1. Install [bootprint-openapi](https://github.com/bootprint/bootprint-monorepo/tree/master/packages/bootprint-openapi) and [html-inline](https://www.npmjs.com/package/html-inline) packages using `npm`:
```bash
npm install -g bootprint
npm install -g bootprint-openapi
npm -g install html-inline
```
* Use the above generated html to replace the html section, which is below the title section, in the file [kubeflow-pipeline-api-spec.html](https://github.com/kubeflow/website/blob/master/content/en/docs/pipelines/reference/api/kubeflow-pipeline-api-spec.html)
2. Generate *self-contained html file(s)* with API reference documentation from `./${API_VERSION}/swagger/kfp_api_single_file.swagger.json`:
Note: whenever the API definition changes (i.e., the file [kfp_api_single_file.swagger.json](https://github.com/kubeflow/pipelines/blob/master/backend/api/${API_VERSION}/swagger/kfp_api_single_file.swagger.json) changes), the API reference needs to be updated.
For `v1beta1`:
```bash
bootprint openapi ./v1beta1/swagger/kfp_api_single_file.swagger.json ./temp/v1
html-inline ./temp/v1/index.html > ./temp/v1/kubeflow-pipeline-api-spec.html
```
## Auto-generation of api generator image
For `v2beta1`:
```bash
make push
```
```bash
bootprint openapi ./v2beta1/swagger/kfp_api_single_file.swagger.json ./temp/v2
html-inline ./temp/v2/index.html > ./temp/v2/kubeflow-pipeline-api-spec.html
```
When you update the [Dockerfile](`./Dockerfile`), to make sure others are using the same image as you do:
3. Use the above generated html file(s) to replace the relevant section(s) on kubeflow.org. When copying the content, make sure to **preserve the original headers**.
- `v1beta1`: file [kubeflow-pipeline-api-spec.html](https://github.com/kubeflow/website/blob/master/content/en/docs/components/pipelines/v1/reference/api/kubeflow-pipeline-api-spec.html).
- `v2beta1`: file [kubeflow-pipeline-api-spec.html](https://github.com/kubeflow/website/blob/master/content/en/docs/components/pipelines/v2/reference/api/kubeflow-pipeline-api-spec.html).
4. Create a PR with the changes in [kubeflow.org website repository](https://github.com/kubeflow/website). See an example [here](https://github.com/kubeflow/website/pull/3444).
## Updating API generator image
API generator image is defined in [Dockerfile](`./Dockerfile`). If you need to update the container, follow these steps:
1. Update the [Dockerfile](`./Dockerfile`) and build the image by running `docker build -t gcr.io/ml-pipeline-test/api-generator:latest .`
1. Push the new container by running `docker push gcr.io/ml-pipeline-test/api-generator:latest` (requires being [authenticated](https://cloud.google.com/container-registry/docs/advanced-authentication)).
1. Update the `PREBUILT_REMOTE_IMAGE` variable in the [Makefile](./Makefile) to point to your new image.
1. Similarly, push a new version of the release tools image to `gcr.io/ml-pipeline-test/release:latest` and run `make push` in [test/release/Makefile](../../test/release/Makefile).
1. Push a new version of the api generator image to gcr.io/ml-pipeline-test/api-generator:latest.
2. Update the PREBUILT_REMOTE_IMAGE var in Makefile to point to your new image.
3. Push a new version of the release tools image to gcr.io/ml-pipeline-test/release:latest, then run `make push` in [test/release/Makefile](../../test/release/Makefile).

View File

@ -34,6 +34,8 @@ if [ -z "$VERSION" ]; then
echo "ERROR: $REPO_ROOT/VERSION is empty"
exit 1
fi
API_VERSION=v1
codegen_file=/tmp/openapi-generator-cli.jar
# Browse all versions in: https://repo1.maven.org/maven2/org/openapitools/openapi-generator-cli/
@ -53,9 +55,9 @@ rm -rf "$DIR"
echo "Generating python code from swagger json in $DIR."
java -jar "$codegen_file" generate -g python -t "$CURRENT_DIR/$API_VERSION/python_http_client_template" -i "$swagger_file" -o "$DIR" -c <(echo '{
"packageName": "'"kfp_server_api"'",
"packageName": "'"kfp_tekton_server_api"'",
"packageVersion": "'"$VERSION"'",
"packageUrl": "https://github.com/kubeflow/pipelines"
"packageUrl": "https://github.com/kubeflow/kfp-tekton"
}')
echo "Copying LICENSE to $DIR"

View File

@ -1,6 +1,6 @@
#!/bin/bash
# Copyright 2018 The Kubeflow Authors
# Copyright 2018-2020 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -26,28 +26,25 @@ KFP_VERSION=$(cat VERSION)
# Delete currently generated code.
rm -r -f backend/api/${API_VERSION}/go_http_client/*
rm -r -f backend/api/${API_VERSION}/go_client/*
rm -r -f backend/api/${API_VERSION}/go_client/*
# Cannot delete backend/api/${API_VERSION}/swagger/*, because there are manually maintained definition files too.
# Create directories if they don't exist
mkdir -p backend/api/${API_VERSION}/go_http_client
mkdir -p backend/api/${API_VERSION}/go_client
mkdir -p backend/api/${API_VERSION}/swagger
# Generate *.pb.go (grpc api client) from *.proto.
${PROTOCCOMPILER} -I. -Ibackend/api/${API_VERSION} \
-I/go/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
-I/go/src/github.com/grpc-ecosystem/grpc-gateway/ \
-I/go/src/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/ \
-I/go/src/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-openapiv2/options/ \
-I/usr/include/ \
--plugin=protoc-gen-go=/go/bin/protoc-gen-go \
--go_out=plugins=grpc:${TMP_OUTPUT} \
backend/api/${API_VERSION}/*.proto
# Generate *.pb.gw.go (grpc api rest client) from *.proto.
${PROTOCCOMPILER} -I. -Ibackend/api/${API_VERSION} \
-I/go/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
-I/go/src/github.com/grpc-ecosystem/grpc-gateway/ \
-I/go/src/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/ \
-I/go/src/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-openapiv2/options/ \
-I/usr/include/ \
--plugin=protoc-gen-grpc-gateway=/go/bin/protoc-gen-grpc-gateway \
--grpc-gateway_out=logtostderr=true:${TMP_OUTPUT} \
@ -56,50 +53,32 @@ ${PROTOCCOMPILER} -I. -Ibackend/api/${API_VERSION} \
cp ${TMP_OUTPUT}/github.com/kubeflow/pipelines/backend/api/${API_VERSION}/go_client/* ./backend/api/${API_VERSION}/go_client
# Generate *.swagger.json from *.proto into swagger folder.
${PROTOCCOMPILER} -I. -Ibackend/api/${API_VERSION} \
-I/go/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \
-I/go/src/github.com/grpc-ecosystem/grpc-gateway/ \
-I/go/src/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options/ \
-I/go/src/github.com/grpc-ecosystem/grpc-gateway/protoc-gen-openapiv2/options/ \
-I//usr/include/ \
--plugin=protoc-gen-swagger=/go/bin/protoc-gen-swagger \
--swagger_out=logtostderr=true:${TMP_OUTPUT} \
--plugin=protoc-gen-openapiv2=/go/bin/protoc-gen-openapiv2 \
--openapiv2_out=logtostderr=true,json_names_for_fields=false:${TMP_OUTPUT} \
backend/api/${API_VERSION}/*.proto
# Move *.swagger.json files into swagger folder.
cp -a ${TMP_OUTPUT}/backend/api/${API_VERSION}/*.swagger.json ./backend/api/${API_VERSION}/swagger
# Generate a single swagger json file from the swagger json files of all models.
# Note: use backend/backend/api/${API_VERSION}/swagger/{run,job,experiment,pipeline,pipeline.upload,healthz}.swagger.json when apt-get can install jq-1.6
if [[ "$API_VERSION" == "v1beta1" ]]; then
jq -s 'reduce .[] as $item ({}; . * $item) | .info.title = "Kubeflow Pipelines API" | .info.description = "This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition." | .info.version = "'$KFP_VERSION'" | .info.contact = { "name": "google", "email": "kubeflow-pipelines@google.com", "url": "https://www.google.com" } | .info.license = { "name": "Apache 2.0", "url": "https://raw.githubusercontent.com/kubeflow/pipelines/master/LICENSE" }' \
backend/api/${API_VERSION}/swagger/experiment.swagger.json \
backend/api/${API_VERSION}/swagger/run.swagger.json \
backend/api/${API_VERSION}/swagger/job.swagger.json \
backend/api/${API_VERSION}/swagger/pipeline.swagger.json \
backend/api/${API_VERSION}/swagger/pipeline.upload.swagger.json \
backend/api/${API_VERSION}/swagger/healthz.swagger.json \
> "backend/api/${API_VERSION}/swagger/kfp_api_single_file.swagger.json"
else
# TODO(gkcalat): needs to be updated when new protos are added.
jq -s 'reduce .[] as $item ({}; . * $item) | .info.title = "Kubeflow Pipelines API" | .info.description = "This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition." | .info.version = "'$KFP_VERSION'" | .info.contact = { "name": "google", "email": "kubeflow-pipelines@google.com", "url": "https://www.google.com" } | .info.license = { "name": "Apache 2.0", "url": "https://raw.githubusercontent.com/kubeflow/pipelines/master/LICENSE" }' \
backend/api/${API_VERSION}/swagger/*.swagger.json \
> "backend/api/${API_VERSION}/swagger/kfp_api_single_file.swagger.json"
fi
jq -s 'reduce .[] as $item ({}; . * $item) | .info.title = "Kubeflow Pipelines API" | .info.description = "This file contains REST API specification for Kubeflow Pipelines. The file is autogenerated from the swagger definition." | .info.version = "'$KFP_VERSION'" | .info.contact = { "name": "google", "email": "kubeflow-pipelines@google.com", "url": "https://www.google.com" } | .info.license = { "name": "Apache 2.0", "url": "https://raw.githubusercontent.com/kubeflow/pipelines/master/LICENSE" }' \
backend/api/${API_VERSION}/swagger/run.swagger.json \
backend/api/${API_VERSION}/swagger/job.swagger.json \
backend/api/${API_VERSION}/swagger/experiment.swagger.json \
backend/api/${API_VERSION}/swagger/pipeline.swagger.json \
backend/api/${API_VERSION}/swagger/pipeline.upload.swagger.json \
backend/api/${API_VERSION}/swagger/healthz.swagger.json \
> "backend/api/${API_VERSION}/swagger/kfp_api_single_file.swagger.json"
# Generate go_http_client from swagger json.
if [[ "$API_VERSION" == "v1beta1" ]]; then
swagger generate client \
-f backend/api/${API_VERSION}/swagger/job.swagger.json \
-A job \
--principal models.Principal \
-c job_client \
-m job_model \
-t backend/api/${API_VERSION}/go_http_client
else
swagger generate client \
-f backend/api/${API_VERSION}/swagger/recurring_run.swagger.json \
-A recurring_run \
--principal models.Principal \
-c recurring_run_client \
-m recurring_run_model \
-t backend/api/${API_VERSION}/go_http_client
fi
swagger generate client \
-f backend/api/${API_VERSION}/swagger/job.swagger.json \
-A job \
--principal models.Principal \
-c job_client \
-m job_model \
-t backend/api/${API_VERSION}/go_http_client
swagger generate client \
-f backend/api/${API_VERSION}/swagger/run.swagger.json \
-A run \
@ -142,19 +121,11 @@ swagger generate client \
-c healthz_client \
-m healthz_model \
-t backend/api/${API_VERSION}/go_http_client
# Hack to fix an issue with go-swagger
# See https://github.com/go-swagger/go-swagger/issues/1381 for details.
if [[ "$API_VERSION" == "v1beta1" ]]; then
sed -i -- 's/MaxConcurrency int64 `json:"max_concurrency,omitempty"`/MaxConcurrency int64 `json:"max_concurrency,omitempty,string"`/g' backend/api/${API_VERSION}/go_http_client/job_model/api_job.go
sed -i -- 's/IntervalSecond int64 `json:"interval_second,omitempty"`/IntervalSecond int64 `json:"interval_second,omitempty,string"`/g' backend/api/${API_VERSION}/go_http_client/job_model/api_periodic_schedule.go
sed -i -- 's/MaxConcurrency string `json:"max_concurrency,omitempty"`/MaxConcurrency int64 `json:"max_concurrency,omitempty,string"`/g' backend/api/${API_VERSION}/go_http_client/job_model/api_job.go
sed -i -- 's/IntervalSecond string `json:"interval_second,omitempty"`/IntervalSecond int64 `json:"interval_second,omitempty,string"`/g' backend/api/${API_VERSION}/go_http_client/job_model/api_periodic_schedule.go
else
sed -i -- 's/MaxConcurrency int64 `json:"max_concurrency,omitempty"`/MaxConcurrency int64 `json:"max_concurrency,omitempty,string"`/g' backend/api/${API_VERSION}/go_http_client/recurring_run_model/${API_VERSION}_recurring_run.go
sed -i -- 's/IntervalSecond int64 `json:"interval_second,omitempty"`/IntervalSecond int64 `json:"interval_second,omitempty,string"`/g' backend/api/${API_VERSION}/go_http_client/recurring_run_model/${API_VERSION}_periodic_schedule.go
sed -i -- 's/MaxConcurrency string `json:"max_concurrency,omitempty"`/MaxConcurrency int64 `json:"max_concurrency,omitempty,string"`/g' backend/api/${API_VERSION}/go_http_client/recurring_run_model/${API_VERSION}_recurring_run.go
sed -i -- 's/IntervalSecond string `json:"interval_second,omitempty"`/IntervalSecond int64 `json:"interval_second,omitempty,string"`/g' backend/api/${API_VERSION}/go_http_client/recurring_run_model/${API_VERSION}_periodic_schedule.go
fi
sed -i -- 's/MaxConcurrency int64 `json:"max_concurrency,omitempty"`/MaxConcurrency int64 `json:"max_concurrency,omitempty,string"`/g' backend/api/${API_VERSION}/go_http_client/job_model/${API_VERSION}_job.go
sed -i -- 's/IntervalSecond int64 `json:"interval_second,omitempty"`/IntervalSecond int64 `json:"interval_second,omitempty,string"`/g' backend/api/${API_VERSION}/go_http_client/job_model/${API_VERSION}_periodic_schedule.go
sed -i -- 's/MaxConcurrency string `json:"max_concurrency,omitempty"`/MaxConcurrency int64 `json:"max_concurrency,omitempty,string"`/g' backend/api/${API_VERSION}/go_http_client/job_model/${API_VERSION}_job.go
sed -i -- 's/IntervalSecond string `json:"interval_second,omitempty"`/IntervalSecond int64 `json:"interval_second,omitempty,string"`/g' backend/api/${API_VERSION}/go_http_client/job_model/${API_VERSION}_periodic_schedule.go
# Execute the //go:generate directives in the generated code.
cd backend/api && go generate ./...

backend/api/v1/auth.proto (new file, 85 lines)
View File

@ -0,0 +1,85 @@
// Copyright 2020 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
option go_package = "github.com/kubeflow/pipelines/backend/api/v1/go_client";
package v1;
import "google/api/annotations.proto";
import "google/protobuf/empty.proto";
import "protoc-gen-openapiv2/options/annotations.proto";
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
responses: {
key: "default";
value: {
schema: {
json_schema: {
ref: ".v1.Status";
}
}
}
}
// Use bearer token for authorizing access to job service.
// Kubernetes client library (https://kubernetes.io/docs/reference/using-api/client-libraries/)
// uses bearer token as default for authorization. The section below
// ensures security definition object is generated in the swagger definition.
// For more details see https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
security_definitions: {
security: {
key: "Bearer";
value: {
type: TYPE_API_KEY;
in: IN_HEADER;
name: "authorization";
}
}
}
security: {
security_requirement: {
key: "Bearer";
value: {};
}
}
};
service AuthService {
rpc Authorize(AuthorizeRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
get: "/apis/v1/auth"
};
}
}
// Asks for authorization of an access by providing the resource's namespace, type,
// and verb. User identity is not part of the message because it is expected
// to be parsed from request headers. The caller should proxy the user request's headers.
message AuthorizeRequest {
// Type of resources in pipelines system.
enum Resources {
UNASSIGNED_RESOURCES = 0;
VIEWERS = 1;
}
// Type of verbs that act on the resources.
enum Verb {
UNASSIGNED_VERB = 0;
CREATE = 1;
GET = 2;
DELETE = 3;
}
string namespace = 1; // Namespace the resource belongs to.
Resources resources = 2; // Resource type asking for authorization.
Verb verb = 3; // Verb on the resource asking for authorization.
}
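
Over the HTTP mapping above, the AuthorizeRequest fields travel as query parameters on GET /apis/v1/auth, while identity comes from the proxied headers. A sketch of such a call (host, port, and token are placeholders, not values from this diff):

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Placeholder address and token; substitute your deployment's values.
	q := url.Values{}
	q.Set("namespace", "ns1")
	q.Set("resources", "VIEWERS")
	q.Set("verb", "GET")
	req, err := http.NewRequest("GET", "http://example-api-server:8888/apis/v1/auth?"+q.Encode(), nil)
	if err != nil {
		panic(err)
	}
	// User identity is parsed from headers, so the caller proxies them along.
	req.Header.Set("Authorization", "Bearer <token>")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 OK when authorized; an error status otherwise.
}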

backend/api/v1/error.proto

@@ -14,14 +14,14 @@
 syntax = "proto3";
-option go_package = "github.com/kubeflow/pipelines/backend/api/v1beta1/go_client";
-package api;
+option go_package = "github.com/kubeflow/pipelines/backend/api/v1/go_client";
+package v1;
 import "google/protobuf/any.proto";
 message Error {
-  string error_message = 1;
-  string error_details = 2;
+  string error_message = 1 [json_name = "error_message"];
+  string error_details = 2 [json_name = "error_details"];
 }
 message Status {

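The explicit json_name options keep the REST JSON in snake_case; without them, protojson would emit the default lowerCamelCase keys (errorMessage). A small sketch against the generated package (import path taken from the go_package option above):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"

	api "github.com/kubeflow/pipelines/backend/api/v1/go_client"
)

func main() {
	e := &api.Error{ErrorMessage: "boom", ErrorDetails: "details"}
	// json_name = "error_message" preserves the snake_case key in the output.
	b, err := protojson.Marshal(e)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"error_message":"boom","error_details":"details"}
}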
195 backend/api/v1/experiment.proto Normal file

@@ -0,0 +1,195 @@
// Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
option go_package = "github.com/kubeflow/pipelines/backend/api/v1/go_client";
package v1;
import "backend/api/v1/error.proto";
import "backend/api/v1/resource_reference.proto";
import "google/api/annotations.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
import "protoc-gen-openapiv2/options/annotations.proto";
option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
responses: {
key: "default";
value: {
schema: {
json_schema: {
ref: ".v1.Status";
}
}
}
}
// Use bearer token for authorizing access to job service.
// Kubernetes client library (https://kubernetes.io/docs/reference/using-api/client-libraries/)
// uses bearer token as default for authorization. The section below
// ensures security definition object is generated in the swagger definition.
// For more details see https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
security_definitions: {
security: {
key: "Bearer";
value: {
type: TYPE_API_KEY;
in: IN_HEADER;
name: "authorization";
}
}
}
security: {
security_requirement: {
key: "Bearer";
value: {};
}
}
};
service ExperimentService {
// Creates a new experiment.
rpc CreateExperiment(CreateExperimentRequest) returns (Experiment) {
option (google.api.http) = {
post: "/apis/v1/experiments"
body: "experiment"
};
}
// Finds a specific experiment by ID.
rpc GetExperiment(GetExperimentRequest) returns (Experiment) {
option (google.api.http) = {
get: "/apis/v1/experiments/{id}"
};
}
// Finds all experiments. Supports pagination, and sorting on certain fields.
rpc ListExperiment(ListExperimentsRequest) returns (ListExperimentsResponse) {
option (google.api.http) = {
get: "/apis/v1/experiments"
};
}
// Deletes an experiment without deleting the experiment's runs and jobs. To
// avoid unexpected behaviors, delete an experiment's runs and jobs before
// deleting the experiment.
rpc DeleteExperiment(DeleteExperimentRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
delete: "/apis/v1/experiments/{id}"
};
}
// Archives an experiment and the experiment's runs and jobs.
rpc ArchiveExperiment(ArchiveExperimentRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
post: "/apis/v1/experiments/{id}:archive"
};
}
// Restores an archived experiment. The experiment's archived runs and jobs
// will stay archived.
rpc UnarchiveExperiment(UnarchiveExperimentRequest) returns (google.protobuf.Empty) {
option (google.api.http) = {
post: "/apis/v1/experiments/{id}:unarchive"
};
}
}
message CreateExperimentRequest {
// The experiment to be created.
Experiment experiment = 1;
}
message GetExperimentRequest {
// The ID of the experiment to be retrieved.
string id = 1;
}
message ListExperimentsRequest {
// A page token to request the next page of results. The token is acquired
// from the nextPageToken field of the response from the previous
// ListExperiment call or can be omitted when fetching the first page.
string page_token = 1 [json_name = "page_token"];
// The number of experiments to be listed per page. If there are more
// experiments than this number, the response message will contain a
// nextPageToken field you can use to fetch the next page.
int32 page_size = 2 [json_name = "page_size"];
// Can be in the form of "field_name", "field_name asc" or "field_name desc".
// Ascending by default.
string sort_by = 3 [json_name = "sort_by"];
// A url-encoded, JSON-serialized Filter protocol buffer (see
// [filter.proto](https://github.com/kubeflow/pipelines/blob/master/backend/api/v1/filter.proto)).
string filter = 4;
// What resource reference to filter on.
// For Experiment, the only valid resource type is Namespace. A sample query string could be
// resource_reference_key.type=NAMESPACE&resource_reference_key.id=ns1
ResourceKey resource_reference_key = 5 [json_name = "resource_reference_key"];
}
message ListExperimentsResponse {
// A list of experiments returned.
repeated Experiment experiments = 1;
// The total number of experiments for the given query.
int32 total_size = 3 [json_name = "total_size"];
// The token to list the next page of experiments.
string next_page_token = 2 [json_name = "next_page_token"];
}
message DeleteExperimentRequest {
// The ID of the experiment to be deleted.
string id = 1;
}
message Experiment {
// Output. Unique experiment ID. Generated by API server.
string id = 1;
// Required input field. Unique experiment name provided by user.
string name = 2;
// Optional input field. Describes the purpose of the experiment.
string description = 3;
// Output. The time that the experiment was created.
google.protobuf.Timestamp created_at = 4 [json_name = "created_at"];
// Optional input field. Specify which resource this run belongs to.
// For Experiment, the only valid resource reference is a single Namespace.
repeated ResourceReference resource_references = 5 [json_name = "resource_references"];
enum StorageState {
STORAGESTATE_UNSPECIFIED = 0;
STORAGESTATE_AVAILABLE = 1;
STORAGESTATE_ARCHIVED = 2;
}
// Output. Specifies whether this experiment is in archived or available state.
StorageState storage_state = 6 [json_name = "storage_state"];
}
message ArchiveExperimentRequest {
// The ID of the experiment to be archived.
string id = 1;
}
message UnarchiveExperimentRequest {
// The ID of the experiment to be restored.
string id = 1;
}
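
Putting the service above together: a sketch that creates an experiment and then pages through ListExperiment until next_page_token comes back empty, using the generated Go client (the gRPC address is a placeholder):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	api "github.com/kubeflow/pipelines/backend/api/v1/go_client"
)

func main() {
	// Placeholder address for the API server's gRPC port.
	conn, err := grpc.Dial("example-api-server:8887", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := api.NewExperimentServiceClient(conn)
	ctx := context.Background()

	exp, err := client.CreateExperiment(ctx, &api.CreateExperimentRequest{
		Experiment: &api.Experiment{Name: "demo", Description: "created from the sketch"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created:", exp.Id)

	// Page until the server returns an empty next_page_token.
	token := ""
	for {
		resp, err := client.ListExperiment(ctx, &api.ListExperimentsRequest{
			PageSize:  10,
			PageToken: token,
			SortBy:    "created_at desc",
		})
		if err != nil {
			panic(err)
		}
		for _, e := range resp.Experiments {
			fmt.Println(e.Id, e.Name)
		}
		if token = resp.NextPageToken; token == "" {
			break
		}
	}
}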

129 backend/api/v1/filter.proto Normal file

@@ -0,0 +1,129 @@
// Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
option go_package = "github.com/kubeflow/pipelines/backend/api/v1/go_client";
package v1;
import "google/api/annotations.proto";
import "google/protobuf/timestamp.proto";
// Predicate captures individual conditions that must be true for a resource
// being filtered.
message Predicate {
// Op is the operation to apply.
enum Op {
UNKNOWN = 0;
// Operators on scalar values. Only applies to one of |int_value|,
// |long_value|, |string_value| or |timestamp_value|.
EQUALS = 1;
NOT_EQUALS = 2;
GREATER_THAN = 3;
GREATER_THAN_EQUALS = 5;
LESS_THAN = 6;
LESS_THAN_EQUALS = 7;
// Checks if the value is a member of a given array, which should be one of
// |int_values|, |long_values| or |string_values|.
IN = 8;
// Checks if the value contains |string_value| as a substring match. Only
// applies to |string_value|.
IS_SUBSTRING = 9;
}
Op op = 1;
string key = 2;
oneof value {
int32 int_value = 3 [json_name = "int_value"];
int64 long_value = 4 [json_name = "long_value"];
string string_value = 5 [json_name = "string_value"];
// Timestamp values will be converted to Unix time (seconds since the epoch)
// prior to being used in a filtering operation.
google.protobuf.Timestamp timestamp_value = 6 [json_name = "timestamp_value"];
// Array values below are only meant to be used by the IN operator.
IntValues int_values = 7 [json_name = "int_values"];
LongValues long_values = 8 [json_name = "long_values"];
StringValues string_values = 9 [json_name = "string_values"];
}
}
message IntValues {
repeated int32 values = 1;
}
message StringValues {
repeated string values = 2;
}
message LongValues {
repeated int64 values = 3;
}
// Filter is used to filter resources returned from a ListXXX request.
//
// Example filters:
// 1) Filter runs with status = 'Running'
// filter {
// predicate {
// key: "status"
// op: EQUALS
// string_value: "Running"
// }
// }
//
// 2) Filter runs that succeeded since Dec 1, 2018
// filter {
// predicate {
// key: "status"
// op: EQUALS
// string_value: "Succeeded"
// }
// predicate {
// key: "created_at"
// op: GREATER_THAN
// timestamp_value {
// seconds: 1543651200
// }
// }
// }
//
// 3) Filter runs with one of labels 'label_1' or 'label_2'
//
// filter {
// predicate {
// key: "label"
// op: IN
// string_values {
// value: 'label_1'
// value: 'label_2'
// }
// }
// }
message Filter {
// All predicates are AND-ed when this filter is applied.
repeated Predicate predicates = 1;
}
// This dummy service is required so that grpc-gateway will generate Swagger
// definitions for the Filter message. Otherwise, it does not get generated
// since Filter itself is not used in any of the RPC calls - only a serialized
// encoded version of it is used.
service DummyFilterService {
rpc GetFilter(Filter) returns (Filter) {}
}
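
Since the List* endpoints take the Filter as a URL-encoded, JSON-serialized proto, example (1) above would be built and encoded roughly like this (a sketch against the generated package):

package main

import (
	"fmt"
	"net/url"

	"google.golang.org/protobuf/encoding/protojson"

	api "github.com/kubeflow/pipelines/backend/api/v1/go_client"
)

func main() {
	// Example (1) above: runs whose status equals "Running".
	f := &api.Filter{
		Predicates: []*api.Predicate{{
			Key:   "status",
			Op:    api.Predicate_EQUALS,
			Value: &api.Predicate_StringValue{StringValue: "Running"},
		}},
	}
	b, err := protojson.Marshal(f)
	if err != nil {
		panic(err)
	}
	// Pass the result as the `filter` query parameter of a List call.
	fmt.Println("filter=" + url.QueryEscape(string(b)))
}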

406 backend/api/v1/go_client/auth.pb.go Normal file

@@ -0,0 +1,406 @@
// Copyright 2020 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: backend/api/v1/auth.proto
package go_client
import (
context "context"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Type of resources in pipelines system.
type AuthorizeRequest_Resources int32
const (
AuthorizeRequest_UNASSIGNED_RESOURCES AuthorizeRequest_Resources = 0
AuthorizeRequest_VIEWERS AuthorizeRequest_Resources = 1
)
// Enum value maps for AuthorizeRequest_Resources.
var (
AuthorizeRequest_Resources_name = map[int32]string{
0: "UNASSIGNED_RESOURCES",
1: "VIEWERS",
}
AuthorizeRequest_Resources_value = map[string]int32{
"UNASSIGNED_RESOURCES": 0,
"VIEWERS": 1,
}
)
func (x AuthorizeRequest_Resources) Enum() *AuthorizeRequest_Resources {
p := new(AuthorizeRequest_Resources)
*p = x
return p
}
func (x AuthorizeRequest_Resources) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (AuthorizeRequest_Resources) Descriptor() protoreflect.EnumDescriptor {
return file_backend_api_v1_auth_proto_enumTypes[0].Descriptor()
}
func (AuthorizeRequest_Resources) Type() protoreflect.EnumType {
return &file_backend_api_v1_auth_proto_enumTypes[0]
}
func (x AuthorizeRequest_Resources) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use AuthorizeRequest_Resources.Descriptor instead.
func (AuthorizeRequest_Resources) EnumDescriptor() ([]byte, []int) {
return file_backend_api_v1_auth_proto_rawDescGZIP(), []int{0, 0}
}
// Type of verbs that act on the resources.
type AuthorizeRequest_Verb int32
const (
AuthorizeRequest_UNASSIGNED_VERB AuthorizeRequest_Verb = 0
AuthorizeRequest_CREATE AuthorizeRequest_Verb = 1
AuthorizeRequest_GET AuthorizeRequest_Verb = 2
AuthorizeRequest_DELETE AuthorizeRequest_Verb = 3
)
// Enum value maps for AuthorizeRequest_Verb.
var (
AuthorizeRequest_Verb_name = map[int32]string{
0: "UNASSIGNED_VERB",
1: "CREATE",
2: "GET",
3: "DELETE",
}
AuthorizeRequest_Verb_value = map[string]int32{
"UNASSIGNED_VERB": 0,
"CREATE": 1,
"GET": 2,
"DELETE": 3,
}
)
func (x AuthorizeRequest_Verb) Enum() *AuthorizeRequest_Verb {
p := new(AuthorizeRequest_Verb)
*p = x
return p
}
func (x AuthorizeRequest_Verb) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (AuthorizeRequest_Verb) Descriptor() protoreflect.EnumDescriptor {
return file_backend_api_v1_auth_proto_enumTypes[1].Descriptor()
}
func (AuthorizeRequest_Verb) Type() protoreflect.EnumType {
return &file_backend_api_v1_auth_proto_enumTypes[1]
}
func (x AuthorizeRequest_Verb) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use AuthorizeRequest_Verb.Descriptor instead.
func (AuthorizeRequest_Verb) EnumDescriptor() ([]byte, []int) {
return file_backend_api_v1_auth_proto_rawDescGZIP(), []int{0, 1}
}
// Asks for authorization of an access by providing the resource's namespace, type,
// and verb. User identity is not part of the message because it is expected
// to be parsed from request headers. The caller should proxy the user request's headers.
type AuthorizeRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` // Namespace the resource belongs to.
Resources AuthorizeRequest_Resources `protobuf:"varint,2,opt,name=resources,proto3,enum=v1.AuthorizeRequest_Resources" json:"resources,omitempty"` // Resource type asking for authorization.
Verb AuthorizeRequest_Verb `protobuf:"varint,3,opt,name=verb,proto3,enum=v1.AuthorizeRequest_Verb" json:"verb,omitempty"` // Verb on the resource asking for authorization.
}
func (x *AuthorizeRequest) Reset() {
*x = AuthorizeRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_auth_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *AuthorizeRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*AuthorizeRequest) ProtoMessage() {}
func (x *AuthorizeRequest) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_auth_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use AuthorizeRequest.ProtoReflect.Descriptor instead.
func (*AuthorizeRequest) Descriptor() ([]byte, []int) {
return file_backend_api_v1_auth_proto_rawDescGZIP(), []int{0}
}
func (x *AuthorizeRequest) GetNamespace() string {
if x != nil {
return x.Namespace
}
return ""
}
func (x *AuthorizeRequest) GetResources() AuthorizeRequest_Resources {
if x != nil {
return x.Resources
}
return AuthorizeRequest_UNASSIGNED_RESOURCES
}
func (x *AuthorizeRequest) GetVerb() AuthorizeRequest_Verb {
if x != nil {
return x.Verb
}
return AuthorizeRequest_UNASSIGNED_VERB
}
var File_backend_api_v1_auth_proto protoreflect.FileDescriptor
var file_backend_api_v1_auth_proto_rawDesc = []byte{
0x0a, 0x19, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
0x2f, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x31, 0x1a,
0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65,
0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32,
0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8f, 0x02, 0x0a, 0x10, 0x41,
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x3c, 0x0a,
0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e,
0x32, 0x1e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x04, 0x76,
0x65, 0x72, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x76, 0x31, 0x2e, 0x41,
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
0x56, 0x65, 0x72, 0x62, 0x52, 0x04, 0x76, 0x65, 0x72, 0x62, 0x22, 0x32, 0x0a, 0x09, 0x52, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x4e, 0x41, 0x53, 0x53,
0x49, 0x47, 0x4e, 0x45, 0x44, 0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x53, 0x10,
0x00, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x49, 0x45, 0x57, 0x45, 0x52, 0x53, 0x10, 0x01, 0x22, 0x3c,
0x0a, 0x04, 0x56, 0x65, 0x72, 0x62, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x53, 0x53, 0x49,
0x47, 0x4e, 0x45, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x42, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x43,
0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x02,
0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x03, 0x32, 0x5f, 0x0a, 0x0b,
0x41, 0x75, 0x74, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x50, 0x0a, 0x09, 0x41,
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x12, 0x14, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75,
0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x15, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0f, 0x12, 0x0d,
0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x42, 0x87, 0x01,
0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62,
0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2f,
0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x67,
0x6f, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x92, 0x41, 0x4c, 0x52, 0x1b, 0x0a, 0x07, 0x64,
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x10, 0x12, 0x0e, 0x0a, 0x0c, 0x1a, 0x0a, 0x2e, 0x76,
0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5a, 0x1f, 0x0a, 0x1d, 0x0a, 0x06, 0x42, 0x65,
0x61, 0x72, 0x65, 0x72, 0x12, 0x13, 0x08, 0x02, 0x1a, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x02, 0x62, 0x0c, 0x0a, 0x0a, 0x0a, 0x06, 0x42,
0x65, 0x61, 0x72, 0x65, 0x72, 0x12, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_backend_api_v1_auth_proto_rawDescOnce sync.Once
file_backend_api_v1_auth_proto_rawDescData = file_backend_api_v1_auth_proto_rawDesc
)
func file_backend_api_v1_auth_proto_rawDescGZIP() []byte {
file_backend_api_v1_auth_proto_rawDescOnce.Do(func() {
file_backend_api_v1_auth_proto_rawDescData = protoimpl.X.CompressGZIP(file_backend_api_v1_auth_proto_rawDescData)
})
return file_backend_api_v1_auth_proto_rawDescData
}
var file_backend_api_v1_auth_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_backend_api_v1_auth_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_backend_api_v1_auth_proto_goTypes = []interface{}{
(AuthorizeRequest_Resources)(0), // 0: v1.AuthorizeRequest.Resources
(AuthorizeRequest_Verb)(0), // 1: v1.AuthorizeRequest.Verb
(*AuthorizeRequest)(nil), // 2: v1.AuthorizeRequest
(*emptypb.Empty)(nil), // 3: google.protobuf.Empty
}
var file_backend_api_v1_auth_proto_depIdxs = []int32{
0, // 0: v1.AuthorizeRequest.resources:type_name -> v1.AuthorizeRequest.Resources
1, // 1: v1.AuthorizeRequest.verb:type_name -> v1.AuthorizeRequest.Verb
2, // 2: v1.AuthService.Authorize:input_type -> v1.AuthorizeRequest
3, // 3: v1.AuthService.Authorize:output_type -> google.protobuf.Empty
3, // [3:4] is the sub-list for method output_type
2, // [2:3] is the sub-list for method input_type
2, // [2:2] is the sub-list for extension type_name
2, // [2:2] is the sub-list for extension extendee
0, // [0:2] is the sub-list for field type_name
}
func init() { file_backend_api_v1_auth_proto_init() }
func file_backend_api_v1_auth_proto_init() {
if File_backend_api_v1_auth_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_backend_api_v1_auth_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*AuthorizeRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_backend_api_v1_auth_proto_rawDesc,
NumEnums: 2,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_backend_api_v1_auth_proto_goTypes,
DependencyIndexes: file_backend_api_v1_auth_proto_depIdxs,
EnumInfos: file_backend_api_v1_auth_proto_enumTypes,
MessageInfos: file_backend_api_v1_auth_proto_msgTypes,
}.Build()
File_backend_api_v1_auth_proto = out.File
file_backend_api_v1_auth_proto_rawDesc = nil
file_backend_api_v1_auth_proto_goTypes = nil
file_backend_api_v1_auth_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// AuthServiceClient is the client API for AuthService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type AuthServiceClient interface {
Authorize(ctx context.Context, in *AuthorizeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
type authServiceClient struct {
cc grpc.ClientConnInterface
}
func NewAuthServiceClient(cc grpc.ClientConnInterface) AuthServiceClient {
return &authServiceClient{cc}
}
func (c *authServiceClient) Authorize(ctx context.Context, in *AuthorizeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/v1.AuthService/Authorize", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// AuthServiceServer is the server API for AuthService service.
type AuthServiceServer interface {
Authorize(context.Context, *AuthorizeRequest) (*emptypb.Empty, error)
}
// UnimplementedAuthServiceServer can be embedded to have forward compatible implementations.
type UnimplementedAuthServiceServer struct {
}
func (*UnimplementedAuthServiceServer) Authorize(context.Context, *AuthorizeRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method Authorize not implemented")
}
func RegisterAuthServiceServer(s *grpc.Server, srv AuthServiceServer) {
s.RegisterService(&_AuthService_serviceDesc, srv)
}
func _AuthService_Authorize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AuthorizeRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(AuthServiceServer).Authorize(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v1.AuthService/Authorize",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(AuthServiceServer).Authorize(ctx, req.(*AuthorizeRequest))
}
return interceptor(ctx, in, info, handler)
}
var _AuthService_serviceDesc = grpc.ServiceDesc{
ServiceName: "v1.AuthService",
HandlerType: (*AuthServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Authorize",
Handler: _AuthService_Authorize_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/v1/auth.proto",
}
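
Calling the service above with the generated client follows the usual gRPC pattern; a short sketch (placeholder address):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	api "github.com/kubeflow/pipelines/backend/api/v1/go_client"
)

func main() {
	conn, err := grpc.Dial("example-api-server:8887", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Authorize returns an empty message on success and a gRPC error otherwise.
	_, err = api.NewAuthServiceClient(conn).Authorize(context.Background(), &api.AuthorizeRequest{
		Namespace: "ns1",
		Resources: api.AuthorizeRequest_VIEWERS,
		Verb:      api.AuthorizeRequest_GET,
	})
	fmt.Println("authorized:", err == nil)
}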

173 backend/api/v1/go_client/auth.pb.gw.go Normal file

@@ -0,0 +1,173 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/v1/auth.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = metadata.Join
var (
filter_AuthService_Authorize_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_AuthService_Authorize_0(ctx context.Context, marshaler runtime.Marshaler, client AuthServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq AuthorizeRequest
var metadata runtime.ServerMetadata
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AuthService_Authorize_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Authorize(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_AuthService_Authorize_0(ctx context.Context, marshaler runtime.Marshaler, server AuthServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq AuthorizeRequest
var metadata runtime.ServerMetadata
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_AuthService_Authorize_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.Authorize(ctx, &protoReq)
return msg, metadata, err
}
// RegisterAuthServiceHandlerServer registers the http handlers for service AuthService to "mux".
// UnaryRPC :call AuthServiceServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAuthServiceHandlerFromEndpoint instead.
func RegisterAuthServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AuthServiceServer) error {
mux.Handle("GET", pattern_AuthService_Authorize_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.AuthService/Authorize", runtime.WithHTTPPathPattern("/apis/v1/auth"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_AuthService_Authorize_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_AuthService_Authorize_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
// RegisterAuthServiceHandlerFromEndpoint is same as RegisterAuthServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterAuthServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterAuthServiceHandler(ctx, mux, conn)
}
// RegisterAuthServiceHandler registers the http handlers for service AuthService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterAuthServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterAuthServiceHandlerClient(ctx, mux, NewAuthServiceClient(conn))
}
// RegisterAuthServiceHandlerClient registers the http handlers for service AuthService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AuthServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "AuthServiceClient" to call the correct interceptors.
func RegisterAuthServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AuthServiceClient) error {
mux.Handle("GET", pattern_AuthService_Authorize_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.AuthService/Authorize", runtime.WithHTTPPathPattern("/apis/v1/auth"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_AuthService_Authorize_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_AuthService_Authorize_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_AuthService_Authorize_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1", "auth"}, ""))
)
var (
forward_AuthService_Authorize_0 = runtime.ForwardResponseMessage
)
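
Wiring the gateway above into an HTTP server follows the pattern the generated comments describe: RegisterAuthServiceHandlerFromEndpoint dials the gRPC backend and closes the connection when ctx is done. A sketch (addresses are placeholders):

package main

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	api "github.com/kubeflow/pipelines/backend/api/v1/go_client"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}

	// Dials the gRPC backend and tears the connection down when ctx ends.
	if err := api.RegisterAuthServiceHandlerFromEndpoint(ctx, mux, "localhost:8887", opts); err != nil {
		panic(err)
	}

	// GET /apis/v1/auth now proxies to v1.AuthService/Authorize.
	if err := http.ListenAndServe(":8888", mux); err != nil {
		panic(err)
	}
}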

257 backend/api/v1/go_client/error.pb.go Normal file

@@ -0,0 +1,257 @@
// Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: backend/api/v1/error.proto
package go_client
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Error struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
ErrorMessage string `protobuf:"bytes,1,opt,name=error_message,proto3" json:"error_message,omitempty"`
ErrorDetails string `protobuf:"bytes,2,opt,name=error_details,proto3" json:"error_details,omitempty"`
}
func (x *Error) Reset() {
*x = Error{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_error_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Error) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Error) ProtoMessage() {}
func (x *Error) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_error_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Error.ProtoReflect.Descriptor instead.
func (*Error) Descriptor() ([]byte, []int) {
return file_backend_api_v1_error_proto_rawDescGZIP(), []int{0}
}
func (x *Error) GetErrorMessage() string {
if x != nil {
return x.ErrorMessage
}
return ""
}
func (x *Error) GetErrorDetails() string {
if x != nil {
return x.ErrorDetails
}
return ""
}
type Status struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
}
func (x *Status) Reset() {
*x = Status{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_error_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Status) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Status) ProtoMessage() {}
func (x *Status) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_error_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Status.ProtoReflect.Descriptor instead.
func (*Status) Descriptor() ([]byte, []int) {
return file_backend_api_v1_error_proto_rawDescGZIP(), []int{1}
}
func (x *Status) GetError() string {
if x != nil {
return x.Error
}
return ""
}
func (x *Status) GetCode() int32 {
if x != nil {
return x.Code
}
return 0
}
func (x *Status) GetDetails() []*anypb.Any {
if x != nil {
return x.Details
}
return nil
}
var File_backend_api_v1_error_proto protoreflect.FileDescriptor
var file_backend_api_v1_error_proto_rawDesc = []byte{
0x0a, 0x1a, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x31,
0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x53, 0x0a, 0x05, 0x45,
0x72, 0x72, 0x6f, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x72, 0x72,
0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
0x22, 0x62, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04,
0x63, 0x6f, 0x64, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18,
0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74,
0x61, 0x69, 0x6c, 0x73, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x69, 0x70, 0x65,
0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70,
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x6f, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_backend_api_v1_error_proto_rawDescOnce sync.Once
file_backend_api_v1_error_proto_rawDescData = file_backend_api_v1_error_proto_rawDesc
)
func file_backend_api_v1_error_proto_rawDescGZIP() []byte {
file_backend_api_v1_error_proto_rawDescOnce.Do(func() {
file_backend_api_v1_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_backend_api_v1_error_proto_rawDescData)
})
return file_backend_api_v1_error_proto_rawDescData
}
var file_backend_api_v1_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_backend_api_v1_error_proto_goTypes = []interface{}{
(*Error)(nil), // 0: v1.Error
(*Status)(nil), // 1: v1.Status
(*anypb.Any)(nil), // 2: google.protobuf.Any
}
var file_backend_api_v1_error_proto_depIdxs = []int32{
2, // 0: v1.Status.details:type_name -> google.protobuf.Any
1, // [1:1] is the sub-list for method output_type
1, // [1:1] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_backend_api_v1_error_proto_init() }
func file_backend_api_v1_error_proto_init() {
if File_backend_api_v1_error_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_backend_api_v1_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Error); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_backend_api_v1_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Status); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_backend_api_v1_error_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_backend_api_v1_error_proto_goTypes,
DependencyIndexes: file_backend_api_v1_error_proto_depIdxs,
MessageInfos: file_backend_api_v1_error_proto_msgTypes,
}.Build()
File_backend_api_v1_error_proto = out.File
file_backend_api_v1_error_proto_rawDesc = nil
file_backend_api_v1_error_proto_goTypes = nil
file_backend_api_v1_error_proto_depIdxs = nil
}

backend/api/v1/go_client/experiment.pb.go (file diff suppressed because it is too large)

670 backend/api/v1/go_client/experiment.pb.gw.go Normal file

@@ -0,0 +1,670 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/v1/experiment.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = metadata.Join
func request_ExperimentService_CreateExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreateExperimentRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Experiment); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreateExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ExperimentService_CreateExperiment_0(ctx context.Context, marshaler runtime.Marshaler, server ExperimentServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreateExperimentRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Experiment); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.CreateExperiment(ctx, &protoReq)
return msg, metadata, err
}
func request_ExperimentService_GetExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.GetExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ExperimentService_GetExperiment_0(ctx context.Context, marshaler runtime.Marshaler, server ExperimentServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := server.GetExperiment(ctx, &protoReq)
return msg, metadata, err
}
var (
filter_ExperimentService_ListExperiment_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_ExperimentService_ListExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListExperimentsRequest
var metadata runtime.ServerMetadata
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ExperimentService_ListExperiment_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ExperimentService_ListExperiment_0(ctx context.Context, marshaler runtime.Marshaler, server ExperimentServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListExperimentsRequest
var metadata runtime.ServerMetadata
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ExperimentService_ListExperiment_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ListExperiment(ctx, &protoReq)
return msg, metadata, err
}
func request_ExperimentService_DeleteExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DeleteExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ExperimentService_DeleteExperiment_0(ctx context.Context, marshaler runtime.Marshaler, server ExperimentServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := server.DeleteExperiment(ctx, &protoReq)
return msg, metadata, err
}
func request_ExperimentService_ArchiveExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ArchiveExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.ArchiveExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ExperimentService_ArchiveExperiment_0(ctx context.Context, marshaler runtime.Marshaler, server ExperimentServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ArchiveExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := server.ArchiveExperiment(ctx, &protoReq)
return msg, metadata, err
}
func request_ExperimentService_UnarchiveExperiment_0(ctx context.Context, marshaler runtime.Marshaler, client ExperimentServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq UnarchiveExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.UnarchiveExperiment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ExperimentService_UnarchiveExperiment_0(ctx context.Context, marshaler runtime.Marshaler, server ExperimentServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq UnarchiveExperimentRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := server.UnarchiveExperiment(ctx, &protoReq)
return msg, metadata, err
}
// RegisterExperimentServiceHandlerServer registers the http handlers for service ExperimentService to "mux".
// UnaryRPC :call ExperimentServiceServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterExperimentServiceHandlerFromEndpoint instead.
func RegisterExperimentServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ExperimentServiceServer) error {
mux.Handle("POST", pattern_ExperimentService_CreateExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.ExperimentService/CreateExperiment", runtime.WithHTTPPathPattern("/apis/v1/experiments"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_ExperimentService_CreateExperiment_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_CreateExperiment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_ExperimentService_GetExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.ExperimentService/GetExperiment", runtime.WithHTTPPathPattern("/apis/v1/experiments/{id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_ExperimentService_GetExperiment_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_GetExperiment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_ExperimentService_ListExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.ExperimentService/ListExperiment", runtime.WithHTTPPathPattern("/apis/v1/experiments"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_ExperimentService_ListExperiment_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_ListExperiment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_ExperimentService_DeleteExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.ExperimentService/DeleteExperiment", runtime.WithHTTPPathPattern("/apis/v1/experiments/{id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_ExperimentService_DeleteExperiment_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_DeleteExperiment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_ExperimentService_ArchiveExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.ExperimentService/ArchiveExperiment", runtime.WithHTTPPathPattern("/apis/v1/experiments/{id}:archive"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_ExperimentService_ArchiveExperiment_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_ArchiveExperiment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_ExperimentService_UnarchiveExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.ExperimentService/UnarchiveExperiment", runtime.WithHTTPPathPattern("/apis/v1/experiments/{id}:unarchive"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_ExperimentService_UnarchiveExperiment_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_UnarchiveExperiment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
// RegisterExperimentServiceHandlerFromEndpoint is the same as RegisterExperimentServiceHandler but
// automatically dials "endpoint" and closes the connection when "ctx" is done.
func RegisterExperimentServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterExperimentServiceHandler(ctx, mux, conn)
}
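// Editorial sketch, not part of the generated file: one plausible way to wire
// this gateway in front of a gRPC API server. The endpoint "localhost:8888",
// the HTTP port ":8080", and the function name are all hypothetical.
func exampleServeExperimentGateway() error {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    mux := runtime.NewServeMux()
    // grpc.WithInsecure matches the grpc.Dial era of this file; a production
    // deployment would use transport credentials instead.
    opts := []grpc.DialOption{grpc.WithInsecure()}
    if err := RegisterExperimentServiceHandlerFromEndpoint(ctx, mux, "localhost:8888", opts); err != nil {
        return err
    }
    // The mux now serves e.g. POST /apis/v1/experiments and GET /apis/v1/experiments/{id}.
    return http.ListenAndServe(":8080", mux)
}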
// RegisterExperimentServiceHandler registers the http handlers for service ExperimentService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterExperimentServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterExperimentServiceHandlerClient(ctx, mux, NewExperimentServiceClient(conn))
}
// RegisterExperimentServiceHandlerClient registers the http handlers for service ExperimentService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ExperimentServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ExperimentServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ExperimentServiceClient" to call the correct interceptors.
func RegisterExperimentServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ExperimentServiceClient) error {
mux.Handle("POST", pattern_ExperimentService_CreateExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.ExperimentService/CreateExperiment", runtime.WithHTTPPathPattern("/apis/v1/experiments"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_CreateExperiment_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_CreateExperiment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_ExperimentService_GetExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.ExperimentService/GetExperiment", runtime.WithHTTPPathPattern("/apis/v1/experiments/{id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_GetExperiment_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_GetExperiment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_ExperimentService_ListExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.ExperimentService/ListExperiment", runtime.WithHTTPPathPattern("/apis/v1/experiments"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_ListExperiment_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_ListExperiment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_ExperimentService_DeleteExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.ExperimentService/DeleteExperiment", runtime.WithHTTPPathPattern("/apis/v1/experiments/{id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_DeleteExperiment_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_DeleteExperiment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_ExperimentService_ArchiveExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.ExperimentService/ArchiveExperiment", runtime.WithHTTPPathPattern("/apis/v1/experiments/{id}:archive"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_ArchiveExperiment_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_ArchiveExperiment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_ExperimentService_UnarchiveExperiment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.ExperimentService/UnarchiveExperiment", runtime.WithHTTPPathPattern("/apis/v1/experiments/{id}:unarchive"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ExperimentService_UnarchiveExperiment_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ExperimentService_UnarchiveExperiment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
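// Editorial note: the patterns below are the compiled forms of the HTTP path
// templates registered above. CreateExperiment and ListExperiment share
// "/apis/v1/experiments", the Get/Delete patterns capture {id} from
// "/apis/v1/experiments/{id}", and the archive patterns add the ":archive"
// and ":unarchive" verb suffixes.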
var (
pattern_ExperimentService_CreateExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1", "experiments"}, ""))
pattern_ExperimentService_GetExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1", "experiments", "id"}, ""))
pattern_ExperimentService_ListExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1", "experiments"}, ""))
pattern_ExperimentService_DeleteExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1", "experiments", "id"}, ""))
pattern_ExperimentService_ArchiveExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1", "experiments", "id"}, "archive"))
pattern_ExperimentService_UnarchiveExperiment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1", "experiments", "id"}, "unarchive"))
)
var (
forward_ExperimentService_CreateExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_GetExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_ListExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_DeleteExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_ArchiveExperiment_0 = runtime.ForwardResponseMessage
forward_ExperimentService_UnarchiveExperiment_0 = runtime.ForwardResponseMessage
)

View File

@@ -0,0 +1,796 @@
// Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: backend/api/v1/filter.proto
package go_client
import (
context "context"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Op is the operation to apply.
type Predicate_Op int32
const (
Predicate_UNKNOWN Predicate_Op = 0
// Operators on scalar values. Only applies to one of |int_value|,
// |long_value|, |string_value| or |timestamp_value|.
Predicate_EQUALS Predicate_Op = 1
Predicate_NOT_EQUALS Predicate_Op = 2
Predicate_GREATER_THAN Predicate_Op = 3
Predicate_GREATER_THAN_EQUALS Predicate_Op = 5
Predicate_LESS_THAN Predicate_Op = 6
Predicate_LESS_THAN_EQUALS Predicate_Op = 7
// Checks if the value is a member of a given array, which should be one of
// |int_values|, |long_values| or |string_values|.
Predicate_IN Predicate_Op = 8
// Checks if the value contains |string_value| as a substring match. Only
// applies to |string_value|.
Predicate_IS_SUBSTRING Predicate_Op = 9
)
// Enum value maps for Predicate_Op.
var (
Predicate_Op_name = map[int32]string{
0: "UNKNOWN",
1: "EQUALS",
2: "NOT_EQUALS",
3: "GREATER_THAN",
5: "GREATER_THAN_EQUALS",
6: "LESS_THAN",
7: "LESS_THAN_EQUALS",
8: "IN",
9: "IS_SUBSTRING",
}
Predicate_Op_value = map[string]int32{
"UNKNOWN": 0,
"EQUALS": 1,
"NOT_EQUALS": 2,
"GREATER_THAN": 3,
"GREATER_THAN_EQUALS": 5,
"LESS_THAN": 6,
"LESS_THAN_EQUALS": 7,
"IN": 8,
"IS_SUBSTRING": 9,
}
)
func (x Predicate_Op) Enum() *Predicate_Op {
p := new(Predicate_Op)
*p = x
return p
}
func (x Predicate_Op) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Predicate_Op) Descriptor() protoreflect.EnumDescriptor {
return file_backend_api_v1_filter_proto_enumTypes[0].Descriptor()
}
func (Predicate_Op) Type() protoreflect.EnumType {
return &file_backend_api_v1_filter_proto_enumTypes[0]
}
func (x Predicate_Op) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Predicate_Op.Descriptor instead.
func (Predicate_Op) EnumDescriptor() ([]byte, []int) {
return file_backend_api_v1_filter_proto_rawDescGZIP(), []int{0, 0}
}
// Predicate captures individual conditions that must be true for a resource
// being filtered.
type Predicate struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Op Predicate_Op `protobuf:"varint,1,opt,name=op,proto3,enum=v1.Predicate_Op" json:"op,omitempty"`
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
// Types that are assignable to Value:
// *Predicate_IntValue
// *Predicate_LongValue
// *Predicate_StringValue
// *Predicate_TimestampValue
// *Predicate_IntValues
// *Predicate_LongValues
// *Predicate_StringValues
Value isPredicate_Value `protobuf_oneof:"value"`
}
func (x *Predicate) Reset() {
*x = Predicate{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_filter_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Predicate) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Predicate) ProtoMessage() {}
func (x *Predicate) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_filter_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Predicate.ProtoReflect.Descriptor instead.
func (*Predicate) Descriptor() ([]byte, []int) {
return file_backend_api_v1_filter_proto_rawDescGZIP(), []int{0}
}
func (x *Predicate) GetOp() Predicate_Op {
if x != nil {
return x.Op
}
return Predicate_UNKNOWN
}
func (x *Predicate) GetKey() string {
if x != nil {
return x.Key
}
return ""
}
func (m *Predicate) GetValue() isPredicate_Value {
if m != nil {
return m.Value
}
return nil
}
func (x *Predicate) GetIntValue() int32 {
if x, ok := x.GetValue().(*Predicate_IntValue); ok {
return x.IntValue
}
return 0
}
func (x *Predicate) GetLongValue() int64 {
if x, ok := x.GetValue().(*Predicate_LongValue); ok {
return x.LongValue
}
return 0
}
func (x *Predicate) GetStringValue() string {
if x, ok := x.GetValue().(*Predicate_StringValue); ok {
return x.StringValue
}
return ""
}
func (x *Predicate) GetTimestampValue() *timestamppb.Timestamp {
if x, ok := x.GetValue().(*Predicate_TimestampValue); ok {
return x.TimestampValue
}
return nil
}
func (x *Predicate) GetIntValues() *IntValues {
if x, ok := x.GetValue().(*Predicate_IntValues); ok {
return x.IntValues
}
return nil
}
func (x *Predicate) GetLongValues() *LongValues {
if x, ok := x.GetValue().(*Predicate_LongValues); ok {
return x.LongValues
}
return nil
}
func (x *Predicate) GetStringValues() *StringValues {
if x, ok := x.GetValue().(*Predicate_StringValues); ok {
return x.StringValues
}
return nil
}
type isPredicate_Value interface {
isPredicate_Value()
}
type Predicate_IntValue struct {
IntValue int32 `protobuf:"varint,3,opt,name=int_value,proto3,oneof"`
}
type Predicate_LongValue struct {
LongValue int64 `protobuf:"varint,4,opt,name=long_value,proto3,oneof"`
}
type Predicate_StringValue struct {
StringValue string `protobuf:"bytes,5,opt,name=string_value,proto3,oneof"`
}
type Predicate_TimestampValue struct {
// Timestamp values will be converted to Unix time (seconds since the epoch)
// prior to being used in a filtering operation.
TimestampValue *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=timestamp_value,proto3,oneof"`
}
type Predicate_IntValues struct {
// Array values below are only meant to be used by the IN operator.
IntValues *IntValues `protobuf:"bytes,7,opt,name=int_values,proto3,oneof"`
}
type Predicate_LongValues struct {
LongValues *LongValues `protobuf:"bytes,8,opt,name=long_values,proto3,oneof"`
}
type Predicate_StringValues struct {
StringValues *StringValues `protobuf:"bytes,9,opt,name=string_values,proto3,oneof"`
}
func (*Predicate_IntValue) isPredicate_Value() {}
func (*Predicate_LongValue) isPredicate_Value() {}
func (*Predicate_StringValue) isPredicate_Value() {}
func (*Predicate_TimestampValue) isPredicate_Value() {}
func (*Predicate_IntValues) isPredicate_Value() {}
func (*Predicate_LongValues) isPredicate_Value() {}
func (*Predicate_StringValues) isPredicate_Value() {}
type IntValues struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Values []int32 `protobuf:"varint,1,rep,packed,name=values,proto3" json:"values,omitempty"`
}
func (x *IntValues) Reset() {
*x = IntValues{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_filter_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *IntValues) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IntValues) ProtoMessage() {}
func (x *IntValues) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_filter_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IntValues.ProtoReflect.Descriptor instead.
func (*IntValues) Descriptor() ([]byte, []int) {
return file_backend_api_v1_filter_proto_rawDescGZIP(), []int{1}
}
func (x *IntValues) GetValues() []int32 {
if x != nil {
return x.Values
}
return nil
}
type StringValues struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Values []string `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"`
}
func (x *StringValues) Reset() {
*x = StringValues{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_filter_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *StringValues) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*StringValues) ProtoMessage() {}
func (x *StringValues) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_filter_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use StringValues.ProtoReflect.Descriptor instead.
func (*StringValues) Descriptor() ([]byte, []int) {
return file_backend_api_v1_filter_proto_rawDescGZIP(), []int{2}
}
func (x *StringValues) GetValues() []string {
if x != nil {
return x.Values
}
return nil
}
type LongValues struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Values []int64 `protobuf:"varint,3,rep,packed,name=values,proto3" json:"values,omitempty"`
}
func (x *LongValues) Reset() {
*x = LongValues{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_filter_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *LongValues) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*LongValues) ProtoMessage() {}
func (x *LongValues) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_filter_proto_msgTypes[3]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use LongValues.ProtoReflect.Descriptor instead.
func (*LongValues) Descriptor() ([]byte, []int) {
return file_backend_api_v1_filter_proto_rawDescGZIP(), []int{3}
}
func (x *LongValues) GetValues() []int64 {
if x != nil {
return x.Values
}
return nil
}
// Filter is used to filter resources returned from a ListXXX request.
//
// Example filters:
// 1) Filter runs with status = 'Running'
//
//    filter {
//      predicates {
//        key: "status"
//        op: EQUALS
//        string_value: "Running"
//      }
//    }
//
// 2) Filter runs that succeeded since Dec 1, 2018
//
//    filter {
//      predicates {
//        key: "status"
//        op: EQUALS
//        string_value: "Succeeded"
//      }
//      predicates {
//        key: "created_at"
//        op: GREATER_THAN
//        timestamp_value {
//          seconds: 1543651200
//        }
//      }
//    }
//
// 3) Filter runs with one of the labels 'label_1' or 'label_2'
//
//    filter {
//      predicates {
//        key: "label"
//        op: IN
//        string_values {
//          values: 'label_1'
//          values: 'label_2'
//        }
//      }
//    }
type Filter struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// All predicates are AND-ed when this filter is applied.
Predicates []*Predicate `protobuf:"bytes,1,rep,name=predicates,proto3" json:"predicates,omitempty"`
}
func (x *Filter) Reset() {
*x = Filter{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_filter_proto_msgTypes[4]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Filter) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Filter) ProtoMessage() {}
func (x *Filter) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_filter_proto_msgTypes[4]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Filter.ProtoReflect.Descriptor instead.
func (*Filter) Descriptor() ([]byte, []int) {
return file_backend_api_v1_filter_proto_rawDescGZIP(), []int{4}
}
func (x *Filter) GetPredicates() []*Predicate {
if x != nil {
return x.Predicates
}
return nil
}
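// Editorial sketch, not part of the generated file: example filter (1) from
// the comment above, built programmatically; the function name is
// hypothetical.
func exampleRunningFilter() *Filter {
    return &Filter{
        Predicates: []*Predicate{{
            Op:    Predicate_EQUALS,
            Key:   "status",
            Value: &Predicate_StringValue{StringValue: "Running"},
        }},
    }
}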
var File_backend_api_v1_filter_proto protoreflect.FileDescriptor
var file_backend_api_v1_filter_proto_rawDesc = []byte{
0x0a, 0x1b, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
0x2f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76,
0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x22, 0xb1, 0x04, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x20,
0x0a, 0x02, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x76, 0x31, 0x2e,
0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x4f, 0x70, 0x52, 0x02, 0x6f, 0x70,
0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
0x65, 0x79, 0x12, 0x1e, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
0x03, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x12, 0x20, 0x0a, 0x0a, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x12, 0x24, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x74,
0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0f, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48,
0x00, 0x52, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x12, 0x2f, 0x0a, 0x0a, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73,
0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x74, 0x56,
0x61, 0x6c, 0x75, 0x65, 0x73, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f,
0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67,
0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x0d, 0x73, 0x74, 0x72, 0x69, 0x6e,
0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10,
0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73,
0x48, 0x00, 0x52, 0x0d, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x73, 0x22, 0x97, 0x01, 0x0a, 0x02, 0x4f, 0x70, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e,
0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x53, 0x10,
0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x4f, 0x54, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x53, 0x10,
0x02, 0x12, 0x10, 0x0a, 0x0c, 0x47, 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54, 0x48, 0x41,
0x4e, 0x10, 0x03, 0x12, 0x17, 0x0a, 0x13, 0x47, 0x52, 0x45, 0x41, 0x54, 0x45, 0x52, 0x5f, 0x54,
0x48, 0x41, 0x4e, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x53, 0x10, 0x05, 0x12, 0x0d, 0x0a, 0x09,
0x4c, 0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, 0x4c,
0x45, 0x53, 0x53, 0x5f, 0x54, 0x48, 0x41, 0x4e, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x53, 0x10,
0x07, 0x12, 0x06, 0x0a, 0x02, 0x49, 0x4e, 0x10, 0x08, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x53, 0x5f,
0x53, 0x55, 0x42, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x42, 0x07, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x09, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28,
0x05, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x26, 0x0a, 0x0c, 0x53, 0x74, 0x72,
0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x73, 0x22, 0x24, 0x0a, 0x0a, 0x4c, 0x6f, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12,
0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x03, 0x52,
0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x37, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65,
0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18,
0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69,
0x63, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73,
0x32, 0x3b, 0x0a, 0x12, 0x44, 0x75, 0x6d, 0x6d, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x53,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x46, 0x69, 0x6c,
0x74, 0x65, 0x72, 0x12, 0x0a, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x1a,
0x0a, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x00, 0x42, 0x38, 0x5a,
0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65,
0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2f, 0x62,
0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x6f,
0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_backend_api_v1_filter_proto_rawDescOnce sync.Once
file_backend_api_v1_filter_proto_rawDescData = file_backend_api_v1_filter_proto_rawDesc
)
func file_backend_api_v1_filter_proto_rawDescGZIP() []byte {
file_backend_api_v1_filter_proto_rawDescOnce.Do(func() {
file_backend_api_v1_filter_proto_rawDescData = protoimpl.X.CompressGZIP(file_backend_api_v1_filter_proto_rawDescData)
})
return file_backend_api_v1_filter_proto_rawDescData
}
var file_backend_api_v1_filter_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_backend_api_v1_filter_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_backend_api_v1_filter_proto_goTypes = []interface{}{
(Predicate_Op)(0), // 0: v1.Predicate.Op
(*Predicate)(nil), // 1: v1.Predicate
(*IntValues)(nil), // 2: v1.IntValues
(*StringValues)(nil), // 3: v1.StringValues
(*LongValues)(nil), // 4: v1.LongValues
(*Filter)(nil), // 5: v1.Filter
(*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp
}
var file_backend_api_v1_filter_proto_depIdxs = []int32{
0, // 0: v1.Predicate.op:type_name -> v1.Predicate.Op
6, // 1: v1.Predicate.timestamp_value:type_name -> google.protobuf.Timestamp
2, // 2: v1.Predicate.int_values:type_name -> v1.IntValues
4, // 3: v1.Predicate.long_values:type_name -> v1.LongValues
3, // 4: v1.Predicate.string_values:type_name -> v1.StringValues
1, // 5: v1.Filter.predicates:type_name -> v1.Predicate
5, // 6: v1.DummyFilterService.GetFilter:input_type -> v1.Filter
5, // 7: v1.DummyFilterService.GetFilter:output_type -> v1.Filter
7, // [7:8] is the sub-list for method output_type
6, // [6:7] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
}
func init() { file_backend_api_v1_filter_proto_init() }
func file_backend_api_v1_filter_proto_init() {
if File_backend_api_v1_filter_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_backend_api_v1_filter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Predicate); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_backend_api_v1_filter_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*IntValues); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_backend_api_v1_filter_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*StringValues); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_backend_api_v1_filter_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*LongValues); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_backend_api_v1_filter_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Filter); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_backend_api_v1_filter_proto_msgTypes[0].OneofWrappers = []interface{}{
(*Predicate_IntValue)(nil),
(*Predicate_LongValue)(nil),
(*Predicate_StringValue)(nil),
(*Predicate_TimestampValue)(nil),
(*Predicate_IntValues)(nil),
(*Predicate_LongValues)(nil),
(*Predicate_StringValues)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_backend_api_v1_filter_proto_rawDesc,
NumEnums: 1,
NumMessages: 5,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_backend_api_v1_filter_proto_goTypes,
DependencyIndexes: file_backend_api_v1_filter_proto_depIdxs,
EnumInfos: file_backend_api_v1_filter_proto_enumTypes,
MessageInfos: file_backend_api_v1_filter_proto_msgTypes,
}.Build()
File_backend_api_v1_filter_proto = out.File
file_backend_api_v1_filter_proto_rawDesc = nil
file_backend_api_v1_filter_proto_goTypes = nil
file_backend_api_v1_filter_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// DummyFilterServiceClient is the client API for DummyFilterService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type DummyFilterServiceClient interface {
GetFilter(ctx context.Context, in *Filter, opts ...grpc.CallOption) (*Filter, error)
}
type dummyFilterServiceClient struct {
cc grpc.ClientConnInterface
}
func NewDummyFilterServiceClient(cc grpc.ClientConnInterface) DummyFilterServiceClient {
return &dummyFilterServiceClient{cc}
}
func (c *dummyFilterServiceClient) GetFilter(ctx context.Context, in *Filter, opts ...grpc.CallOption) (*Filter, error) {
out := new(Filter)
err := c.cc.Invoke(ctx, "/v1.DummyFilterService/GetFilter", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
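// Editorial sketch, not part of the generated file: calling GetFilter over a
// freshly dialed connection. The endpoint "localhost:8887" and the function
// name are hypothetical.
func exampleGetFilter(ctx context.Context) (*Filter, error) {
    conn, err := grpc.Dial("localhost:8887", grpc.WithInsecure())
    if err != nil {
        return nil, err
    }
    defer conn.Close()
    // DummyFilterService takes a Filter and returns a Filter, so an empty
    // message simply round-trips.
    return NewDummyFilterServiceClient(conn).GetFilter(ctx, &Filter{})
}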
// DummyFilterServiceServer is the server API for DummyFilterService service.
type DummyFilterServiceServer interface {
GetFilter(context.Context, *Filter) (*Filter, error)
}
// UnimplementedDummyFilterServiceServer can be embedded to have forward-compatible implementations.
type UnimplementedDummyFilterServiceServer struct {
}
func (*UnimplementedDummyFilterServiceServer) GetFilter(context.Context, *Filter) (*Filter, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetFilter not implemented")
}
func RegisterDummyFilterServiceServer(s *grpc.Server, srv DummyFilterServiceServer) {
s.RegisterService(&_DummyFilterService_serviceDesc, srv)
}
func _DummyFilterService_GetFilter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Filter)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DummyFilterServiceServer).GetFilter(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v1.DummyFilterService/GetFilter",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DummyFilterServiceServer).GetFilter(ctx, req.(*Filter))
}
return interceptor(ctx, in, info, handler)
}
var _DummyFilterService_serviceDesc = grpc.ServiceDesc{
ServiceName: "v1.DummyFilterService",
HandlerType: (*DummyFilterServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetFilter",
Handler: _DummyFilterService_GetFilter_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/v1/filter.proto",
}

View File

@@ -0,0 +1,274 @@
// Copyright 2020 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: backend/api/v1/healthz.proto
package go_client
import (
context "context"
_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type GetHealthzResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Returns whether KFP is running in multi-user mode.
MultiUser bool `protobuf:"varint,3,opt,name=multi_user,proto3" json:"multi_user,omitempty"`
}
func (x *GetHealthzResponse) Reset() {
*x = GetHealthzResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_healthz_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *GetHealthzResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*GetHealthzResponse) ProtoMessage() {}
func (x *GetHealthzResponse) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_healthz_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use GetHealthzResponse.ProtoReflect.Descriptor instead.
func (*GetHealthzResponse) Descriptor() ([]byte, []int) {
return file_backend_api_v1_healthz_proto_rawDescGZIP(), []int{0}
}
func (x *GetHealthzResponse) GetMultiUser() bool {
if x != nil {
return x.MultiUser
}
return false
}
var File_backend_api_v1_healthz_proto protoreflect.FileDescriptor
var file_backend_api_v1_healthz_proto_rawDesc = []byte{
0x0a, 0x1c, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x7a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02,
0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x62,
0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x34, 0x0a, 0x12, 0x47, 0x65, 0x74,
0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x7a, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
0x1e, 0x0a, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x18, 0x03, 0x20,
0x01, 0x28, 0x08, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x32,
0x68, 0x0a, 0x0e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x7a, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x12, 0x56, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x7a, 0x12,
0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74,
0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x7a, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x18, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x12, 0x12, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76,
0x31, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x7a, 0x42, 0x87, 0x01, 0x5a, 0x36, 0x67, 0x69,
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f,
0x77, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2f, 0x62, 0x61, 0x63, 0x6b,
0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x6f, 0x5f, 0x63, 0x6c,
0x69, 0x65, 0x6e, 0x74, 0x92, 0x41, 0x4c, 0x52, 0x1b, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75,
0x6c, 0x74, 0x12, 0x10, 0x12, 0x0e, 0x0a, 0x0c, 0x1a, 0x0a, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74,
0x61, 0x74, 0x75, 0x73, 0x5a, 0x1f, 0x0a, 0x1d, 0x0a, 0x06, 0x42, 0x65, 0x61, 0x72, 0x65, 0x72,
0x12, 0x13, 0x08, 0x02, 0x1a, 0x0d, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x20, 0x02, 0x62, 0x0c, 0x0a, 0x0a, 0x0a, 0x06, 0x42, 0x65, 0x61, 0x72, 0x65,
0x72, 0x12, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_backend_api_v1_healthz_proto_rawDescOnce sync.Once
file_backend_api_v1_healthz_proto_rawDescData = file_backend_api_v1_healthz_proto_rawDesc
)
func file_backend_api_v1_healthz_proto_rawDescGZIP() []byte {
file_backend_api_v1_healthz_proto_rawDescOnce.Do(func() {
file_backend_api_v1_healthz_proto_rawDescData = protoimpl.X.CompressGZIP(file_backend_api_v1_healthz_proto_rawDescData)
})
return file_backend_api_v1_healthz_proto_rawDescData
}
var file_backend_api_v1_healthz_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_backend_api_v1_healthz_proto_goTypes = []interface{}{
(*GetHealthzResponse)(nil), // 0: v1.GetHealthzResponse
(*emptypb.Empty)(nil), // 1: google.protobuf.Empty
}
var file_backend_api_v1_healthz_proto_depIdxs = []int32{
1, // 0: v1.HealthzService.GetHealthz:input_type -> google.protobuf.Empty
0, // 1: v1.HealthzService.GetHealthz:output_type -> v1.GetHealthzResponse
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_backend_api_v1_healthz_proto_init() }
func file_backend_api_v1_healthz_proto_init() {
if File_backend_api_v1_healthz_proto != nil {
return
}
file_backend_api_v1_error_proto_init()
if !protoimpl.UnsafeEnabled {
file_backend_api_v1_healthz_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*GetHealthzResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_backend_api_v1_healthz_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_backend_api_v1_healthz_proto_goTypes,
DependencyIndexes: file_backend_api_v1_healthz_proto_depIdxs,
MessageInfos: file_backend_api_v1_healthz_proto_msgTypes,
}.Build()
File_backend_api_v1_healthz_proto = out.File
file_backend_api_v1_healthz_proto_rawDesc = nil
file_backend_api_v1_healthz_proto_goTypes = nil
file_backend_api_v1_healthz_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// HealthzServiceClient is the client API for HealthzService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type HealthzServiceClient interface {
// Get healthz data.
GetHealthz(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetHealthzResponse, error)
}
type healthzServiceClient struct {
cc grpc.ClientConnInterface
}
func NewHealthzServiceClient(cc grpc.ClientConnInterface) HealthzServiceClient {
return &healthzServiceClient{cc}
}
func (c *healthzServiceClient) GetHealthz(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetHealthzResponse, error) {
out := new(GetHealthzResponse)
err := c.cc.Invoke(ctx, "/v1.HealthzService/GetHealthz", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
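// Editorial sketch, not part of the generated file: querying healthz over an
// existing connection; the function name is hypothetical.
func exampleCheckHealthz(ctx context.Context, cc grpc.ClientConnInterface) (bool, error) {
    resp, err := NewHealthzServiceClient(cc).GetHealthz(ctx, &emptypb.Empty{})
    if err != nil {
        return false, err
    }
    // MultiUser reports whether the deployment runs in multi-user mode.
    return resp.GetMultiUser(), nil
}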
// HealthzServiceServer is the server API for HealthzService service.
type HealthzServiceServer interface {
// Get healthz data.
GetHealthz(context.Context, *emptypb.Empty) (*GetHealthzResponse, error)
}
// UnimplementedHealthzServiceServer can be embedded to have forward-compatible implementations.
type UnimplementedHealthzServiceServer struct {
}
func (*UnimplementedHealthzServiceServer) GetHealthz(context.Context, *emptypb.Empty) (*GetHealthzResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetHealthz not implemented")
}
func RegisterHealthzServiceServer(s *grpc.Server, srv HealthzServiceServer) {
s.RegisterService(&_HealthzService_serviceDesc, srv)
}
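// Editorial sketch, not part of the generated file: a minimal server
// implementation that embeds UnimplementedHealthzServiceServer for forward
// compatibility; the type name and the hard-coded response are hypothetical.
type exampleHealthzServer struct {
    UnimplementedHealthzServiceServer
}

func (s *exampleHealthzServer) GetHealthz(ctx context.Context, _ *emptypb.Empty) (*GetHealthzResponse, error) {
    return &GetHealthzResponse{MultiUser: true}, nil
}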
func _HealthzService_GetHealthz_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(emptypb.Empty)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HealthzServiceServer).GetHealthz(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v1.HealthzService/GetHealthz",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HealthzServiceServer).GetHealthz(ctx, req.(*emptypb.Empty))
}
return interceptor(ctx, in, info, handler)
}
var _HealthzService_serviceDesc = grpc.ServiceDesc{
ServiceName: "v1.HealthzService",
HandlerType: (*HealthzServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetHealthz",
Handler: _HealthzService_GetHealthz_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/v1/healthz.proto",
}

View File

@@ -0,0 +1,156 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/v1/healthz.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/emptypb"
)
// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = metadata.Join
func request_HealthzService_GetHealthz_0(ctx context.Context, marshaler runtime.Marshaler, client HealthzServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq emptypb.Empty
var metadata runtime.ServerMetadata
msg, err := client.GetHealthz(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_HealthzService_GetHealthz_0(ctx context.Context, marshaler runtime.Marshaler, server HealthzServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq emptypb.Empty
var metadata runtime.ServerMetadata
msg, err := server.GetHealthz(ctx, &protoReq)
return msg, metadata, err
}
// RegisterHealthzServiceHandlerServer registers the http handlers for service HealthzService to "mux".
// UnaryRPC: calls HealthzServiceServer directly.
// StreamingRPC: currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterHealthzServiceHandlerFromEndpoint instead.
func RegisterHealthzServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server HealthzServiceServer) error {
mux.Handle("GET", pattern_HealthzService_GetHealthz_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.HealthzService/GetHealthz", runtime.WithHTTPPathPattern("/apis/v1/healthz"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_HealthzService_GetHealthz_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HealthzService_GetHealthz_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
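// Editorial sketch, not part of the generated file: serving the REST mapping
// directly from an in-process server, which skips the gRPC hop entirely; the
// HTTP port ":8080" and the function name are hypothetical.
func exampleServeHealthz(ctx context.Context, server HealthzServiceServer) error {
    mux := runtime.NewServeMux()
    if err := RegisterHealthzServiceHandlerServer(ctx, mux, server); err != nil {
        return err
    }
    // GET /apis/v1/healthz is now handled by mux.
    return http.ListenAndServe(":8080", mux)
}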
// RegisterHealthzServiceHandlerFromEndpoint is the same as RegisterHealthzServiceHandler but
// automatically dials "endpoint" and closes the connection when "ctx" is done.
func RegisterHealthzServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterHealthzServiceHandler(ctx, mux, conn)
}
// RegisterHealthzServiceHandler registers the http handlers for service HealthzService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterHealthzServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterHealthzServiceHandlerClient(ctx, mux, NewHealthzServiceClient(conn))
}
// RegisterHealthzServiceHandlerClient registers the http handlers for service HealthzService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "HealthzServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "HealthzServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "HealthzServiceClient" to call the correct interceptors.
func RegisterHealthzServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client HealthzServiceClient) error {
mux.Handle("GET", pattern_HealthzService_GetHealthz_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.HealthzService/GetHealthz", runtime.WithHTTPPathPattern("/apis/v1/healthz"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_HealthzService_GetHealthz_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_HealthzService_GetHealthz_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_HealthzService_GetHealthz_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1", "healthz"}, ""))
)
var (
forward_HealthzService_GetHealthz_0 = runtime.ForwardResponseMessage
)

File diff suppressed because it is too large

View File

@@ -0,0 +1,670 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/v1/job.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = metadata.Join
func request_JobService_CreateJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreateJobRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Job); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.CreateJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_JobService_CreateJob_0(ctx context.Context, marshaler runtime.Marshaler, server JobServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq CreateJobRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Job); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.CreateJob(ctx, &protoReq)
return msg, metadata, err
}
func request_JobService_GetJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.GetJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_JobService_GetJob_0(ctx context.Context, marshaler runtime.Marshaler, server JobServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq GetJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := server.GetJob(ctx, &protoReq)
return msg, metadata, err
}
var (
filter_JobService_ListJobs_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_JobService_ListJobs_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListJobsRequest
var metadata runtime.ServerMetadata
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_JobService_ListJobs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListJobs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_JobService_ListJobs_0(ctx context.Context, marshaler runtime.Marshaler, server JobServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ListJobsRequest
var metadata runtime.ServerMetadata
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_JobService_ListJobs_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ListJobs(ctx, &protoReq)
return msg, metadata, err
}
func request_JobService_EnableJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq EnableJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.EnableJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_JobService_EnableJob_0(ctx context.Context, marshaler runtime.Marshaler, server JobServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq EnableJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := server.EnableJob(ctx, &protoReq)
return msg, metadata, err
}
func request_JobService_DisableJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DisableJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DisableJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_JobService_DisableJob_0(ctx context.Context, marshaler runtime.Marshaler, server JobServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DisableJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := server.DisableJob(ctx, &protoReq)
return msg, metadata, err
}
func request_JobService_DeleteJob_0(ctx context.Context, marshaler runtime.Marshaler, client JobServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.DeleteJob(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_JobService_DeleteJob_0(ctx context.Context, marshaler runtime.Marshaler, server JobServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq DeleteJobRequest
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
val, ok = pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := server.DeleteJob(ctx, &protoReq)
return msg, metadata, err
}
// RegisterJobServiceHandlerServer registers the http handlers for service JobService to "mux".
// UnaryRPC: calls JobServiceServer directly.
// StreamingRPC: currently unsupported, pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterJobServiceHandlerFromEndpoint instead.
func RegisterJobServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server JobServiceServer) error {
mux.Handle("POST", pattern_JobService_CreateJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.JobService/CreateJob", runtime.WithHTTPPathPattern("/apis/v1/jobs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_JobService_CreateJob_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_CreateJob_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_JobService_GetJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.JobService/GetJob", runtime.WithHTTPPathPattern("/apis/v1/jobs/{id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_JobService_GetJob_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_GetJob_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_JobService_ListJobs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.JobService/ListJobs", runtime.WithHTTPPathPattern("/apis/v1/jobs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_JobService_ListJobs_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_ListJobs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_JobService_EnableJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.JobService/EnableJob", runtime.WithHTTPPathPattern("/apis/v1/jobs/{id}/enable"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_JobService_EnableJob_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_EnableJob_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_JobService_DisableJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.JobService/DisableJob", runtime.WithHTTPPathPattern("/apis/v1/jobs/{id}/disable"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_JobService_DisableJob_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_DisableJob_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_JobService_DeleteJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.JobService/DeleteJob", runtime.WithHTTPPathPattern("/apis/v1/jobs/{id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_JobService_DeleteJob_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_DeleteJob_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
// RegisterJobServiceHandlerFromEndpoint is same as RegisterJobServiceHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterJobServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterJobServiceHandler(ctx, mux, conn)
}
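The FromEndpoint variant above dials the endpoint itself and closes the connection once "ctx" is done, so the caller only manages the context. A minimal sketch of using it, with placeholder addresses and insecure credentials for local use:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	gc "github.com/kubeflow/pipelines/backend/api/v1/go_client"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Cancelling this context closes the gRPC connection that the
	// handler dialed on our behalf.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	if err := gc.RegisterJobServiceHandlerFromEndpoint(ctx, mux, "localhost:8887", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8888", mux)) // placeholder gateway port
}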
// RegisterJobServiceHandler registers the http handlers for service JobService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterJobServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterJobServiceHandlerClient(ctx, mux, NewJobServiceClient(conn))
}
// RegisterJobServiceHandlerClient registers the http handlers for service JobService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "JobServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "JobServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "JobServiceClient" to call the correct interceptors.
func RegisterJobServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client JobServiceClient) error {
mux.Handle("POST", pattern_JobService_CreateJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.JobService/CreateJob", runtime.WithHTTPPathPattern("/apis/v1/jobs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_CreateJob_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_CreateJob_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_JobService_GetJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.JobService/GetJob", runtime.WithHTTPPathPattern("/apis/v1/jobs/{id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_GetJob_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_GetJob_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_JobService_ListJobs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.JobService/ListJobs", runtime.WithHTTPPathPattern("/apis/v1/jobs"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_ListJobs_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_ListJobs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_JobService_EnableJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.JobService/EnableJob", runtime.WithHTTPPathPattern("/apis/v1/jobs/{id}/enable"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_EnableJob_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_EnableJob_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_JobService_DisableJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.JobService/DisableJob", runtime.WithHTTPPathPattern("/apis/v1/jobs/{id}/disable"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_DisableJob_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_DisableJob_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("DELETE", pattern_JobService_DeleteJob_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.JobService/DeleteJob", runtime.WithHTTPPathPattern("/apis/v1/jobs/{id}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_JobService_DeleteJob_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_JobService_DeleteJob_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_JobService_CreateJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1", "jobs"}, ""))
pattern_JobService_GetJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1", "jobs", "id"}, ""))
pattern_JobService_ListJobs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1", "jobs"}, ""))
pattern_JobService_EnableJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1", "jobs", "id", "enable"}, ""))
pattern_JobService_DisableJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"apis", "v1", "jobs", "id", "disable"}, ""))
pattern_JobService_DeleteJob_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"apis", "v1", "jobs", "id"}, ""))
)
var (
forward_JobService_CreateJob_0 = runtime.ForwardResponseMessage
forward_JobService_GetJob_0 = runtime.ForwardResponseMessage
forward_JobService_ListJobs_0 = runtime.ForwardResponseMessage
forward_JobService_EnableJob_0 = runtime.ForwardResponseMessage
forward_JobService_DisableJob_0 = runtime.ForwardResponseMessage
forward_JobService_DeleteJob_0 = runtime.ForwardResponseMessage
)
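
The patterns registered above define the JobService REST surface: POST and GET on /apis/v1/jobs, GET and DELETE on /apis/v1/jobs/{id}, and POST on /apis/v1/jobs/{id}/enable and /apis/v1/jobs/{id}/disable. A sketch of exercising a few of those routes with net/http follows; the gateway address and job ID are placeholders, and the Job fields in the JSON body are assumptions, since job.pb.go's diff is suppressed elsewhere in this compare.

package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	base := "http://localhost:8888" // placeholder gateway address

	// POST /apis/v1/jobs creates a job from a JSON-encoded Job body;
	// the field names here are assumptions.
	body := strings.NewReader(`{"name": "nightly-train", "enabled": true}`)
	resp, err := http.Post(base+"/apis/v1/jobs", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// POST /apis/v1/jobs/{id}/enable re-enables a job; the ID is hypothetical.
	resp, err = http.Post(base+"/apis/v1/jobs/job-123/enable", "application/json", nil)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()

	// DELETE /apis/v1/jobs/{id} removes the job.
	req, err := http.NewRequest(http.MethodDelete, base+"/apis/v1/jobs/job-123", nil)
	if err != nil {
		log.Fatal(err)
	}
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("delete status:", resp.Status)
	resp.Body.Close()
}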


@@ -0,0 +1,168 @@
// Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: backend/api/v1/parameter.proto
package go_client
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type Parameter struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (x *Parameter) Reset() {
*x = Parameter{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_parameter_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Parameter) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Parameter) ProtoMessage() {}
func (x *Parameter) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_parameter_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Parameter.ProtoReflect.Descriptor instead.
func (*Parameter) Descriptor() ([]byte, []int) {
return file_backend_api_v1_parameter_proto_rawDescGZIP(), []int{0}
}
func (x *Parameter) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *Parameter) GetValue() string {
if x != nil {
return x.Value
}
return ""
}
var File_backend_api_v1_parameter_proto protoreflect.FileDescriptor
var file_backend_api_v1_parameter_proto_rawDesc = []byte{
0x0a, 0x1e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
0x2f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x12, 0x02, 0x76, 0x31, 0x22, 0x35, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x38, 0x5a, 0x36, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c,
0x6f, 0x77, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2f, 0x62, 0x61, 0x63,
0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x6f, 0x5f, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_backend_api_v1_parameter_proto_rawDescOnce sync.Once
file_backend_api_v1_parameter_proto_rawDescData = file_backend_api_v1_parameter_proto_rawDesc
)
func file_backend_api_v1_parameter_proto_rawDescGZIP() []byte {
file_backend_api_v1_parameter_proto_rawDescOnce.Do(func() {
file_backend_api_v1_parameter_proto_rawDescData = protoimpl.X.CompressGZIP(file_backend_api_v1_parameter_proto_rawDescData)
})
return file_backend_api_v1_parameter_proto_rawDescData
}
var file_backend_api_v1_parameter_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_backend_api_v1_parameter_proto_goTypes = []interface{}{
(*Parameter)(nil), // 0: v1.Parameter
}
var file_backend_api_v1_parameter_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_backend_api_v1_parameter_proto_init() }
func file_backend_api_v1_parameter_proto_init() {
if File_backend_api_v1_parameter_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_backend_api_v1_parameter_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Parameter); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_backend_api_v1_parameter_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_backend_api_v1_parameter_proto_goTypes,
DependencyIndexes: file_backend_api_v1_parameter_proto_depIdxs,
MessageInfos: file_backend_api_v1_parameter_proto_msgTypes,
}.Build()
File_backend_api_v1_parameter_proto = out.File
file_backend_api_v1_parameter_proto_rawDesc = nil
file_backend_api_v1_parameter_proto_goTypes = nil
file_backend_api_v1_parameter_proto_depIdxs = nil
}
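For a quick sanity check of the generated type, a sketch that round-trips a Parameter through the protobuf wire format; the name and value are arbitrary:

package main

import (
	"fmt"
	"log"

	gc "github.com/kubeflow/pipelines/backend/api/v1/go_client"
	"google.golang.org/protobuf/proto"
)

func main() {
	p := &gc.Parameter{Name: "learning_rate", Value: "0.01"}

	// Marshal to the binary wire format and back.
	raw, err := proto.Marshal(p)
	if err != nil {
		log.Fatal(err)
	}
	var out gc.Parameter
	if err := proto.Unmarshal(raw, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.GetName(), out.GetValue()) // learning_rate 0.01
}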

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,447 @@
// Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: backend/api/v1/pipeline_spec.proto
package go_client
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type PipelineSpec struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Optional input field. The ID of a pipeline that the user uploaded earlier.
PipelineId string `protobuf:"bytes,1,opt,name=pipeline_id,proto3" json:"pipeline_id,omitempty"`
// Optional output field. The name of the pipeline.
// Not empty if the pipeline id is not empty.
PipelineName string `protobuf:"bytes,5,opt,name=pipeline_name,proto3" json:"pipeline_name,omitempty"`
// Optional input field. The marshalled raw argo JSON workflow.
// This will be deprecated when pipeline_manifest is in use.
WorkflowManifest string `protobuf:"bytes,2,opt,name=workflow_manifest,proto3" json:"workflow_manifest,omitempty"`
// Optional input field. The raw pipeline JSON spec.
PipelineManifest string `protobuf:"bytes,3,opt,name=pipeline_manifest,proto3" json:"pipeline_manifest,omitempty"`
// The parameters the user provides to inject into the pipeline JSON.
// If a parameter already has a default value in the JSON, the value
// provided here replaces it. V1 only.
Parameters []*Parameter `protobuf:"bytes,4,rep,name=parameters,proto3" json:"parameters,omitempty"`
// Runtime config of the pipeline. V2 only
RuntimeConfig *PipelineSpec_RuntimeConfig `protobuf:"bytes,6,opt,name=runtime_config,proto3" json:"runtime_config,omitempty"`
}
func (x *PipelineSpec) Reset() {
*x = PipelineSpec{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_pipeline_spec_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PipelineSpec) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PipelineSpec) ProtoMessage() {}
func (x *PipelineSpec) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_pipeline_spec_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PipelineSpec.ProtoReflect.Descriptor instead.
func (*PipelineSpec) Descriptor() ([]byte, []int) {
return file_backend_api_v1_pipeline_spec_proto_rawDescGZIP(), []int{0}
}
func (x *PipelineSpec) GetPipelineId() string {
if x != nil {
return x.PipelineId
}
return ""
}
func (x *PipelineSpec) GetPipelineName() string {
if x != nil {
return x.PipelineName
}
return ""
}
func (x *PipelineSpec) GetWorkflowManifest() string {
if x != nil {
return x.WorkflowManifest
}
return ""
}
func (x *PipelineSpec) GetPipelineManifest() string {
if x != nil {
return x.PipelineManifest
}
return ""
}
func (x *PipelineSpec) GetParameters() []*Parameter {
if x != nil {
return x.Parameters
}
return nil
}
func (x *PipelineSpec) GetRuntimeConfig() *PipelineSpec_RuntimeConfig {
if x != nil {
return x.RuntimeConfig
}
return nil
}
// Value is the value of a runtime parameter; exactly one of the oneof members below is set.
type Value struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Types that are assignable to Value:
// *Value_IntValue
// *Value_DoubleValue
// *Value_StringValue
Value isValue_Value `protobuf_oneof:"value"`
}
func (x *Value) Reset() {
*x = Value{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_pipeline_spec_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *Value) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*Value) ProtoMessage() {}
func (x *Value) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_pipeline_spec_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use Value.ProtoReflect.Descriptor instead.
func (*Value) Descriptor() ([]byte, []int) {
return file_backend_api_v1_pipeline_spec_proto_rawDescGZIP(), []int{1}
}
func (m *Value) GetValue() isValue_Value {
if m != nil {
return m.Value
}
return nil
}
func (x *Value) GetIntValue() int64 {
if x, ok := x.GetValue().(*Value_IntValue); ok {
return x.IntValue
}
return 0
}
func (x *Value) GetDoubleValue() float64 {
if x, ok := x.GetValue().(*Value_DoubleValue); ok {
return x.DoubleValue
}
return 0
}
func (x *Value) GetStringValue() string {
if x, ok := x.GetValue().(*Value_StringValue); ok {
return x.StringValue
}
return ""
}
type isValue_Value interface {
isValue_Value()
}
type Value_IntValue struct {
// An integer value
IntValue int64 `protobuf:"varint,1,opt,name=int_value,proto3,oneof"`
}
type Value_DoubleValue struct {
// A double value
DoubleValue float64 `protobuf:"fixed64,2,opt,name=double_value,proto3,oneof"`
}
type Value_StringValue struct {
// A string value
StringValue string `protobuf:"bytes,3,opt,name=string_value,proto3,oneof"`
}
func (*Value_IntValue) isValue_Value() {}
func (*Value_DoubleValue) isValue_Value() {}
func (*Value_StringValue) isValue_Value() {}
// The runtime config of a PipelineSpec.
type PipelineSpec_RuntimeConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The runtime parameters of the PipelineSpec. The parameters will be
// used to replace the placeholders at runtime.
Parameters map[string]*Value `protobuf:"bytes,1,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// A path in an object store bucket which will be treated as the root
// output directory of the pipeline. It is used by the system to
// generate the paths of output artifacts. Ref:(https://www.kubeflow.org/docs/components/pipelines/pipeline-root/)
PipelineRoot string `protobuf:"bytes,2,opt,name=pipeline_root,proto3" json:"pipeline_root,omitempty"`
}
func (x *PipelineSpec_RuntimeConfig) Reset() {
*x = PipelineSpec_RuntimeConfig{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_pipeline_spec_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *PipelineSpec_RuntimeConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*PipelineSpec_RuntimeConfig) ProtoMessage() {}
func (x *PipelineSpec_RuntimeConfig) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_pipeline_spec_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use PipelineSpec_RuntimeConfig.ProtoReflect.Descriptor instead.
func (*PipelineSpec_RuntimeConfig) Descriptor() ([]byte, []int) {
return file_backend_api_v1_pipeline_spec_proto_rawDescGZIP(), []int{0, 0}
}
func (x *PipelineSpec_RuntimeConfig) GetParameters() map[string]*Value {
if x != nil {
return x.Parameters
}
return nil
}
func (x *PipelineSpec_RuntimeConfig) GetPipelineRoot() string {
if x != nil {
return x.PipelineRoot
}
return ""
}
var File_backend_api_v1_pipeline_spec_proto protoreflect.FileDescriptor
var file_backend_api_v1_pipeline_spec_proto_rawDesc = []byte{
0x0a, 0x22, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
0x2f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x31, 0x1a, 0x1e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e,
0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74,
0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfb, 0x03, 0x0a, 0x0c, 0x50, 0x69, 0x70,
0x65, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x70, 0x65, 0x63, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x69, 0x70,
0x65, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x70,
0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0d, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6e, 0x61, 0x6d,
0x65, 0x12, 0x2c, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x61,
0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x77, 0x6f,
0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12,
0x2c, 0x0a, 0x11, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6d, 0x61, 0x6e, 0x69,
0x66, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x69, 0x70, 0x65,
0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a,
0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x0d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72,
0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x46, 0x0a, 0x0e,
0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x06,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69,
0x6e, 0x65, 0x53, 0x70, 0x65, 0x63, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x1a, 0xcf, 0x01, 0x0a, 0x0d, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65,
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4e, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65,
0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x76, 0x31, 0x2e,
0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x70, 0x65, 0x63, 0x2e, 0x52, 0x75, 0x6e,
0x74, 0x69, 0x6d, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d,
0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61,
0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69,
0x6e, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70,
0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x1a, 0x48, 0x0a, 0x0f,
0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
0x79, 0x12, 0x1f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x09, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7c, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
0x1e, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x03, 0x48, 0x00, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12,
0x24, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x24, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x73,
0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x69, 0x70, 0x65,
0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70,
0x69, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x6f, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_backend_api_v1_pipeline_spec_proto_rawDescOnce sync.Once
file_backend_api_v1_pipeline_spec_proto_rawDescData = file_backend_api_v1_pipeline_spec_proto_rawDesc
)
func file_backend_api_v1_pipeline_spec_proto_rawDescGZIP() []byte {
file_backend_api_v1_pipeline_spec_proto_rawDescOnce.Do(func() {
file_backend_api_v1_pipeline_spec_proto_rawDescData = protoimpl.X.CompressGZIP(file_backend_api_v1_pipeline_spec_proto_rawDescData)
})
return file_backend_api_v1_pipeline_spec_proto_rawDescData
}
var file_backend_api_v1_pipeline_spec_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_backend_api_v1_pipeline_spec_proto_goTypes = []interface{}{
(*PipelineSpec)(nil), // 0: v1.PipelineSpec
(*Value)(nil), // 1: v1.Value
(*PipelineSpec_RuntimeConfig)(nil), // 2: v1.PipelineSpec.RuntimeConfig
nil, // 3: v1.PipelineSpec.RuntimeConfig.ParametersEntry
(*Parameter)(nil), // 4: v1.Parameter
}
var file_backend_api_v1_pipeline_spec_proto_depIdxs = []int32{
4, // 0: v1.PipelineSpec.parameters:type_name -> v1.Parameter
2, // 1: v1.PipelineSpec.runtime_config:type_name -> v1.PipelineSpec.RuntimeConfig
3, // 2: v1.PipelineSpec.RuntimeConfig.parameters:type_name -> v1.PipelineSpec.RuntimeConfig.ParametersEntry
1, // 3: v1.PipelineSpec.RuntimeConfig.ParametersEntry.value:type_name -> v1.Value
4, // [4:4] is the sub-list for method output_type
4, // [4:4] is the sub-list for method input_type
4, // [4:4] is the sub-list for extension type_name
4, // [4:4] is the sub-list for extension extendee
0, // [0:4] is the sub-list for field type_name
}
func init() { file_backend_api_v1_pipeline_spec_proto_init() }
func file_backend_api_v1_pipeline_spec_proto_init() {
if File_backend_api_v1_pipeline_spec_proto != nil {
return
}
file_backend_api_v1_parameter_proto_init()
if !protoimpl.UnsafeEnabled {
file_backend_api_v1_pipeline_spec_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PipelineSpec); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_backend_api_v1_pipeline_spec_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*Value); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_backend_api_v1_pipeline_spec_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*PipelineSpec_RuntimeConfig); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
file_backend_api_v1_pipeline_spec_proto_msgTypes[1].OneofWrappers = []interface{}{
(*Value_IntValue)(nil),
(*Value_DoubleValue)(nil),
(*Value_StringValue)(nil),
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_backend_api_v1_pipeline_spec_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_backend_api_v1_pipeline_spec_proto_goTypes,
DependencyIndexes: file_backend_api_v1_pipeline_spec_proto_depIdxs,
MessageInfos: file_backend_api_v1_pipeline_spec_proto_msgTypes,
}.Build()
File_backend_api_v1_pipeline_spec_proto = out.File
file_backend_api_v1_pipeline_spec_proto_rawDesc = nil
file_backend_api_v1_pipeline_spec_proto_goTypes = nil
file_backend_api_v1_pipeline_spec_proto_depIdxs = nil
}
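The oneof wrappers are easiest to see in use. A sketch that builds a PipelineSpec with a V2 runtime config and reads a typed parameter back; the IDs, bucket path, and parameter names are made up:

package main

import (
	"fmt"

	gc "github.com/kubeflow/pipelines/backend/api/v1/go_client"
)

func main() {
	spec := &gc.PipelineSpec{
		PipelineId: "pipeline-123", // placeholder ID
		RuntimeConfig: &gc.PipelineSpec_RuntimeConfig{
			PipelineRoot: "gs://my-bucket/pipeline-root", // placeholder bucket
			Parameters: map[string]*gc.Value{
				// Each Value carries exactly one member of the oneof.
				"epochs": {Value: &gc.Value_IntValue{IntValue: 10}},
				"rate":   {Value: &gc.Value_DoubleValue{DoubleValue: 0.01}},
				"mode":   {Value: &gc.Value_StringValue{StringValue: "train"}},
			},
		},
	}

	// The typed getters return the zero value when a different member is set.
	epochs := spec.GetRuntimeConfig().GetParameters()["epochs"]
	fmt.Println(epochs.GetIntValue(), epochs.GetStringValue()) // 10 ""
}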


@@ -0,0 +1,375 @@
// Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: backend/api/v1/report.proto
package go_client
import (
context "context"
_ "google.golang.org/genproto/googleapis/api/annotations"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ReportWorkflowRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Workflow is a workflow custom resource marshalled into a JSON string.
Workflow string `protobuf:"bytes,1,opt,name=workflow,proto3" json:"workflow,omitempty"`
}
func (x *ReportWorkflowRequest) Reset() {
*x = ReportWorkflowRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_report_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ReportWorkflowRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReportWorkflowRequest) ProtoMessage() {}
func (x *ReportWorkflowRequest) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_report_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReportWorkflowRequest.ProtoReflect.Descriptor instead.
func (*ReportWorkflowRequest) Descriptor() ([]byte, []int) {
return file_backend_api_v1_report_proto_rawDescGZIP(), []int{0}
}
func (x *ReportWorkflowRequest) GetWorkflow() string {
if x != nil {
return x.Workflow
}
return ""
}
type ReportScheduledWorkflowRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// ScheduledWorkflow is a ScheduledWorkflow resource marshalled into a JSON string.
ScheduledWorkflow string `protobuf:"bytes,1,opt,name=scheduled_workflow,proto3" json:"scheduled_workflow,omitempty"`
}
func (x *ReportScheduledWorkflowRequest) Reset() {
*x = ReportScheduledWorkflowRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_report_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ReportScheduledWorkflowRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ReportScheduledWorkflowRequest) ProtoMessage() {}
func (x *ReportScheduledWorkflowRequest) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_report_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ReportScheduledWorkflowRequest.ProtoReflect.Descriptor instead.
func (*ReportScheduledWorkflowRequest) Descriptor() ([]byte, []int) {
return file_backend_api_v1_report_proto_rawDescGZIP(), []int{1}
}
func (x *ReportScheduledWorkflowRequest) GetScheduledWorkflow() string {
if x != nil {
return x.ScheduledWorkflow
}
return ""
}
var File_backend_api_v1_report_proto protoreflect.FileDescriptor
var file_backend_api_v1_report_proto_rawDesc = []byte{
0x0a, 0x1b, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
0x2f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76,
0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x33, 0x0a, 0x15,
0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f,
0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f,
0x77, 0x22, 0x50, 0x0a, 0x1e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64,
0x75, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64,
0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66,
0x6c, 0x6f, 0x77, 0x32, 0x8b, 0x02, 0x0a, 0x0d, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x69, 0x0a, 0x0e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x57,
0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x19, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70,
0x6f, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93,
0x02, 0x1e, 0x22, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x77, 0x6f, 0x72,
0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x3a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77,
0x12, 0x8e, 0x01, 0x0a, 0x17, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64,
0x75, 0x6c, 0x65, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x22, 0x2e, 0x76,
0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65,
0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31,
0x22, 0x1b, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64,
0x75, 0x6c, 0x65, 0x64, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x3a, 0x12, 0x73,
0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f,
0x77, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e,
0x65, 0x73, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76,
0x31, 0x2f, 0x67, 0x6f, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}
var (
file_backend_api_v1_report_proto_rawDescOnce sync.Once
file_backend_api_v1_report_proto_rawDescData = file_backend_api_v1_report_proto_rawDesc
)
func file_backend_api_v1_report_proto_rawDescGZIP() []byte {
file_backend_api_v1_report_proto_rawDescOnce.Do(func() {
file_backend_api_v1_report_proto_rawDescData = protoimpl.X.CompressGZIP(file_backend_api_v1_report_proto_rawDescData)
})
return file_backend_api_v1_report_proto_rawDescData
}
var file_backend_api_v1_report_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_backend_api_v1_report_proto_goTypes = []interface{}{
(*ReportWorkflowRequest)(nil), // 0: v1.ReportWorkflowRequest
(*ReportScheduledWorkflowRequest)(nil), // 1: v1.ReportScheduledWorkflowRequest
(*emptypb.Empty)(nil), // 2: google.protobuf.Empty
}
var file_backend_api_v1_report_proto_depIdxs = []int32{
0, // 0: v1.ReportService.ReportWorkflow:input_type -> v1.ReportWorkflowRequest
1, // 1: v1.ReportService.ReportScheduledWorkflow:input_type -> v1.ReportScheduledWorkflowRequest
2, // 2: v1.ReportService.ReportWorkflow:output_type -> google.protobuf.Empty
2, // 3: v1.ReportService.ReportScheduledWorkflow:output_type -> google.protobuf.Empty
2, // [2:4] is the sub-list for method output_type
0, // [0:2] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_backend_api_v1_report_proto_init() }
func file_backend_api_v1_report_proto_init() {
if File_backend_api_v1_report_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_backend_api_v1_report_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReportWorkflowRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_backend_api_v1_report_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ReportScheduledWorkflowRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_backend_api_v1_report_proto_rawDesc,
NumEnums: 0,
NumMessages: 2,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_backend_api_v1_report_proto_goTypes,
DependencyIndexes: file_backend_api_v1_report_proto_depIdxs,
MessageInfos: file_backend_api_v1_report_proto_msgTypes,
}.Build()
File_backend_api_v1_report_proto = out.File
file_backend_api_v1_report_proto_rawDesc = nil
file_backend_api_v1_report_proto_goTypes = nil
file_backend_api_v1_report_proto_depIdxs = nil
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConnInterface
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion6
// ReportServiceClient is the client API for ReportService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ReportServiceClient interface {
ReportWorkflow(ctx context.Context, in *ReportWorkflowRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
ReportScheduledWorkflow(ctx context.Context, in *ReportScheduledWorkflowRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
type reportServiceClient struct {
cc grpc.ClientConnInterface
}
func NewReportServiceClient(cc grpc.ClientConnInterface) ReportServiceClient {
return &reportServiceClient{cc}
}
func (c *reportServiceClient) ReportWorkflow(ctx context.Context, in *ReportWorkflowRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/v1.ReportService/ReportWorkflow", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *reportServiceClient) ReportScheduledWorkflow(ctx context.Context, in *ReportScheduledWorkflowRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/v1.ReportService/ReportScheduledWorkflow", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
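// --- Illustrative sketch, not part of the generated file ---
// A minimal client-side usage example for the interface above. The address
// "localhost:8887", the insecure dial option, and the empty request are
// assumptions for illustration only; substitute real connection settings
// and payload.
func exampleReportWorkflow() error {
	conn, err := grpc.Dial("localhost:8887", grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()
	client := NewReportServiceClient(conn)
	// The request carries the workflow to report; the payload shape is
	// defined by ReportWorkflowRequest above.
	_, err = client.ReportWorkflow(context.Background(), &ReportWorkflowRequest{})
	return err
}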
// ReportServiceServer is the server API for ReportService service.
type ReportServiceServer interface {
ReportWorkflow(context.Context, *ReportWorkflowRequest) (*emptypb.Empty, error)
ReportScheduledWorkflow(context.Context, *ReportScheduledWorkflowRequest) (*emptypb.Empty, error)
}
// UnimplementedReportServiceServer can be embedded to have forward compatible implementations.
type UnimplementedReportServiceServer struct {
}
func (*UnimplementedReportServiceServer) ReportWorkflow(context.Context, *ReportWorkflowRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method ReportWorkflow not implemented")
}
func (*UnimplementedReportServiceServer) ReportScheduledWorkflow(context.Context, *ReportScheduledWorkflowRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method ReportScheduledWorkflow not implemented")
}
func RegisterReportServiceServer(s *grpc.Server, srv ReportServiceServer) {
s.RegisterService(&_ReportService_serviceDesc, srv)
}
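// --- Illustrative sketch, not part of the generated file ---
// Embedding UnimplementedReportServiceServer keeps a custom server forward
// compatible: methods added to the service later fall back to the
// Unimplemented stubs. The type name and the helper below are placeholders
// for illustration; it assumes "net" is imported by the caller.
type demoReportServer struct {
	UnimplementedReportServiceServer
}

func (s *demoReportServer) ReportWorkflow(ctx context.Context, req *ReportWorkflowRequest) (*emptypb.Empty, error) {
	// A real implementation would persist the reported workflow here.
	return &emptypb.Empty{}, nil
}

func serveReports(lis net.Listener) error {
	s := grpc.NewServer()
	RegisterReportServiceServer(s, &demoReportServer{})
	return s.Serve(lis)
}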
func _ReportService_ReportWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportWorkflowRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportServiceServer).ReportWorkflow(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v1.ReportService/ReportWorkflow",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportServiceServer).ReportWorkflow(ctx, req.(*ReportWorkflowRequest))
}
return interceptor(ctx, in, info, handler)
}
func _ReportService_ReportScheduledWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReportScheduledWorkflowRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ReportServiceServer).ReportScheduledWorkflow(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v1.ReportService/ReportScheduledWorkflow",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ReportServiceServer).ReportScheduledWorkflow(ctx, req.(*ReportScheduledWorkflowRequest))
}
return interceptor(ctx, in, info, handler)
}
var _ReportService_serviceDesc = grpc.ServiceDesc{
ServiceName: "v1.ReportService",
HandlerType: (*ReportServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ReportWorkflow",
Handler: _ReportService_ReportWorkflow_Handler,
},
{
MethodName: "ReportScheduledWorkflow",
Handler: _ReportService_ReportScheduledWorkflow_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "backend/api/v1/report.proto",
}

View File

@@ -0,0 +1,256 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: backend/api/v1/report.proto
/*
Package go_client is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package go_client
import (
"context"
"io"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = metadata.Join
func request_ReportService_ReportWorkflow_0(ctx context.Context, marshaler runtime.Marshaler, client ReportServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReportWorkflowRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ReportWorkflow(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ReportService_ReportWorkflow_0(ctx context.Context, marshaler runtime.Marshaler, server ReportServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReportWorkflowRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.Workflow); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ReportWorkflow(ctx, &protoReq)
return msg, metadata, err
}
func request_ReportService_ReportScheduledWorkflow_0(ctx context.Context, marshaler runtime.Marshaler, client ReportServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReportScheduledWorkflowRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.ScheduledWorkflow); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ReportScheduledWorkflow(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_ReportService_ReportScheduledWorkflow_0(ctx context.Context, marshaler runtime.Marshaler, server ReportServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReportScheduledWorkflowRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq.ScheduledWorkflow); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.ReportScheduledWorkflow(ctx, &protoReq)
return msg, metadata, err
}
// RegisterReportServiceHandlerServer registers the http handlers for service ReportService to "mux".
// UnaryRPC: calls ReportServiceServer directly.
// StreamingRPC: currently unsupported, pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterReportServiceHandlerFromEndpoint instead.
func RegisterReportServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ReportServiceServer) error {
mux.Handle("POST", pattern_ReportService_ReportWorkflow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.ReportService/ReportWorkflow", runtime.WithHTTPPathPattern("/apis/v1/workflows"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_ReportService_ReportWorkflow_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ReportService_ReportWorkflow_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_ReportService_ReportScheduledWorkflow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/v1.ReportService/ReportScheduledWorkflow", runtime.WithHTTPPathPattern("/apis/v1/scheduledworkflows"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_ReportService_ReportScheduledWorkflow_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ReportService_ReportScheduledWorkflow_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
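// --- Illustrative sketch, not part of the generated file ---
// Wiring the REST handlers directly to an in-process ReportServiceServer,
// subject to the feature caveats in the comment above. The listen address
// is a placeholder.
func serveRESTInProcess(server ReportServiceServer) error {
	mux := runtime.NewServeMux()
	if err := RegisterReportServiceHandlerServer(context.Background(), mux, server); err != nil {
		return err
	}
	return http.ListenAndServe(":8888", mux)
}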
// RegisterReportServiceHandlerFromEndpoint is the same as RegisterReportServiceHandler but
// automatically dials "endpoint" and closes the connection when "ctx" is done.
func RegisterReportServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterReportServiceHandler(ctx, mux, conn)
}
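// --- Illustrative sketch, not part of the generated file ---
// The more common wiring: proxy REST traffic to a running gRPC endpoint.
// Both addresses and the insecure dial option are assumptions for
// illustration.
func serveRESTGateway() error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := RegisterReportServiceHandlerFromEndpoint(ctx, mux, "localhost:8887", opts); err != nil {
		return err
	}
	// POST /apis/v1/workflows and POST /apis/v1/scheduledworkflows now
	// translate to the corresponding gRPC methods.
	return http.ListenAndServe(":8888", mux)
}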
// RegisterReportServiceHandler registers the http handlers for service ReportService to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterReportServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterReportServiceHandlerClient(ctx, mux, NewReportServiceClient(conn))
}
// RegisterReportServiceHandlerClient registers the http handlers for service ReportService
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ReportServiceClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed-in "ReportServiceClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.), then it is up to that
// "ReportServiceClient" to call the correct interceptors.
func RegisterReportServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ReportServiceClient) error {
mux.Handle("POST", pattern_ReportService_ReportWorkflow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.ReportService/ReportWorkflow", runtime.WithHTTPPathPattern("/apis/v1/workflows"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ReportService_ReportWorkflow_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ReportService_ReportWorkflow_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_ReportService_ReportScheduledWorkflow_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/v1.ReportService/ReportScheduledWorkflow", runtime.WithHTTPPathPattern("/apis/v1/scheduledworkflows"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_ReportService_ReportScheduledWorkflow_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_ReportService_ReportScheduledWorkflow_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_ReportService_ReportWorkflow_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1", "workflows"}, ""))
pattern_ReportService_ReportScheduledWorkflow_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"apis", "v1", "scheduledworkflows"}, ""))
)
var (
forward_ReportService_ReportWorkflow_0 = runtime.ForwardResponseMessage
forward_ReportService_ReportScheduledWorkflow_0 = runtime.ForwardResponseMessage
)

View File

@@ -0,0 +1,383 @@
// Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.17.3
// source: backend/api/v1/resource_reference.proto
package go_client
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type ResourceType int32
const (
ResourceType_UNKNOWN_RESOURCE_TYPE ResourceType = 0
ResourceType_EXPERIMENT ResourceType = 1
ResourceType_JOB ResourceType = 2
ResourceType_PIPELINE ResourceType = 3
ResourceType_PIPELINE_VERSION ResourceType = 4
ResourceType_NAMESPACE ResourceType = 5
)
// Enum value maps for ResourceType.
var (
ResourceType_name = map[int32]string{
0: "UNKNOWN_RESOURCE_TYPE",
1: "EXPERIMENT",
2: "JOB",
3: "PIPELINE",
4: "PIPELINE_VERSION",
5: "NAMESPACE",
}
ResourceType_value = map[string]int32{
"UNKNOWN_RESOURCE_TYPE": 0,
"EXPERIMENT": 1,
"JOB": 2,
"PIPELINE": 3,
"PIPELINE_VERSION": 4,
"NAMESPACE": 5,
}
)
func (x ResourceType) Enum() *ResourceType {
p := new(ResourceType)
*p = x
return p
}
func (x ResourceType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (ResourceType) Descriptor() protoreflect.EnumDescriptor {
return file_backend_api_v1_resource_reference_proto_enumTypes[0].Descriptor()
}
func (ResourceType) Type() protoreflect.EnumType {
return &file_backend_api_v1_resource_reference_proto_enumTypes[0]
}
func (x ResourceType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use ResourceType.Descriptor instead.
func (ResourceType) EnumDescriptor() ([]byte, []int) {
return file_backend_api_v1_resource_reference_proto_rawDescGZIP(), []int{0}
}
type Relationship int32
const (
Relationship_UNKNOWN_RELATIONSHIP Relationship = 0
Relationship_OWNER Relationship = 1
Relationship_CREATOR Relationship = 2
)
// Enum value maps for Relationship.
var (
Relationship_name = map[int32]string{
0: "UNKNOWN_RELATIONSHIP",
1: "OWNER",
2: "CREATOR",
}
Relationship_value = map[string]int32{
"UNKNOWN_RELATIONSHIP": 0,
"OWNER": 1,
"CREATOR": 2,
}
)
func (x Relationship) Enum() *Relationship {
p := new(Relationship)
*p = x
return p
}
func (x Relationship) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Relationship) Descriptor() protoreflect.EnumDescriptor {
return file_backend_api_v1_resource_reference_proto_enumTypes[1].Descriptor()
}
func (Relationship) Type() protoreflect.EnumType {
return &file_backend_api_v1_resource_reference_proto_enumTypes[1]
}
func (x Relationship) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Relationship.Descriptor instead.
func (Relationship) EnumDescriptor() ([]byte, []int) {
return file_backend_api_v1_resource_reference_proto_rawDescGZIP(), []int{1}
}
type ResourceKey struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// The type of the resource being referred to.
Type ResourceType `protobuf:"varint,1,opt,name=type,proto3,enum=v1.ResourceType" json:"type,omitempty"`
// The ID of the resource being referred to.
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
}
func (x *ResourceKey) Reset() {
*x = ResourceKey{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_resource_reference_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ResourceKey) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ResourceKey) ProtoMessage() {}
func (x *ResourceKey) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_resource_reference_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ResourceKey.ProtoReflect.Descriptor instead.
func (*ResourceKey) Descriptor() ([]byte, []int) {
return file_backend_api_v1_resource_reference_proto_rawDescGZIP(), []int{0}
}
func (x *ResourceKey) GetType() ResourceType {
if x != nil {
return x.Type
}
return ResourceType_UNKNOWN_RESOURCE_TYPE
}
func (x *ResourceKey) GetId() string {
if x != nil {
return x.Id
}
return ""
}
type ResourceReference struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Key *ResourceKey `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
// The name of the resource being referred to.
Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
// Required field. The relationship from the referred resource to the object.
Relationship Relationship `protobuf:"varint,2,opt,name=relationship,proto3,enum=v1.Relationship" json:"relationship,omitempty"`
}
func (x *ResourceReference) Reset() {
*x = ResourceReference{}
if protoimpl.UnsafeEnabled {
mi := &file_backend_api_v1_resource_reference_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ResourceReference) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ResourceReference) ProtoMessage() {}
func (x *ResourceReference) ProtoReflect() protoreflect.Message {
mi := &file_backend_api_v1_resource_reference_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ResourceReference.ProtoReflect.Descriptor instead.
func (*ResourceReference) Descriptor() ([]byte, []int) {
return file_backend_api_v1_resource_reference_proto_rawDescGZIP(), []int{1}
}
func (x *ResourceReference) GetKey() *ResourceKey {
if x != nil {
return x.Key
}
return nil
}
func (x *ResourceReference) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *ResourceReference) GetRelationship() Relationship {
if x != nil {
return x.Relationship
}
return Relationship_UNKNOWN_RELATIONSHIP
}
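// --- Illustrative sketch, not part of the generated file ---
// Constructing a reference that marks an experiment as the owner of some
// other object; the ID is a placeholder.
func exampleOwnerReference() *ResourceReference {
	return &ResourceReference{
		Key: &ResourceKey{
			Type: ResourceType_EXPERIMENT,
			Id:   "experiment-id-placeholder",
		},
		Relationship: Relationship_OWNER,
	}
}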
var File_backend_api_v1_resource_reference_proto protoreflect.FileDescriptor
var file_backend_api_v1_resource_reference_proto_rawDesc = []byte{
0x0a, 0x27, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31,
0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65,
0x6e, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x76, 0x31, 0x22, 0x43, 0x0a,
0x0b, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x04,
0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x76, 0x31, 0x2e,
0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79,
0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02,
0x69, 0x64, 0x22, 0x80, 0x01, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e,
0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
0x34, 0x0a, 0x0c, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x68, 0x69, 0x70, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6c, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x68, 0x69, 0x70, 0x52, 0x0c, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x68, 0x69, 0x70, 0x2a, 0x75, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
0x5f, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00,
0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x45, 0x52, 0x49, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x01,
0x12, 0x07, 0x0a, 0x03, 0x4a, 0x4f, 0x42, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x49, 0x50,
0x45, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x50, 0x49, 0x50, 0x45, 0x4c,
0x49, 0x4e, 0x45, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x04, 0x12, 0x0d, 0x0a,
0x09, 0x4e, 0x41, 0x4d, 0x45, 0x53, 0x50, 0x41, 0x43, 0x45, 0x10, 0x05, 0x2a, 0x40, 0x0a, 0x0c,
0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x68, 0x69, 0x70, 0x12, 0x18, 0x0a, 0x14,
0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x52, 0x45, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e,
0x53, 0x48, 0x49, 0x50, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x57, 0x4e, 0x45, 0x52, 0x10,
0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x52, 0x45, 0x41, 0x54, 0x4f, 0x52, 0x10, 0x02, 0x42, 0x38,
0x5a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62,
0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x2f,
0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x67,
0x6f, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
file_backend_api_v1_resource_reference_proto_rawDescOnce sync.Once
file_backend_api_v1_resource_reference_proto_rawDescData = file_backend_api_v1_resource_reference_proto_rawDesc
)
func file_backend_api_v1_resource_reference_proto_rawDescGZIP() []byte {
file_backend_api_v1_resource_reference_proto_rawDescOnce.Do(func() {
file_backend_api_v1_resource_reference_proto_rawDescData = protoimpl.X.CompressGZIP(file_backend_api_v1_resource_reference_proto_rawDescData)
})
return file_backend_api_v1_resource_reference_proto_rawDescData
}
var file_backend_api_v1_resource_reference_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_backend_api_v1_resource_reference_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_backend_api_v1_resource_reference_proto_goTypes = []interface{}{
(ResourceType)(0), // 0: v1.ResourceType
(Relationship)(0), // 1: v1.Relationship
(*ResourceKey)(nil), // 2: v1.ResourceKey
(*ResourceReference)(nil), // 3: v1.ResourceReference
}
var file_backend_api_v1_resource_reference_proto_depIdxs = []int32{
0, // 0: v1.ResourceKey.type:type_name -> v1.ResourceType
2, // 1: v1.ResourceReference.key:type_name -> v1.ResourceKey
1, // 2: v1.ResourceReference.relationship:type_name -> v1.Relationship
3, // [3:3] is the sub-list for method output_type
3, // [3:3] is the sub-list for method input_type
3, // [3:3] is the sub-list for extension type_name
3, // [3:3] is the sub-list for extension extendee
0, // [0:3] is the sub-list for field type_name
}
func init() { file_backend_api_v1_resource_reference_proto_init() }
func file_backend_api_v1_resource_reference_proto_init() {
if File_backend_api_v1_resource_reference_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_backend_api_v1_resource_reference_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ResourceKey); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_backend_api_v1_resource_reference_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ResourceReference); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_backend_api_v1_resource_reference_proto_rawDesc,
NumEnums: 2,
NumMessages: 2,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_backend_api_v1_resource_reference_proto_goTypes,
DependencyIndexes: file_backend_api_v1_resource_reference_proto_depIdxs,
EnumInfos: file_backend_api_v1_resource_reference_proto_enumTypes,
MessageInfos: file_backend_api_v1_resource_reference_proto_msgTypes,
}.Build()
File_backend_api_v1_resource_reference_proto = out.File
file_backend_api_v1_resource_reference_proto_rawDesc = nil
file_backend_api_v1_resource_reference_proto_goTypes = nil
file_backend_api_v1_resource_reference_proto_depIdxs = nil
}

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff