diff --git a/code_search/docker/ks/launch_search_index_creator_job.sh b/code_search/docker/ks/launch_search_index_creator_job.sh old mode 100755 new mode 100644 diff --git a/code_search/docker/ks/submit_code_embeddings_job.sh b/code_search/docker/ks/submit_code_embeddings_job.sh old mode 100755 new mode 100644 diff --git a/code_search/docker/ks/update_index.sh b/code_search/docker/ks/update_index.sh old mode 100755 new mode 100644 diff --git a/code_search/docker/t2t/t2t-entrypoint.sh b/code_search/docker/t2t/t2t-entrypoint.sh old mode 100755 new mode 100644 diff --git a/code_search/docker/ui/build.sh b/code_search/docker/ui/build.sh old mode 100755 new mode 100644 diff --git a/code_search/src/code_search/nmslib/cli/start_test_server.sh b/code_search/src/code_search/nmslib/cli/start_test_server.sh old mode 100755 new mode 100644 diff --git a/demos/simple_pipeline/gpu-example-pipeline.py b/demos/simple_pipeline/gpu-example-pipeline.py old mode 100755 new mode 100644 diff --git a/demos/yelp_demo/demo_setup/create_context.sh b/demos/yelp_demo/demo_setup/create_context.sh old mode 100755 new mode 100644 diff --git a/demos/yelp_demo/pipelines/gpu-example-pipeline.py b/demos/yelp_demo/pipelines/gpu-example-pipeline.py old mode 100755 new mode 100644 diff --git a/demos/yelp_demo/yelp/yelp_sentiment/worker_launcher.sh b/demos/yelp_demo/yelp/yelp_sentiment/worker_launcher.sh old mode 100755 new mode 100644 diff --git a/github_issue_summarization/Makefile b/github_issue_summarization/Makefile old mode 100755 new mode 100644 diff --git a/github_issue_summarization/pipelines/components/kubeflow-resources/containers/tf-serving-gh/build.sh b/github_issue_summarization/pipelines/components/kubeflow-resources/containers/tf-serving-gh/build.sh old mode 100755 new mode 100644 diff --git a/github_issue_summarization/pipelines/components/t2t/containers/base/build.sh b/github_issue_summarization/pipelines/components/t2t/containers/base/build.sh old mode 100755 new mode 100644 diff --git a/github_issue_summarization/pipelines/components/t2t/containers/t2t_app/build.sh b/github_issue_summarization/pipelines/components/t2t/containers/t2t_app/build.sh old mode 100755 new mode 100644 diff --git a/github_issue_summarization/pipelines/components/t2t/containers/t2t_proc/build.sh b/github_issue_summarization/pipelines/components/t2t/containers/t2t_proc/build.sh old mode 100755 new mode 100644 diff --git a/github_issue_summarization/pipelines/components/t2t/containers/t2t_train/build.sh b/github_issue_summarization/pipelines/components/t2t/containers/t2t_train/build.sh old mode 100755 new mode 100644 diff --git a/github_issue_summarization/pipelines/components/t2t/containers/webapp-launcher/build.sh b/github_issue_summarization/pipelines/components/t2t/containers/webapp-launcher/build.sh old mode 100755 new mode 100644 diff --git a/mnist/Makefile b/mnist/Makefile old mode 100755 new mode 100644 diff --git a/mnist/training/base/definition.sh b/mnist/training/base/definition.sh old mode 100755 new mode 100644 diff --git a/pipelines/azurepipeline/.gitignore b/pipelines/azurepipeline/.gitignore new file mode 100644 index 00000000..4b6f63b8 --- /dev/null +++ b/pipelines/azurepipeline/.gitignore @@ -0,0 +1,18 @@ +# standard things +.vscode +.ipynb_checkpoints/ +__pycache__ + +# Environment Variables +*.env +*.cfg +*-creds.yaml + +# models and data +data/ +model/ +*.tar.gz +*.h5 +*.zip + +aml_config/ \ No newline at end of file diff --git a/pipelines/azurepipeline/LICENSE b/pipelines/azurepipeline/LICENSE new file mode 100644 
index 00000000..261eeb9e --- /dev/null +++ b/pipelines/azurepipeline/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/pipelines/azurepipeline/README.md b/pipelines/azurepipeline/README.md new file mode 100644 index 00000000..4e860f93 --- /dev/null +++ b/pipelines/azurepipeline/README.md @@ -0,0 +1,5 @@ +[![Build Status](https://dev.azure.com/daaronch/Kubeflow%20and%20MLOps/_apis/build/status/aronchick.kubeflow-and-mlops?branchName=master)](https://dev.azure.com/daaronch/Kubeflow%20and%20MLOps/_build/latest?definitionId=3&branchName=master) + +# Kubeflow and Azure Pipelines + +More to come! diff --git a/pipelines/azurepipeline/args.txt b/pipelines/azurepipeline/args.txt new file mode 100644 index 00000000..ae33034a --- /dev/null +++ b/pipelines/azurepipeline/args.txt @@ -0,0 +1 @@ +-n tacoprofile -m tacosandburritos -i /scripts/inferenceconfig.json -d '{"image":"https://www.exploreveg.org/files/2015/05/sofritas-burrito.jpeg"}' -t 72f988bf-86f1-41af-91ab-2d7cd011db47 -r taco-rg -w taco-workspace -s 2991e3e4-4e8d-451f-aa17-640d89c63144 -p xP2a6Bp1vSOjKholxl5bLPd.=r@ZZJn[ -u 1308a130-d549-44e1-ba66-ce8c487d76e3 \ No newline at end of file diff --git a/pipelines/azurepipeline/azure-pipelines.yml b/pipelines/azurepipeline/azure-pipelines.yml new file mode 100644 index 00000000..f35a6ebc --- /dev/null +++ b/pipelines/azurepipeline/azure-pipelines.yml @@ -0,0 +1,87 @@ +# Kubeflow Container Build Pipeline + +trigger: +- master + +pr: none + +stages: +- stage: ContainerConfig + displayName: 'Configure and Register Containers' + jobs: + - job: Containers + pool: + name: default + steps: + - task: AzureCLI@1 + inputs: + azureSubscription: 'Shared Data Platform - R+D (1308a130-d549-44e1-ba66-ce8c487d76e3)' + scriptLocation: 'inlineScript' + inlineScript: ' + sudo az acr login -n kubeflowregistry && + cd code && + cd preprocess && + sudo docker build -t kubeflowregistry.azurecr.io/kubeflow/preprocess:$BUILD_SOURCEVERSION . && + sudo docker push kubeflowregistry.azurecr.io/kubeflow/preprocess:$BUILD_SOURCEVERSION ' + displayName: 'Build & Push Preprocess Image' + + - task: AzureCLI@1 + inputs: + azureSubscription: 'Shared Data Platform - R+D (1308a130-d549-44e1-ba66-ce8c487d76e3)' + scriptLocation: 'inlineScript' + inlineScript: ' + cd code && + cd training && + sudo docker build -t kubeflowregistry.azurecr.io/kubeflow/training:$BUILD_SOURCEVERSION . && + sudo docker push kubeflowregistry.azurecr.io/kubeflow/training:$BUILD_SOURCEVERSION ' + displayName: 'Build & Push Training Image' + + - task: AzureCLI@1 + inputs: + azureSubscription: 'Shared Data Platform - R+D (1308a130-d549-44e1-ba66-ce8c487d76e3)' + scriptLocation: 'inlineScript' + inlineScript: ' + cd code && + cd register && + sudo docker build -t kubeflowregistry.azurecr.io/kubeflow/register:$BUILD_SOURCEVERSION . 
&&
+          sudo docker push kubeflowregistry.azurecr.io/kubeflow/register:$BUILD_SOURCEVERSION '
+        displayName: 'Build & Push Register Image'
+
+
+# Moved KF step to build
+- stage: KubeflowTrigger
+  dependsOn: ContainerConfig
+  displayName: 'Trigger Kubeflow Pipeline'
+  variables:
+  - group: kf-variables
+  jobs:
+  - job: Kubeflow
+    pool:
+      name: default
+    steps:
+    - task: AzureCLI@1
+      env:
+        KF_MAPPED_SERVICE_PRINCIPAL_PASSWORD: $(KF_SERVICE_PRINCIPAL_PASSWORD)
+      inputs:
+        azureSubscription: 'Shared Data Platform - R+D (1308a130-d549-44e1-ba66-ce8c487d76e3)'
+        scriptLocation: 'inlineScript'
+        inlineScript: |
+          az aks get-credentials -g kubeflow-mlops-rg -n kubeflow-mlops-cluster
+          kubectl port-forward --namespace kubeflow svc/ml-pipeline 8888:8888 &
+          kubepid=$!
+
+          sudo apt-get install -y python3-setuptools
+          pip3 install wheel
+          pip3 install kfp
+
+          touch script.py
+          echo "import kfp" >> script.py
+          echo "client = kfp.Client(host='localhost:8888')" >> script.py
+          echo "client.run_pipeline('$KF_EXPERIMENT_ID', 'Run ${BUILD_BUILDID}', params={'imagetag': '${BUILD_SOURCEVERSION}', 'tenant-id': '$KF_TENANT_ID', 'service-principal-id': '$KF_SERVICE_PRINCIPAL_ID', 'service-principal-password': '$KF_MAPPED_SERVICE_PRINCIPAL_PASSWORD', 'subscription-id': '$KF_SUBSCRIPTION_ID', 'resource-group': '$KF_RESOURCE_GROUP', 'workspace': '$KF_WORKSPACE', 'persistent-volume-name': '$KF_PERSISTENT_VOLUME_NAME', 'persistent-volume-path': '$KF_PERSISTENT_VOLUME_PATH', 'data-download': '$KF_DATA_DOWNLOAD', 'epochs': '$KF_EPOCHS', 'batch': '$KF_BATCH', 'learning-rate': '$KF_LEARNING_RATE', 'model-name': '$KF_MODEL_NAME'}, pipeline_id='$KF_PIPELINE_ID')" >> script.py
+
+          cat script.py
+
+          python3 script.py
+
+          kill $kubepid
+      displayName: 'Trigger Kubeflow Pipeline'
diff --git a/pipelines/azurepipeline/code/deploy/Dockerfile b/pipelines/azurepipeline/code/deploy/Dockerfile
new file mode 100644
index 00000000..35c3468a
--- /dev/null
+++ b/pipelines/azurepipeline/code/deploy/Dockerfile
@@ -0,0 +1,9 @@
+FROM mcr.microsoft.com/azure-cli
+RUN az extension add -n azure-cli-ml
+RUN pip install --upgrade pip
+COPY deploymentconfig.json /scripts/deploymentconfig.json
+COPY inferenceconfig.json /scripts/inferenceconfig.json
+COPY deploy.sh /scripts/deploy.sh
+COPY score.py /scripts/score.py
+COPY environment.yml /scripts/environment.yml
+CMD bash
diff --git a/pipelines/azurepipeline/code/deploy/aksdeploymentconfig.json b/pipelines/azurepipeline/code/deploy/aksdeploymentconfig.json
new file mode 100644
index 00000000..3439d5d0
--- /dev/null
+++ b/pipelines/azurepipeline/code/deploy/aksdeploymentconfig.json
@@ -0,0 +1,4 @@
+{
+  "computeType": "aks",
+  "ComputeTarget": "aks-cluster"
+}
diff --git a/pipelines/azurepipeline/code/deploy/deploy.sh b/pipelines/azurepipeline/code/deploy/deploy.sh
new file mode 100644
index 00000000..b5252885
--- /dev/null
+++ b/pipelines/azurepipeline/code/deploy/deploy.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+# az ml model deploy -n tacosandburritos -m tacosandburritos:1 --ic inferenceconfig.json --dc deploymentconfig.json --resource-group taco-rg --workspace-name taco-workspace --overwrite -v
+while getopts "m:n:i:d:s:p:u:r:w:t:b:" option;
+  do
+  case "$option" in
+    m ) MODEL=${OPTARG};;
+    n ) MODEL_NAME=${OPTARG};;
+    i ) INFERENCE_CONFIG=${OPTARG};;
+    d ) DEPLOYMENTCONFIG=${OPTARG};;
+    s ) SERVICE_PRINCIPAL_ID=${OPTARG};;
+    p ) SERVICE_PRINCIPAL_PASSWORD=${OPTARG};;
+    u ) SUBSCRIPTION_ID=${OPTARG};;
+    r ) RESOURCE_GROUP=${OPTARG};;
+    w ) WORKSPACE=${OPTARG};;
+    t ) TENANT_ID=${OPTARG};;
+    b ) BASE_PATH=${OPTARG};;
+  esac
+done
+az login --service-principal --username ${SERVICE_PRINCIPAL_ID} --password ${SERVICE_PRINCIPAL_PASSWORD} -t $TENANT_ID
+az ml model deploy -n $MODEL_NAME -m ${MODEL}:1 --ic $INFERENCE_CONFIG --pi ${BASE_PATH}/model/myprofileresult.json --dc $DEPLOYMENTCONFIG -w $WORKSPACE -g $RESOURCE_GROUP --overwrite -v
\ No newline at end of file
diff --git a/pipelines/azurepipeline/code/deploy/deploymentconfig.json b/pipelines/azurepipeline/code/deploy/deploymentconfig.json
new file mode 100644
index 00000000..6ba315f2
--- /dev/null
+++ b/pipelines/azurepipeline/code/deploy/deploymentconfig.json
@@ -0,0 +1,8 @@
+{
+  "containerResourceRequirements": {
+    "cpu": 2,
+    "memoryInGB": 4
+  },
+  "computeType": "ACI",
+  "enableAppInsights": "True"
+}
\ No newline at end of file
diff --git a/pipelines/azurepipeline/code/deploy/environment.yml b/pipelines/azurepipeline/code/deploy/environment.yml
new file mode 100644
index 00000000..93c5400c
--- /dev/null
+++ b/pipelines/azurepipeline/code/deploy/environment.yml
@@ -0,0 +1,19 @@
+# Conda environment specification. The dependencies defined in this file will
+# be automatically provisioned for runs with userManagedDependencies=False.
+
+# Details about the Conda environment file format:
+# https://conda.io/docs/user-guide/tasks/manage-environments.html#create-env-file-manually
+
+name: project_environment
+dependencies:
+  # The python interpreter version.
+  # Currently Azure ML only supports 3.5.2 and later.
+- python=3.6.2
+
+- pip:
+  # Required packages for AzureML execution, history, and data preparation.
+  - azureml-defaults
+  - numpy
+  - tensorflow==2.0.0-alpha0
+  - Pillow
+  - requests
\ No newline at end of file
diff --git a/pipelines/azurepipeline/code/deploy/inference.sh b/pipelines/azurepipeline/code/deploy/inference.sh
new file mode 100644
index 00000000..ad72d9e3
--- /dev/null
+++ b/pipelines/azurepipeline/code/deploy/inference.sh
@@ -0,0 +1,4 @@
+echo "test the deployment with a burrito image"
+az ml service run -n fooddeployaci -d '{ "image": "https://www.exploreveg.org/files/2015/05/sofritas-burrito.jpeg" }' -w taco-workspace -g taco-rg
+echo "test the deployment with a taco image"
+az ml service run -n fooddeployaci -d '{ "image": "https://c1.staticflickr.com/5/4022/4401140214_f489c708f0_b.jpg" }' -w taco-workspace -g taco-rg
diff --git a/pipelines/azurepipeline/code/deploy/inferenceconfig.json b/pipelines/azurepipeline/code/deploy/inferenceconfig.json
new file mode 100644
index 00000000..2a4c0490
--- /dev/null
+++ b/pipelines/azurepipeline/code/deploy/inferenceconfig.json
@@ -0,0 +1,10 @@
+{
+  "entryScript": "/scripts/score.py",
+  "runtime": "python",
+  "condaFile": "/scripts/environment.yml",
+  "extraDockerfileSteps": null,
+  "sourceDirectory": null,
+  "enableGpu": false,
+  "baseImage": null,
+  "baseImageRegistry": null
+}
\ No newline at end of file
diff --git a/pipelines/azurepipeline/code/deploy/score.py b/pipelines/azurepipeline/code/deploy/score.py
new file mode 100644
index 00000000..67223872
--- /dev/null
+++ b/pipelines/azurepipeline/code/deploy/score.py
@@ -0,0 +1,90 @@
+import json
+import time
+import requests
+import datetime
+import numpy as np
+from PIL import Image
+from io import BytesIO
+import tensorflow as tf
+
+from azureml.core.model import Model
+
+def init():
+    global model
+
+    try:
+        model_path = Model.get_model_path('tacosandburritos')
+    except Exception:
+        # fall back to the model file baked into the image
+        model_path = '/model/latest.h5'
+
+    print('Attempting to load model')
+    model = tf.keras.models.load_model(model_path)
+    model.summary()
+    print('Done!')
+
+    print('Initialized model "{}" at {}'.format(model_path, datetime.datetime.now()))
+
+def run(raw_data):
+    global model
+    prev_time = time.time()
+
+    post = json.loads(raw_data)
+    img_path = post['image']
+
+    tensor = process_image(img_path, 160)
+    t = tf.reshape(tensor, [-1, 160, 160, 3])
+    o = model.predict(t, steps=1)
+    print(o)
+    o = o[0][0]
+
+    current_time = time.time()
+    inference_time = datetime.timedelta(seconds=current_time - prev_time)
+    payload = {
+        'time': inference_time.total_seconds(),
+        'prediction': 'burrito' if o < 0.5 else 'tacos',
+        'scores': str(o)
+    }
+
+    print('Input ({}), Prediction ({})'.format(post['image'], payload))
+
+    return payload
+
+def process_image(path, image_size):
+    # Extract image (from web or path)
+    if path.startswith('http'):
+        response = requests.get(path)
+        img = np.array(Image.open(BytesIO(response.content)))
+    else:
+        img = np.array(Image.open(path))
+
+    img_tensor = tf.convert_to_tensor(img, dtype=tf.float32)
+    img_final = tf.image.resize(img_tensor, [image_size, image_size]) / 255
+    return img_final
+
+def info(msg, char = "#", width = 75):
+    print("")
+    print(char * width)
+    print(char + " %0*s" % ((-1*width)+5, msg) + char)
+    print(char * width)
+
+if __name__ == "__main__":
+    images = {
+        'tacos': 'https://c1.staticflickr.com/5/4022/4401140214_f489c708f0_b.jpg',
+        'burrito': 'https://www.exploreveg.org/files/2015/05/sofritas-burrito.jpeg'
+    }
+
+    init()
+
+    for k, v in images.items():
+        print('{} => {}'.format(k, v))
+
+    info('Taco Test')
+    taco = json.dumps({ 'image': images['tacos'] })
+    print(taco)
+    run(taco)
+
+    info('Burrito Test')
+    burrito = json.dumps({ 'image': images['burrito'] })
+    print(burrito)
+    run(burrito)
\ No newline at end of file
diff --git a/pipelines/azurepipeline/code/kflow/Dockerfile b/pipelines/azurepipeline/code/kflow/Dockerfile
new file mode 100644
index 00000000..1a72be6c
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/Dockerfile
@@ -0,0 +1,9 @@
+FROM tensorflow/tensorflow:2.0.0a0-gpu-py3
+RUN pip install azure-cli
+RUN az extension add -n azure-cli-ml
+RUN pip install --upgrade pip
+COPY profile.sh /scripts/profile.sh
+COPY inferenceconfig.json /scripts/inferenceconfig.json
+COPY score.py /scripts/score.py
+COPY environment.yml /scripts/environment.yml
+ENTRYPOINT bash
\ No newline at end of file
diff --git a/pipelines/azurepipeline/code/kflow/app.yaml b/pipelines/azurepipeline/code/kflow/app.yaml
new file mode 100644
index 00000000..2f54e935
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/app.yaml
@@ -0,0 +1,45 @@
+apiVersion: kfdef.apps.kubeflow.org/v1alpha1
+kind: KfDef
+metadata:
+  creationTimestamp: null
+  name: kflow
+  namespace: kubeflow
+spec:
+  appdir: /home/rebec/kubeflow-and-mlops/code/kflow
+  componentParams:
+    ambassador:
+    - name: ambassadorServiceType
+      value: NodePort
+  components:
+  - ambassador
+  - argo
+  - centraldashboard
+  - jupyter-web-app
+  - katib
+  - metacontroller
+  - notebook-controller
+  - pipeline
+  - pytorch-operator
+  - tensorboard
+  - tf-job-operator
+  packages:
+  - argo
+  - common
+  - examples
+  - gcp
+  - jupyter
+  - katib
+  - metacontroller
+  - modeldb
+  - mpi-job
+  - pipeline
+  - pytorch-job
+  - seldon
+  - tensorboard
+  - tf-serving
+  - tf-training
+  repo: /home/rebec/kubeflow-and-mlops/code/kflow/.cache/v0.5.1/kubeflow
+  useBasicAuth: false
+  useIstio: false
+  version: v0.5.1
+status: {}
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/.gitignore
b/pipelines/azurepipeline/code/kflow/ks_app/.gitignore new file mode 100644 index 00000000..f8714d3a --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/.gitignore @@ -0,0 +1,4 @@ +/lib +/.ksonnet/registries +/app.override.yaml +/.ks_environment diff --git a/pipelines/azurepipeline/code/kflow/ks_app/app.yaml b/pipelines/azurepipeline/code/kflow/ks_app/app.yaml new file mode 100644 index 00000000..1c78472c --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/app.yaml @@ -0,0 +1,76 @@ +apiVersion: 0.3.0 +environments: + default: + destination: + namespace: kubeflow + server: https://taco-cls-taco-rg-1308a1-e98d0802.hcp.eastus.azmk8s.io:443 + k8sVersion: v1.14.0 + path: default +kind: ksonnet.io/app +libraries: + kubeflow/argo: + name: argo + registry: kubeflow + version: "" + kubeflow/common: + name: common + registry: kubeflow + version: "" + kubeflow/examples: + name: examples + registry: kubeflow + version: "" + kubeflow/gcp: + name: gcp + registry: kubeflow + version: "" + kubeflow/jupyter: + name: jupyter + registry: kubeflow + version: "" + kubeflow/katib: + name: katib + registry: kubeflow + version: "" + kubeflow/metacontroller: + name: metacontroller + registry: kubeflow + version: "" + kubeflow/modeldb: + name: modeldb + registry: kubeflow + version: "" + kubeflow/mpi-job: + name: mpi-job + registry: kubeflow + version: "" + kubeflow/pipeline: + name: pipeline + registry: kubeflow + version: "" + kubeflow/pytorch-job: + name: pytorch-job + registry: kubeflow + version: "" + kubeflow/seldon: + name: seldon + registry: kubeflow + version: "" + kubeflow/tensorboard: + name: tensorboard + registry: kubeflow + version: "" + kubeflow/tf-serving: + name: tf-serving + registry: kubeflow + version: "" + kubeflow/tf-training: + name: tf-training + registry: kubeflow + version: "" +name: ks_app +registries: + kubeflow: + protocol: fs + uri: /home/rebec/kubeflow-and-mlops/code/kflow/.cache/v0.5.1/kubeflow +version: 0.0.1 diff --git a/pipelines/azurepipeline/code/kflow/ks_app/components/ambassador.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/components/ambassador.jsonnet new file mode 100644 index 00000000..6ecf2319 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/components/ambassador.jsonnet @@ -0,0 +1,6 @@ +local env = std.extVar("__ksonnet/environments"); +local params = std.extVar("__ksonnet/params").components.ambassador; + +local ambassador = import "kubeflow/common/ambassador.libsonnet"; +local instance = ambassador.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/components/argo.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/components/argo.jsonnet new file mode 100644 index 00000000..b50acac8 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/components/argo.jsonnet @@ -0,0 +1,6 @@ +local env = std.extVar("__ksonnet/environments"); +local params = std.extVar("__ksonnet/params").components.argo; + +local argo = import "kubeflow/argo/argo.libsonnet"; +local instance = argo.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/components/centraldashboard.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/components/centraldashboard.jsonnet new file mode 100644 index 00000000..38af2f84 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/components/centraldashboard.jsonnet @@ -0,0 +1,6 @@ +local env = std.extVar("__ksonnet/environments"); +local params = std.extVar("__ksonnet/params").components.centraldashboard; + +local 
centraldashboard = import "kubeflow/common/centraldashboard.libsonnet"; +local instance = centraldashboard.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/components/jupyter-web-app.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/components/jupyter-web-app.jsonnet new file mode 100644 index 00000000..b51e0211 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/components/jupyter-web-app.jsonnet @@ -0,0 +1,7 @@ +local env = std.extVar("__ksonnet/environments"); +local params = std.extVar("__ksonnet/params").components["jupyter-web-app"]; + +local jupyter_ui = import "kubeflow/jupyter/jupyter-web-app.libsonnet"; + +local instance = jupyter_ui.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/components/katib.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/components/katib.jsonnet new file mode 100644 index 00000000..2fb28288 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/components/katib.jsonnet @@ -0,0 +1,16 @@ +local env = std.extVar("__ksonnet/environments"); +local params = std.extVar("__ksonnet/params").components.katib; + +local k = import "k.libsonnet"; + +local studyjobcontroller = import "kubeflow/katib/studyjobcontroller.libsonnet"; +local suggestion = import "kubeflow/katib/suggestion.libsonnet"; +local vizier = import "kubeflow/katib/vizier.libsonnet"; + +local namespace = env.namespace; + +std.prune( + k.core.v1.list.new(vizier.all(params, namespace)) + + k.core.v1.list.new(suggestion.all(params, namespace)) + + k.core.v1.list.new(studyjobcontroller.all(params, namespace)) +) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/components/metacontroller.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/components/metacontroller.jsonnet new file mode 100644 index 00000000..221350c7 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/components/metacontroller.jsonnet @@ -0,0 +1,6 @@ +local env = std.extVar("__ksonnet/environments"); +local params = std.extVar("__ksonnet/params").components.metacontroller; + +local metacontroller = import "kubeflow/metacontroller/metacontroller.libsonnet"; +local instance = metacontroller.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/components/notebook-controller.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/components/notebook-controller.jsonnet new file mode 100644 index 00000000..d19bf1ce --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/components/notebook-controller.jsonnet @@ -0,0 +1,6 @@ +local env = std.extVar("__ksonnet/environments"); +local params = std.extVar("__ksonnet/params").components["notebook-controller"]; + +local notebooks = import "kubeflow/jupyter/notebook_controller.libsonnet"; +local instance = notebooks.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/components/params.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/components/params.libsonnet new file mode 100644 index 00000000..babbb614 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/components/params.libsonnet @@ -0,0 +1,93 @@ +{ + global: {}, + components: { + // Component-level parameters, defined initially from 'ks prototype use ...' 
+ // Each object below should correspond to a component in the components/ directory + ambassador: { + ambassadorImage: 'quay.io/datawire/ambassador:0.37.0', + ambassadorNodePort: 0, + ambassadorServiceType: 'NodePort', + name: 'ambassador', + platform: 'none', + replicas: 3, + }, + argo: { + artifactRepositoryAccessKeySecretKey: 'accesskey', + artifactRepositoryAccessKeySecretName: 'mlpipeline-minio-artifact', + artifactRepositoryBucket: 'mlpipeline', + artifactRepositoryEndpoint: 'minio-service.kubeflow:9000', + artifactRepositoryInsecure: 'true', + artifactRepositoryKeyPrefix: 'artifacts', + artifactRepositorySecretKeySecretKey: 'secretkey', + artifactRepositorySecretKeySecretName: 'mlpipeline-minio-artifact', + executorImage: 'argoproj/argoexec:v2.2.0', + name: 'argo', + uiImage: 'argoproj/argoui:v2.2.0', + workflowControllerImage: 'argoproj/workflow-controller:v2.2.0', + }, + centraldashboard: { + image: 'gcr.io/kubeflow-images-public/centraldashboard:v0.5.0', + name: 'centraldashboard', + }, + "jupyter-web-app": { + image: 'gcr.io/kubeflow-images-public/jupyter-web-app:v0.5.0', + name: 'jupyter-web-app', + policy: 'Always', + port: '80', + prefix: 'jupyter', + rokSecretName: 'secret-rok-{username}', + ui: 'default', + }, + katib: { + katibUIImage: 'gcr.io/kubeflow-images-public/katib/katib-ui:v0.1.2-alpha-156-g4ab3dbd', + metricsCollectorImage: 'gcr.io/kubeflow-images-public/katib/metrics-collector:v0.1.2-alpha-156-g4ab3dbd', + name: 'katib', + studyJobControllerImage: 'gcr.io/kubeflow-images-public/katib/studyjob-controller:v0.1.2-alpha-156-g4ab3dbd', + suggestionBayesianOptimizationImage: 'gcr.io/kubeflow-images-public/katib/suggestion-bayesianoptimization:v0.1.2-alpha-156-g4ab3dbd', + suggestionGridImage: 'gcr.io/kubeflow-images-public/katib/suggestion-grid:v0.1.2-alpha-156-g4ab3dbd', + suggestionHyperbandImage: 'gcr.io/kubeflow-images-public/katib/suggestion-hyperband:v0.1.2-alpha-156-g4ab3dbd', + suggestionRandomImage: 'gcr.io/kubeflow-images-public/katib/suggestion-random:v0.1.2-alpha-156-g4ab3dbd', + vizierCoreImage: 'gcr.io/kubeflow-images-public/katib/vizier-core:v0.1.2-alpha-156-g4ab3dbd', + vizierCoreRestImage: 'gcr.io/kubeflow-images-public/katib/vizier-core-rest:v0.1.2-alpha-156-g4ab3dbd', + vizierDbImage: 'mysql:8.0.3', + }, + metacontroller: { + image: 'metacontroller/metacontroller:v0.3.0', + name: 'metacontroller', + }, + "notebook-controller": { + controllerImage: 'gcr.io/kubeflow-images-public/notebook-controller:v20190401-v0.4.0-rc.1-308-g33618cc9-e3b0c4', + injectGcpCredentials: 'true', + name: 'notebook-controller', + }, + pipeline: { + name: 'pipeline', + }, + "pytorch-operator": { + cloud: 'null', + deploymentNamespace: 'null', + deploymentScope: 'cluster', + disks: 'null', + name: 'pytorch-operator', + pytorchDefaultImage: 'null', + pytorchJobImage: 'gcr.io/kubeflow-images-public/pytorch-operator:v0.5.0', + }, + tensorboard: { + defaultTbImage: 'tensorflow/tensorflow:1.8.0', + logDir: 'logs', + name: 'tensorboard', + servicePort: 9000, + serviceType: 'ClusterIP', + targetPort: 6006, + }, + "tf-job-operator": { + cloud: 'null', + deploymentNamespace: 'null', + deploymentScope: 'cluster', + name: 'tf-job-operator', + tfDefaultImage: 'null', + tfJobImage: 'gcr.io/kubeflow-images-public/tf_operator:v0.5.0', + tfJobUiServiceType: 'ClusterIP', + }, + }, +} \ No newline at end of file diff --git a/pipelines/azurepipeline/code/kflow/ks_app/components/pipeline.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/components/pipeline.jsonnet new file mode 100644 index 
00000000..4893ecae --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/components/pipeline.jsonnet @@ -0,0 +1,14 @@ +local env = std.extVar("__ksonnet/environments"); +local params = std.extVar("__ksonnet/params").components.pipeline; + +local k = import "k.libsonnet"; +local pipelineBase = import "kubeflow/pipeline/pipeline.libsonnet"; + +// updatedParams includes the namespace from env by default. +local updatedParams = params + env; + +local pipeline = pipelineBase { + params+: updatedParams, +}; + +std.prune(k.core.v1.list.new(pipeline.parts.all)) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/components/pytorch-operator.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/components/pytorch-operator.jsonnet new file mode 100644 index 00000000..a3b4efc8 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/components/pytorch-operator.jsonnet @@ -0,0 +1,7 @@ +local env = std.extVar("__ksonnet/environments"); +local params = std.extVar("__ksonnet/params").components["pytorch-operator"]; + +local k = import "k.libsonnet"; +local operator = import "kubeflow/pytorch-job/pytorch-operator.libsonnet"; + +k.core.v1.list.new(operator.all(params, env)) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/components/tensorboard.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/components/tensorboard.jsonnet new file mode 100644 index 00000000..2962aabc --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/components/tensorboard.jsonnet @@ -0,0 +1,6 @@ +local env = std.extVar("__ksonnet/environments"); +local params = std.extVar("__ksonnet/params").components.tensorboard; + +local tensorboard = import "kubeflow/tensorboard/tensorboard.libsonnet"; +local instance = tensorboard.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/components/tf-job-operator.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/components/tf-job-operator.jsonnet new file mode 100644 index 00000000..726675a1 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/components/tf-job-operator.jsonnet @@ -0,0 +1,6 @@ +local env = std.extVar("__ksonnet/environments"); +local params = std.extVar("__ksonnet/params").components["tf-job-operator"]; + +local tfJobOperator = import "kubeflow/tf-training/tf-job-operator.libsonnet"; +local instance = tfJobOperator.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/environments/base.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/environments/base.libsonnet new file mode 100644 index 00000000..a129affb --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/environments/base.libsonnet @@ -0,0 +1,4 @@ +local components = std.extVar("__ksonnet/components"); +components + { + // Insert user-specified overrides here. 
+}
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/environments/default/globals.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/environments/default/globals.libsonnet
new file mode 100644
index 00000000..7a73a41b
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/ks_app/environments/default/globals.libsonnet
@@ -0,0 +1,2 @@
+{
+}
\ No newline at end of file
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/environments/default/main.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/environments/default/main.jsonnet
new file mode 100644
index 00000000..1d4f6425
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/ks_app/environments/default/main.jsonnet
@@ -0,0 +1,9 @@
+local base = import "base.libsonnet";
+// uncomment if you reference ksonnet-lib
+// local k = import "k.libsonnet";
+// local deployment = k.apps.v1beta2.deployment;
+
+base + {
+  // Insert user-specified overrides here. For example if a component is named \"nginx-deployment\", you might have something like:\n")
+  // "nginx-deployment"+: deployment.mixin.metadata.withLabels({foo: "bar"})
+}
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/environments/default/params.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/environments/default/params.libsonnet
new file mode 100644
index 00000000..b6eb32db
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/ks_app/environments/default/params.libsonnet
@@ -0,0 +1,17 @@
+local params = std.extVar("__ksonnet/params");
+local globals = import "globals.libsonnet";
+local envParams = params + {
+  components +: {
+    // Insert component parameter overrides here. Ex:
+    // guestbook +: {
+    //   name: "guestbook-dev",
+    //   replicas: params.global.replicas,
+    // },
+  },
+};
+
+{
+  components: {
+    [x]: envParams.components[x] + globals, for x in std.objectFields(envParams.components)
+  },
+}
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/OWNERS b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/OWNERS
new file mode 100644
index 00000000..79b074bb
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/OWNERS
@@ -0,0 +1,3 @@
+approvers:
+  - IronPan
+reviewers:
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/README.md b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/README.md
new file mode 100644
index 00000000..7bdba895
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/README.md
@@ -0,0 +1,35 @@
+# Argo
+
+> Prototypes for deploying Argo and running Argo Workflows
+
+<!-- START doctoc generated TOC please keep comment here to allow auto update -->
+<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
+**Table of Contents**  *generated with [DocToc](https://github.com/thlorenz/doctoc)*
+
+- [Quickstart](#quickstart)
+
+<!-- END doctoc generated TOC please keep comment here to allow auto update -->
+
+## Quickstart
+
+*The following commands use the `io.ksonnet.pkg.argo` prototype to deploy the Argo Workflow operator on your Kubernetes cluster*
+
+First, create a cluster and install the ksonnet CLI (see root-level [README.md](../../README.md)).
+
+If you haven't yet created a [ksonnet application](https://ksonnet.io/docs/tutorial#1-initialize-your-app), do so using `ks init <app-name>`.
+
+Finally, in the ksonnet application directory, run the following:
+
+```shell
+# Install the kubeflow argo package
+$ ks pkg install kubeflow/argo
+
+# Expand prototype as a Jsonnet file, place in a file in the
+# `components/` directory. (YAML and JSON are also available.)
+$ ks prototype use io.ksonnet.pkg.argo argo \
+    --namespace default \
+    --name argo
+
+# Apply to server.
+$ ks apply default -c argo +``` diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/argo.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/argo.libsonnet new file mode 100644 index 00000000..17c56cc8 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/argo.libsonnet @@ -0,0 +1,485 @@ +{ + // TODO(jlewi): Do we need to add parts corresponding to a service account and cluster binding role? + // see https://github.com/argoproj/argo/blob/master/cmd/argo/commands/install.go + local k = import "k.libsonnet", + local util = import "kubeflow/common/util.libsonnet", + new(_env, _params):: { + local params = _params + _env, + + // CRD's are not namespace scoped; see + // https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ + local workflowCRD = { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "workflows.argoproj.io", + }, + spec: { + group: "argoproj.io", + names: { + kind: "Workflow", + listKind: "WorkflowList", + plural: "workflows", + shortNames: [ + "wf", + ], + singular: "workflow", + }, + scope: "Namespaced", + version: "v1alpha1", + }, + }, // crd + workflowCRD:: workflowCRD, + + // Deploy the controller + local workflowController = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: "workflow-controller", + }, + name: "workflow-controller", + namespace: params.namespace, + }, + spec: { + progressDeadlineSeconds: 600, + replicas: 1, + revisionHistoryLimit: 10, + selector: { + matchLabels: { + app: "workflow-controller", + }, + }, + strategy: { + rollingUpdate: { + maxSurge: "25%", + maxUnavailable: "25%", + }, + type: "RollingUpdate", + }, + template: { + metadata: { + creationTimestamp: null, + labels: { + app: "workflow-controller", + }, + }, + spec: { + containers: [ + { + args: [ + "--configmap", + "workflow-controller-configmap", + ], + command: [ + "workflow-controller", + ], + env: [ + { + name: "ARGO_NAMESPACE", + valueFrom: { + fieldRef: { + apiVersion: "v1", + fieldPath: "metadata.namespace", + }, + }, + }, + ], + image: params.workflowControllerImage, + imagePullPolicy: "IfNotPresent", + name: "workflow-controller", + resources: {}, + terminationMessagePath: "/dev/termination-log", + terminationMessagePolicy: "File", + }, + ], + dnsPolicy: "ClusterFirst", + restartPolicy: "Always", + schedulerName: "default-scheduler", + securityContext: {}, + serviceAccount: "argo", + serviceAccountName: "argo", + terminationGracePeriodSeconds: 30, + }, + }, + }, + }, // deploy + workflowController:: workflowController, + + local argoUI = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: "argo-ui", + }, + name: "argo-ui", + namespace: params.namespace, + }, + spec: { + progressDeadlineSeconds: 600, + replicas: 1, + revisionHistoryLimit: 10, + selector: { + matchLabels: { + app: "argo-ui", + }, + }, + strategy: { + rollingUpdate: { + maxSurge: "25%", + maxUnavailable: "25%", + }, + type: "RollingUpdate", + }, + template: { + metadata: { + creationTimestamp: null, + labels: { + app: "argo-ui", + }, + }, + spec: { + containers: [ + { + env: [ + { + name: "ARGO_NAMESPACE", + valueFrom: { + fieldRef: { + apiVersion: "v1", + fieldPath: "metadata.namespace", + }, + }, + }, + { + name: "IN_CLUSTER", + value: "true", + }, + { + name: "BASE_HREF", + value: "/argo/", + }, + ], + image: params.uiImage, + imagePullPolicy: "IfNotPresent", + name: 
"argo-ui", + resources: {}, + terminationMessagePath: "/dev/termination-log", + terminationMessagePolicy: "File", + readinessProbe: { + httpGet: { + path: "/", + port: 8001, + }, + }, + }, + ], + dnsPolicy: "ClusterFirst", + restartPolicy: "Always", + schedulerName: "default-scheduler", + securityContext: {}, + serviceAccount: "argo-ui", + serviceAccountName: "argo-ui", + terminationGracePeriodSeconds: 30, + }, + }, + }, + }, // deployUi + argoUI:: argoUI, + + local argUIService = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: "argo-ui", + }, + name: "argo-ui", + namespace: params.namespace, + annotations: { + "getambassador.io/config": + std.join("\n", [ + "---", + "apiVersion: ambassador/v0", + "kind: Mapping", + "name: argo-ui-mapping", + "prefix: /argo/", + "service: argo-ui." + params.namespace, + ]), + }, //annotations + }, + spec: { + ports: [ + { + port: 80, + targetPort: 8001, + }, + ], + selector: { + app: "argo-ui", + }, + sessionAffinity: "None", + type: "NodePort", + }, + }, + argUIService:: argUIService, + + local workflowControllerConfigmap = { + apiVersion: "v1", + data: { + config: std.format(||| + { + executorImage: %s, + artifactRepository: + { + s3: { + bucket: %s, + keyPrefix: %s, + endpoint: %s, + insecure: %s, + accessKeySecret: { + name: %s, + key: %s + }, + secretKeySecret: { + name: %s, + key: %s + } + } + } + } + |||, + [ + params.executorImage, + params.artifactRepositoryBucket, + params.artifactRepositoryKeyPrefix, + params.artifactRepositoryEndpoint, + params.artifactRepositoryInsecure, + params.artifactRepositoryAccessKeySecretName, + params.artifactRepositoryAccessKeySecretKey, + params.artifactRepositorySecretKeySecretName, + params.artifactRepositorySecretKeySecretKey, + ]), + }, + kind: "ConfigMap", + metadata: { + name: "workflow-controller-configmap", + namespace: params.namespace, + }, + }, + workflowControllerConfigmap:: workflowControllerConfigmap, + + local argoServiceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "argo", + namespace: params.namespace, + }, + }, // service account + argoServiceAccount:: argoServiceAccount, + + // Keep in sync with https://github.com/argoproj/argo/blob/master/cmd/argo/commands/const.go#L20 + // Permissions need to be cluster wide for the workflow controller to be able to process workflows + // in other namespaces. We could potentially use the ConfigMap of the workflow-controller to + // scope it to a particular namespace in which case we might be able to restrict the permissions + // to a particular namespace. 
+ local argoClusterRole = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + labels: { + app: "argo", + }, + name: "argo", + }, + rules: [ + { + apiGroups: [""], + resources: [ + "pods", + "pods/exec", + ], + verbs: [ + "create", + "get", + "list", + "watch", + "update", + "patch", + ], + }, + { + apiGroups: [""], + resources: [ + "configmaps", + ], + verbs: [ + "get", + "watch", + "list", + ], + }, + { + apiGroups: [ + "", + ], + resources: [ + "persistentvolumeclaims", + ], + verbs: [ + "create", + "delete", + ], + }, + { + apiGroups: [ + "argoproj.io", + ], + resources: [ + "workflows", + ], + verbs: [ + "get", + "list", + "watch", + "update", + "patch", + ], + }, + ], + }, // operator-role + argoClusterRole:: argoClusterRole, + + local argoClusterRoleBinding = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + labels: { + app: "argo", + }, + name: "argo", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "argo", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "argo", + namespace: params.namespace, + }, + ], + }, // role binding + argoClusterRoleBinding:: argoClusterRoleBinding, + + local argoUIServiceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "argo-ui", + namespace: params.namespace, + }, + }, // service account + argoUIServiceAccount:: argoUIServiceAccount, + + // Keep in sync with https://github.com/argoproj/argo/blob/master/cmd/argo/commands/const.go#L44 + // Permissions need to be cluster wide for the workflow controller to be able to process workflows + // in other namespaces. We could potentially use the ConfigMap of the workflow-controller to + // scope it to a particular namespace in which case we might be able to restrict the permissions + // to a particular namespace. 
+ local argoUIRole = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + labels: { + app: "argo", + }, + name: "argo-ui", + }, + rules: [ + { + apiGroups: [""], + resources: [ + "pods", + "pods/exec", + "pods/log", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + { + apiGroups: [""], + resources: [ + "secrets", + ], + verbs: [ + "get", + ], + }, + { + apiGroups: [ + "argoproj.io", + ], + resources: [ + "workflows", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + ], + }, // operator-role + argoUIRole:: argoUIRole, + + local argUIClusterRoleBinding = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + labels: { + app: "argo-ui", + }, + name: "argo-ui", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "argo-ui", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "argo-ui", + namespace: params.namespace, + }, + ], + }, // role binding + argUIClusterRoleBinding:: argUIClusterRoleBinding, + + parts: self, + all:: [ + self.workflowCRD, + self.workflowController, + self.argoUI, + self.argUIService, + self.workflowControllerConfigmap, + self.argoServiceAccount, + self.argoClusterRole, + self.argoClusterRoleBinding, + self.argoUIServiceAccount, + self.argoUIRole, + self.argUIClusterRoleBinding, + ], + + list(obj=self.all):: util.list(obj), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/parts.yaml b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/parts.yaml new file mode 100644 index 00000000..3002fce2 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/parts.yaml @@ -0,0 +1,35 @@ +{ + "name": "argo", + "apiVersion": "0.0.1", + "kind": "ksonnet.io/parts", + "description": "Prototypes for running Argo workflows.\n", + "author": "kubeflow team ", + "contributors": [ + { + "name": "Jeremy Lewi", + "email": "jlewi@google.com" + } + ], + "repository": { + "type": "git", + "url": "https://github.com/kubeflow/kubeflow" + }, + "bugs": { + "url": "https://github.com/kubeflow/kubeflow/issues" + }, + "keywords": [ + "kubeflow", + "argo", + "workflows" + ], + "quickStart": { + "prototype": "io.ksonnet.pkg.argo", + "componentName": "argo", + "flags": { + "name": "argo", + "namespace": "", + }, + "comment": "Deploy Argo" + }, + "license": "Apache 2.0" +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/prototypes/argo.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/prototypes/argo.jsonnet new file mode 100644 index 00000000..eea2d10d --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/prototypes/argo.jsonnet @@ -0,0 +1,20 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.argo +// @description Deploy Argo workflow engine +// @shortDescription Argo workflow engine +// @param name string Name to give to the component +// @optionalParam workflowControllerImage string argoproj/workflow-controller:v2.2.0 workflowControllerImage +// @optionalParam uiImage string argoproj/argoui:v2.2.0 uiImage +// @optionalParam executorImage string argoproj/argoexec:v2.2.0 executorImage +// @optionalParam artifactRepositoryKeyPrefix string artifacts artifactRepositoryKeyPrefix +// @optionalParam artifactRepositoryEndpoint string minio-service.kubeflow:9000 artifactRepositoryEndpoint +// @optionalParam artifactRepositoryBucket string mlpipeline artifactRepositoryBucket +// @optionalParam artifactRepositoryInsecure string true 
artifactRepositoryInsecure +// @optionalParam artifactRepositoryAccessKeySecretName string mlpipeline-minio-artifact artifactRepositoryAccessKeySecretName +// @optionalParam artifactRepositoryAccessKeySecretKey string accesskey artifactRepositoryAccessKeySecretKey +// @optionalParam artifactRepositorySecretKeySecretName string mlpipeline-minio-artifact artifactRepositorySecretKeySecretName +// @optionalParam artifactRepositorySecretKeySecretKey string secretkey artifactRepositorySecretKeySecretKey + +local argo = import "kubeflow/argo/argo.libsonnet"; +local instance = argo.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/tests/argo_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/tests/argo_test.jsonnet new file mode 100644 index 00000000..1c6d581b --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/argo/tests/argo_test.jsonnet @@ -0,0 +1,452 @@ +local argo = import "kubeflow/argo/argo.libsonnet"; +local testSuite = import "kubeflow/common/testsuite.libsonnet"; + +local params = { + name: "argo", + workflowControllerImage: "argoproj/workflow-controller:v2.2.0", + uiImage: "argoproj/argoui:v2.2.0", + executorImage: "argoproj/argoexec:v2.2.0", + artifactRepositoryKeyPrefix: "artifacts", + artifactRepositoryEndpoint: "minio-service.kubeflow:9000", + artifactRepositoryBucket: "mlpipeline", + artifactRepositoryInsecure: "true", + artifactRepositoryAccessKeySecretName: "mlpipeline-minio-artifact", + artifactRepositoryAccessKeySecretKey: "accesskey", + artifactRepositorySecretKeySecretName: "mlpipeline-minio-artifact", + artifactRepositorySecretKeySecretKey: "secretkey", +}; +local env = { + namespace: "kubeflow", +}; + +local instance = argo.new(env, params); + +local testCases = [ + { + actual: instance.parts.workflowCRD, + expected: { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "workflows.argoproj.io", + }, + spec: { + group: "argoproj.io", + names: { + kind: "Workflow", + listKind: "WorkflowList", + plural: "workflows", + shortNames: [ + "wf", + ], + singular: "workflow", + }, + scope: "Namespaced", + version: "v1alpha1", + }, + }, + }, + { + actual: instance.parts.workflowController, + expected: { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: "workflow-controller", + }, + name: "workflow-controller", + namespace: "kubeflow", + }, + spec: { + progressDeadlineSeconds: 600, + replicas: 1, + revisionHistoryLimit: 10, + selector: { + matchLabels: { + app: "workflow-controller", + }, + }, + strategy: { + rollingUpdate: { + maxSurge: "25%", + maxUnavailable: "25%", + }, + type: "RollingUpdate", + }, + template: { + metadata: { + creationTimestamp: null, + labels: { + app: "workflow-controller", + }, + }, + spec: { + containers: [ + { + args: [ + "--configmap", + "workflow-controller-configmap", + ], + command: [ + "workflow-controller", + ], + env: [ + { + name: "ARGO_NAMESPACE", + valueFrom: { + fieldRef: { + apiVersion: "v1", + fieldPath: "metadata.namespace", + }, + }, + }, + ], + image: "argoproj/workflow-controller:v2.2.0", + imagePullPolicy: "IfNotPresent", + name: "workflow-controller", + resources: {}, + terminationMessagePath: "/dev/termination-log", + terminationMessagePolicy: "File", + }, + ], + dnsPolicy: "ClusterFirst", + restartPolicy: "Always", + schedulerName: "default-scheduler", + securityContext: {}, + serviceAccount: "argo", + serviceAccountName: 
"argo", + terminationGracePeriodSeconds: 30, + }, + }, + }, + }, + }, + { + actual: instance.parts.argoUI, + expected: { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: "argo-ui", + }, + name: "argo-ui", + namespace: "kubeflow", + }, + spec: { + progressDeadlineSeconds: 600, + replicas: 1, + revisionHistoryLimit: 10, + selector: { + matchLabels: { + app: "argo-ui", + }, + }, + strategy: { + rollingUpdate: { + maxSurge: "25%", + maxUnavailable: "25%", + }, + type: "RollingUpdate", + }, + template: { + metadata: { + creationTimestamp: null, + labels: { + app: "argo-ui", + }, + }, + spec: { + containers: [ + { + env: [ + { + name: "ARGO_NAMESPACE", + valueFrom: { + fieldRef: { + apiVersion: "v1", + fieldPath: "metadata.namespace", + }, + }, + }, + { + name: "IN_CLUSTER", + value: "true", + }, + { + name: "BASE_HREF", + value: "/argo/", + }, + ], + image: "argoproj/argoui:v2.2.0", + imagePullPolicy: "IfNotPresent", + name: "argo-ui", + readinessProbe: { + httpGet: { + path: "/", + port: 8001, + }, + }, + resources: {}, + terminationMessagePath: "/dev/termination-log", + terminationMessagePolicy: "File", + }, + ], + dnsPolicy: "ClusterFirst", + restartPolicy: "Always", + schedulerName: "default-scheduler", + securityContext: {}, + serviceAccount: "argo-ui", + serviceAccountName: "argo-ui", + terminationGracePeriodSeconds: 30, + }, + }, + }, + }, + }, + { + actual: instance.parts.argUIService, + expected: { + apiVersion: "v1", + kind: "Service", + metadata: { + annotations: { + "getambassador.io/config": "---\napiVersion: ambassador/v0\nkind: Mapping\nname: argo-ui-mapping\nprefix: /argo/\nservice: argo-ui.kubeflow", + }, + labels: { + app: "argo-ui", + }, + name: "argo-ui", + namespace: "kubeflow", + }, + spec: { + ports: [ + { + port: 80, + targetPort: 8001, + }, + ], + selector: { + app: "argo-ui", + }, + sessionAffinity: "None", + type: "NodePort", + }, + }, + }, + { + actual: instance.parts.workflowControllerConfigmap, + expected: { + apiVersion: "v1", + data: { + config: "{\nexecutorImage: argoproj/argoexec:v2.2.0,\nartifactRepository:\n{\n s3: {\n bucket: mlpipeline,\n keyPrefix: artifacts,\n endpoint: minio-service.kubeflow:9000,\n insecure: true,\n accessKeySecret: {\n name: mlpipeline-minio-artifact,\n key: accesskey\n },\n secretKeySecret: {\n name: mlpipeline-minio-artifact,\n key: secretkey\n }\n }\n}\n}\n", + }, + kind: "ConfigMap", + metadata: { + name: "workflow-controller-configmap", + namespace: "kubeflow", + }, + }, + }, + { + actual: instance.parts.argoServiceAccount, + expected: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "argo", + namespace: "kubeflow", + }, + }, + }, + { + actual: instance.parts.argoClusterRole, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + labels: { + app: "argo", + }, + name: "argo", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "pods", + "pods/exec", + ], + verbs: [ + "create", + "get", + "list", + "watch", + "update", + "patch", + ], + }, + { + apiGroups: [ + "", + ], + resources: [ + "configmaps", + ], + verbs: [ + "get", + "watch", + "list", + ], + }, + { + apiGroups: [ + "", + ], + resources: [ + "persistentvolumeclaims", + ], + verbs: [ + "create", + "delete", + ], + }, + { + apiGroups: [ + "argoproj.io", + ], + resources: [ + "workflows", + ], + verbs: [ + "get", + "list", + "watch", + "update", + "patch", + ], + }, + ], + }, + }, + { + actual: instance.parts.argoClusterRoleBinding, + expected: { + 
apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + labels: { + app: "argo", + }, + name: "argo", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "argo", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "argo", + namespace: "kubeflow", + }, + ], + }, + }, + { + actual: instance.parts.argoUIServiceAccount, + expected: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "argo-ui", + namespace: "kubeflow", + }, + }, + }, + { + actual: instance.parts.argoUIRole, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + labels: { + app: "argo", + }, + name: "argo-ui", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "pods", + "pods/exec", + "pods/log", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + { + apiGroups: [ + "", + ], + resources: [ + "secrets", + ], + verbs: [ + "get", + ], + }, + { + apiGroups: [ + "argoproj.io", + ], + resources: [ + "workflows", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + ], + }, + }, + { + actual: instance.parts.argUIClusterRoleBinding, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + labels: { + app: "argo-ui", + }, + name: "argo-ui", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "argo-ui", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "argo-ui", + namespace: "kubeflow", + }, + ], + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/OWNERS b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/OWNERS new file mode 100644 index 00000000..3657653d --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/OWNERS @@ -0,0 +1,3 @@ +approvers: + - gaocegege +reviewers: diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/README.md b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/README.md new file mode 100644 index 00000000..4347af07 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/README.md @@ -0,0 +1,11 @@ +# common + + + +**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* + +- [common](#common) + + + +This ksonnet package contains kubeflow common prototypes such as ambassador, spartakus, etc. You can install this using `ks pkg install kubeflow/common`. `ks prototype list` should list the available prototypes. `ks prototype describe ` should describe the prototype. 
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/ambassador.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/ambassador.libsonnet new file mode 100644 index 00000000..e85ce1b3 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/ambassador.libsonnet @@ -0,0 +1,226 @@ +{ + local k = import "k.libsonnet", + local util = import "kubeflow/common/util.libsonnet", + new(_env, _params):: { + local params = _params + _env, + + local ambassadorService = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + service: "ambassador", + }, + name: "ambassador", + namespace: params.namespace, + }, + spec: { + ports: [ + { + name: "ambassador", + port: 80, + targetPort: 80, + [if (params.ambassadorServiceType == 'NodePort') && + (params.ambassadorNodePort >= 30000) && + (params.ambassadorNodePort <= 32767) + then 'nodePort']: params.ambassadorNodePort, + }, + ], + selector: { + service: "ambassador", + }, + type: params.ambassadorServiceType, + }, + }, // service + ambassadorService:: ambassadorService, + + local adminService = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + service: "ambassador-admin", + }, + name: "ambassador-admin", + namespace: params.namespace, + }, + spec: { + ports: [ + { + name: "ambassador-admin", + port: 8877, + targetPort: 8877, + }, + ], + selector: { + service: "ambassador", + }, + type: "ClusterIP", + }, + }, // adminService + adminService:: adminService, + + local ambassadorRole = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + name: "ambassador", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "services", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + { + apiGroups: [ + "", + ], + resources: [ + "configmaps", + ], + verbs: [ + "create", + "update", + "patch", + "get", + "list", + "watch", + ], + }, + { + apiGroups: [ + "", + ], + resources: [ + "secrets", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + ], + }, // role + ambassadorRole:: ambassadorRole, + + local ambassadorServiceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "ambassador", + namespace: params.namespace, + }, + }, // serviceAccount + ambassadorServiceAccount:: ambassadorServiceAccount, + + local ambassadorRoleBinding = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + name: "ambassador", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "ambassador", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "ambassador", + namespace: params.namespace, + }, + ], + }, // roleBinding + ambassadorRoleBinding:: ambassadorRoleBinding, + + local ambassadorDeployment = { + apiVersion: "apps/v1beta1", + kind: "Deployment", + metadata: { + name: "ambassador", + namespace: params.namespace, + }, + spec: { + replicas: params.replicas, + template: { + metadata: { + labels: { + service: "ambassador", + }, + namespace: params.namespace, + }, + spec: { + containers: [ + { + env: [ + { + name: "AMBASSADOR_NAMESPACE", + valueFrom: { + fieldRef: { + fieldPath: "metadata.namespace", + }, + }, + }, + ], + image: params.ambassadorImage, + name: "ambassador", + resources: { + limits: { + cpu: 1, + memory: "400Mi", + }, + requests: { + cpu: "200m", + memory: "100Mi", + }, + }, + readinessProbe: { + httpGet: { + path: "/ambassador/v0/check_ready", + port: 8877, + }, + initialDelaySeconds: 30, + periodSeconds: 30, 
+ }, + livenessProbe: { + httpGet: { + path: "/ambassador/v0/check_alive", + port: 8877, + }, + initialDelaySeconds: 30, + periodSeconds: 30, + }, + }, + ], + restartPolicy: "Always", + serviceAccountName: "ambassador", + }, + }, + }, + }, // deploy + ambassadorDeployment:: ambassadorDeployment, + + parts:: self, + all:: [ + self.ambassadorService, + self.adminService, + self.ambassadorRole, + self.ambassadorServiceAccount, + self.ambassadorRoleBinding, + self.ambassadorDeployment, + ], + + list(obj=self.all):: util.list(obj), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/basic-auth.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/basic-auth.libsonnet new file mode 100644 index 00000000..f70fc5aa --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/basic-auth.libsonnet @@ -0,0 +1,197 @@ +{ + local util = import "kubeflow/common/util.libsonnet", + new(_env, _params):: { + local params = _params + _env, + + local ui_name = params.name + "-login", + + local authService = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: params.name, + }, + name: params.name, + namespace: params.namespace, + annotations: { + "getambassador.io/config": + std.join("\n", [ + "---", + "apiVersion: ambassador/v0", + "kind: AuthService", + "name: " + params.name, + "auth_service: " + params.name + "." + params.namespace + ":8085", + 'allowed_headers:\n- "x-from-login"', + ]), + }, //annotations + }, + spec: { + ports: [ + { + port: 8085, + targetPort: 8085, + }, + ], + selector: { + app: params.name, + }, + type: "ClusterIP", + }, + }, + authService:: authService, + + local authDeployment = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: params.name, + namespace: params.namespace, + + }, + spec: { + // replicas here should always be 1: + // auth cookies are stored in memory, and we don't support sharing them among pods. + replicas: 1, + strategy: { + type: "RollingUpdate", + }, + template: { + metadata: { + labels: { + app: params.name, + }, + }, + spec: { + containers: [ + { + image: params.image, + name: "app", + workingDir: "/opt/kubeflow", + env: [ + { + name: "USERNAME", + valueFrom: { + secretKeyRef: { + name: params.authSecretName, + key: "username", + }, + }, + }, + { + name: "PASSWORDHASH", + valueFrom: { + secretKeyRef: { + name: params.authSecretName, + key: "passwordhash", + }, + }, + }, + ], + command: [ + "/opt/kubeflow/gatekeeper", + ], + args: [ + "--username=$(USERNAME)", + "--pwhash=$(PASSWORDHASH)", + ], + ports: [ + { + containerPort: 8085, + }, + ], + }, + ], + }, + }, + }, + }, + authDeployment:: authDeployment, + + local loginService = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: ui_name, + }, + name: ui_name, + namespace: params.namespace, + annotations: { + "getambassador.io/config": + std.join("\n", [ + "---", + "apiVersion: ambassador/v0", + "kind: Mapping", + "name: kflogin-mapping", + "prefix: /kflogin", + "rewrite: /kflogin", + "timeout_ms: 300000", + "service: " + ui_name + "."
+ params.namespace, + "use_websocket: true", + ]), + }, //annotations + }, + spec: { + ports: [ + { + port: 80, + targetPort: 5000, + }, + ], + selector: { + app: ui_name, + }, + type: "ClusterIP", + }, + }, + loginService:: loginService, + + local loginDeployment = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: ui_name, + namespace: params.namespace, + + }, + spec: { + replicas: 1, + strategy: { + type: "RollingUpdate", + }, + template: { + metadata: { + labels: { + app: ui_name, + }, + }, + spec: { + containers: [ + { + image: params.imageui, + name: "app", + ports: [ + { + containerPort: 5000, + }, + ], + }, + ], + }, + }, + }, + }, + loginDeployment:: loginDeployment, + + parts:: self, + all:: [ + self.authService, + self.authDeployment, + self.loginService, + self.loginDeployment, + ], + + list(obj=self.all):: util.list(obj), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/centraldashboard.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/centraldashboard.libsonnet new file mode 100644 index 00000000..1cec163d --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/centraldashboard.libsonnet @@ -0,0 +1,221 @@ +{ + local k = import "k.libsonnet", + local util = import "kubeflow/common/util.libsonnet", + new(_env, _params):: { + local params = _params + _env, + + local centralDashboardDeployment = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: "centraldashboard", + }, + name: "centraldashboard", + namespace: params.namespace, + }, + spec: { + template: { + metadata: { + labels: { + app: "centraldashboard", + }, + }, + spec: { + containers: [ + { + image: params.image, + name: "centraldashboard", + ports: [ + { + containerPort: 8082, + }, + ], + }, + ], + serviceAccountName: "centraldashboard", + }, + }, + }, + }, // deployUi + centralDashboardDeployment:: centralDashboardDeployment, + + local centralDashboardService = { + // Due to https://github.com/ksonnet/ksonnet/issues/670, escaped characters in + // jsonnet files are not interpreted correctly by ksonnet, which causes runtime + // parsing failures. This is fixed in ksonnet 0.12.0, so we can merge this back + // to the jsonnet file when we take a dependency on ksonnet 0.12.0 or later. + local annotations = function(namespace) { + "getambassador.io/config": + std.join("\n", [ + "---", + "apiVersion: ambassador/v0", + "kind: Mapping", + "name: centralui-mapping", + "prefix: /", + "rewrite: /", + "service: centraldashboard." 
+ namespace, + ]), + }, + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: "centraldashboard", + }, + name: "centraldashboard", + namespace: params.namespace, + annotations: annotations(params.namespace), + }, + spec: { + ports: [ + { + port: 80, + targetPort: 8082, + }, + ], + selector: { + app: "centraldashboard", + }, + sessionAffinity: "None", + type: "ClusterIP", + }, + }, //service + centralDashboardService:: centralDashboardService, + + local centralDashboardServiceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "centraldashboard", + namespace: params.namespace, + }, + }, // service account + centralDashboardServiceAccount:: centralDashboardServiceAccount, + + local centralDashboardRole = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "Role", + metadata: { + labels: { + app: "centraldashboard", + }, + name: "centraldashboard", + namespace: params.namespace, + }, + rules: [ + { + apiGroups: [""], + resources: [ + "pods", + "pods/exec", + "pods/log", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + { + apiGroups: [""], + resources: [ + "secrets", + ], + verbs: [ + "get", + ], + }, + ], + }, // role + centralDashboardRole:: centralDashboardRole, + + local centralDashboardRoleBinding = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "RoleBinding", + metadata: { + labels: { + app: "centraldashboard", + }, + name: "centraldashboard", + namespace: params.namespace, + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "Role", + name: "centraldashboard", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "centraldashboard", + namespace: params.namespace, + }, + ], + }, // role binding + centralDashboardRoleBinding:: centralDashboardRoleBinding, + + local centralDashboardClusterRole = { + apiVersion: "rbac.authorization.k8s.io/v1", + kind: "ClusterRole", + metadata: { + labels: { + app: "centraldashboard", + }, + name: "centraldashboard", + }, + rules: [ + { + apiGroups: [""], + resources: [ + "namespaces", + "events" + ], + verbs: [ + "get", + "list", + "watch", + ], + } + ], + }, // clusterrole + centralDashboardClusterRole:: centralDashboardClusterRole, + + local centralDashboardClusterRoleBinding = { + apiVersion: "rbac.authorization.k8s.io/v1", + kind: "ClusterRoleBinding", + metadata: { + labels: { + app: "centraldashboard", + }, + name: "centraldashboard", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "centraldashboard", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "centraldashboard", + namespace: params.namespace, + }, + ], + }, // clusterrolebinding + centralDashboardClusterRoleBinding:: centralDashboardClusterRoleBinding, + + parts:: self, + all:: [ + self.centralDashboardDeployment, + self.centralDashboardService, + self.centralDashboardServiceAccount, + self.centralDashboardRole, + self.centralDashboardRoleBinding, + self.centralDashboardClusterRole, + self.centralDashboardClusterRoleBinding, + ], + + list(obj=self.all):: util.list(obj), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/echo-server.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/echo-server.libsonnet new file mode 100644 index 00000000..dddda499 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/echo-server.libsonnet @@ -0,0 +1,94 @@ +{ + local util = import "kubeflow/common/util.libsonnet", + new(_env, _params):: { + local params = _params + _env, + + 
local service = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: params.name, + }, + name: params.name, + namespace: params.namespace, + annotations: { + "getambassador.io/config": + std.join("\n", [ + "---", + "apiVersion: ambassador/v0", + "kind: Mapping", + "name: " + params.name + "-mapping", + "prefix: /" + params.name, + "rewrite: /", + "service: " + params.name + "." + params.namespace, + ]), + }, //annotations + }, + spec: { + ports: [ + { + port: 80, + targetPort: 8080, + }, + ], + selector: { + app: params.name, + }, + type: "ClusterIP", + }, + }, + service:: service, + + local deployment = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: params.name, + namespace: params.namespace, + + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: params.name, + }, + }, + spec: { + containers: [ + { + image: params.image, + name: "app", + ports: [ + { + containerPort: 8080, + }, + ], + + readinessProbe: { + httpGet: { + path: "/headers", + port: 8080, + }, + initialDelaySeconds: 5, + periodSeconds: 30, + }, + }, + ], + }, + }, + }, + }, + deployment:: deployment, + + parts:: self, + all:: [ + self.service, + self.deployment, + ], + + list(obj=self.all):: util.list(obj), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/parts.yaml b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/parts.yaml new file mode 100644 index 00000000..f423cabe --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/parts.yaml @@ -0,0 +1,35 @@ +{ + "name": "common", + "apiVersion": "0.0.1", + "kind": "ksonnet.io/parts", + "description": "Common components of Kubeflow.\n", + "author": "kubeflow team ", + "contributors": [ + { + "name": "Jeremy Lewi", + "email": "jlewi@google.com" + } + ], + "repository": { + "type": "git", + "url": "https://github.com/kubeflow/kubeflow" + }, + "bugs": { + "url": "https://github.com/kubeflow/kubeflow/issues" + }, + "keywords": [ + "kubeflow", + "tensorflow" + ], + "quickStart": { + "prototype": "io.ksonnet.pkg.kubeflow", + "componentName": "common", + "flags": { + "name": "common", + "namespace": "default", + "disks": "" + }, + "comment": "Common Kubeflow components." + }, + "license": "Apache 2.0" +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/ambassador.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/ambassador.jsonnet new file mode 100644 index 00000000..10b3ccc5 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/ambassador.jsonnet @@ -0,0 +1,14 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.ambassador +// @description Ambassador Component +// @shortDescription Ambassador +// @param name string Name +// @optionalParam platform string none supported platforms {none|gke|minikube} +// @optionalParam ambassadorServiceType string ClusterIP The service type for the API Gateway {ClusterIP|NodePort|LoadBalancer}. +// @optionalParam ambassadorNodePort number 0 Optional nodePort to use when ambassadorServiceType is NodePort {30000-32767}. +// @optionalParam ambassadorImage string quay.io/datawire/ambassador:0.37.0 The image for the API Gateway. +// @optionalParam replicas number 3 The number of replicas. 
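+//
+// Usage sketch (hypothetical component name; the flags map to the optional params above):
+//   ks generate io.ksonnet.pkg.ambassador ambassador \
+//     --ambassadorServiceType=NodePort --ambassadorNodePort=30080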
+ +local ambassador = import "kubeflow/common/ambassador.libsonnet"; +local instance = ambassador.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/basic-auth.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/basic-auth.jsonnet new file mode 100644 index 00000000..9f270076 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/basic-auth.jsonnet @@ -0,0 +1,12 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.basic-auth +// @description Provides http basic auth for all ambassador traffic. +// @shortDescription Http basic auth. +// @param name string Name for the component +// @optionalParam authSecretName string kubeflow-login Contains username and passwordhash for UI/API auth. +// @optionalParam image string gcr.io/kubeflow-images-public/gatekeeper:v0.5.0 Auth service image to use. +// @optionalParam imageui string gcr.io/kubeflow-images-public/kflogin-ui:v0.5.0 UI image to use. + +local basicauth = import "kubeflow/common/basic-auth.libsonnet"; +local instance = basicauth.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/centraldashboard.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/centraldashboard.jsonnet new file mode 100644 index 00000000..a6fd49f6 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/centraldashboard.jsonnet @@ -0,0 +1,10 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.centraldashboard +// @description centraldashboard Component +// @shortDescription centraldashboard +// @param name string Name +// @optionalParam image string gcr.io/kubeflow-images-public/centraldashboard:v0.5.0 Image for the central dashboard + +local centraldashboard = import "kubeflow/common/centraldashboard.libsonnet"; +local instance = centraldashboard.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/echo-server.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/echo-server.jsonnet new file mode 100644 index 00000000..988890d0 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/echo-server.jsonnet @@ -0,0 +1,10 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.echo-server +// @description Provides a simple server for testing connections; primarily IAP. +// @shortDescription A simple echo server. +// @param name string Name for the component +// @optionalParam image string gcr.io/kubeflow-images-staging/echo-server:v20180628-44f08d31 The image to use. 
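+//
+// Usage sketch (hypothetical component name):
+//   ks generate io.ksonnet.pkg.echo-server echo-server
+// Ambassador then routes /echo-server to the deployed service.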
+ +local echoserver = import "kubeflow/common/echo-server.libsonnet"; +local instance = echoserver.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/spartakus.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/spartakus.jsonnet new file mode 100644 index 00000000..cfaab490 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/prototypes/spartakus.jsonnet @@ -0,0 +1,11 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.spartakus +// @description spartakus component for usage collection +// @shortDescription spartakus component for usage collection +// @param name string Name +// @optionalParam usageId string unknown_cluster Optional id to use when reporting usage to kubeflow.org +// @optionalParam reportUsage string false Whether or not to report Kubeflow usage to kubeflow.org. + +local spartakus = import "kubeflow/common/spartakus.libsonnet"; +local instance = spartakus.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/spartakus.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/spartakus.libsonnet new file mode 100644 index 00000000..a87d0029 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/spartakus.libsonnet @@ -0,0 +1,122 @@ +{ + local k = import "k.libsonnet", + local util = import "kubeflow/common/util.libsonnet", + new(_env, _params):: { + local params = _params + _env { + reportUsageBool: util.toBool(_params.reportUsage), + }, + + // Spartakus needs to be able to get information about the cluster to create a report. + local clusterRole = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + labels: { + app: "spartakus", + }, + name: "spartakus", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "nodes", + ], + verbs: [ + "get", + "list", + ], + }, + ], + }, // role + clusterRole:: clusterRole, + + local clusterRoleBinding = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + labels: { + app: "spartakus", + }, + name: "spartakus", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "spartakus", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "spartakus", + namespace: params.namespace, + }, + ], + }, // operator-role binding + clusterRoleBinding:: clusterRoleBinding, + + local serviceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + labels: { + app: "spartakus", + }, + name: "spartakus", + namespace: params.namespace, + }, + }, + serviceAccount:: serviceAccount, + + local volunteer = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: "spartakus-volunteer", + namespace: params.namespace, + labels: { + app: "spartakus", + }, + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: "spartakus-volunteer", + }, + }, + spec: { + containers: [ + { + image: "gcr.io/google_containers/spartakus-amd64:v1.1.0", + name: "volunteer", + args: [ + "volunteer", + "--cluster-id=" + params.usageId, + "--database=https://stats-collector.kubeflow.org", + ], + }, + ], + serviceAccountName: "spartakus", + }, // spec + }, + }, + }, // deployment + volunteer:: volunteer, + + parts:: self, + all:: if params.reportUsageBool then ( + [ + self.clusterRole, + self.clusterRoleBinding, + self.serviceAccount, + 
self.volunteer, + ] + ) else [], + + list(obj=self.all):: util.list(obj), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/ambassador_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/ambassador_test.jsonnet new file mode 100644 index 00000000..98b5a00b --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/ambassador_test.jsonnet @@ -0,0 +1,231 @@ +local ambassador = import "kubeflow/common/ambassador.libsonnet"; +local testSuite = import "kubeflow/common/testsuite.libsonnet"; + +local params = { + name: "ambassador", + platform: "gke", + ambassadorServiceType: "ClusterIP", + ambassadorImage: "quay.io/datawire/ambassador:0.37.0", + replicas: 3, +}; +local env = { + namespace: "kubeflow", +}; + +local instance = ambassador.new(env, params); + +local testCases = [ + { + actual: instance.parts.ambassadorService, + expected: + { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + service: "ambassador", + }, + name: "ambassador", + namespace: "kubeflow", + }, + spec: { + ports: [ + { + name: "ambassador", + port: 80, + targetPort: 80, + }, + ], + selector: { + service: "ambassador", + }, + type: "ClusterIP", + }, + }, + }, + { + actual: instance.parts.adminService, + expected: { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + service: "ambassador-admin", + }, + name: "ambassador-admin", + namespace: "kubeflow", + }, + spec: { + ports: [ + { + name: "ambassador-admin", + port: 8877, + targetPort: 8877, + }, + ], + selector: { + service: "ambassador", + }, + type: "ClusterIP", + }, + }, + }, + { + actual: instance.parts.ambassadorRole, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + name: "ambassador", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "services", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + { + apiGroups: [ + "", + ], + resources: [ + "configmaps", + ], + verbs: [ + "create", + "update", + "patch", + "get", + "list", + "watch", + ], + }, + { + apiGroups: [ + "", + ], + resources: [ + "secrets", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + ], + }, + }, + { + actual: instance.parts.ambassadorServiceAccount, + expected: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "ambassador", + namespace: "kubeflow", + }, + }, + }, + { + actual: instance.parts.ambassadorRoleBinding, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + name: "ambassador", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "ambassador", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "ambassador", + namespace: "kubeflow", + }, + ], + }, + }, + { + actual: instance.parts.ambassadorDeployment, + expected: + { + apiVersion: "apps/v1beta1", + kind: "Deployment", + metadata: { + name: "ambassador", + namespace: "kubeflow", + }, + spec: { + replicas: 3, + template: { + metadata: { + labels: { + service: "ambassador", + }, + namespace: "kubeflow", + }, + spec: { + containers: [ + { + env: [ + { + name: "AMBASSADOR_NAMESPACE", + valueFrom: { + fieldRef: { + fieldPath: "metadata.namespace", + }, + }, + }, + ], + image: "quay.io/datawire/ambassador:0.37.0", + livenessProbe: { + httpGet: { + path: "/ambassador/v0/check_alive", + port: 8877, + }, + initialDelaySeconds: 30, + periodSeconds: 30, + }, + name: "ambassador", + readinessProbe: { + httpGet: { + 
path: "/ambassador/v0/check_ready", + port: 8877, + }, + initialDelaySeconds: 30, + periodSeconds: 30, + }, + resources: { + limits: { + cpu: 1, + memory: "400Mi", + }, + requests: { + cpu: "200m", + memory: "100Mi", + }, + }, + }, + ], + restartPolicy: "Always", + serviceAccountName: "ambassador", + }, + }, + }, + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/centraldashboard_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/centraldashboard_test.jsonnet new file mode 100644 index 00000000..b0ca7222 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/centraldashboard_test.jsonnet @@ -0,0 +1,167 @@ +local centraldashboard = import "../centraldashboard.libsonnet"; +local testSuite = import "kubeflow/common/testsuite.libsonnet"; + +local params = { + image: "gcr.io/kubeflow-images-public/centraldashboard:v0.3.0", +}; +local env = { + namespace: "kftest", +}; +local centraldash = centraldashboard.new(params, env); + +local testCases = [ + { + actual: centraldash.centralDashboardDeployment, + expected: { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: "centraldashboard", + }, + name: "centraldashboard", + namespace: "kftest", + }, + spec: { + template: { + metadata: { + labels: { + app: "centraldashboard", + }, + }, + spec: { + containers: [ + { + image: "gcr.io/kubeflow-images-public/centraldashboard:v0.3.0", + name: "centraldashboard", + ports: [ + { + containerPort: 8082, + }, + ], + }, + ], + serviceAccountName: "centraldashboard", + }, + }, + }, + }, + }, + { + actual: centraldash.centralDashboardService, + expected: { + local annotations = function(namespace) { + "getambassador.io/config": + std.join("\n", [ + "---", + "apiVersion: ambassador/v0", + "kind: Mapping", + "name: centralui-mapping", + "prefix: /", + "rewrite: /", + "service: centraldashboard." 
+ namespace, + ]), + }, + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: "centraldashboard", + }, + name: "centraldashboard", + namespace: "kftest", + annotations: annotations("kftest"), + }, + spec: { + ports: [ + { + port: 80, + targetPort: 8082, + }, + ], + selector: { + app: "centraldashboard", + }, + sessionAffinity: "None", + type: "ClusterIP", + }, + }, + }, + { + actual: centraldash.centralDashboardServiceAccount, + expected: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "centraldashboard", + namespace: "kftest", + }, + }, + }, + { + actual: centraldash.centralDashboardRole, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "Role", + metadata: { + labels: { + app: "centraldashboard", + }, + name: "centraldashboard", + namespace: "kftest", + }, + rules: [ + { + apiGroups: [""], + resources: [ + "pods", + "pods/exec", + "pods/log", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + { + apiGroups: [""], + resources: [ + "secrets", + ], + verbs: [ + "get", + ], + }, + ], + }, + }, + { + actual: centraldash.centralDashboardRoleBinding, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "RoleBinding", + metadata: { + labels: { + app: "centraldashboard", + }, + name: "centraldashboard", + namespace: "kftest", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "Role", + name: "centraldashboard", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "centraldashboard", + namespace: "kftest", + }, + ], + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/echo-server_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/echo-server_test.jsonnet new file mode 100644 index 00000000..533819cc --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/echo-server_test.jsonnet @@ -0,0 +1,88 @@ +local echoServer = import "kubeflow/common/echo-server.libsonnet"; +local testSuite = import "kubeflow/common/testsuite.libsonnet"; + +local params = { + name: "echo-server", + image: "gcr.io/kubeflow-images-staging/echo-server:v20180628-44f08d31", +}; +local env = { + namespace: "kubeflow", +}; + +local instance = echoServer.new(env, params); + +local testCases = [ + { + actual: instance.parts.service, + expected: { + apiVersion: "v1", + kind: "Service", + metadata: { + annotations: { + "getambassador.io/config": "---\napiVersion: ambassador/v0\nkind: Mapping\nname: echo-server-mapping\nprefix: /echo-server\nrewrite: /\nservice: echo-server.kubeflow", + }, + labels: { + app: "echo-server", + }, + name: "echo-server", + namespace: "kubeflow", + }, + spec: { + ports: [ + { + port: 80, + targetPort: 8080, + }, + ], + selector: { + app: "echo-server", + }, + type: "ClusterIP", + }, + }, + }, + { + actual: instance.parts.deployment, + expected: { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: "echo-server", + namespace: "kubeflow", + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: "echo-server", + }, + }, + spec: { + containers: [ + { + image: "gcr.io/kubeflow-images-staging/echo-server:v20180628-44f08d31", + name: "app", + ports: [ + { + containerPort: 8080, + }, + ], + readinessProbe: { + httpGet: { + path: "/headers", + port: 8080, + }, + initialDelaySeconds: 5, + periodSeconds: 30, + }, + }, + ], + }, + }, + }, + }, + }, +]; + +testSuite.run(testCases) diff --git 
a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/spartakus_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/spartakus_test.jsonnet new file mode 100644 index 00000000..8286b1fe --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/spartakus_test.jsonnet @@ -0,0 +1,122 @@ +local spartakus = import "kubeflow/common/spartakus.libsonnet"; +local testSuite = import "kubeflow/common/testsuite.libsonnet"; + +local params = { + name: "spartakus", + usageId: "unknown_cluster", + reportUsage: "false", +}; +local env = { + namespace: "kubeflow", +}; + +local instance = spartakus.new(env, params); + +local testCases = [ + { + actual: instance.parts.clusterRole, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + labels: { + app: "spartakus", + }, + name: "spartakus", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "nodes", + ], + verbs: [ + "get", + "list", + ], + }, + ], + }, + }, + { + actual: instance.parts.clusterRoleBinding, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + labels: { + app: "spartakus", + }, + name: "spartakus", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "spartakus", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "spartakus", + namespace: "kubeflow", + }, + ], + }, + }, + { + actual: instance.parts.serviceAccount, + expected: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + labels: { + app: "spartakus", + }, + name: "spartakus", + namespace: "kubeflow", + }, + }, + }, + { + actual: instance.parts.volunteer, + expected: { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: "spartakus", + }, + name: "spartakus-volunteer", + namespace: "kubeflow", + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: "spartakus-volunteer", + }, + }, + spec: { + containers: [ + { + args: [ + "volunteer", + "--cluster-id=unknown_cluster", + "--database=https://stats-collector.kubeflow.org", + ], + image: "gcr.io/google_containers/spartakus-amd64:v1.1.0", + name: "volunteer", + }, + ], + serviceAccountName: "spartakus", + }, + }, + }, + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/util_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/util_test.jsonnet new file mode 100644 index 00000000..6509d063 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/tests/util_test.jsonnet @@ -0,0 +1,56 @@ +local util = import "../util.libsonnet"; + +std.assertEqual(util.lower("aTruez"), "atruez") && +std.assertEqual(util.lower("ATrUez"), "atruez") && +std.assertEqual(util.lower("atruez"), "atruez") && +std.assertEqual(util.lower("ATRUEZ"), "atruez") && +std.assertEqual(util.toBool(false), false) && +std.assertEqual(util.toBool(true), true) && +std.assertEqual(util.toBool("true"), true) && +std.assertEqual(util.toBool("True"), true) && +std.assertEqual(util.toBool("TRUE"), true) && +std.assertEqual(util.toBool("false"), false) && +std.assertEqual(util.toBool("False"), false) && +std.assertEqual(util.toBool("FALSE"), false) && +std.assertEqual(util.toBool("random string"), false) && +std.assertEqual(util.toBool(1), true) && +std.assertEqual(util.toBool(0), false) && +std.assertEqual(util.toBool(123), true) && 
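+// toArray splits a comma-delimited string and trims leading spaces;
+// non-string inputs yield an empty array: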
+std.assertEqual(util.toArray("a,b,c,d"), ["a", "b", "c", "d"]) && +std.assertEqual(util.toArray("ca, or,fl, mo"), ["ca", "or", "fl", "mo"]) && +std.assertEqual(std.length(util.toArray(2)), 0) && +std.assertEqual(std.length(util.toArray("hello world")), 1) && +std.assertEqual(std.length(util.toArray([1, 2, 3, 4])), 0) && +std.assertEqual(util.sort(["Craydad", "CCall", "crayon"]), ["CCall", "Craydad", "crayon"]) && +std.assertEqual( + { + new():: self + { + local configMap = { + kind: "ConfigMap", + }, + local service = { + kind: "Service", + }, + list:: util.list([configMap, service]), + }, + }.new().list, + { + apiVersion: "v1", + items: [ + { + kind: "ConfigMap", + }, + { + kind: "Service", + }, + ], + kind: "List", + } +) && +std.assertEqual( + util.setDiff( + util.sort(["CCall", "Craydad", "crayon", "fuzzball"]), + util.sort(["CCall", "Craydad", "crayon"]) + ), + ["fuzzball"] +) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/testsuite.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/testsuite.libsonnet new file mode 100644 index 00000000..c3871887 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/testsuite.libsonnet @@ -0,0 +1,29 @@ +// Some useful routines. +{ + run:: function(testCases) { + local testEqual(x) = x { + pass: x.actual == x.expected, + }, + local curry(testCases) = { + // For each test case determine whether expected matches equals + local testCasesWithResults = std.map( + testEqual, + testCases, + ), + return:: + testCasesWithResults, + }.return, + // Compute test suite. + local foldResults(left, right) = { + pass: left.pass && right.pass, + }, + local initResult = { pass: true }, + local suiteResult = std.foldl(foldResults, curry(testCases), initResult), + local testSuite = suiteResult { + testCases: curry(testCases), + }, + result:: + testSuite, + }.result, + +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/util.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/util.libsonnet new file mode 100644 index 00000000..ca95f4a3 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/util.libsonnet @@ -0,0 +1,207 @@ +// Some useful routines. +{ + local k = import "k.libsonnet", + local util = self, + + // Convert a string to lower case. + lower:: function(x) { + local cp(c) = std.codepoint(c), + local lowerLetter(c) = if cp(c) >= 65 && cp(c) < 91 then + std.char(cp(c) + 32) + else c, + result:: std.join("", std.map(lowerLetter, std.stringChars(x))), + }.result, + + // Convert non-boolean types like string,number to a boolean. + // This is primarily intended for dealing with parameters that should be booleans. 
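+  // e.g. toBool("True") == true, toBool(0) == false, and any
+  // unrecognized string (such as "random string") maps to false.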
+ toBool:: function(x) { + result:: + if std.type(x) == "boolean" then + x + else if std.type(x) == "string" then + std.asciiUpper(x) == "TRUE" + else if std.type(x) == "number" then + x != 0 + else + false, + }.result, + + // Convert a comma-delimited string to an Array + toArray:: function(str) { + local trim(str) = { + rest:: + if std.startsWith(str, " ") then + std.substr(str, 1, std.length(str) - 1) + else + str, + }.rest, + result:: + if std.type(str) == "string" && str != "null" && std.length(str) > 0 then + std.map(trim, std.split(str, ",")) + else [], + }.result, + + foldl:: function(key, value, objs) { + local aux(arr, i, running) = + if i >= std.length(arr) then + running + else + aux(arr, i + 1, running { [key(arr[i])]+: value(arr[i]) }) tailstrict, + return:: aux(objs, 0, {},), + }.return, + + sort:: function(arr, compare=function(a, b) { + return:: + if a == b then + 0 + else + if a < b then + -1 + else + 1, + }.return) { + local _sort(arr, compare) = { + local l = std.length(arr), + local f = { + local pivot = arr[0], + local rest = std.makeArray(l - 1, function(i) arr[i + 1]), + local lessorequal(x) = compare(x, pivot) <= 0, + local greater(x) = compare(x, pivot) > 0, + local left = _sort(std.filter(lessorequal, rest), compare) tailstrict, + local right = _sort(std.filter(greater, rest), compare) tailstrict, + return:: left + [pivot] + right, + }.return, + return:: + if l == 0 then + [] + else + f, + }.return, + return:: _sort(arr, compare), + }.return, + + setDiff:: function(a, b, compare=function(a, b) { + return:: + if a == b then + 0 + else if a < b then + -1 + else + 1, + }.return) { + local aux(a, b, i, j, acc) = + if i >= std.length(a) then + acc + else + if j >= std.length(b) then + aux(a, b, i + 1, j, acc + [a[i]]) tailstrict + else + if compare(a[i], b[j]) == 0 then + aux(a, b, i + 1, j + 1, acc) tailstrict + else + if compare(a[i], b[j]) == -1 then + aux(a, b, i + 1, j, acc + [a[i]]) tailstrict + else + aux(a, b, i, j + 1, acc) tailstrict, + return:: aux(a, b, 0, 0, []) tailstrict, + }.return, + + getApiVersionKindAndMetadata(resource):: { + return:: + if std.objectHas(resource.metadata, "resourceVersion") then { + apiVersion: resource.apiVersion, + kind: resource.kind, + metadata: { + labels: resource.metadata.labels, + name: resource.metadata.name, + namespace: resource.metadata.namespace, + resourceVersion: resource.metadata.resourceVersion, + } + } else { + apiVersion: resource.apiVersion, + kind: resource.kind, + metadata: { + labels: resource.metadata.labels, + name: resource.metadata.name, + namespace: resource.metadata.namespace, + }, + }, + }.return, + + groupByResource(resources):: { + local getKey(resource) = { + return:: + resource.kind, + }.return, + local getValue(resource) = { + return:: + { [resource.metadata.name]+: resource }, + }.return, + return:: util.foldl(getKey, getValue, resources), + }.return, + + comparator(a, b):: { + return:: + if a.metadata.name == b.metadata.name then + 0 + else + if a.metadata.name < b.metadata.name then + -1 + else + 1, + }.return, + + validateResource(resource):: { + return:: + if std.type(resource) == "object" && + std.objectHas(resource, "kind") && + std.objectHas(resource, "apiVersion") && + std.objectHas(resource, "metadata") && + std.objectHas(resource.metadata, "name") then + true + else + false, + }.return, + + extractGroups(obj):: + if std.type(obj) == "object" then + [obj[key] for key in std.objectFields(obj)] + else + [], + + extractResources(group):: + if std.type(group) == "object" then + [group[key] 
for key in std.objectFields(group)] + else + [], + + curryResources(resources, exists):: { + local existingResource(resource) = { + local resourceExists(kind, name) = { + return:: + if std.objectHas(resources, kind) && + std.objectHas(resources[kind], name) then + true + else + false, + }.return, + return:: + if util.validateResource(resource) then + resourceExists(resource.kind, resource.metadata.name) + else + false, + }.return, + local missingResource(resource) = { + return:: + existingResource(resource) == false, + }.return, + return:: + if exists == true then + existingResource + else + missingResource, + }.return, + + // Produce a list of manifests. obj must be an array. + list(obj):: k.core.v1.list.new(obj,), +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/version-info.json b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/version-info.json new file mode 100644 index 00000000..a55948ff --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/version-info.json @@ -0,0 +1,8 @@ +{ + "Major": "0", + "Minor": "2", + "Patch": "devel", + "GitCommit": "", + "BuildDate": "", + "ksonnetVersion": "0.9.2" +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/version.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/version.libsonnet new file mode 100644 index 00000000..7b3eb21b --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/common/version.libsonnet @@ -0,0 +1,15 @@ +{ + all(params):: [ + { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: "kubeflow-version", + namespace: params.namespace, + }, + data: { + "kubeflow-version": importstr "version-info.json", + }, + }, + ], +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/parts.yaml b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/parts.yaml new file mode 100644 index 00000000..e2990445 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/parts.yaml @@ -0,0 +1,22 @@ +{ + "name": "kubeflow examples", + "apiVersion": "0.0.1", + "kind": "ksonnet.io/parts", + "description": "kubeflow examples.\n", + "author": "kubeflow-team ", + "contributors": [ + ], + "repository": { + "type": "git", + "url": "https://github.com/kubeflow/kubeflow" + }, + "bugs": { + "url": "https://github.com/kubeflow/kubeflow/issues" + }, + "keywords": [ + "kubernetes", + "kubeflow", + "machine learning" + ], + "license": "Apache 2.0" +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/katib-studyjob-test-v1alpha1.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/katib-studyjob-test-v1alpha1.jsonnet new file mode 100644 index 00000000..3b37e282 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/katib-studyjob-test-v1alpha1.jsonnet @@ -0,0 +1,88 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.katib-studyjob-test-v1alpha1 +// @description katib-studyjob-test +// @shortDescription A Katib StudyJob using random suggestion +// @param name string Name for the job.
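+// Generates a StudyJob that random-searches --lr (double), --num-layers (int),
+// and --optimizer (categorical) for an MXNet MNIST training job.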
+ +local k = import "k.libsonnet"; + +local name = params.name; +local namespace = env.namespace; + +local studyjob = { + apiVersion: "kubeflow.org/v1alpha1", + kind: "StudyJob", + metadata: { + name: name, + namespace: namespace, + }, + spec: { + studyName: name, + owner: "crd", + optimizationtype: "maximize", + objectivevaluename: "Validation-accuracy", + optimizationgoal: 0.99, + requestcount: 1, + metricsnames: ["accuracy"], + parameterconfigs: [ + { + name: "--lr", + parametertype: "double", + feasible: { + min: "0.01", + max: "0.03", + }, + }, + { + name: "--num-layers", + parametertype: "int", + feasible: { + min: "2", + max: "5", + }, + }, + { + name: "--optimizer", + parametertype: "categorical", + feasible: { + list: ["sgd", "adam", "ftrl"], + }, + }, + ], + workerSpec: { + goTemplate: { + rawTemplate: ||| + apiVersion: batch/v1 + kind: Job + metadata: + name: {{.WorkerID}} + namespace: {{.NameSpace}} + spec: + template: + spec: + containers: + - name: {{.WorkerID}} + image: katib/mxnet-mnist-example + command: + - "python" + - "/mxnet/example/image-classification/train_mnist.py" + - "--batch-size=64" + {{- with .HyperParameters}} + {{- range .}} + - "{{.Name}}={{.Value}}" + {{- end}} + {{- end}} + restartPolicy: Never + |||, + }, + }, + suggestionSpec: { + suggestionAlgorithm: "random", + requestNumber: 1, + }, + }, +}; + +k.core.v1.list.new([ + studyjob, +]) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tensorboard.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tensorboard.jsonnet new file mode 100644 index 00000000..f3765845 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tensorboard.jsonnet @@ -0,0 +1,14 @@ +// @apiVersion 1 +// @name io.ksonnet.pkg.tensorboard +// @description Tensorboard components +// @shortDescription ksonnet components for Tensorboard +// @param name string Name to give to each of the components +// @optionalParam logDir string logs Name of the log directory holding the TF events file +// @optionalParam targetPort number 6006 Name of the targetPort +// @optionalParam servicePort number 9000 Name of the servicePort +// @optionalParam serviceType string ClusterIP The service type for tensorboard service +// @optionalParam defaultTbImage string tensorflow/tensorflow:1.8.0 default tensorboard image to use + +local tensorboard = import "kubeflow/tensorboard/tensorboard.libsonnet"; +local instance = tensorboard.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-job-simple-v1beta1.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-job-simple-v1beta1.jsonnet new file mode 100644 index 00000000..54cf24e6 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-job-simple-v1beta1.jsonnet @@ -0,0 +1,82 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.tf-job-simple-v1beta1 +// @description tf-job-simple +// @shortDescription A simple TFJob to run CNN benchmark +// @param name string Name for the job. 
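+// Note: identical to the tf-job-simple and tf-job-simple-v1beta2 prototypes
+// except for the TFJob apiVersion (kubeflow.org/v1beta1 here).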
+ +local k = import "k.libsonnet"; + +local name = params.name; +local namespace = env.namespace; +local image = "gcr.io/kubeflow/tf-benchmarks-cpu:v20171202-bdab599-dirty-284af3"; + +local tfjob = { + apiVersion: "kubeflow.org/v1beta1", + kind: "TFJob", + metadata: { + name: name, + namespace: namespace, + }, + spec: { + tfReplicaSpecs: { + Worker: { + replicas: 1, + template: { + spec: { + containers: [ + { + args: [ + "python", + "tf_cnn_benchmarks.py", + "--batch_size=32", + "--model=resnet50", + "--variable_update=parameter_server", + "--flush_stdout=true", + "--num_gpus=1", + "--local_parameter_device=cpu", + "--device=cpu", + "--data_format=NHWC", + ], + image: image, + name: "tensorflow", + workingDir: "/opt/tf-benchmarks/scripts/tf_cnn_benchmarks", + }, + ], + restartPolicy: "OnFailure", + }, + }, + }, + Ps: { + template: { + spec: { + containers: [ + { + args: [ + "python", + "tf_cnn_benchmarks.py", + "--batch_size=32", + "--model=resnet50", + "--variable_update=parameter_server", + "--flush_stdout=true", + "--num_gpus=1", + "--local_parameter_device=cpu", + "--device=cpu", + "--data_format=NHWC", + ], + image: image, + name: "tensorflow", + workingDir: "/opt/tf-benchmarks/scripts/tf_cnn_benchmarks", + }, + ], + restartPolicy: "OnFailure", + }, + }, + tfReplicaType: "PS", + }, + }, + }, +}; + +k.core.v1.list.new([ + tfjob, +]) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-job-simple-v1beta2.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-job-simple-v1beta2.jsonnet new file mode 100644 index 00000000..c33188e0 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-job-simple-v1beta2.jsonnet @@ -0,0 +1,82 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.tf-job-simple-v1beta2 +// @description tf-job-simple +// @shortDescription A simple TFJob to run CNN benchmark +// @param name string Name for the job. 
+ +local k = import "k.libsonnet"; + +local name = params.name; +local namespace = env.namespace; +local image = "gcr.io/kubeflow/tf-benchmarks-cpu:v20171202-bdab599-dirty-284af3"; + +local tfjob = { + apiVersion: "kubeflow.org/v1beta2", + kind: "TFJob", + metadata: { + name: name, + namespace: namespace, + }, + spec: { + tfReplicaSpecs: { + Worker: { + replicas: 1, + template: { + spec: { + containers: [ + { + args: [ + "python", + "tf_cnn_benchmarks.py", + "--batch_size=32", + "--model=resnet50", + "--variable_update=parameter_server", + "--flush_stdout=true", + "--num_gpus=1", + "--local_parameter_device=cpu", + "--device=cpu", + "--data_format=NHWC", + ], + image: image, + name: "tensorflow", + workingDir: "/opt/tf-benchmarks/scripts/tf_cnn_benchmarks", + }, + ], + restartPolicy: "OnFailure", + }, + }, + }, + Ps: { + template: { + spec: { + containers: [ + { + args: [ + "python", + "tf_cnn_benchmarks.py", + "--batch_size=32", + "--model=resnet50", + "--variable_update=parameter_server", + "--flush_stdout=true", + "--num_gpus=1", + "--local_parameter_device=cpu", + "--device=cpu", + "--data_format=NHWC", + ], + image: image, + name: "tensorflow", + workingDir: "/opt/tf-benchmarks/scripts/tf_cnn_benchmarks", + }, + ], + restartPolicy: "OnFailure", + }, + }, + tfReplicaType: "PS", + }, + }, + }, +}; + +k.core.v1.list.new([ + tfjob, +]) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-job-simple.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-job-simple.jsonnet new file mode 100644 index 00000000..57b1354e --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-job-simple.jsonnet @@ -0,0 +1,82 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.tf-job-simple +// @description tf-job-simple +// @shortDescription A simple TFJob to run CNN benchmark +// @param name string Name for the job. 
+ +local k = import "k.libsonnet"; + +local name = params.name; +local namespace = env.namespace; +local image = "gcr.io/kubeflow/tf-benchmarks-cpu:v20171202-bdab599-dirty-284af3"; + +local tfjob = { + apiVersion: "kubeflow.org/v1alpha2", + kind: "TFJob", + metadata: { + name: name, + namespace: namespace, + }, + spec: { + tfReplicaSpecs: { + Worker: { + replicas: 1, + template: { + spec: { + containers: [ + { + args: [ + "python", + "tf_cnn_benchmarks.py", + "--batch_size=32", + "--model=resnet50", + "--variable_update=parameter_server", + "--flush_stdout=true", + "--num_gpus=1", + "--local_parameter_device=cpu", + "--device=cpu", + "--data_format=NHWC", + ], + image: image, + name: "tensorflow", + workingDir: "/opt/tf-benchmarks/scripts/tf_cnn_benchmarks", + }, + ], + restartPolicy: "OnFailure", + }, + }, + }, + Ps: { + template: { + spec: { + containers: [ + { + args: [ + "python", + "tf_cnn_benchmarks.py", + "--batch_size=32", + "--model=resnet50", + "--variable_update=parameter_server", + "--flush_stdout=true", + "--num_gpus=1", + "--local_parameter_device=cpu", + "--device=cpu", + "--data_format=NHWC", + ], + image: image, + name: "tensorflow", + workingDir: "/opt/tf-benchmarks/scripts/tf_cnn_benchmarks", + }, + ], + restartPolicy: "OnFailure", + }, + }, + tfReplicaType: "PS", + }, + }, + }, +}; + +k.core.v1.list.new([ + tfjob, +]) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-serving-simple.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-serving-simple.jsonnet new file mode 100644 index 00000000..4210a72f --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-serving-simple.jsonnet @@ -0,0 +1,94 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.tf-serving-simple +// @description tf-serving-simple +// @shortDescription tf-serving-simple +// @param name string Name to give to each of the components + +local k = import "k.libsonnet"; + +local namespace = "default"; +local appName = import "param://name"; +local modelBasePath = "gs://kubeflow-models/inception"; +local modelName = "inception"; +local image = "gcr.io/kubeflow-images-public/tf-model-server-cpu:v20180327-995786ec"; + +local service = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: appName, + }, + name: appName, + namespace: namespace, + }, + spec: { + ports: [ + { + name: "grpc-tf-serving", + port: 9000, + targetPort: 9000, + }, + ], + selector: { + app: appName, + }, + type: "ClusterIP", + }, +}; + +local deployment = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: appName, + }, + name: appName, + namespace: namespace, + }, + spec: { + template: { + metadata: { + labels: { + app: appName, + }, + }, + spec: { + containers: [ + { + args: [ + "/usr/bin/tensorflow_model_server", + "--port=9000", + "--model_name=" + modelName, + "--model_base_path=" + modelBasePath, + ], + image: image, + imagePullPolicy: "IfNotPresent", + name: "inception", + ports: [ + { + containerPort: 9000, + }, + ], + resources: { + limits: { + cpu: "4", + memory: "4Gi", + }, + requests: { + cpu: "1", + memory: "1Gi", + }, + }, + }, + ], + }, + }, + }, +}; + +k.core.v1.list.new([ + service, + deployment, +]) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-serving-with-istio.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-serving-with-istio.jsonnet new file mode 
100644 index 00000000..391565a5 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/prototypes/tf-serving-with-istio.jsonnet @@ -0,0 +1,179 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.tf-serving-with-istio +// @description tf-serving-with-istio +// @shortDescription tf-serving-with-istio +// @param name string Name to give to each of the components + +local k = import "k.libsonnet"; + +local namespace = "default"; +local appName = import "param://name"; +local modelBasePath = "gs://kubeflow-models/inception"; +local modelName = "inception"; +local image = "gcr.io/kubeflow-images-public/tf-model-server-cpu:v20180327-995786ec"; +local httpProxyImage = "gcr.io/kubeflow-images-public/tf-model-server-http-proxy:v20180327-995786ec"; + +local routeRule = { + apiVersion: "config.istio.io/v1alpha2", + kind: "RouteRule", + metadata: { + name: appName, + namespace: namespace, + }, + spec: { + destination: { + name: "tf-serving", + }, + precedence: 0, + route: [ + { + labels: { + version: "v1", + }, + }, + ], + }, +}; + +local service = { + apiVersion: "v1", + kind: "Service", + metadata: { + annotations: { + "getambassador.io/config": + std.join("\n", [ + "---", + "apiVersion: ambassador/v0", + "kind: Mapping", + "name: tfserving-mapping-tf-serving-get", + "prefix: /models/tf-serving/", + "rewrite: /", + "method: GET", + "service: tf-serving." + namespace + ":8000", + "---", + "apiVersion: ambassador/v0", + "kind: Mapping", + "name: tfserving-mapping-tf-serving-post", + "prefix: /models/tf-serving/", + "rewrite: /model/tf-serving:predict", + "method: POST", + "service: tf-serving." + namespace + ":8000", + ]), + }, + labels: { + app: appName, + }, + name: appName, + namespace: namespace, + }, + spec: { + ports: [ + { + name: "grpc-tf-serving", + port: 9000, + targetPort: 9000, + }, + { + name: "http-tf-serving-proxy", + port: 8000, + targetPort: 8000, + }, + ], + selector: { + app: appName, + }, + type: "ClusterIP", + }, +}; + +local deployment = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: appName, + }, + name: appName, + namespace: namespace, + }, + spec: { + template: { + metadata: { + labels: { + app: appName, + }, + annotations: { + "sidecar.istio.io/inject": "true", + }, + }, + spec: { + containers: [ + { + args: [ + "/usr/bin/tensorflow_model_server", + "--port=9000", + "--model_name=" + modelName, + "--model_base_path=" + modelBasePath, + ], + image: image, + imagePullPolicy: "IfNotPresent", + name: "inception", + ports: [ + { + containerPort: 9000, + }, + ], + resources: { + limits: { + cpu: "4", + memory: "4Gi", + }, + requests: { + cpu: "1", + memory: "1Gi", + }, + }, + }, + { + name: appName + "-http-proxy", + image: httpProxyImage, + imagePullPolicy: "IfNotPresent", + command: [ + "python", + "/usr/src/app/server.py", + "--port=8000", + "--rpc_port=9000", + "--rpc_timeout=10.0", + ], + env: [], + ports: [ + { + containerPort: 8000, + }, + ], + resources: { + requests: { + memory: "1Gi", + cpu: "1", + }, + limits: { + memory: "4Gi", + cpu: "4", + }, + }, + securityContext: { + runAsUser: 1000, + fsGroup: 1000, + }, + }, + ], + }, + }, + }, +}; + +k.core.v1.list.new([ + routeRule, + service, + deployment, +]) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/tests/tensorboard_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/tests/tensorboard_test.jsonnet new file mode 100644 index 00000000..76498810 --- /dev/null +++ 
b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/examples/tests/tensorboard_test.jsonnet @@ -0,0 +1,102 @@ +local tensorboard = import "kubeflow/tensorboard/tensorboard.libsonnet"; + +local params = { + name: "tensorboard", + logDir: "logs", + targetPort: "6006", + servicePort: "9000", + serviceType: "LoadBalancer", + defaultTbImage: "tensorflow/tensorflow:1.9.0", +}; +local env = { + namespace: "test-kf-001", +}; + +local instance = tensorboard.new(env, params); + +std.assertEqual( + instance.tbService, + { + apiVersion: "v1", + kind: "Service", + metadata: { + annotations: { + "getambassador.io/config": "---\napiVersion: ambassador/v0\nkind: Mapping\nname: tb-mapping-tensorboard-get\nprefix: /tensorboard/ tensorboard/\nrewrite: /\nmethod: GET\nservice: tensorboard.test-kf-001:9000", + }, + labels: { + app: "tensorboard", + }, + name: "tensorboard", + namespace: "test-kf-001", + }, + spec: { + ports: [ + { + name: "tb", + port: "9000", + targetPort: "6006", + }, + ], + selector: { + app: "tensorboard", + }, + type: "LoadBalancer", + }, + } +) && + +std.assertEqual( + instance.tbDeployment, + { + apiVersion: "apps/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: "tensorboard", + }, + name: "tensorboard", + namespace: "test-kf-001", + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: "tensorboard", + }, + }, + spec: { + containers: [ + { + args: [ + "--logdir=logs", + "--port=6006", + ], + command: [ + "/usr/local/bin/tensorboard", + ], + image: "tensorflow/tensorflow:1.9.0", + imagePullPolicy: "IfNotPresent", + name: "tensorboard", + ports: [ + { + containerPort: "6006", + }, + ], + resources: { + limits: { + cpu: "4", + memory: "4Gi", + }, + requests: { + cpu: "1", + memory: "1Gi", + }, + }, + }, + ], + }, + }, + }, + } +) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/OWNERS b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/OWNERS new file mode 100644 index 00000000..beae48cb --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/OWNERS @@ -0,0 +1,7 @@ +approvers: + - abhi-g + - jlewi + - kunmingg + - lluunn + - r2d4 + - richardsliu diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/README.md b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/README.md new file mode 100644 index 00000000..59793f71 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/README.md @@ -0,0 +1,12 @@ +# gcp + + + +**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* + +- [gcp](#gcp) + + + + +> This ksonnet package contains GCP specific prototypes. 
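+
+The libsonnet modules in this package follow a common pattern: `new(_env, _params)`
+returns an object whose fields are the individual Kubernetes resources, and
+`list()` wraps them all in a `v1.List`. As a rough sketch, a ksonnet component
+could consume the Cloud Filestore module like this (all parameter values below
+are illustrative, not defaults):
+
+    local gcfsPv = import "kubeflow/gcp/google-cloud-filestore-pv.libsonnet";
+
+    local env = { namespace: "kubeflow" };
+    local params = {
+      name: "kubeflow-gcfs",    // name shared by the PV, the PVC and the Job's volume
+      storageCapacity: "1T",    // capacity of the Filestore share
+      path: "/kubeflow",        // NFS export path on the Filestore instance
+      serverIP: "10.10.10.10",  // IP of the Filestore instance
+      image: "ubuntu:16.04",    // image used by the set-gcfs-permissions Job
+    };
+
+    // Emits the PersistentVolume, PersistentVolumeClaim and permissions Job.
+    gcfsPv.new(env, params).list()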
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/basic-auth-ingress.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/basic-auth-ingress.libsonnet new file mode 100644 index 00000000..09705d1a --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/basic-auth-ingress.libsonnet @@ -0,0 +1,451 @@ +{ + local k = import "k.libsonnet", + local util = import "kubeflow/common/util.libsonnet", + new(_env, _params):: { + local params = _params + _env { + hostname: if std.objectHas(_params, "hostname") then _params.hostname else "null", + ingressName: "envoy-ingress" + }, + local namespace = params.namespace, + + // Test if the given hostname is in the form of: "NAME.endpoints.PROJECT.cloud.goog" + local isCloudEndpoint(str) = { + local toks = if std.type(str) == "null" then [] else std.split(str, "."), + result:: + (std.length(toks) == 5 && toks[1] == "endpoints" && toks[3] == "cloud" && toks[4] == "goog"), + }.result, + + local initServiceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "envoy", + namespace: namespace, + }, + }, // initServiceAccount + initServiceAccount:: initServiceAccount, + + local initClusterRoleBinding = { + kind: "ClusterRoleBinding", + apiVersion: "rbac.authorization.k8s.io/v1beta1", + metadata: { + name: "envoy", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "envoy", + namespace: namespace, + }, + ], + roleRef: { + kind: "ClusterRole", + name: "envoy", + apiGroup: "rbac.authorization.k8s.io", + }, + }, // initClusterRoleBinding + initClusterRoleBinding:: initClusterRoleBinding, + + local initClusterRole = { + kind: "ClusterRole", + apiVersion: "rbac.authorization.k8s.io/v1beta1", + metadata: { + name: "envoy", + namespace: namespace, + }, + rules: [ + { + apiGroups: [""], + resources: ["services", "configmaps", "secrets"], + verbs: ["get", "list", "patch", "update"], + }, + { + apiGroups: ["extensions"], + resources: ["ingresses"], + verbs: ["get", "list", "update", "patch"], + }, + ], + }, // initClusterRole + initClusterRole:: initClusterRole, + + local configMap = { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: "envoy-config", + namespace: namespace, + }, + data: { + "update_backend.sh": importstr "update_backend.sh", + }, + }, + configMap:: configMap, + + local whoamiService = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: "whoami", + }, + name: "whoami-app", + namespace: params.namespace, + annotations: { + "getambassador.io/config": + std.join("\n", [ + "---", + "apiVersion: ambassador/v0", + "kind: Mapping", + "name: whoami-mapping", + "prefix: /whoami", + "rewrite: /whoami", + "service: whoami-app."
+ namespace, + ]), + }, // annotations + }, + spec: { + ports: [ + { + port: 80, + targetPort: 8081, + }, + ], + selector: { + app: "whoami", + }, + type: "ClusterIP", + }, + }, // whoamiService + whoamiService:: whoamiService, + + local whoamiApp = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: "whoami-app", + namespace: params.namespace, + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: "whoami", + }, + }, + spec: { + containers: [ + { + env: [ + { + name: "PORT", + value: "8081", + }, + ], + image: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0", + name: "app", + ports: [ + { + containerPort: 8081, + }, + ], + readinessProbe: { + failureThreshold: 2, + httpGet: { + path: "/healthz", + port: 8081, + scheme: "HTTP", + }, + periodSeconds: 10, + successThreshold: 1, + timeoutSeconds: 5, + }, + }, + ], + }, + }, + }, + }, + whoamiApp:: whoamiApp, + + // Run the process to update the backend service + local backendUpdater = { + apiVersion: "apps/v1", + kind: "StatefulSet", + metadata: { + name: "backend-updater", + namespace: namespace, + labels: { + service: "backend-updater", + }, + }, + spec: { + selector: { + matchLabels: { + service: "backend-updater", + }, + }, + template: { + metadata: { + labels: { + service: "backend-updater", + }, + }, + spec: { + serviceAccountName: "envoy", + containers: [ + { + name: "backend-updater", + image: params.ingressSetupImage, + command: [ + "bash", + "/var/envoy-config/update_backend.sh", + ], + env: [ + { + name: "NAMESPACE", + value: namespace, + }, + { + name: "SERVICE", + value: "ambassador", + }, + { + name: "GOOGLE_APPLICATION_CREDENTIALS", + value: "/var/run/secrets/sa/admin-gcp-sa.json", + }, + { + name: "HEALTHCHECK_PATH", + value: "/whoami", + }, + { + name: "INGRESS_NAME", + value: params.ingressName, + }, + ], + volumeMounts: [ + { + mountPath: "/var/envoy-config/", + name: "config-volume", + }, + { + name: "sa-key", + readOnly: true, + mountPath: "/var/run/secrets/sa", + }, + ], + }, + ], + volumes: [ + { + configMap: { + name: "envoy-config", + }, + name: "config-volume", + }, + { + name: "sa-key", + secret: { + secretName: "admin-gcp-sa", + }, + }, + ], + }, + }, + }, + + }, // backendUpdater + backendUpdater:: backendUpdater, + + // TODO(danisla): Remove after https://github.com/kubernetes/ingress-gce/pull/388 is resolved per #1327.
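+ // The ConfigMap and Job below implement that workaround: the Job runs
+ // ingress_bootstrap.sh, which waits for cert-manager to populate the TLS
+ // secret and then patches the tls spec onto the ingress.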
+ local ingressBootstrapConfigMap = { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: "ingress-bootstrap-config", + namespace: namespace, + }, + data: { + "ingress_bootstrap.sh": importstr "ingress_bootstrap.sh", + }, + }, + ingressBootstrapConfigMap:: ingressBootstrapConfigMap, + + local ingressBootstrapJob = { + apiVersion: "batch/v1", + kind: "Job", + metadata: { + name: "ingress-bootstrap", + namespace: namespace, + }, + spec: { + template: { + spec: { + restartPolicy: "OnFailure", + serviceAccountName: "envoy", + containers: [ + { + name: "bootstrap", + image: params.ingressSetupImage, + command: ["/var/ingress-config/ingress_bootstrap.sh"], + env: [ + { + name: "NAMESPACE", + value: namespace, + }, + { + name: "TLS_SECRET_NAME", + value: params.secretName, + }, + { + name: "TLS_HOST_NAME", + value: params.hostname, + }, + { + name: "INGRESS_NAME", + value: "envoy-ingress", + }, + ], + volumeMounts: [ + { + mountPath: "/var/ingress-config/", + name: "ingress-config", + }, + ], + }, + ], + volumes: [ + { + configMap: { + name: "ingress-bootstrap-config", + // TODO(danisla): replace with std.parseOctal("0755") after upgrading to ksonnet 0.12 + defaultMode: 493, + }, + name: "ingress-config", + }, + ], + }, + }, + }, + }, // ingressBootstrapJob + ingressBootstrapJob:: ingressBootstrapJob, + + local ingress = { + apiVersion: "extensions/v1beta1", + kind: "Ingress", + metadata: { + name: "envoy-ingress", + namespace: namespace, + annotations: { + "kubernetes.io/tls-acme": "true", + "ingress.kubernetes.io/ssl-redirect": "true", + "kubernetes.io/ingress.global-static-ip-name": params.ipName, + "certmanager.k8s.io/issuer": params.issuer, + }, + }, + spec: { + rules: [ + { + [if params.hostname != "null" then "host"]: params.hostname, + http: { + paths: [ + { + backend: { + // Due to https://github.com/kubernetes/contrib/blob/master/ingress/controllers/gce/examples/health_checks/README.md#limitations + // Keep the servicePort the same as the port we are targeting on the backend, so that servicePort + // matches targetPort for the purpose of health checking.
+ serviceName: "ambassador", + servicePort: 80, + }, + path: "/*", + }, + ], + }, + }, + ], + }, + }, // iapIngress + ingress:: ingress, + + local certificate = if params.privateGKECluster == "false" then ( + { + apiVersion: "certmanager.k8s.io/v1alpha1", + kind: "Certificate", + metadata: { + name: params.secretName, + namespace: namespace, + }, + + spec: { + secretName: params.secretName, + issuerRef: { + name: params.issuer, + kind: "ClusterIssuer", + }, + commonName: params.hostname, + dnsNames: [ + params.hostname, + ], + acme: { + config: [ + { + http01: { + ingress: "envoy-ingress", + }, + domains: [ + params.hostname, + ], + }, + ], + }, + }, + } // certificate + ), + certificate:: certificate, + + local cloudEndpoint = if isCloudEndpoint(params.hostname) then ( + { + local makeEndpointParams(str) = { + local toks = std.split(str, "."), + result:: { + name: toks[0], + project: toks[2], + }, + }.result, + local endpointParams = makeEndpointParams(params.hostname), + apiVersion: "ctl.isla.solutions/v1", + kind: "CloudEndpoint", + metadata: { + name: endpointParams.name, + namespace: namespace, + }, + spec: { + project: endpointParams.project, + targetIngress: { + name: "envoy-ingress", + namespace: namespace, + }, + }, + } // cloudEndpoint + ), + cloudEndpoint:: cloudEndpoint, + + parts:: self, + all:: [ + self.initServiceAccount, + self.initClusterRoleBinding, + self.initClusterRole, + self.whoamiService, + self.whoamiApp, + self.backendUpdater, + self.configMap, + self.ingressBootstrapConfigMap, + self.ingressBootstrapJob, + self.ingress, + self.certificate, + self.cloudEndpoint, + ], + + list(obj=self.all):: k.core.v1.list.new(obj,), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/cert-manager.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/cert-manager.libsonnet new file mode 100644 index 00000000..fbc3fb4f --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/cert-manager.libsonnet @@ -0,0 +1,189 @@ +{ + local k = import "k.libsonnet", + new(_env, _params):: { + local params = _params + _env, + + local certificateCRD = { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "certificates.certmanager.k8s.io", + }, + spec: { + group: "certmanager.k8s.io", + version: "v1alpha1", + names: { + kind: "Certificate", + plural: "certificates", + }, + scope: "Namespaced", + }, + }, + certificateCRD:: certificateCRD, + + local clusterIssuerCRD = { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "clusterissuers.certmanager.k8s.io", + }, + + spec: { + group: "certmanager.k8s.io", + version: "v1alpha1", + names: { + kind: "ClusterIssuer", + plural: "clusterissuers", + }, + scope: "Cluster", + }, + }, + clusterIssuerCRD:: clusterIssuerCRD, + + local issuerCRD = { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "issuers.certmanager.k8s.io", + }, + spec: { + group: "certmanager.k8s.io", + version: "v1alpha1", + names: { + kind: "Issuer", + plural: "issuers", + }, + scope: "Namespaced", + }, + }, + issuerCRD:: issuerCRD, + + local serviceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "cert-manager", + namespace: params.namespace, + }, + }, + serviceAccount:: serviceAccount, + + local clusterRole = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + name: "cert-manager", + }, + 
rules: [ + { + apiGroups: ["certmanager.k8s.io"], + resources: ["certificates", "issuers", "clusterissuers"], + verbs: ["*"], + }, + { + apiGroups: [""], + resources: ["secrets", "events", "endpoints", "services", "pods", "configmaps"], + verbs: ["*"], + }, + { + apiGroups: ["extensions"], + resources: ["ingresses"], + verbs: ["*"], + }, + ], + }, + clusterRole:: clusterRole, + + local clusterRoleBinding = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + name: "cert-manager", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "cert-manager", + }, + subjects: [ + { + name: "cert-manager", + namespace: params.namespace, + kind: "ServiceAccount", + }, + ], + }, + clusterRoleBinding:: clusterRoleBinding, + + local deploy = { + apiVersion: "apps/v1beta1", + kind: "Deployment", + metadata: { + name: "cert-manager", + namespace: params.namespace, + labels: { + app: "cert-manager", + }, + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: "cert-manager", + }, + }, + spec: { + serviceAccountName: "cert-manager", + containers: [ + { + name: "cert-manager", + image: params.certManagerImage, + imagePullPolicy: "IfNotPresent", + args: [ + "--cluster-resource-namespace=" + params.namespace, + "--leader-election-namespace=" + params.namespace, + ], + }, + ], + }, + }, + }, + }, + deploy:: deploy, + + local issuerLEProd = { + apiVersion: "certmanager.k8s.io/v1alpha1", + kind: "ClusterIssuer", + metadata: { + name: "letsencrypt-prod", + }, + spec: { + acme: { + server: params.acmeUrl, + email: params.acmeEmail, + privateKeySecretRef: { + name: "letsencrypt-prod-secret", + }, + http01: { + }, + }, + }, + }, + issuerLEProd:: issuerLEProd, + + parts:: self, + all:: [ + self.certificateCRD, + self.clusterIssuerCRD, + self.issuerCRD, + self.serviceAccount, + self.clusterRole, + self.clusterRoleBinding, + self.deploy, + self.issuerLEProd, + ], + + list(obj=self.all):: k.core.v1.list.new(obj,), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/cloud-endpoints.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/cloud-endpoints.libsonnet new file mode 100644 index 00000000..8b82d785 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/cloud-endpoints.libsonnet @@ -0,0 +1,214 @@ +{ + local k = import "k.libsonnet", + new(_env, _params):: { + local params = { + cloudEndpointsImage: "gcr.io/cloud-solutions-group/cloud-endpoints-controller:0.2.1", + } + _params + _env, + + local endpointsCRD = { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "cloudendpoints.ctl.isla.solutions", + }, + spec: { + group: "ctl.isla.solutions", + version: "v1", + scope: "Namespaced", + names: { + plural: "cloudendpoints", + singular: "cloudendpoint", + kind: "CloudEndpoint", + shortNames: [ + "cloudep", + "ce", + ], + }, + }, + }, // endpointsCRD + endpointsCRD:: endpointsCRD, + + local endpointsClusterRole = { + kind: "ClusterRole", + apiVersion: "rbac.authorization.k8s.io/v1beta1", + metadata: { + name: "cloud-endpoints-controller", + }, + rules: [ + { + apiGroups: [""], + resources: ["services", "configmaps"], + verbs: ["get", "list"], + }, + { + apiGroups: ["extensions"], + resources: ["ingresses"], + verbs: ["get", "list"], + }, + ], + }, + endpointsClusterRole:: endpointsClusterRole, + + local endpointsClusterRoleBinding = { + kind: "ClusterRoleBinding", + apiVersion: 
"rbac.authorization.k8s.io/v1beta1", + metadata: { + name: "cloud-endpoints-controller", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "cloud-endpoints-controller", + namespace: params.namespace, + }, + ], + roleRef: { + kind: "ClusterRole", + name: "cloud-endpoints-controller", + apiGroup: "rbac.authorization.k8s.io", + }, + }, + endpointsClusterRoleBinding:: endpointsClusterRoleBinding, + + local endpointsService = { + apiVersion: "v1", + kind: "Service", + metadata: { + name: "cloud-endpoints-controller", + namespace: params.namespace, + }, + spec: { + type: "ClusterIP", + ports: [ + { + name: "http", + port: 80, + }, + ], + selector: { + app: "cloud-endpoints-controller", + }, + }, + }, // endpointsService + endpointsService:: endpointsService, + + local endpointsServiceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "cloud-endpoints-controller", + namespace: params.namespace, + }, + }, // endpointsServiceAccount + endpointsServiceAccount:: endpointsServiceAccount, + + local endpointsDeploy = { + apiVersion: "apps/v1beta1", + kind: "Deployment", + metadata: { + name: "cloud-endpoints-controller", + namespace: params.namespace, + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: "cloud-endpoints-controller", + }, + }, + spec: { + serviceAccountName: "cloud-endpoints-controller", + terminationGracePeriodSeconds: 5, + containers: [ + { + name: "cloud-endpoints-controller", + image: params.cloudEndpointsImage, + imagePullPolicy: "Always", + env: [ + { + name: "GOOGLE_APPLICATION_CREDENTIALS", + value: "/var/run/secrets/sa/" + params.secretKey, + }, + ], + volumeMounts: [ + { + name: "sa-key", + readOnly: true, + mountPath: "/var/run/secrets/sa", + }, + ], + readinessProbe: { + httpGet: { + path: "/healthz", + port: 80, + scheme: "HTTP", + }, + periodSeconds: 5, + timeoutSeconds: 5, + successThreshold: 1, + failureThreshold: 2, + }, + }, + ], + volumes: [ + { + name: "sa-key", + secret: { + secretName: params.secretName, + }, + }, + ], + }, + }, + }, + }, // endpointsDeploy + endpointsDeploy:: endpointsDeploy, + + local endpointsCompositeController = { + apiVersion: "metacontroller.k8s.io/v1alpha1", + kind: "CompositeController", + metadata: { + name: "cloud-endpoints-controller", + }, + spec: { + generateSelector: true, + resyncPeriodSeconds: 2, + parentResource: { + apiVersion: "ctl.isla.solutions/v1", + resource: "cloudendpoints", + }, + childResources: [], + clientConfig: { + service: { + name: "cloud-endpoints-controller", + namespace: params.namespace, + caBundle: "...", + }, + }, + hooks: { + sync: { + webhook: { + url: "http://cloud-endpoints-controller." 
+ params.namespace + "/sync", + }, + }, + }, + }, + }, // endpointsCompositeController + endpointsCompositeController:: endpointsCompositeController, + + parts:: self, + local all = [ + self.endpointsCRD, + self.endpointsClusterRole, + self.endpointsClusterRoleBinding, + self.endpointsService, + self.endpointsServiceAccount, + self.endpointsDeploy, + self.endpointsCompositeController, + ], + all:: all, + + list(obj=self.all):: k.core.v1.list.new(obj,), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/configure_envoy_for_iap.sh b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/configure_envoy_for_iap.sh new file mode 100644 index 00000000..1b452733 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/configure_envoy_for_iap.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# +# A script to modify envoy config to perform JWT validation +# given the information for the service. +# Script executed by the iap container to configure IAP. When finished, the envoy config is created with the JWT audience. + +[ -z ${NAMESPACE} ] && echo Error NAMESPACE must be set && exit 1 +[ -z ${SERVICE} ] && echo Error SERVICE must be set && exit 1 + +PROJECT=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/project-id) +if [ -z ${PROJECT} ]; then + echo Error unable to fetch PROJECT from compute metadata + exit 1 +fi + +PROJECT_NUM=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/numeric-project-id) +if [ -z ${PROJECT_NUM} ]; then + echo Error unable to fetch PROJECT_NUM from compute metadata + exit 1 +fi + +checkIAP() { + # created by init container. + . /var/shared/healthz.env + + # If node port or backend id change, so does the JWT audience. + CURR_NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}') + CURR_BACKEND_ID=$(gcloud compute --project=${PROJECT} backend-services list --filter=name~k8s-be-${CURR_NODE_PORT}- --format='value(id)') + [ "$BACKEND_ID" == "$CURR_BACKEND_ID" ] +} + +# Activate the service account +for i in $(seq 1 10); do + gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS} && break || sleep 10 +done + +# Print out the config for debugging +gcloud config list + +NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}') +while [[ -z ${BACKEND_ID} ]]; do + BACKEND_ID=$(gcloud compute --project=${PROJECT} backend-services list --filter=name~k8s-be-${NODE_PORT}- --format='value(id)') + echo "Waiting for backend id PROJECT=${PROJECT} NAMESPACE=${NAMESPACE} SERVICE=${SERVICE} filter=name~k8s-be-${NODE_PORT}-..." + sleep 2 +done +echo BACKEND_ID=${BACKEND_ID} + +NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}') +BACKEND_SERVICE=$(gcloud --project=${PROJECT} compute backend-services list --filter=name~k8s-be-${NODE_PORT}- --uri) + +JWT_AUDIENCE="/projects/${PROJECT_NUM}/global/backendServices/${BACKEND_ID}" + +# For healthcheck compare. 
+echo "JWT_AUDIENCE=${JWT_AUDIENCE}" > /var/shared/healthz.env +echo "NODE_PORT=${NODE_PORT}" >> /var/shared/healthz.env +echo "BACKEND_ID=${BACKEND_ID}" >> /var/shared/healthz.env + +kubectl get configmap -n ${NAMESPACE} envoy-config -o jsonpath='{.data.envoy-config\.json}' | + sed -e "s|{{JWT_AUDIENCE}}|${JWT_AUDIENCE}|g" >/var/shared/envoy-config.json + +echo "Restarting envoy" +curl -s ${ENVOY_ADMIN}/quitquitquit + +# Verify IAP every 10 seconds. +while true; do + if ! checkIAP; then + echo "$(date) WARN: IAP check failed, restarting container." + exit 1 + fi + sleep 10 +done diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/create_ca.sh b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/create_ca.sh new file mode 100644 index 00000000..c10d9912 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/create_ca.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +set -e + +usage() { + cat <> ${tmpdir}/csr.conf +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth +subjectAltName = @alt_names +[alt_names] +DNS.1 = ${service} +DNS.2 = ${service}.${namespace} +DNS.3 = ${service}.${namespace}.svc +EOF + +openssl genrsa -out ${tmpdir}/server-key.pem 2048 +openssl req -new -key ${tmpdir}/server-key.pem -subj "/CN=${service}.${namespace}.svc" -out ${tmpdir}/server.csr -config ${tmpdir}/csr.conf + +# Self sign +openssl x509 -req -days 365 -in ${tmpdir}/server.csr -CA ${tmpdir}/self_ca.crt -CAkey ${tmpdir}/self_ca.key -CAcreateserial -out ${tmpdir}/server-cert.pem + +# create the secret with CA cert and server cert/key +kubectl create secret generic ${secret} \ + --from-file=key.pem=${tmpdir}/server-key.pem \ + --from-file=cert.pem=${tmpdir}/server-cert.pem \ + --dry-run -o yaml | + kubectl -n ${namespace} apply -f - + +cat ${tmpdir}/self_ca.crt +# -a means base64 encode +caBundle=`cat ${tmpdir}/self_ca.crt | openssl enc -a -A` +echo ${caBundle} + +patchString='[{"op": "replace", "path": "/webhooks/0/clientConfig/caBundle", "value":"{{CA_BUNDLE}}"}]' +patchString=`echo ${patchString} | sed "s|{{CA_BUNDLE}}|${caBundle}|g"` +echo ${patchString} + +checkWebhookConfig() { + currentBundle=$(kubectl get mutatingwebhookconfigurations -n ${namespace} gcp-cred-webhook -o jsonpath='{.webhooks[0].clientConfig.caBundle}') + [[ "$currentBundle" == "$caBundle" ]] +} + +while true; do + if ! checkWebhookConfig; then + echo "patching ca bundle for webhook configuration..." 
+ kubectl patch mutatingwebhookconfiguration gcp-cred-webhook \ + --type='json' -p="${patchString}" + fi + sleep 10 +done diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/google-cloud-filestore-pv.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/google-cloud-filestore-pv.libsonnet new file mode 100644 index 00000000..23dcab8f --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/google-cloud-filestore-pv.libsonnet @@ -0,0 +1,104 @@ +{ + local k = import "k.libsonnet", + new(_env, _params):: { + local params = _params + _env, + + local persistentVolume = { + apiVersion: "v1", + kind: "PersistentVolume", + metadata: { + name: params.name, + namespace: params.namespace, + }, + spec: { + capacity: { + storage: params.storageCapacity, + }, + accessModes: [ + "ReadWriteMany", + ], + nfs: { + path: params.path, + server: params.serverIP, + }, + }, + }, + persistentVolume:: persistentVolume, + + local persistentVolumeClaim = { + apiVersion: "v1", + kind: "PersistentVolumeClaim", + metadata: { + name: params.name, + namespace: params.namespace, + }, + spec: { + accessModes: [ + "ReadWriteMany", + ], + storageClassName: "nfs-storage", + volumeName: params.name, + resources: { + requests: { + storage: params.storageCapacity, + }, + }, + }, + }, + persistentVolumeClaim:: persistentVolumeClaim, + + // Set 777 permissions on the GCFS NFS so that non-root users + // like jovyan can use that NFS share + local gcfsPersmissions = { + apiVersion: "batch/v1", + kind: "Job", + metadata: { + name: "set-gcfs-permissions", + namespace: params.namespace, + }, + spec: { + template: { + spec: { + containers: [ + { + name: "set-gcfs-permissions", + image: params.image, + command: [ + "chmod", + "777", + "/kubeflow-gcfs", + ], + volumeMounts: [ + { + mountPath: "/kubeflow-gcfs", + name: params.name, + }, + ], + }, + ], + restartPolicy: "OnFailure", + volumes: [ + { + name: params.name, + persistentVolumeClaim: { + claimName: params.name, + readOnly: false, + }, + }, + ], + }, + }, + }, + }, + gcfsPersmissions:: gcfsPersmissions, + + parts:: self, + all:: [ + self.persistentVolume, + self.persistentVolumeClaim, + self.gcfsPersmissions, + ], + + list(obj=self.all):: k.core.v1.list.new(obj,), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/gpu-driver.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/gpu-driver.libsonnet new file mode 100644 index 00000000..0b0d9159 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/gpu-driver.libsonnet @@ -0,0 +1,130 @@ +{ + local k = import "k.libsonnet", + new(_env, _params):: { + local params = _params + _env, + + local daemonset = { + "apiVersion": "extensions/v1beta1", + "kind": "DaemonSet", + "metadata": { + "name": "nvidia-driver-installer", + "namespace": "kube-system", + "labels": { + "k8s-app": "nvidia-driver-installer" + } + }, + "spec": { + "template": { + "metadata": { + "labels": { + "name": "nvidia-driver-installer", + "k8s-app": "nvidia-driver-installer" + } + }, + "spec": { + "affinity": { + "nodeAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "cloud.google.com/gke-accelerator", + "operator": "Exists" + } + ] + } + ] + } + } + }, + "tolerations": [ + { + "operator": "Exists" + } + ], + "hostNetwork": true, + "hostPID": true, + "volumes": [ + { + "name": "dev", + "hostPath": { + "path": "/dev" + } + }, + { + "name": 
"nvidia-install-dir-host", + "hostPath": { + "path": "/home/kubernetes/bin/nvidia" + } + }, + { + "name": "root-mount", + "hostPath": { + "path": "/" + } + } + ], + "initContainers": [ + { + "image": "cos-nvidia-installer:fixed", + "imagePullPolicy": "Never", + "name": "nvidia-driver-installer", + "resources": { + "requests": { + "cpu": 0.15 + } + }, + "securityContext": { + "privileged": true + }, + "env": [ + { + "name": "NVIDIA_INSTALL_DIR_HOST", + "value": "/home/kubernetes/bin/nvidia" + }, + { + "name": "NVIDIA_INSTALL_DIR_CONTAINER", + "value": "/usr/local/nvidia" + }, + { + "name": "ROOT_MOUNT_DIR", + "value": "/root" + } + ], + "volumeMounts": [ + { + "name": "nvidia-install-dir-host", + "mountPath": "/usr/local/nvidia" + }, + { + "name": "dev", + "mountPath": "/dev" + }, + { + "name": "root-mount", + "mountPath": "/root" + } + ] + } + ], + "containers": [ + { + "image": "gcr.io/google-containers/pause:2.0", + "name": "pause" + } + ] + } + } + } + }, + daemonset:: daemonset, + + parts:: self, + all:: [ + self.daemonset, + ], + + list(obj=self.all):: k.core.v1.list.new(obj,), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/healthcheck_route.yaml b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/healthcheck_route.yaml new file mode 100644 index 00000000..d912a566 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/healthcheck_route.yaml @@ -0,0 +1,51 @@ +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: default-routes + namespace: kubeflow +spec: + hosts: + - "*" + gateways: + - kubeflow-gateway + http: + - match: + - uri: + exact: /healthz + route: + - destination: + port: + number: 80 + host: whoami-app.kubeflow.svc.cluster.local + - match: + - uri: + exact: /whoami + route: + - destination: + port: + number: 80 + host: whoami-app.kubeflow.svc.cluster.local + - match: + - uri: + prefix: / + route: + - destination: + port: + number: 80 + host: ambassador.kubeflow.svc.cluster.local +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: kubeflow-gateway + namespace: kubeflow +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/iap.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/iap.libsonnet new file mode 100644 index 00000000..97adbf62 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/iap.libsonnet @@ -0,0 +1,1046 @@ +{ + local k = import "k.libsonnet", + local util = import "kubeflow/common/util.libsonnet", + new(_env, _params):: { + local params = _params + _env { + disableJwtChecking: util.toBool(_params.disableJwtChecking), + hostname: if std.objectHas(_params, "hostname") then _params.hostname else "null", + envoyPort: 8080, + envoyAdminPort: 8001, + envoyStatsPort: 8025, + useIstio: util.toBool(_params.useIstio), + ingressName: "envoy-ingress" + }, + local namespace = if params.useIstio then params.istioNamespace else params.namespace, + + // Test if the given hostname is in the form of: "NAME.endpoints.PROJECT.cloud.goog" + local isCloudEndpoint(str) = { + local toks = if std.type(str) == "null" then [] else std.split(str, "."), + result:: + (std.length(toks) == 5 && toks[1] == "endpoints" && toks[3] == "cloud" && toks[4] == "goog"), + }.result, + + local service = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + service: 
"envoy", + }, + name: "envoy", + namespace: namespace, + annotations: { + "beta.cloud.google.com/backend-config": '{"ports": {"envoy":"iap-backendconfig"}}', + }, + }, + spec: { + ports: [ + { + name: "envoy", + port: params.envoyPort, + targetPort: params.envoyPort, + }, + ], + selector: { + service: "envoy", + }, + // NodePort because this will be the backend for our ingress. + type: "NodePort", + }, + }, // service + service:: service, + + local initServiceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "envoy", + namespace: namespace, + }, + }, // initServiceAccount + initServiceAccount:: initServiceAccount, + + local initClusterRoleBinding = { + kind: "ClusterRoleBinding", + apiVersion: "rbac.authorization.k8s.io/v1beta1", + metadata: { + name: "envoy", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "envoy", + namespace: namespace, + }, + ], + roleRef: { + kind: "ClusterRole", + name: "envoy", + apiGroup: "rbac.authorization.k8s.io", + }, + }, // initClusterRoleBinding + initClusterRoleBinding:: initClusterRoleBinding, + + local initClusterRole = { + kind: "ClusterRole", + apiVersion: "rbac.authorization.k8s.io/v1beta1", + metadata: { + name: "envoy", + namespace: namespace, + }, + rules: [ + { + apiGroups: [""], + resources: ["services", "configmaps", "secrets"], + verbs: ["get", "list", "patch", "update"], + }, + { + apiGroups: ["extensions"], + resources: ["ingresses"], + verbs: ["get", "list", "update", "patch"], + }, + ] + if params.useIstio then [ + { + apiGroups: ["authentication.istio.io"], + resources: ["policies"], + verbs: ["*"], + }, + { + apiGroups: ["networking.istio.io"], + resources: ["gateways", "virtualservices"], + verbs: ["*"], + }, + ] else [], + }, // initClusterRoleBinding + initClusterRole:: initClusterRole, + + local deploy = { + local envoyContainer(params) = { + image: params.image, + command: [ + "/usr/local/bin/envoy", + "-c", + params.configPath, + "--log-level", + "info", + // Since we are running multiple instances of envoy on the same host we need to set a unique baseId + "--base-id", + params.baseId, + ], + imagePullPolicy: "Always", + name: params.name, + livenessProbe: { + httpGet: { + path: params.healthPath, + port: params.healthPort, + }, + initialDelaySeconds: 30, + periodSeconds: 30, + }, + readinessProbe: { + httpGet: { + path: params.healthPath, + port: params.healthPort, + }, + initialDelaySeconds: 30, + periodSeconds: 30, + }, + ports: std.map(function(p) + { + containerPort: p, + } + , params.ports), + resources: { + limits: { + cpu: 1, + memory: "400Mi", + }, + requests: { + cpu: "200m", + memory: "100Mi", + }, + }, + volumeMounts: [ + { + mountPath: "/etc/envoy", + name: "shared", + }, + ], + }, // envoyContainer + + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: "envoy", + namespace: params.namespace, + }, + spec: { + replicas: 3, + template: { + metadata: { + labels: { + service: "envoy", + }, + }, + spec: { + serviceAccountName: "envoy", + containers: [ + envoyContainer({ + image: params.envoyImage, + name: "envoy", + // We use the admin port for the health, readiness check because the main port will require a valid JWT. 
+ // healthPath: "/server_info", + healthPath: "/healthz", + healthPort: params.envoyPort, + configPath: "/etc/envoy/envoy-config.json", + baseId: "27000", + ports: [params.envoyPort, params.envoyAdminPort, params.envoyStatsPort], + }), + { + name: "iap", + image: params.ingressSetupImage, + command: [ + "sh", + "/var/envoy-config/configure_envoy_for_iap.sh", + ], + env: [ + { + name: "NAMESPACE", + value: params.namespace, + }, + { + name: "SERVICE", + value: "envoy", + }, + { + name: "ENVOY_ADMIN", + value: "http://localhost:" + params.envoyAdminPort, + }, + { + name: "GOOGLE_APPLICATION_CREDENTIALS", + value: "/var/run/secrets/sa/admin-gcp-sa.json", + }, + ], + volumeMounts: [ + { + mountPath: "/var/envoy-config/", + name: "config-volume", + }, + { + mountPath: "/var/shared/", + name: "shared", + }, + { + name: "sa-key", + readOnly: true, + mountPath: "/var/run/secrets/sa", + }, + ], + }, + ], + restartPolicy: "Always", + volumes: [ + { + configMap: { + name: "envoy-config", + }, + name: "config-volume", + }, + { + emptyDir: { + medium: "Memory", + }, + name: "shared", + }, + { + name: "sa-key", + secret: { + secretName: "admin-gcp-sa", + }, + }, + ], + }, + }, + }, + }, // deploy + deploy:: deploy, + + // Run the process to update the backend service + local backendUpdater = { + apiVersion: "apps/v1", + kind: "StatefulSet", + metadata: { + name: "backend-updater", + namespace: namespace, + labels: { + service: "backend-updater", + }, + }, + spec: { + selector: { + matchLabels: { + service: "backend-updater", + }, + }, + template: { + metadata: { + labels: { + service: "backend-updater", + }, + }, + spec: { + serviceAccountName: "envoy", + containers: [ + { + name: "backend-updater", + image: params.ingressSetupImage, + command: [ + "bash", + "/var/envoy-config/update_backend.sh", + ], + env: [ + { + name: "NAMESPACE", + value: namespace, + }, + { + name: "SERVICE", + value: if params.useIstio then "istio-ingressgateway" else "envoy", + }, + { + name: "GOOGLE_APPLICATION_CREDENTIALS", + value: "/var/run/secrets/sa/admin-gcp-sa.json", + }, + ] + if params.useIstio then [ + { + name: "USE_ISTIO", + value: "true", + }, + ] else [], + volumeMounts: [ + { + mountPath: "/var/envoy-config/", + name: "config-volume", + }, + { + name: "sa-key", + readOnly: true, + mountPath: "/var/run/secrets/sa", + }, + ], + }, + ], + volumes: [ + { + configMap: { + name: "envoy-config", + }, + name: "config-volume", + }, + { + name: "sa-key", + secret: { + secretName: "admin-gcp-sa", + }, + }, + ], + }, + }, + }, + + }, // backendUpdater + backendUpdater:: backendUpdater, + + // Run the process to enable iap + local iapEnabler = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: "iap-enabler", + namespace: namespace, + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + service: "iap-enabler", + }, + }, + spec: { + serviceAccountName: "envoy", + containers: [ + { + name: "iap", + image: params.ingressSetupImage, + command: [ + "bash", + "/var/envoy-config/setup_backend.sh", + ], + env: [ + { + name: "NAMESPACE", + value: namespace, + }, + { + name: "SERVICE", + value: if params.useIstio then "istio-ingressgateway" else "envoy", + }, + { + name: "INGRESS_NAME", + value: params.ingressName, + }, + { + name: "ENVOY_ADMIN", + value: "http://localhost:" + params.envoyAdminPort, + }, + { + name: "GOOGLE_APPLICATION_CREDENTIALS", + value: "/var/run/secrets/sa/admin-gcp-sa.json", + }, + ] + if params.useIstio then [ + { + name: "USE_ISTIO", + value: "true", + }, + ] 
else [], + volumeMounts: [ + { + mountPath: "/var/envoy-config/", + name: "config-volume", + }, + { + name: "sa-key", + readOnly: true, + mountPath: "/var/run/secrets/sa", + }, + ], + }, + ], + restartPolicy: "Always", + volumes: [ + { + configMap: { + name: "envoy-config", + }, + name: "config-volume", + }, + { + name: "sa-key", + secret: { + secretName: "admin-gcp-sa", + }, + }, + ], + }, + }, + }, + }, // iapEnabler + iapEnabler:: iapEnabler, + + local configMap = { + // This is the config for the secondary envoy proxy which does JWT verification + // and actually routes requests to the appropriate backend. + local envoyConfig(params) = { + listeners: [ + { + address: "tcp://0.0.0.0:" + params.envoyPort, + filters: [ + { + type: "read", + name: "http_connection_manager", + config: { + codec_type: "auto", + stat_prefix: "ingress_http", + access_log: [ + { + format: 'ACCESS [%START_TIME%] "%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% "%REQ(X-FORWARDED-FOR)%" "%REQ(USER-AGENT)%" "%REQ(X-REQUEST-ID)%" "%REQ(:AUTHORITY)%" "%UPSTREAM_HOST%"\n', + path: "/dev/fd/1", + }, + ], + route_config: { + virtual_hosts: [ + { + name: "backend", + domains: ["*"], + routes: [ + // First route that matches is picked. + { + timeout_ms: 10000, + path: "/healthz", + prefix_rewrite: "/server_info", + weighted_clusters: { + clusters: [ + + { name: "cluster_healthz", weight: 100.0 }, + + ], + }, + }, + // Provide access to the whoami app skipping JWT verification. + // this is useful for debugging. + { + timeout_ms: 10000, + prefix: "/noiap/whoami", + prefix_rewrite: "/", + weighted_clusters: { + clusters: [ + { + name: "cluster_iap_app", + weight: 100.0, + }, + ], + }, + }, + { + timeout_ms: 10000, + prefix: "/whoami", + prefix_rewrite: "/", + weighted_clusters: { + clusters: [ + { + name: "cluster_iap_app", + weight: 100.0, + }, + ], + }, + }, + // Jupyter uses the prefixes /hub & /user + { + // Jupyter requires the prefix /hub + // Use a 10 minute timeout because downloading + // images for jupyter notebook can take a while + timeout_ms: 600000, + prefix: "/hub", + prefix_rewrite: "/hub", + use_websocket: true, + weighted_clusters: { + clusters: [ + { + name: "cluster_jupyter", + weight: 100.0, + }, + ], + }, + }, + { + // Jupyter requires the prefix /user + // Use a 10 minute timeout because downloading + // images for jupyter notebook can take a while + timeout_ms: 600000, + prefix: "/user", + prefix_rewrite: "/user", + use_websocket: true, + weighted_clusters: { + clusters: [ + { + name: "cluster_jupyter", + weight: 100.0, + }, + ], + }, + }, + // TFJob uses the prefix /tfjobs/ + { + timeout_ms: 10000, + prefix: "/tfjobs", + prefix_rewrite: "/tfjobs", + weighted_clusters: { + clusters: [ + { + name: "cluster_tfjobs", + weight: 100.0, + }, + ], + }, + }, + // Routing with Istio + { + timeout_ms: 10000, + prefix: "/istio", + weighted_clusters: { + clusters: [ + { + name: "cluster_istiogateway", + weight: 100.0, + }, + ], + }, + }, + { + // Route remaining traffic to Ambassador which supports dynamically adding + // routes based on service annotations. 
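+ // The first matching route wins, so this catch-all "/" entry must stay
+ // last in the list.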
+ timeout_ms: 10000, + prefix: "/", + prefix_rewrite: "/", + use_websocket: true, + weighted_clusters: { + clusters: [ + { + name: "cluster_ambassador", + weight: 100.0, + }, + ], + }, + }, + ], + }, + ], + }, + local authFilter = if params.disableJwtChecking then + [] + else [{ + type: "decoder", + name: "jwt-auth", + config: { + jwts: [ + { + issuer: "https://cloud.google.com/iap", + audiences: "{{JWT_AUDIENCE}}", + jwks_uri: "https://www.gstatic.com/iap/verify/public_key-jwk", + jwks_uri_envoy_cluster: "iap_issuer", + jwt_headers: ["x-goog-iap-jwt-assertion"], + }, + ], + bypass_jwt: [ + { + http_method: "GET", + path_exact: "/healthz", + }, + { + http_method: "GET", + path_exact: "/noiap/whoami", + }, + ], + }, + }], + filters: + authFilter + + [ + { + type: "decoder", + name: "router", + config: {}, + }, + ], + }, + }, + ], + }, + ], + admin: { + // We use 0.0.0.0 and not 127.0.0.1 because we want the admin server to be available on all devices + // so that it can be used for health checking. + address: "tcp://0.0.0.0:" + params.envoyAdminPort, + access_log_path: "/tmp/admin_access_log", + }, + cluster_manager: { + clusters: [ + { + name: "cluster_healthz", + connect_timeout_ms: 3000, + type: "strict_dns", + lb_type: "round_robin", + hosts: [ + { + // We just use the admin server for the health check + url: "tcp://127.0.0.1:" + params.envoyAdminPort, + }, + + ], + }, + { + name: "iap_issuer", + connect_timeout_ms: 5000, + type: "strict_dns", + circuit_breakers: { + default: { + max_pending_requests: 10000, + max_requests: 10000, + }, + }, + lb_type: "round_robin", + hosts: [ + { + url: "tcp://www.gstatic.com:80", + }, + ], + }, + { + name: "cluster_iap_app", + connect_timeout_ms: 3000, + type: "strict_dns", + lb_type: "round_robin", + hosts: [ + { + url: "tcp://whoami-app." + params.namespace + ":80", + }, + ], + }, + { + name: "cluster_jupyter", + connect_timeout_ms: 3000, + type: "strict_dns", + lb_type: "round_robin", + hosts: [ + { + url: "tcp://jupyter-lb." + params.namespace + ":80", + }, + + ], + }, + { + name: "cluster_tfjobs", + connect_timeout_ms: 3000, + type: "strict_dns", + lb_type: "round_robin", + hosts: [ + { + url: "tcp://tf-job-dashboard." + params.namespace + ":80", + }, + + ], + }, + // Istio's gateway + { + name: "cluster_istiogateway", + connect_timeout_ms: 3000, + type: "strict_dns", + lb_type: "round_robin", + hosts: [ + { + url: "tcp://istio-ingressgateway.istio-system:80", + }, + ], + }, + { + name: "cluster_ambassador", + connect_timeout_ms: 3000, + type: "strict_dns", + lb_type: "round_robin", + hosts: [ + { + url: "tcp://ambassador." 
+ params.namespace + ":80", + }, + + ], + }, + ], + }, + statsd_udp_ip_address: "127.0.0.1:" + params.envoyStatsPort, + stats_flush_interval_ms: 1000, + }, // envoyConfig + + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: "envoy-config", + namespace: namespace, + }, + data: { + "setup_backend.sh": importstr "setup_backend.sh", + "update_backend.sh": importstr "update_backend.sh", + } + if params.useIstio then { + "jwt-policy-template.yaml": importstr "jwt-policy-template.yaml", + "healthcheck_route.yaml": importstr "healthcheck_route.yaml", + } else { + "envoy-config.json": std.manifestJson(envoyConfig(params)), + "configure_envoy_for_iap.sh": importstr "configure_envoy_for_iap.sh", + }, + }, + configMap:: configMap, + + local whoamiService = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: "whoami", + }, + name: "whoami-app", + namespace: params.namespace, + }, + spec: { + ports: [ + { + port: 80, + targetPort: 8081, + }, + ], + selector: { + app: "whoami", + }, + type: "ClusterIP", + }, + }, // whoamiService + whoamiService:: whoamiService, + + local whoamiApp = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: "whoami-app", + namespace: params.namespace, + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: "whoami", + }, + }, + spec: { + containers: [ + { + env: [ + { + name: "PORT", + value: "8081", + }, + ], + image: params.espSampleAppImage, + name: "app", + ports: [ + { + containerPort: 8081, + }, + ], + readinessProbe: { + failureThreshold: 2, + httpGet: { + path: "/healthz", + port: 8081, + scheme: "HTTP", + }, + periodSeconds: 10, + successThreshold: 1, + timeoutSeconds: 5, + }, + }, + ], + }, + }, + }, + }, + whoamiApp:: whoamiApp, + + local backendConfig = { + apiVersion: "cloud.google.com/v1beta1", + kind: "BackendConfig", + metadata: { + name: "iap-backendconfig", + namespace: namespace, + }, + spec: { + iap: { + enabled: true, + oauthclientCredentials: { + secretName: params.oauthSecretName, + }, + }, + }, + }, // backendConfig + backendConfig:: backendConfig, + + // TODO(danisla): Remove afer https://github.com/kubernetes/ingress-gce/pull/388 is resolved per #1327. 
+ local ingressBootstrapConfigMap = { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: "ingress-bootstrap-config", + namespace: namespace, + }, + data: { + "ingress_bootstrap.sh": importstr "ingress_bootstrap.sh", + }, + }, + ingressBootstrapConfigMap:: ingressBootstrapConfigMap, + + local ingressBootstrapJob = { + apiVersion: "batch/v1", + kind: "Job", + metadata: { + name: "ingress-bootstrap", + namespace: namespace, + }, + spec: { + template: { + spec: { + restartPolicy: "OnFailure", + serviceAccountName: "envoy", + containers: [ + { + name: "bootstrap", + image: params.ingressSetupImage, + command: ["/var/ingress-config/ingress_bootstrap.sh"], + env: [ + { + name: "NAMESPACE", + value: namespace, + }, + { + name: "TLS_SECRET_NAME", + value: params.secretName, + }, + { + name: "TLS_HOST_NAME", + value: params.hostname, + }, + { + name: "INGRESS_NAME", + value: params.ingressName, + }, + ], + volumeMounts: [ + { + mountPath: "/var/ingress-config/", + name: "ingress-config", + }, + ], + }, + ], + volumes: [ + { + configMap: { + name: "ingress-bootstrap-config", + // TODO(danisla): replace with std.parseOctal("0755") after upgrading to ksonnet 0.12 + defaultMode: 493, + }, + name: "ingress-config", + }, + ], + }, + }, + }, + }, // ingressBootstrapJob + ingressBootstrapJob:: ingressBootstrapJob, + + local ingress = { + apiVersion: "extensions/v1beta1", + kind: "Ingress", + metadata: { + name: params.ingressName, + namespace: namespace, + annotations: { + "kubernetes.io/tls-acme": "true", + "ingress.kubernetes.io/ssl-redirect": "true", + "kubernetes.io/ingress.global-static-ip-name": params.ipName, + "certmanager.k8s.io/issuer": params.issuer, + }, + }, + spec: { + rules: [ + { + [if params.hostname != "null" then "host"]: params.hostname, + http: { + paths: [ + { + backend: { + // Due to https://github.com/kubernetes/contrib/blob/master/ingress/controllers/gce/examples/health_checks/README.md#limitations + // Keep the servicePort the same as the port we are targeting on the backend, so that servicePort + // matches targetPort for the purpose of health checking.
+ serviceName: if params.useIstio then "istio-ingressgateway" else "envoy", + servicePort: if params.useIstio then 80 else params.envoyPort, + }, + path: "/*", + }, + ], + }, + }, + ], + }, + }, // iapIngress + ingress:: ingress, + + local certificate = { + apiVersion: "certmanager.k8s.io/v1alpha1", + kind: "Certificate", + metadata: { + name: params.secretName, + namespace: namespace, + }, + + spec: { + secretName: params.secretName, + issuerRef: { + name: params.issuer, + kind: "ClusterIssuer", + }, + commonName: params.hostname, + dnsNames: [ + params.hostname, + ], + acme: { + config: [ + { + http01: { + ingress: params.ingressName, + }, + domains: [ + params.hostname, + ], + }, + ], + }, + }, + }, // certificate + certificate:: certificate, + + local cloudEndpoint = { + local makeEndpointParams(str) = { + local toks = std.split(str, "."), + result:: { + name: toks[0], + project: toks[2], + }, + }.result, + local endpointParams = makeEndpointParams(params.hostname), + apiVersion: "ctl.isla.solutions/v1", + kind: "CloudEndpoint", + metadata: { + name: endpointParams.name, + namespace: namespace, + }, + spec: { + project: endpointParams.project, + targetIngress: { + name: params.ingressName, + namespace: namespace, + }, + }, + }, // cloudEndpoint + cloudEndpoint:: cloudEndpoint, + + parts:: self, + all:: [ + self.initServiceAccount, + self.initClusterRoleBinding, + self.initClusterRole, + self.iapEnabler, + self.backendUpdater, + self.configMap, + self.whoamiService, + self.whoamiApp, + self.backendConfig, + self.ingressBootstrapConfigMap, + self.ingressBootstrapJob, + self.ingress, + ] + ( + if params.privateGKECluster == "false" then [ + self.certificate, + ] else [] + ) + ( + if isCloudEndpoint(params.hostname) then [ + self.cloudEndpoint, + ] else [] + ) + ( + if !params.useIstio then [ + self.service, + self.deploy, + ] else [] + ), + + list(obj=self.all):: k.core.v1.list.new(obj,), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/ingress_bootstrap.sh b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/ingress_bootstrap.sh new file mode 100644 index 00000000..46a98359 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/ingress_bootstrap.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +set -x +set -e + +# This is a workaround until this is resolved: https://github.com/kubernetes/ingress-gce/pull/388 +# The long-term solution is to use a managed SSL certificate on GKE once the feature is GA. + +# The ingress is initially created without a tls spec. +# Wait until cert-manager generates the certificate using the http-01 challenge on the GCLB ingress. +# After the certificate is obtained, patch the ingress with the tls spec to enable SSL on the GCLB. + +# Wait for certificate. +until kubectl -n ${NAMESPACE} get secret ${TLS_SECRET_NAME} 2>/dev/null; do + echo "Waiting for certificate..." 
+ sleep 2 +done + +kubectl -n ${NAMESPACE} patch ingress ${INGRESS_NAME} --type='json' -p '[{"op": "add", "path": "/spec/tls", "value": [{"secretName": "'${TLS_SECRET_NAME}'", "hosts":["'${TLS_HOST_NAME}'"]}]}]' + +echo "Done" diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/jwt-policy-template.yaml b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/jwt-policy-template.yaml new file mode 100644 index 00000000..44e81ec9 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/jwt-policy-template.yaml @@ -0,0 +1,23 @@ +apiVersion: authentication.istio.io/v1alpha1 +kind: Policy +metadata: + name: ingress-jwt + namespace: istio-system +spec: + targets: + - name: istio-ingressgateway + ports: + - number: 80 + origins: + - jwt: + issuer: https://cloud.google.com/iap + jwksUri: https://www.gstatic.com/iap/verify/public_key-jwk + audiences: + - {{JWT_AUDIENCE}} + jwtHeaders: + - x-goog-iap-jwt-assertion + trigger_rules: + - excluded_paths: + - exact: /healthz + - prefix: /.well-known/acme-challenge + principalBinding: USE_ORIGIN diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/metric-collector.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/metric-collector.libsonnet new file mode 100644 index 00000000..87fcc2ec --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/metric-collector.libsonnet @@ -0,0 +1,189 @@ +{ + local k = import "k.libsonnet", + new(_env, _params):: { + local params = _params + _env, + + local metricServiceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + labels: { + app: "metric-collector", + }, + name: "metric-collector", + namespace: params.namespace, + }, + }, + metricServiceAccount:: metricServiceAccount, + + local metricRole = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + labels: { + app: "metric-collector", + }, + name: "metric-collector", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "services", + "events", + ], + verbs: [ + "*", + ], + }, + ], + }, + metricRole:: metricRole, + + local metricRoleBinding = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + labels: { + app: "metric-collector", + }, + name: "metric-collector", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "metric-collector", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "metric-collector", + namespace: params.namespace, + }, + ], + }, + metricRoleBinding:: metricRoleBinding, + + local service = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + service: "metric-collector", + }, + name: "metric-collector", + namespace: params.namespace, + annotations: { + "prometheus.io/scrape": "true", + "prometheus.io/path": "/", + "prometheus.io/port": "8000", + }, + }, + spec: { + ports: [ + { + name: "metric-collector", + port: 8000, + targetPort: 8000, + protocol: "TCP", + }, + ], + selector: { + app: "metric-collector", + }, + type: "ClusterIP", + }, + }, + service:: service, + + local deploy = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: "metric-collector", + }, + name: "metric-collector", + namespace: params.namespace, + }, + spec: { + replicas: 1, + selector: { + matchLabels: { + app: "metric-collector", + }, + }, + template: { + metadata: { + labels: { + app: "metric-collector", + }, + namespace: params.namespace, + }, + 
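+          // The exporter container polls params.targetUrl with the IAP OAuth
+          // client id and serves the result as Prometheus metrics on port 8000
+          // (scraped via the service annotations above).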
spec: { + containers: [ + { + env: [ + { + name: "GOOGLE_APPLICATION_CREDENTIALS", + value: "/var/run/secrets/sa/admin-gcp-sa.json", + }, + { + name: "CLIENT_ID", + valueFrom: { + secretKeyRef: { + name: params.oauthSecretName, + key: "client_id", + }, + }, + }, + ], + command: [ + "python3", + "/opt/kubeflow-readiness.py", + ], + args: [ + "--url=" + params.targetUrl, + "--client_id=$(CLIENT_ID)", + ], + volumeMounts: [ + { + name: "sa-key", + readOnly: true, + mountPath: "/var/run/secrets/sa", + }, + ], + image: params.metricImage, + name: "exporter", + }, + ], + serviceAccountName: "metric-collector", + restartPolicy: "Always", + volumes: [ + { + name: "sa-key", + secret: { + secretName: "admin-gcp-sa", + }, + }, + ], + }, + }, + }, + }, // deploy + deploy:: deploy, + + parts:: self, + all:: [ + self.metricServiceAccount, + self.metricRole, + self.metricRoleBinding, + self.service, + self.deploy, + ], + + list(obj=self.all):: k.core.v1.list.new(obj,), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/parts.yaml b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/parts.yaml new file mode 100644 index 00000000..e9f36029 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/parts.yaml @@ -0,0 +1,35 @@ +{ + "name": "gcp", + "apiVersion": "0.0.1", + "kind": "ksonnet.io/parts", + "description": "Core components of Kubeflow.\n", + "author": "kubeflow team ", + "contributors": [ + { + "name": "Jeremy Lewi", + "email": "jlewi@google.com" + } + ], + "repository": { + "type": "git", + "url": "https://github.com/kubeflow/kubeflow" + }, + "bugs": { + "url": "https://github.com/kubeflow/kubeflow/issues" + }, + "keywords": [ + "kubeflow", + "tensorflow" + ], + "quickStart": { + "prototype": "io.ksonnet.pkg.kubeflow", + "componentName": "gcp", + "flags": { + "name": "gcp", + "namespace": "default", + "disks": "" + }, + "comment": "GCP specific Kubeflow components." 
+ }, + "license": "Apache 2.0" +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prometheus.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prometheus.libsonnet new file mode 100644 index 00000000..c5d1c86a --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prometheus.libsonnet @@ -0,0 +1,221 @@ +{ + local k = import "k.libsonnet", + new(_env, _params):: { + local params = _params + _env, + + local namespace = { + apiVersion: "v1", + kind: "Namespace", + metadata: { + name: "stackdriver", + }, + }, + namespace:: namespace, + + local clusterRole = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + name: "prometheus", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "nodes", + "nodes/proxy", + "services", + "endpoints", + "pods", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + { + apiGroups: [ + "extensions", + ], + resources: [ + "ingresses", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + { + nonResourceURLs: [ + "/metrics", + ], + verbs: [ + "get", + ], + }, + ], + }, + clusterRole:: clusterRole, + + local serviceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "prometheus", + namespace: "stackdriver", + }, + }, + serviceAccount:: serviceAccount, + + local clusterRoleBinding = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + name: "prometheus-stackdriver", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "prometheus", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "prometheus", + namespace: "stackdriver", + }, + ], + }, + clusterRoleBinding:: clusterRoleBinding, + + local service = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + name: "prometheus", + }, + name: "prometheus", + namespace: "stackdriver", + }, + spec: { + ports: [ + { + name: "prometheus", + port: 9090, + protocol: "TCP", + }, + ], + selector: { + app: "prometheus", + }, + type: "ClusterIP", + }, + }, + service:: service, + + local configMap = { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: "prometheus", + namespace: "stackdriver", + }, + data: { + "prometheus.yml": (importstr "prometheus.yml") % { + "project-id-placeholder": params.projectId, + "cluster-name-placeholder": params.clusterName, + "zone-placeholder": params.zone, + }, + }, + }, + configMap:: configMap, + + local deployment = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: "prometheus", + namespace: "stackdriver", + }, + spec: { + replicas: 1, + selector: { + matchLabels: { + app: "prometheus", + }, + }, + template: { + metadata: { + annotations: { + "prometheus.io/scrape": "true", + }, + labels: { + app: "prometheus", + }, + name: "prometheus", + namespace: "stackdriver", + }, + spec: { + containers: [ + { + image: "gcr.io/stackdriver-prometheus/stackdriver-prometheus:release-0.4.2", + imagePullPolicy: "Always", + name: "prometheus", + ports: [ + { + containerPort: 9090, + name: "web", + }, + ], + resources: { + limits: { + cpu: "400m", + memory: "1000Mi", + }, + requests: { + cpu: "20m", + memory: "50Mi", + }, + }, + volumeMounts: [ + { + mountPath: "/etc/prometheus", + name: "config-volume", + }, + ], + }, + ], + serviceAccountName: "prometheus", + volumes: [ + { + configMap: { + name: "prometheus", + }, + name: "config-volume", + }, + ], + }, + }, + }, + }, + deployment:: deployment, + + parts:: self, 
+ all:: [ + self.namespace, + self.clusterRole, + self.serviceAccount, + self.clusterRoleBinding, + self.service, + self.configMap, + self.deployment, + ], + + list(obj=self.all):: k.core.v1.list.new(obj,), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prometheus.yml b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prometheus.yml new file mode 100644 index 00000000..15f7530c --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prometheus.yml @@ -0,0 +1,140 @@ +# Source: https://github.com/stackdriver/prometheus/blob/master/documentation/examples/prometheus.yml +global: + external_labels: + _stackdriver_project_id: %(project-id-placeholder)s + _kubernetes_cluster_name: %(cluster-name-placeholder)s + _kubernetes_location: %(zone-placeholder)s + +# Scrape config for nodes (kubelet). +# +# Rather than connecting directly to the node, the scrape is proxied though the +# Kubernetes apiserver. This means it will work if Prometheus is running out of +# cluster, or can't connect to nodes for some other reason (e.g. because of +# firewalling). +scrape_configs: +- job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics + +# Example scrape config for pods +# +# The relabeling allows the actual pod scrape endpoint to be configured via the +# following annotations: +# +# * `prometheus.io/scrape`: Only scrape pods that have a value of `true` +# * `prometheus.io/path`: If the metrics path is not `/metrics` override this. +# * `prometheus.io/port`: Scrape the pod on the indicated port instead of the +# pod's declared ports (default is a port-free target if none are declared). +- job_name: 'kubernetes-pods-containers' + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + +# Scrape config for service endpoints. +# +# The relabeling allows the actual service scrape endpoint to be configured +# via the following annotations: +# +# * `prometheus.io/scrape`: Only scrape services that have a value of `true` +# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need +# to set this to `https` & most likely set the `tls_config` of the scrape config. 
+# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+# * `prometheus.io/port`: If the metrics are exposed on a different port to the
+#   service then set this appropriately.
+- job_name: 'kubernetes-service-endpoints'
+
+  kubernetes_sd_configs:
+  - role: endpoints
+
+  relabel_configs:
+  - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+    action: keep
+    regex: true
+  - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+    action: replace
+    target_label: __scheme__
+    regex: (https?)
+  - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+    action: replace
+    target_label: __metrics_path__
+    regex: (.+)
+  - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+    action: replace
+    target_label: __address__
+    regex: ([^:]+)(?::\d+)?;(\d+)
+    replacement: $1:$2
+
+
+# Scrape config for k8s services
+- job_name: 'kubernetes-services'
+
+  kubernetes_sd_configs:
+  - role: service
+
+  relabel_configs:
+  - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+    action: keep
+    regex: true
+  - action: labelmap
+    regex: __meta_kubernetes_service_label_(.+)
+  - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+    action: replace
+    target_label: __metrics_path__
+  - source_labels: [__address__,__meta_kubernetes_service_annotation_prometheus_io_port]
+    action: replace
+    target_label: __address__
+    regex: (.+)(?::\d+);(\d+)
+    replacement: $1:$2
+
+remote_write:
+- url: "https://monitoring.googleapis.com:443/"
+  queue_config:
+    # Capacity should be 2*max_samples_per_send.
+    capacity: 2000
+    max_samples_per_send: 1000
+    max_shards: 10000
+  write_relabel_configs:
+  # These labels are generally redundant with the Stackdriver monitored resource labels.
+  - source_labels: [job]
+    target_label: job
+    replacement: ""
+  - source_labels: [instance]
+    target_label: instance
+    replacement: ""
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/basic-auth-ingress.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/basic-auth-ingress.jsonnet
new file mode 100644
index 00000000..3ad2e5e6
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/basic-auth-ingress.jsonnet
@@ -0,0 +1,15 @@
+// @apiVersion 0.1
+// @name io.ksonnet.pkg.basic-auth-ingress
+// @description Provides ingress prototypes for setting up basic auth on GKE.
+// @shortDescription Ingress for basic auth on GKE.
+// @param name string Name for the component
+// @param ipName string The name of the global ip address to use.
+// @optionalParam secretName string envoy-ingress-tls The name of the secret containing the SSL certificates.
+// @optionalParam hostname string null The hostname associated with this ingress. Eg: mykubeflow.example.com
+// @optionalParam issuer string letsencrypt-prod The cert-manager issuer name.
+// @optionalParam ingressSetupImage string gcr.io/kubeflow-images-public/ingress-setup:latest The image for setting up ingress.
+// @optionalParam privateGKECluster string false Whether the k8s cluster is a private GKE cluster
+
+local basicauth = import "kubeflow/gcp/basic-auth-ingress.libsonnet";
+local instance = basicauth.new(env, params);
+instance.list(instance.all)
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/cert-manager.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/cert-manager.jsonnet
new file mode 100644
index 00000000..72a12cbe
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/cert-manager.jsonnet
@@ -0,0 +1,12 @@
+// @apiVersion 0.1
+// @name io.ksonnet.pkg.cert-manager
+// @description Provides cert-manager prototypes for generating SSL certificates.
+// @shortDescription Certificate generation on GKE.
+// @param name string Name for the component
+// @param acmeEmail string The Let's Encrypt account email address
+// @optionalParam acmeUrl string https://acme-v02.api.letsencrypt.org/directory The ACME server URL; set to https://acme-staging-v02.api.letsencrypt.org/directory for the staging API.
+// @optionalParam certManagerImage string quay.io/jetstack/cert-manager-controller:v0.4.0 The cert-manager controller image.
+
+local certManager = import "kubeflow/gcp/cert-manager.libsonnet";
+local instance = certManager.new(env, params);
+instance.list(instance.all)
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/cloud-endpoints.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/cloud-endpoints.jsonnet
new file mode 100644
index 00000000..b5d5e950
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/cloud-endpoints.jsonnet
@@ -0,0 +1,11 @@
+// @apiVersion 0.1
+// @name io.ksonnet.pkg.cloud-endpoints
+// @description Provides cloud-endpoints prototypes for creating Cloud Endpoints services and DNS records.
+// @shortDescription Cloud Endpoint domain creation.
+// @param name string Name for the component
+// @optionalParam secretName string admin-gcp-sa Name of the secret containing the JSON service account key.
+// @optionalParam secretKey string admin-gcp-sa.json Name of the key in the secret containing the JSON service account key.
+ +local cloudEndpoints = import "kubeflow/gcp/cloud-endpoints.libsonnet"; +local instance = cloudEndpoints.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/google-cloud-filestore-pv.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/google-cloud-filestore-pv.jsonnet new file mode 100644 index 00000000..0b41c765 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/google-cloud-filestore-pv.jsonnet @@ -0,0 +1,13 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.google-cloud-filestore-pv +// @description Creates PV and PVC based on Google Cloud Filestore NFS +// @shortDescription Creates PV and PVC based on Google Cloud Filestore NFS +// @param name string Name for the component +// @optionalParam storageCapacity string 1T Storage Capacity +// @optionalParam path string /kubeflow Path in NFS server +// @param serverIP string Google Cloud Filestore Server IP +// @optionalParam image string gcr.io/kubeflow-images-public/ubuntu:18.04 The docker image to use + +local google_cloud_file_store_pv = import "kubeflow/gcp/google-cloud-filestore-pv.libsonnet"; +local instance = google_cloud_file_store_pv.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/gpu-driver.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/gpu-driver.jsonnet new file mode 100644 index 00000000..5d9caa4c --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/gpu-driver.jsonnet @@ -0,0 +1,9 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.gpu-driver +// @description Provides gpu-driver prototype in kubeflow gcp package +// @shortDescription Gpu Driver. +// @param name string Name for the component + +local gpuDriver = import "kubeflow/gcp/gpu-driver.libsonnet"; +local instance = gpuDriver.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/iap-ingress.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/iap-ingress.jsonnet new file mode 100644 index 00000000..c025b06d --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/iap-ingress.jsonnet @@ -0,0 +1,21 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.iap-ingress +// @description Provides ingress prototypes for setting up IAP on GKE. +// @shortDescription Ingress for IAP on GKE. +// @param name string Name for the component +// @param ipName string The name of the global ip address to use. +// @optionalParam secretName string envoy-ingress-tls The name of the secret containing the SSL certificates. +// @optionalParam hostname string null The hostname associated with this ingress. Eg: mykubeflow.example.com +// @optionalParam issuer string letsencrypt-prod The cert-manager issuer name. +// @optionalParam envoyImage string gcr.io/kubeflow-images-public/envoy:v20180309-0fb4886b463698702b6a08955045731903a18738 The image for envoy. +// @optionalParam ingressSetupImage string gcr.io/kubeflow-images-public/ingress-setup:latest The image for setting up ingress. +// @optionalParam disableJwtChecking string false Disable JWT checking. +// @optionalParam oauthSecretName string kubeflow-oauth The name of the secret containing the OAuth client_id and client_secret. 
+// @optionalParam privateGKECluster string false Whether the k8s cluster is a private GKE cluster
+// @optionalParam useIstio string false Whether to use Istio instead of envoy for the ingress
+// @optionalParam istioNamespace string istio-system The namespace where Istio is installed
+// @optionalParam espSampleAppImage string gcr.io/cloud-solutions-group/esp-sample-app:1.0.0 The sample app used with IAP
+
+local iap = import "kubeflow/gcp/iap.libsonnet";
+local instance = iap.new(env, params);
+instance.list(instance.all)
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/metric-collector.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/metric-collector.jsonnet
new file mode 100644
index 00000000..52479e6c
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/metric-collector.jsonnet
@@ -0,0 +1,13 @@
+// @apiVersion 0.1
+// @name io.ksonnet.pkg.metric-collector
+// @description Provides metric-collector prototypes for monitoring kubeflow availability on GCP.
+// @shortDescription Service monitor for kubeflow on GCP.
+// @param name string Name for the component
+// @param targetUrl string HTTPS URL of the kubeflow service on GCP; the target of monitoring.
+// @optionalParam namespace string null Namespace to use for the components. It is automatically inherited from the environment if not set.
+// @optionalParam metricImage string gcr.io/kubeflow-images-public/metric-collector:latest Image that runs the metric exporter for kubeflow availability.
+// @optionalParam oauthSecretName string kubeflow-oauth The name of the secret containing the OAuth client_id and client_secret.
+
+local metricCollector = import "kubeflow/gcp/metric-collector.libsonnet";
+local instance = metricCollector.new(env, params);
+instance.list(instance.all)
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/prometheus.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/prometheus.jsonnet
new file mode 100644
index 00000000..54fb37fb
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/prometheus.jsonnet
@@ -0,0 +1,12 @@
+// @apiVersion 0.1
+// @name io.ksonnet.pkg.prometheus
+// @description Provides a prometheus prototype in the kubeflow gcp package.
+// @shortDescription Prometheus Service.
+// @param name string Name for the component
+// @param projectId string GCP project id.
+// @param clusterName string GKE cluster name.
+// @param zone string GKE cluster zone.
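+//
+// Hypothetical usage (component name and values are placeholders):
+//   ks generate prometheus prometheus --projectId=my-project --clusterName=my-cluster --zone=us-central1-a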
+
+local prometheus = import "kubeflow/gcp/prometheus.libsonnet";
+local instance = prometheus.new(env, params);
+instance.list(instance.all)
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/webhook.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/webhook.jsonnet
new file mode 100644
index 00000000..601715fe
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/prototypes/webhook.jsonnet
@@ -0,0 +1,11 @@
+// @apiVersion 0.1
+// @name io.ksonnet.pkg.gcp-credentials-admission-webhook
+// @description This prototype creates an admission controller which injects credentials into pods
+// @shortDescription This prototype creates an admission controller which injects credentials into pods
+// @param name string Name to give to each of the components
+// @optionalParam image string gcr.io/kubeflow-images-public/gcp-admission-webhook:v20190401-v0.4.0-rc.1-309-g4014fa2e-dirty-be6212 The image for the webhook.
+// @optionalParam webhookSetupImage string gcr.io/kubeflow-images-public/ingress-setup:latest The image for setting up ingress.
+
+local webhook = import "kubeflow/gcp/webhook.libsonnet";
+local instance = webhook.new(env, params);
+instance.list(instance.all)
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/setup_backend.sh b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/setup_backend.sh
new file mode 100644
index 00000000..6987f891
--- /dev/null
+++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/setup_backend.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+#
+# A simple shell script to configure the backend timeouts and health checks using gcloud.
+[ -z ${NAMESPACE} ] && echo Error NAMESPACE must be set && exit 1
+[ -z ${SERVICE} ] && echo Error SERVICE must be set && exit 1
+[ -z ${INGRESS_NAME} ] && echo Error INGRESS_NAME must be set && exit 1
+
+PROJECT=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/project-id)
+if [ -z ${PROJECT} ]; then
+  echo Error unable to fetch PROJECT from compute metadata
+  exit 1
+fi
+
+PROJECT_NUM=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/numeric-project-id)
+if [ -z ${PROJECT_NUM} ]; then
+  echo Error unable to fetch PROJECT_NUM from compute metadata
+  exit 1
+fi
+
+# Activate the service account
+gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS}
+# Print out the config for debugging
+gcloud config list
+
+NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}')
+echo "node port is ${NODE_PORT}"
+
+while [[ -z ${BACKEND_NAME} ]]; do
+  BACKENDS=$(kubectl --namespace=${NAMESPACE} get ingress ${INGRESS_NAME} -o jsonpath='{.metadata.annotations.ingress\.kubernetes\.io/backends}')
+  echo "fetching backends info with ${INGRESS_NAME}: ${BACKENDS}"
+  BACKEND_NAME=$(echo $BACKENDS | grep -o "k8s-be-${NODE_PORT}--[0-9a-z]\+")
+  echo "backend name is ${BACKEND_NAME}"
+  sleep 2
+done
+
+while [[ -z ${BACKEND_ID} ]]; do
+  BACKEND_ID=$(gcloud compute --project=${PROJECT} backend-services list --filter=name~${BACKEND_NAME} --format='value(id)')
+  echo "Waiting for backend id PROJECT=${PROJECT} NAMESPACE=${NAMESPACE} SERVICE=${SERVICE} filter=name~${BACKEND_NAME}"
+  sleep 2
+done
+echo BACKEND_ID=${BACKEND_ID}
+
+JWT_AUDIENCE="/projects/${PROJECT_NUM}/global/backendServices/${BACKEND_ID}"
+
+# Save values for the health check below.
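+# checkBackend() below re-reads healthz.env to detect drift in the node port or
+# backend id, either of which would change the JWT audience.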
+mkdir -p /var/shared +echo "JWT_AUDIENCE=${JWT_AUDIENCE}" > /var/shared/healthz.env +echo "NODE_PORT=${NODE_PORT}" >> /var/shared/healthz.env +echo "BACKEND_ID=${BACKEND_ID}" >> /var/shared/healthz.env + +if [[ -z ${USE_ISTIO} ]]; then + # TODO(https://github.com/kubeflow/kubeflow/issues/942): We should publish the modified envoy + # config as a config map and use that in the envoy sidecars. + kubectl get configmap -n ${NAMESPACE} envoy-config -o jsonpath='{.data.envoy-config\.json}' | + sed -e "s|{{JWT_AUDIENCE}}|${JWT_AUDIENCE}|g" > /var/shared/envoy-config.json +else + # Apply the jwt validation policy + cat /var/envoy-config/jwt-policy-template.yaml | sed -e "s|{{JWT_AUDIENCE}}|${JWT_AUDIENCE}|g" > /var/shared/jwt-policy.yaml + kubectl apply -f /var/shared/jwt-policy.yaml +fi + +echo "Clearing lock on service annotation" +kubectl patch svc "${SERVICE}" -p "{\"metadata\": { \"annotations\": {\"backendlock\": \"\" }}}" + +checkBackend() { + # created by init container. + . /var/shared/healthz.env + + # If node port or backend id change, so does the JWT audience. + CURR_NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}') + read -ra toks <<<"$(gcloud compute --project=${PROJECT} backend-services list --filter=name~k8s-be-${CURR_NODE_PORT}- --format='value(id,timeoutSec)')" + CURR_BACKEND_ID="${toks[0]}" + CURR_BACKEND_TIMEOUT="${toks[1]}" + [[ "$BACKEND_ID" == "$CURR_BACKEND_ID" && "${CURR_BACKEND_TIMEOUT}" -eq 3600 ]] +} + +# Verify configuration every 10 seconds. +while true; do + if ! checkBackend; then + echo "$(date) WARN: Backend check failed, restarting container." + exit 1 + fi + sleep 10 +done diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/cert-manager_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/cert-manager_test.jsonnet new file mode 100644 index 00000000..fe0b4b67 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/cert-manager_test.jsonnet @@ -0,0 +1,219 @@ +local testSuite = import "kubeflow/common/testsuite.libsonnet"; +local certManager = import "kubeflow/gcp/cert-manager.libsonnet"; + +local params = { + name: "cert-manager", + acmeEmail: "joe@acme.com", + acmeUrl: "https://acme-v02.api.letsencrypt.org/directory", + certManagerImage: "quay.io/jetstack/cert-manager-controller:v0.4.0", +}; +local env = { + namespace: "kf-001", +}; + +local instance = certManager.new(env, params); + +local testCases = [ + { + actual: instance.parts.certificateCRD, + expected: { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "certificates.certmanager.k8s.io", + }, + spec: { + group: "certmanager.k8s.io", + names: { + kind: "Certificate", + plural: "certificates", + }, + scope: "Namespaced", + version: "v1alpha1", + }, + }, + }, + { + actual: instance.parts.clusterIssuerCRD, + expected: { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "clusterissuers.certmanager.k8s.io", + }, + spec: { + group: "certmanager.k8s.io", + names: { + kind: "ClusterIssuer", + plural: "clusterissuers", + }, + scope: "Cluster", + version: "v1alpha1", + }, + }, + }, + { + actual: instance.parts.issuerCRD, + expected: { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "issuers.certmanager.k8s.io", + }, + spec: { + group: "certmanager.k8s.io", + names: { + kind: "Issuer", + plural: "issuers", + }, 
+ scope: "Namespaced", + version: "v1alpha1", + }, + }, + }, + { + actual: instance.parts.serviceAccount, + expected: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "cert-manager", + namespace: "kf-001", + }, + }, + }, + { + actual: instance.parts.clusterRole, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + name: "cert-manager", + }, + rules: [ + { + apiGroups: [ + "certmanager.k8s.io", + ], + resources: [ + "certificates", + "issuers", + "clusterissuers", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "", + ], + resources: [ + "secrets", + "events", + "endpoints", + "services", + "pods", + "configmaps", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "extensions", + ], + resources: [ + "ingresses", + ], + verbs: [ + "*", + ], + }, + ], + }, + }, + { + actual: instance.parts.clusterRoleBinding, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + name: "cert-manager", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "cert-manager", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "cert-manager", + namespace: "kf-001", + }, + ], + }, + }, + { + actual: instance.parts.deploy, + expected: { + apiVersion: "apps/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: "cert-manager", + }, + name: "cert-manager", + namespace: "kf-001", + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: "cert-manager", + }, + }, + spec: { + containers: [ + { + args: [ + "--cluster-resource-namespace=kf-001", + "--leader-election-namespace=kf-001", + ], + image: "quay.io/jetstack/cert-manager-controller:v0.4.0", + imagePullPolicy: "IfNotPresent", + name: "cert-manager", + }, + ], + serviceAccountName: "cert-manager", + }, + }, + }, + }, + }, + { + actual: instance.parts.issuerLEProd, + expected: { + apiVersion: "certmanager.k8s.io/v1alpha1", + kind: "ClusterIssuer", + metadata: { + name: "letsencrypt-prod", + }, + spec: { + acme: { + email: "joe@acme.com", + http01: {}, + privateKeySecretRef: { + name: "letsencrypt-prod-secret", + }, + server: "https://acme-v02.api.letsencrypt.org/directory", + }, + }, + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/cloud-endpoints_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/cloud-endpoints_test.jsonnet new file mode 100644 index 00000000..4162ae65 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/cloud-endpoints_test.jsonnet @@ -0,0 +1,232 @@ +local testSuite = import "kubeflow/common/testsuite.libsonnet"; +local cloudEndpoints = import "kubeflow/gcp/cloud-endpoints.libsonnet"; + +local params = { + name: "cloud-endpoints", + secretName: "admin-gcp-sa", + secretKey: "admin-gcp-sa.json", +}; +local env = { + namespace: "kf-001", +}; + +local instance = cloudEndpoints.new(env, params); + +local testCases = [ + { + actual: instance.parts.endpointsCRD, + expected: { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "cloudendpoints.ctl.isla.solutions", + }, + spec: { + group: "ctl.isla.solutions", + names: { + kind: "CloudEndpoint", + plural: "cloudendpoints", + shortNames: [ + "cloudep", + "ce", + ], + singular: "cloudendpoint", + }, + scope: "Namespaced", + version: "v1", + }, + }, + }, + { + actual: instance.parts.endpointsClusterRole, + expected: { + 
apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + name: "cloud-endpoints-controller", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "services", + "configmaps", + ], + verbs: [ + "get", + "list", + ], + }, + { + apiGroups: [ + "extensions", + ], + resources: [ + "ingresses", + ], + verbs: [ + "get", + "list", + ], + }, + ], + }, + }, + { + actual: instance.parts.endpointsClusterRoleBinding, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + name: "cloud-endpoints-controller", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "cloud-endpoints-controller", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "cloud-endpoints-controller", + namespace: "kf-001", + }, + ], + }, + }, + { + actual: instance.parts.endpointsService, + expected: { + apiVersion: "v1", + kind: "Service", + metadata: { + name: "cloud-endpoints-controller", + namespace: "kf-001", + }, + spec: { + ports: [ + { + name: "http", + port: 80, + }, + ], + selector: { + app: "cloud-endpoints-controller", + }, + type: "ClusterIP", + }, + }, + }, + { + actual: instance.parts.endpointsServiceAccount, + expected: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "cloud-endpoints-controller", + namespace: "kf-001", + }, + }, + }, + { + actual: instance.parts.endpointsDeploy, + expected: { + apiVersion: "apps/v1beta1", + kind: "Deployment", + metadata: { + name: "cloud-endpoints-controller", + namespace: "kf-001", + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: "cloud-endpoints-controller", + }, + }, + spec: { + containers: [ + { + env: [ + { + name: "GOOGLE_APPLICATION_CREDENTIALS", + value: "/var/run/secrets/sa/admin-gcp-sa.json", + }, + ], + image: "gcr.io/cloud-solutions-group/cloud-endpoints-controller:0.2.1", + imagePullPolicy: "Always", + name: "cloud-endpoints-controller", + readinessProbe: { + failureThreshold: 2, + httpGet: { + path: "/healthz", + port: 80, + scheme: "HTTP", + }, + periodSeconds: 5, + successThreshold: 1, + timeoutSeconds: 5, + }, + volumeMounts: [ + { + mountPath: "/var/run/secrets/sa", + name: "sa-key", + readOnly: true, + }, + ], + }, + ], + serviceAccountName: "cloud-endpoints-controller", + terminationGracePeriodSeconds: 5, + volumes: [ + { + name: "sa-key", + secret: { + secretName: "admin-gcp-sa", + }, + }, + ], + }, + }, + }, + }, + }, + { + actual: instance.parts.endpointsCompositeController, + expected: { + apiVersion: "metacontroller.k8s.io/v1alpha1", + kind: "CompositeController", + metadata: { + name: "cloud-endpoints-controller", + }, + spec: { + childResources: [], + clientConfig: { + service: { + caBundle: "...", + name: "cloud-endpoints-controller", + namespace: "kf-001", + }, + }, + generateSelector: true, + resyncPeriodSeconds: 2, + hooks: { + sync: { + webhook: { + url: "http://cloud-endpoints-controller.kf-001/sync", + }, + }, + }, + parentResource: { + apiVersion: "ctl.isla.solutions/v1", + resource: "cloudendpoints", + }, + }, + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/google-cloud-filestore-pv_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/google-cloud-filestore-pv_test.jsonnet new file mode 100644 index 00000000..405c2212 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/google-cloud-filestore-pv_test.jsonnet @@ -0,0 +1,110 @@ 
+local testSuite = import "kubeflow/common/testsuite.libsonnet"; +local googleCloudFilestorePv = import "kubeflow/gcp/google-cloud-filestore-pv.libsonnet"; + +local params = { + name: "google-cloud-filestore-pv", + storageCapacity: "1T", + path: "/kubeflow", + serverIP: "10.10.10.10", + image: "gcr.io/kubeflow-images-public/ubuntu:18.04", +}; +local env = { + namespace: "kf-001", +}; + +local instance = googleCloudFilestorePv.new(env, params); + +local testCases = [ + { + actual: instance.parts.persistentVolume, + expected: { + apiVersion: "v1", + kind: "PersistentVolume", + metadata: { + name: "google-cloud-filestore-pv", + namespace: "kf-001", + }, + spec: { + accessModes: [ + "ReadWriteMany", + ], + capacity: { + storage: "1T", + }, + nfs: { + path: "/kubeflow", + server: "10.10.10.10", + }, + }, + }, + }, + { + actual: instance.parts.persistentVolumeClaim, + expected: { + apiVersion: "v1", + kind: "PersistentVolumeClaim", + metadata: { + name: "google-cloud-filestore-pv", + namespace: "kf-001", + }, + spec: { + accessModes: [ + "ReadWriteMany", + ], + resources: { + requests: { + storage: "1T", + }, + }, + storageClassName: "nfs-storage", + volumeName: "google-cloud-filestore-pv", + }, + }, + }, + { + actual: instance.parts.gcfsPersmissions, + expected: { + apiVersion: "batch/v1", + kind: "Job", + metadata: { + name: "set-gcfs-permissions", + namespace: "kf-001", + }, + spec: { + template: { + spec: { + containers: [ + { + command: [ + "chmod", + "777", + "/kubeflow-gcfs", + ], + image: "gcr.io/kubeflow-images-public/ubuntu:18.04", + name: "set-gcfs-permissions", + volumeMounts: [ + { + mountPath: "/kubeflow-gcfs", + name: "google-cloud-filestore-pv", + }, + ], + }, + ], + restartPolicy: "OnFailure", + volumes: [ + { + name: "google-cloud-filestore-pv", + persistentVolumeClaim: { + claimName: "google-cloud-filestore-pv", + readOnly: false, + }, + }, + ], + }, + }, + }, + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/iap_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/iap_test.jsonnet new file mode 100644 index 00000000..67ffad8e --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/iap_test.jsonnet @@ -0,0 +1,275 @@ +local iap = import "../iap.libsonnet"; +local testSuite = import "kubeflow/common/testsuite.libsonnet"; + +local testCases = [ + { + actual: iap.new( + { namespace: "namespace" }, + { + envoyPort: 8080, + useIstio: "false", + espSampleAppImage: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0", + } + ).service, + expected: { + + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + service: "envoy", + }, + annotations: { + "beta.cloud.google.com/backend-config": '{"ports": {"envoy":"iap-backendconfig"}}', + }, + name: "envoy", + namespace: "namespace", + }, + spec: { + ports: [ + { + name: "envoy", + port: 8080, + targetPort: 8080, + }, + ], + selector: { + service: "envoy", + }, + type: "NodePort", + }, + }, + }, + { + actual: iap.new( + { namespace: "namespace" }, + { + envoyPort: 8080, + ipName: "ipName", + hostname: "hostname", + issuer: "issuer", + useIstio: "false", + espSampleAppImage: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0", + } + ).ingress, + expected: { + apiVersion: "extensions/v1beta1", + kind: "Ingress", + metadata: { + name: "envoy-ingress", + namespace: "namespace", + annotations: { + "kubernetes.io/tls-acme": "true", + "ingress.kubernetes.io/ssl-redirect": "true", + 
"kubernetes.io/ingress.global-static-ip-name": "ipName", + "certmanager.k8s.io/issuer": "issuer", + }, + }, + spec: { + rules: [ + { + host: "hostname", + http: { + paths: [ + { + backend: { + serviceName: "envoy", + servicePort: 8080, + }, + path: "/*", + }, + ], + }, + }, + ], + }, + }, + }, + { + actual: iap.new( + { + namespace: "namespace", + }, + { + envoyPort: 8080, + ipName: "ipName", + hostname: "null", + issuer: "issuer", + useIstio: "false", + espSampleAppImage: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0", + } + ).ingress, + expected: { + apiVersion: "extensions/v1beta1", + kind: "Ingress", + metadata: { + name: "envoy-ingress", + namespace: "namespace", + annotations: { + "kubernetes.io/tls-acme": "true", + "ingress.kubernetes.io/ssl-redirect": "true", + "kubernetes.io/ingress.global-static-ip-name": "ipName", + "certmanager.k8s.io/issuer": "issuer", + }, + }, + spec: { + rules: [ + { + http: { + paths: [ + { + backend: { + serviceName: "envoy", + servicePort: 8080, + }, + path: "/*", + }, + ], + }, + }, + ], + }, + }, + }, + { + actual: iap.new( + { + namespace: "namespace", + }, + { + secretName: "secretName", + hostname: "hostname", + issuer: "issuer", + privateGKECluster: "false", + useIstio: "false", + espSampleAppImage: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0", + } + ).certificate, + expected: { + apiVersion: "certmanager.k8s.io/v1alpha1", + kind: "Certificate", + metadata: { + name: "secretName", + namespace: "namespace", + }, + spec: { + secretName: "secretName", + issuerRef: { + name: "issuer", + kind: "ClusterIssuer", + }, + commonName: "hostname", + dnsNames: [ + "hostname", + ], + acme: { + config: [ + { + http01: { + ingress: "envoy-ingress", + }, + domains: [ + "hostname", + ], + }, + ], + }, + }, + }, + }, + { + actual: iap.new( + { + namespace: "namespace", + }, + { + useIstio: "false", + espSampleAppImage: "cloud-solutions-group/esp-sample-app:5.0.0", + } + ).whoamiApp, + expected: { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: "whoami-app", + namespace: "namespace", + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: "whoami", + }, + }, + spec: { + containers: [ + { + env: [ + { + name: "PORT", + value: "8081", + }, + ], + image: "cloud-solutions-group/esp-sample-app:5.0.0", + name: "app", + ports: [ + { + containerPort: 8081, + }, + ], + readinessProbe: { + failureThreshold: 2, + httpGet: { + path: "/healthz", + port: 8081, + scheme: "HTTP", + }, + periodSeconds: 10, + successThreshold: 1, + timeoutSeconds: 5, + }, + }, + ], + }, + }, + }, + }, + }, + { + actual: iap.new( + { + namespace: "namespace", + }, + { + useIstio: "false", + espSampleAppImage: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0", + } + ).whoamiService, + expected: { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: "whoami", + }, + name: "whoami-app", + namespace: "namespace", + }, + spec: { + ports: [ + { + port: 80, + targetPort: 8081, + }, + ], + selector: { + app: "whoami", + }, + type: "ClusterIP", + }, + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/metric-collector_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/metric-collector_test.jsonnet new file mode 100644 index 00000000..b1c623ae --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/metric-collector_test.jsonnet @@ -0,0 +1,196 @@ +local testSuite = import 
"kubeflow/common/testsuite.libsonnet"; +local metricCollector = import "kubeflow/gcp/metric-collector.libsonnet"; + +local params = { + name: "metric-collector", + targetUrl: "https://foo.com", + metricImage: "gcr.io/kubeflow-images-public/metric-collector:latest", + oauthSecretName: "bar", +}; +local env = { + namespace: "kf-001", +}; + +local instance = metricCollector.new(env, params); + +local testCases = [ + { + actual: instance.parts.metricServiceAccount, + expected: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + labels: { + app: "metric-collector", + }, + name: "metric-collector", + namespace: "kf-001", + }, + }, + }, + { + actual: instance.parts.metricRole, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + labels: { + app: "metric-collector", + }, + name: "metric-collector", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "services", + "events", + ], + verbs: [ + "*", + ], + }, + ], + }, + }, + { + actual: instance.parts.metricRoleBinding, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + labels: { + app: "metric-collector", + }, + name: "metric-collector", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "metric-collector", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "metric-collector", + namespace: "kf-001", + }, + ], + }, + }, + { + actual: instance.parts.service, + expected: { + apiVersion: "v1", + kind: "Service", + metadata: { + annotations: { + "prometheus.io/path": "/", + "prometheus.io/port": "8000", + "prometheus.io/scrape": "true", + }, + labels: { + service: "metric-collector", + }, + name: "metric-collector", + namespace: "kf-001", + }, + spec: { + ports: [ + { + name: "metric-collector", + port: 8000, + protocol: "TCP", + targetPort: 8000, + }, + ], + selector: { + app: "metric-collector", + }, + type: "ClusterIP", + }, + }, + }, + { + actual: instance.parts.deploy, + expected: { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: "metric-collector", + }, + name: "metric-collector", + namespace: "kf-001", + }, + spec: { + replicas: 1, + selector: { + matchLabels: { + app: "metric-collector", + }, + }, + template: { + metadata: { + labels: { + app: "metric-collector", + }, + namespace: "kf-001", + }, + spec: { + containers: [ + { + args: [ + "--url=https://foo.com", + "--client_id=$(CLIENT_ID)", + ], + command: [ + "python3", + "/opt/kubeflow-readiness.py", + ], + env: [ + { + name: "GOOGLE_APPLICATION_CREDENTIALS", + value: "/var/run/secrets/sa/admin-gcp-sa.json", + }, + { + name: "CLIENT_ID", + valueFrom: { + secretKeyRef: { + key: "client_id", + name: "bar", + }, + }, + }, + ], + image: "gcr.io/kubeflow-images-public/metric-collector:latest", + name: "exporter", + volumeMounts: [ + { + mountPath: "/var/run/secrets/sa", + name: "sa-key", + readOnly: true, + }, + ], + }, + ], + restartPolicy: "Always", + serviceAccountName: "metric-collector", + volumes: [ + { + name: "sa-key", + secret: { + secretName: "admin-gcp-sa", + }, + }, + ], + }, + }, + }, + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/prometheus_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/prometheus_test.jsonnet new file mode 100644 index 00000000..044a5fee --- /dev/null +++ 
b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/tests/prometheus_test.jsonnet @@ -0,0 +1,228 @@ +local testSuite = import "kubeflow/common/testsuite.libsonnet"; +local prometheus = import "kubeflow/gcp/prometheus.libsonnet"; + +// @param name string Name for the component +// @param projectId string GCP project id. +// @param clusterName string GKE cluster name. +// @param zone string GKE cluster zone. +local params = { + name: "prometheus", + projectId: "foo-173123", + clusterName: "prometheus", + zone: "west1a-a", +}; +local env = { + namespace: "kf-001", +}; + +local instance = prometheus.new(env, params); + +local testCases = [ + { + actual: instance.parts.namespace, + expected: { + apiVersion: "v1", + kind: "Namespace", + metadata: { + name: "stackdriver", + }, + }, + }, + { + actual: instance.parts.clusterRole, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRole", + metadata: { + name: "prometheus", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "nodes", + "nodes/proxy", + "services", + "endpoints", + "pods", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + { + apiGroups: [ + "extensions", + ], + resources: [ + "ingresses", + ], + verbs: [ + "get", + "list", + "watch", + ], + }, + { + nonResourceURLs: [ + "/metrics", + ], + verbs: [ + "get", + ], + }, + ], + }, + }, + { + actual: instance.parts.serviceAccount, + expected: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "prometheus", + namespace: "stackdriver", + }, + }, + }, + { + actual: instance.parts.clusterRoleBinding, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "ClusterRoleBinding", + metadata: { + name: "prometheus-stackdriver", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "prometheus", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "prometheus", + namespace: "stackdriver", + }, + ], + }, + }, + { + actual: instance.parts.service, + expected: { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + name: "prometheus", + }, + name: "prometheus", + namespace: "stackdriver", + }, + spec: { + ports: [ + { + name: "prometheus", + port: 9090, + protocol: "TCP", + }, + ], + selector: { + app: "prometheus", + }, + type: "ClusterIP", + }, + }, + }, + { + actual: instance.parts.configMap, + expected: { + apiVersion: "v1", + data: { + "prometheus.yml": "# Source: https://github.com/stackdriver/prometheus/blob/master/documentation/examples/prometheus.yml\nglobal:\n external_labels:\n _stackdriver_project_id: foo-173123\n _kubernetes_cluster_name: prometheus\n _kubernetes_location: west1a-a\n\n# Scrape config for nodes (kubelet).\n#\n# Rather than connecting directly to the node, the scrape is proxied though the\n# Kubernetes apiserver. This means it will work if Prometheus is running out of\n# cluster, or can't connect to nodes for some other reason (e.g. because of\n# firewalling).\nscrape_configs:\n- job_name: 'kubernetes-nodes'\n\n # Default to scraping over https. If required, just disable this or change to\n # `http`.\n scheme: https\n\n # This TLS & bearer token file config is used to connect to the actual scrape\n # endpoints for cluster components. This is separate to discovery auth\n # configuration because discovery & scraping are two separate concerns in\n # Prometheus. The discovery auth config is automatic if Prometheus runs inside\n # the cluster. 
Otherwise, more config options have to be provided within the\n # .\n tls_config:\n ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token\n\n kubernetes_sd_configs:\n - role: node\n\n relabel_configs:\n - target_label: __address__\n replacement: kubernetes.default.svc:443\n - source_labels: [__meta_kubernetes_node_name]\n regex: (.+)\n target_label: __metrics_path__\n replacement: /api/v1/nodes/${1}/proxy/metrics\n\n# Example scrape config for pods\n#\n# The relabeling allows the actual pod scrape endpoint to be configured via the\n# following annotations:\n#\n# * `prometheus.io/scrape`: Only scrape pods that have a value of `true`\n# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.\n# * `prometheus.io/port`: Scrape the pod on the indicated port instead of the\n# pod's declared ports (default is a port-free target if none are declared).\n- job_name: 'kubernetes-pods-containers'\n\n kubernetes_sd_configs:\n - role: pod\n\n relabel_configs:\n - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]\n action: keep\n regex: true\n - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]\n action: replace\n target_label: __metrics_path__\n regex: (.+)\n - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]\n action: replace\n regex: ([^:]+)(?::\\d+)?;(\\d+)\n replacement: $1:$2\n target_label: __address__\n\n# Scrape config for service endpoints.\n#\n# The relabeling allows the actual service scrape endpoint to be configured\n# via the following annotations:\n#\n# * `prometheus.io/scrape`: Only scrape services that have a value of `true`\n# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need\n# to set this to `https` & most likely set the `tls_config` of the scrape config.\n# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.\n# * `prometheus.io/port`: If the metrics are exposed on a different port to the\n# service then set this appropriately.\n- job_name: 'kubernetes-service-endpoints'\n\n kubernetes_sd_configs:\n - role: endpoints\n\n relabel_configs:\n - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]\n action: keep\n regex: true\n - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]\n action: replace\n target_label: __scheme__\n regex: (https?)\n - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]\n action: replace\n target_label: __metrics_path__\n regex: (.+)\n - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]\n action: replace\n target_label: __address__\n regex: ([^:]+)(?::\\d+)?;(\\d+)\n replacement: $1:$2\n\n\n# Scrape config for k8s services\n- job_name: 'kubernetes-services'\n\n kubernetes_sd_configs:\n - role: service\n\n relabel_configs:\n - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]\n action: keep\n regex: true\n - action: labelmap\n regex: __meta_kubernetes_service_label_(.+)\n - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]\n action: replace\n target_label: __metrics_path__\n - source_labels: [__address__,__meta_kubernetes_service_annotation_prometheus_io_port]\n action: replace\n target_label: __address__\n regex: (.+)(?::\\d+);(\\d+)\n replacement: $1:$2\n\nremote_write:\n- url: \"https://monitoring.googleapis.com:443/\"\n queue_config:\n # Capacity should be 2*max_samples_per_send.\n 
capacity: 2000\n max_samples_per_send: 1000\n max_shards: 10000\n write_relabel_configs:\n # These labels are generally redundant with the Stackdriver monitored resource labels.\n - source_labels: [job]\n target_label: job\n replacement: \"\"\n - source_labels: [instance]\n target_label: instance\n replacement: \"\"\n", + }, + kind: "ConfigMap", + metadata: { + name: "prometheus", + namespace: "stackdriver", + }, + }, + }, + { + actual: instance.parts.deployment, + expected: { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: "prometheus", + namespace: "stackdriver", + }, + spec: { + replicas: 1, + selector: { + matchLabels: { + app: "prometheus", + }, + }, + template: { + metadata: { + annotations: { + "prometheus.io/scrape": "true", + }, + labels: { + app: "prometheus", + }, + name: "prometheus", + namespace: "stackdriver", + }, + spec: { + containers: [ + { + image: "gcr.io/stackdriver-prometheus/stackdriver-prometheus:release-0.4.2", + imagePullPolicy: "Always", + name: "prometheus", + ports: [ + { + containerPort: 9090, + name: "web", + }, + ], + resources: { + limits: { + cpu: "400m", + memory: "1000Mi", + }, + requests: { + cpu: "20m", + memory: "50Mi", + }, + }, + volumeMounts: [ + { + mountPath: "/etc/prometheus", + name: "config-volume", + }, + ], + }, + ], + serviceAccountName: "prometheus", + volumes: [ + { + configMap: { + name: "prometheus", + }, + name: "config-volume", + }, + ], + }, + }, + }, + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/update_backend.sh b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/update_backend.sh new file mode 100644 index 00000000..683762d5 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/update_backend.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# +# A simple shell script to configure the backend timeouts and health checks by using gcloud. + +[ -z ${NAMESPACE} ] && echo Error NAMESPACE must be set && exit 1 +[ -z ${SERVICE} ] && echo Error SERVICE must be set && exit 1 + +PROJECT=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/project-id) +if [ -z ${PROJECT} ]; then + echo Error unable to fetch PROJECT from compute metadata + exit 1 +fi + +# Activate the service account, allow 5 retries +for i in {1..5}; do gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS} && break || sleep 10; done + +NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}') +while [[ -z ${BACKEND_SERVICE} ]]; +do BACKEND_SERVICE=$(gcloud --project=${PROJECT} compute backend-services list --filter=name~k8s-be-${NODE_PORT}- --uri); +echo "Waiting for the backend-services resource PROJECT=${PROJECT} NODEPORT=${NODE_PORT} SERVICE=${SERVICE}..."; +sleep 2; +done + +while [[ -z ${HEALTH_CHECK_URI} ]]; +do HEALTH_CHECK_URI=$(gcloud compute --project=${PROJECT} health-checks list --filter=name~k8s-be-${NODE_PORT}- --uri); +echo "Waiting for the healthcheck resource PROJECT=${PROJECT} NODEPORT=${NODE_PORT} SERVICE=${SERVICE}..."; +sleep 2; +done + +# Since we create the envoy-ingress ingress object before creating the envoy +# deployment object, healthcheck will not be configured correctly in the GCP +# load balancer. It will default the healthcheck request path to a value of +# / instead of the intended /healthz. 
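+# (The GCE ingress controller derives the health check path from the pod's
+# readinessProbe only when the pods exist before the ingress is created.)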
+# Manually update the healthcheck request path to /healthz +if [[ ${HEALTHCHECK_PATH} ]]; then + gcloud --project=${PROJECT} compute health-checks update http ${HEALTH_CHECK_URI} --request-path=${HEALTHCHECK_PATH} +else + gcloud --project=${PROJECT} compute health-checks update http ${HEALTH_CHECK_URI} --request-path=/healthz +fi + +if [[ ${USE_ISTIO} ]]; then + # Create the route so healthcheck can pass + kubectl apply -f /var/envoy-config/healthcheck_route.yaml +fi + +# Since JupyterHub uses websockets we want to increase the backend timeout +echo Increasing backend timeout for JupyterHub +gcloud --project=${PROJECT} compute backend-services update --global ${BACKEND_SERVICE} --timeout=3600 + +echo "Backend updated successfully. Waiting 1 hour before updating again." +sleep 3600 diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/webhook.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/webhook.libsonnet new file mode 100644 index 00000000..e38a3cf7 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/gcp/webhook.libsonnet @@ -0,0 +1,243 @@ +{ + local k = import "k.libsonnet", + local util = import "kubeflow/common/util.libsonnet", + new(_env, _params):: { + local params = _params + _env { + hostname: if std.objectHas(_params, "hostname") then _params.hostname else "null", + }, + local namespace = params.namespace, + + local deployment = { + apiVersion: "extensions/v1beta1", + kind: "Deployment", + metadata: { + name: "gcp-cred-webhook", + namespace: namespace, + }, + spec: { + template: { + metadata: { + labels: { + app: "gcp-cred-webhook" + }, + }, + spec: { + containers: [ + { + name: "gcp-cred-webhook", + image: params.image, + volumeMounts: [{ + name: "webhook-cert", + mountPath: "/etc/webhook/certs", + readOnly: true, + }], + }, + ], + volumes: [ + { + name: "webhook-cert", + secret: { + secretName: "gcp-cred-webhook-certs", + }, + }, + ], + }, + }, + }, + }, // deployment + deployment:: deployment, + + local service = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: "gcp-cred-webhook", + }, + name: "gcp-cred-webhook", + namespace: namespace, + }, + spec: { + selector: { + app: "gcp-cred-webhook", + }, + ports: [ + { + port: 443, + targetPort: 443, + }, + ], + }, + }, // service + service:: service, + + local webhookConfig = { + apiVersion: "admissionregistration.k8s.io/v1beta1", + kind: "MutatingWebhookConfiguration", + metadata: { + name: "gcp-cred-webhook", + // This is cluster scope. + }, + webhooks: [ + { + // name has to be fully qualified X.X.X + name: "gcp-cred-webhook.kubeflow.org", + clientConfig: { + service: { + name: "gcp-cred-webhook", + namespace: namespace, + path: "/add-cred" + }, + // To be patched. 
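+ // The webhook-bootstrap StatefulSet defined below runs create_ca.sh with + // RBAC access to secrets and mutatingwebhookconfigurations, so the CA + // bundle is presumably generated and patched into this field at runtime + // rather than hard-coded here.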
+ caBundle: "", + }, + rules: [ + { + operations: ["CREATE"], + apiGroups: [""], + apiVersions: ["v1"], + resources: ["pods"], + }, + ], + }, + ], + }, // webhookConfig + webhookConfig:: webhookConfig, + + local webhookBootstrapJob = { + apiVersion: "apps/v1", + kind: "StatefulSet", + metadata: { + name: "webhook-bootstrap", + namespace: namespace, + }, + spec: { + selector: { + matchLabels: { + service: "webhook-bootstrap", + }, + }, + template: { + metadata: { + labels: { + service: "webhook-bootstrap", + }, + }, + spec: { + restartPolicy: "Always", + serviceAccountName: "webhook-bootstrap", + containers: [ + { + name: "bootstrap", + image: params.webhookSetupImage, + command: [ + "sh", + "/var/webhook-config/create_ca.sh", + ], + env: [ + { + name: "NAMESPACE", + value: namespace, + }, + ], + volumeMounts: [ + { + mountPath: "/var/webhook-config/", + name: "webhook-config", + }, + ], + }, + ], + volumes: [ + { + configMap: { + name: "webhook-bootstrap-config", + }, + name: "webhook-config", + }, + ], + }, + }, + }, + }, // webhookBootstrapJob + webhookBootstrapJob:: webhookBootstrapJob, + + local initServiceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "webhook-bootstrap", + namespace: namespace, + }, + }, // initServiceAccount + initServiceAccount:: initServiceAccount, + + local initClusterRoleBinding = { + kind: "ClusterRoleBinding", + apiVersion: "rbac.authorization.k8s.io/v1beta1", + metadata: { + name: "webhook-bootstrap", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "webhook-bootstrap", + namespace: namespace, + }, + ], + roleRef: { + kind: "ClusterRole", + name: "webhook-bootstrap", + apiGroup: "rbac.authorization.k8s.io", + }, + }, // initClusterRoleBinding + initClusterRoleBinding:: initClusterRoleBinding, + + local initClusterRole = { + kind: "ClusterRole", + apiVersion: "rbac.authorization.k8s.io/v1beta1", + metadata: { + name: "webhook-bootstrap", + }, + rules: [ + { + apiGroups: ["admissionregistration.k8s.io"], + resources: ["mutatingwebhookconfigurations"], + verbs: ["*"], + }, + { + apiGroups: [""], + resources: ["secrets"], + verbs: ["*"], + }, + ], + }, // initClusterRoleBinding + initClusterRole:: initClusterRole, + + local webhookConfigmap = { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: "webhook-bootstrap-config", + namespace: namespace, + }, + data: { + "create_ca.sh": importstr "create_ca.sh", + } + }, // webhookConfigmap + webhookConfigmap:: webhookConfigmap, + + all:: [ + self.deployment, + self.service, + self.webhookBootstrapJob, + self.webhookConfigmap, + self.webhookConfig, + self.initServiceAccount, + self.initClusterRole, + self.initClusterRoleBinding, + ], + + list(obj=self.all):: k.core.v1.list.new(obj,), + } +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/OWNERS b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/OWNERS new file mode 100644 index 00000000..b8d4abf7 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/OWNERS @@ -0,0 +1,6 @@ +approvers: + - ioandr + - kkasravi + - pdmack +reviewers: + - vkoukis diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/config.yaml b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/config.yaml new file mode 100644 index 00000000..f810a7e5 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/config.yaml @@ -0,0 +1,118 @@ +# Configuration file for the default JupyterHub Spawner UI +# Each key 
corresponds to a JupyterHub Spawner UI option +# If a key is missing, the respective Spawner UI option will be left untouched +# +# Each Spawner UI option is configured by two keys: `value` and `readOnly` +# - The `value` key contains the default value +# - The `readOnly` key determines if the option will be available to users +# +# If the 'readOnly' key is present and set to 'true', the respective option +# will be disabled for users and only set by the admin +# If the 'readOnly' key is missing (defaults to 'false'), the respective option +# will be available for users +# +# Please note that some values (e.g. {username}) may be templated +# and expanded according to KubeSpawner's rules +# +# For more information regarding JupyterHub KubeSpawner and its configuration: +# https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html + +spawnerFormDefaults: + image: + # The container Image for the user's Jupyter Notebook + # If readonly, this value must be a member of the list below + value: gcr.io/kubeflow-images-public/tensorflow-1.13.1-notebook-cpu:v0.5.0 + # The list of available standard container Images + options: + - gcr.io/kubeflow-images-public/tensorflow-1.5.1-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.5.1-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.6.0-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.6.0-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.7.0-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.7.0-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.8.0-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.8.0-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.9.0-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.9.0-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.10.1-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.10.1-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.11.0-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.11.0-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.12.0-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.12.0-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.13.1-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.13.1-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-2.0.0a-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-2.0.0a-notebook-gpu:v0.5.0 + # By default, custom container Images are allowed + # Uncomment the following line to only enable standard container Images + readOnly: false + cpu: + # CPU for user's Notebook + value: '0.5' + # readOnly: true + memory: + # Memory for user's Notebook + value: 1.0Gi + workspaceVolume: + # Workspace Volume to be attached to user's Notebook + # Each Workspace Volume is declared with the following attributes: + # Type, Name, Size, MountPath and Access Mode + value: + type: + # The Type of the Workspace Volume + # Supported values: 'New', 'Existing' + value: New + name: + # The Name of the Workspace Volume + # Note that this is a templated value + # value: {username}-workspace + value: {username}-workspace + size: + # The Size of the Workspace Volume (in Gi) + value: '10' + mountPath: + # The Path that the Workspace Volume will be mounted + readOnly: true + value: /home/jovyan + accessModes: + # The Access Mode of the Workspace Volume + # Supported values: 
'ReadWriteOnce', 'ReadWriteMany', 'ReadOnlyMany' + value: ReadWriteOnce + dataVolumes: + # List of additional Data Volumes to be attached to the user's Notebook + value: [] + # Each Data Volume is declared with the following attributes: + # Type, Name, Size, MountPath and Access Mode + # + # For example, a list with 2 Data Volumes: + #value: + # - value: + # type: + # value: New + # name: + # value: {username}-vol-1 + # size: + # value: '10' + # mountPath: + # value: /home/jovyan/{username}-vol-1 + # accessModes: + # value: ReadWriteOnce + # - value: + # type: + # value: New + # name: + # value: {username}-vol-2 + # size: + # value: '5' + # mountPath: + # value: /home/jovyan/{username}-vol-2 + # accessModes: + # value: ReadWriteOnce + # + # Uncomment the following line to make the Data Volumes list readonly + #readOnly: true + extraResources: + # Extra Resource Limits for user's Notebook + # Note that braces are escaped + value: "{{}}" diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/jupyter-web-app.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/jupyter-web-app.libsonnet new file mode 100644 index 00000000..2df85d62 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/jupyter-web-app.libsonnet @@ -0,0 +1,296 @@ +{ + local k = import "k.libsonnet", + local util = import "kubeflow/common/util.libsonnet", + + new(_env, _params):: { + local params = _env + _params, + + local defaultSpawnerData = { + // Default JH Spawner UI files + "spawner_ui_config.yaml": importstr "./config.yaml", + }, + + local jupyterConfig = { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: params.name + "-config", + namespace: params.namespace, + }, + data: defaultSpawnerData, + }, + jupyterConfig:: jupyterConfig, + + serviceAccount:: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + labels: { + app: params.name, + }, + name: params.name, + namespace: params.namespace, + }, + }, + + clusterRole:: { + apiVersion: "rbac.authorization.k8s.io/v1", + kind: "ClusterRole", + metadata: { + name: params.name + "-cluster-role", + }, + rules: [ + { + apiGroups: [""], + resources: ["namespaces"], + verbs: ["get", "list", "create", "delete"] + }, + { + apiGroups: ["kubeflow.org"], + resources: ["notebooks"], + verbs: ["get", "list", "create", "delete"], + }, + { + apiGroups: [""], + resources: ["persistentvolumeclaims"], + verbs: ["create", "delete", "get", "list"], + }, + { + apiGroups: ["storage.k8s.io"], + resources: ["storageclasses"], + verbs: ["get", "list"], + }, + { + apiGroups: [""], + resources: ["secrets"], + verbs: ["get", "list"], + }, + ] + }, + + clusterRoleBinding:: { + kind: "ClusterRoleBinding", + apiVersion: "rbac.authorization.k8s.io/v1", + metadata: { + name: params.name + "-binding" + }, + subjects: [ + { + kind: "ServiceAccount", + name: params.name, + namespace: params.namespace + }, + ], + roleRef: { + kind: "ClusterRole", + name: params.name + "-cluster-role", + apiGroup: "rbac.authorization.k8s.io", + }, + }, + + notebookRole:: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "Role", + metadata: { + name: "jupyter-notebook-role", + namespace: params.namespace, + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "pods", + "pods/log", + "secrets", + "services", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "", + "apps", + "extensions", + ], + resources: [ + "deployments", + "replicasets", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "kubeflow.org", 
+ ], + resources: [ + "*", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "batch", + ], + resources: [ + "jobs", + ], + verbs: [ + "*", + ], + }, + ], + }, + + notebookServiceAccount:: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "jupyter-notebook", + namespace: params.namespace, + }, + }, + + notebookRoleBinding:: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "RoleBinding", + metadata: { + name: "jupyter-notebook-role-binding", + namespace: params.namespace, + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "Role", + name: "jupyter-notebook-role", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "jupyter-notebook", + namespace: params.namespace, + }, + ], + }, + + svc:: { + apiVersion: "v1", + kind: "Service", + metadata: { + name: params.name, + namespace: params.namespace, + labels: { + run: params.name + }, + annotations:{ + "getambassador.io/config": + std.join("\n", [ + "---", + "apiVersion: ambassador/v0", + "kind: Mapping", + "name: webapp_mapping", + "prefix: /" + params.prefix + "/", + "service: " + params.name + "." + params.namespace, + "add_request_headers: ", + " x-forwarded-prefix: /" + params.prefix + ]), + }, + }, + spec: { + type: "ClusterIP", + ports: [{ + port: 80, + targetPort: 5000, + protocol: "TCP", + name: "http", + }], + selector: { + app: params.name + }, + }, + }, + + depl :: { + apiVersion: "apps/v1", + kind: "Deployment", + metadata: { + name: params.name, + namespace: params.namespace, + labels: { + app: params.name, + }, + }, + spec: { + replicas: 1, + selector: { + matchLabels: { + app: params.name, + }, + }, + template: { + metadata:{ + labels: { + app: params.name, + }, + }, + spec: { + serviceAccountName: params.name, + containers: [{ + name: params.name, + image: params.image, + env: std.prune([ + { + name: "ROK_SECRET_NAME", + value: params.rokSecretName, + }, + { + name: "UI", + value: params.ui, + }, + ]), + volumeMounts: [ + { + mountPath: "/etc/config", + name: "config-volume", + }, + ], + ports: [{ + containerPort: 5000, + }], + imagePullPolicy: params.policy, + }], + volumes: [ + { + configMap: { + name: params.name + "-config", + }, + name: "config-volume", + }, + ], + }, + }, + }, + }, + + parts:: self, + all:: [ + self.svc, + self.depl, + self.jupyterConfig, + self.serviceAccount, + self.clusterRoleBinding, + self.clusterRole, + self.notebookServiceAccount, + self.notebookRole, + self.notebookRoleBinding, + ], + + list(obj=self.all):: util.list(obj), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/jupyter.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/jupyter.libsonnet new file mode 100644 index 00000000..fed6d80a --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/jupyter.libsonnet @@ -0,0 +1,423 @@ +{ + local k = import "k.libsonnet", + local util = import "kubeflow/common/util.libsonnet", + new(_env, _params):: { + local params = _params + _env, + + local defaultSpawnerData = { + // Default JH Spawner UI files + "template.html": importstr "ui/default/template.html", + "script.js": importstr "ui/default/script.js", + "style.css": importstr "ui/default/style.css", + "spawner.py": std.strReplace(importstr "ui/default/spawner.py", "\\\n", ""), + "spawner_ui_config.yaml": importstr "ui/default/config.yaml", + }, + + local rokSpawnerData = { + // Base files that Rok UI extends or overrides + "default_template.html": importstr "ui/default/template.html", + 
"default_style.css": importstr "ui/default/style.css", + "default_spawner.py": importstr "ui/default/spawner.py", + + // Rok UI files + "template.html": importstr "ui/rok/template.html", + "script.js": importstr "ui/rok/script.js", + "style.css": importstr "ui/rok/style.css", + "spawner.py": std.strReplace(importstr "ui/rok/spawner.py", "\\\n", ""), + "spawner_ui_config.yaml": importstr "ui/rok/config.yaml", + }, + + local kubeSpawnerConfig = { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: "jupyter-config", + namespace: params.namespace, + }, + // JH config file + local config = { + "jupyter_config.py": std.strReplace(importstr "jupyter_config.py", "\\\n", ""), + }, + data: config + + if params.ui == "rok" then rokSpawnerData + else if params.ui == "default" then defaultSpawnerData, + }, + kubeSpawnerConfig:: kubeSpawnerConfig, + + local notebookService = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: "jupyter", + }, + name: "jupyter-0", + namespace: params.namespace, + annotations: { + "prometheus.io/scrape": "true", + }, + }, + spec: { + // We want a headless service so we set the ClusterIP to be None. + // This headless server is used by individual Jupyter pods to connect back to the Hub. + clusterIP: "None", + ports: [ + { + name: "hub", + port: 8000, + }, + ], + selector: { + app: "jupyter", + }, + }, + }, + notebookService:: notebookService, + + local hubService = { + apiVersion: "v1", + kind: "Service", + metadata: { + labels: { + app: "jupyter-lb", + }, + name: "jupyter-lb", + namespace: params.namespace, + annotations: { + "getambassador.io/config": + std.join("\n", [ + "---", + "apiVersion: ambassador/v0", + "kind: Mapping", + "name: jupyter-lb-hub-mapping", + "prefix: /hub/", + "rewrite: /hub/", + "timeout_ms: 300000", + "service: jupyter-lb." + params.namespace, + "use_websocket: true", + "---", + "apiVersion: ambassador/v0", + "kind: Mapping", + "name: jupyter-lb-user-mapping", + "prefix: /user/", + "rewrite: /user/", + "timeout_ms: 300000", + "service: jupyter-lb." + params.namespace, + "use_websocket: true", + ]), + }, //annotations + }, + spec: { + ports: [ + { + name: "hub", + port: 80, + targetPort: 8000, + }, + ], + selector: { + app: "jupyter", + }, + type: params.serviceType, + }, + }, + hubService:: hubService, + + local hubStatefulSet = { + apiVersion: "apps/v1beta1", + kind: "StatefulSet", + metadata: { + name: "jupyter", + namespace: params.namespace, + }, + spec: { + replicas: 1, + serviceName: "", + template: { + metadata: { + labels: { + app: "jupyter", + }, + }, + spec: { + containers: [ + { + command: [ + "jupyterhub", + "-f", + "/etc/config/jupyter_config.py", + ], + image: params.image, + name: "jupyter", + volumeMounts: [ + { + mountPath: "/etc/config", + name: "config-volume", + }, + ], + ports: [ + // Port 8000 is used by the hub to accept incoming requests. + { + containerPort: 8000, + }, + // Port 8081 accepts callbacks from the individual Jupyter pods. 
+ { + containerPort: 8081, + }, + ], + env: std.prune([ + { + name: "KF_AUTHENTICATOR", + value: params.jupyterHubAuthenticator, + }, + { + name: "DEFAULT_JUPYTERLAB", + value: params.useJupyterLabAsDefault, + }, + { + name: "STORAGE_CLASS", + value: params.storageClass, + }, + { + name: "ROK_SECRET_NAME", + value: params.rokSecretName, + }, + if params.platform == "gke" then + { + name: "GCP_SECRET_NAME", + value: params.gcpSecretName, + }, + if params.platform == "minikube" && std.toString(params.notebookUid) != "-1" then + { + name: "NOTEBOOK_UID", + value: std.toString(params.notebookUid), + }, + if params.platform == "minikube" && std.toString(params.notebookGid) != "-1" then + { + name: "NOTEBOOK_GID", + value: std.toString(params.notebookGid), + }, + if params.platform == "minikube" then + { + name: "ACCESS_LOCAL_FS", + value: std.toString(params.accessLocalFs), + }, + ]), + }, // jupyter container + ], + serviceAccountName: "jupyter", + volumes: [ + { + configMap: { + name: "jupyter-config", + }, + name: "config-volume", + }, + ], + }, + }, + updateStrategy: { + type: "RollingUpdate", + }, + }, + }, + hubStatefulSet:: hubStatefulSet, + + // contents based on https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/jupyterhub/templates/hub/rbac.yaml + local hubRole = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "Role", + metadata: { + name: "jupyter-role", + namespace: params.namespace, + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "pods", + "persistentvolumeclaims", + ], + verbs: [ + "get", + "watch", + "list", + "create", + "delete", + ], + }, + { + apiGroups: [ + "", + ], + resources: [ + "events", + "secrets", + ], + verbs: [ + "get", + "watch", + "list", + ], + }, + ], + }, + hubRole:: hubRole, + + local notebookRole = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "Role", + metadata: { + name: "jupyter-notebook-role", + namespace: params.namespace, + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "pods", + "pods/log", + "secrets", + "services", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "", + "apps", + "extensions", + ], + resources: [ + "deployments", + "replicasets", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "kubeflow.org", + ], + resources: [ + "*", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "batch", + ], + resources: [ + "jobs", + ], + verbs: [ + "*", + ], + }, + ], + }, + notebookRole:: notebookRole, + + local hubServiceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + labels: { + app: "jupyter", + }, + name: "jupyter", + namespace: params.namespace, + }, + }, + hubServiceAccount:: hubServiceAccount, + + local notebookServiceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "jupyter-notebook", + namespace: params.namespace, + }, + }, + notebookServiceAccount:: notebookServiceAccount, + + local hubRoleBinding = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "RoleBinding", + metadata: { + name: "jupyter-role", + namespace: params.namespace, + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "Role", + name: "jupyter-role", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "jupyter", + namespace: params.namespace, + }, + ], + }, + hubRoleBinding:: hubRoleBinding, + + local notebookRoleBinding = { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "RoleBinding", + metadata: { + name: "jupyter-notebook-role", + namespace: params.namespace, + }, + roleRef: { + apiGroup: 
"rbac.authorization.k8s.io", + kind: "Role", + name: "jupyter-notebook-role", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "jupyter-notebook", + namespace: params.namespace, + }, + ], + }, + notebookRoleBinding:: notebookRoleBinding, + + local localstorage = (import "localstorage.libsonnet"), + pv:: localstorage.pv, + pvclaim:: localstorage.pvclaim, + + parts:: self, + all:: [ + self.kubeSpawnerConfig, + self.notebookService, + self.hubStatefulSet, + self.hubRole, + self.notebookRole, + self.hubService, + self.hubServiceAccount, + self.notebookServiceAccount, + self.hubRoleBinding, + self.notebookRoleBinding, + ] + std.flattenArrays([ + if params.accessLocalFs == "true" then [ + self.pv, + self.pvclaim, + ] else [], + ]), + + list(obj=self.all):: util.list(obj), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/jupyter_config.py b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/jupyter_config.py new file mode 100644 index 00000000..bb85e5da --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/jupyter_config.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +""" +Configuration file for JupyterHub. + +Kubeflow uses this file as the configuration file for JupyterHub. It contains +all glue code necessary to integrate JupyterHub with the remaining Kubeflow +components. + +Note that this file is also responsible for importing the UI-specific Spawner +class from /spawner.py, and setting the `spawner_class` configuration +option. +""" + +import os +from importlib.util import spec_from_file_location, module_from_spec +from jhub_remote_user_authenticator.remote_user_auth import \ + RemoteUserAuthenticator + +SERVICE_ACCOUNT_SECRET_MOUNT = '/var/run/secrets/sa' + +# Import the UI-specific Spawner +spec = spec_from_file_location('spawner', '/etc/config/spawner.py') +spawner = module_from_spec(spec) +spec.loader.exec_module(spawner) + +################################################### +# JupyterHub Options +################################################### +c.JupyterHub.ip = '0.0.0.0' +c.JupyterHub.hub_ip = '0.0.0.0' +# Don't try to cleanup servers on exit - since in general for k8s, we want +# the hub to be able to restart without losing user containers +c.JupyterHub.cleanup_servers = False +################################################### + +################################################### +# Spawner Options +################################################### +c.JupyterHub.spawner_class = spawner.KubeFormSpawner + +c.KubeSpawner.cmd = 'start-singleuser.sh' +c.KubeSpawner.args = ['--allow-root'] +# gpu images are very large ~15GB. need a large timeout. +c.KubeSpawner.start_timeout = 60 * 30 +# Increase timeout to 5 minutes to avoid HTTP 500 errors on JupyterHub +c.KubeSpawner.http_timeout = 60 * 5 + +# Volume setup +c.KubeSpawner.singleuser_uid = 1000 +c.KubeSpawner.singleuser_fs_gid = 100 +c.KubeSpawner.singleuser_working_dir = '/home/jovyan' + +# Allow environment vars to override uid and gid. 
+# This allows local host path mounts to be read/writable +env_uid = os.environ.get('NOTEBOOK_UID') +if env_uid: + c.KubeSpawner.singleuser_uid = int(env_uid) +env_gid = os.environ.get('NOTEBOOK_GID') +if env_gid: + c.KubeSpawner.singleuser_fs_gid = int(env_gid) +access_local_fs = os.environ.get('ACCESS_LOCAL_FS') +if access_local_fs == 'true': + + def modify_pod_hook(spawner, pod): + pod.spec.containers[0].lifecycle = { + 'postStart': { + 'exec': { + 'command': [ + 'ln', '-s', '/mnt/local-notebooks', + '/home/jovyan/local-notebooks' + ] + } + } + } + return pod + + c.KubeSpawner.modify_pod_hook = modify_pod_hook + +################################################### +# Persistent volume options +################################################### + +# Set user_storage_pvc_ensure to False to prevent KubeSpawner from handling PVCs +# We natively handle PVCs via KubeFormSpawner and its dedicated methods + +# NOTE: user_storage_pvc_ensure will be deprecated in a future release +c.KubeSpawner.storage_pvc_ensure = False +c.KubeSpawner.user_storage_pvc_ensure = False + +volumes = [] +volume_mounts = [] + +gcp_secret_name = os.environ.get('GCP_SECRET_NAME') +if gcp_secret_name: + volumes.append({ + 'name': gcp_secret_name, + 'secret': { + 'secretName': gcp_secret_name, + } + }) + volume_mounts.append({ + 'name': gcp_secret_name, + 'mountPath': SERVICE_ACCOUNT_SECRET_MOUNT + }) + +c.KubeSpawner.volumes = volumes +c.KubeSpawner.volume_mounts = volume_mounts + +storage_class = None +if os.environ.get('STORAGE_CLASS') != 'null': + storage_class = os.environ.get('STORAGE_CLASS') + +rok_secret_name = '' +if os.environ.get('ROK_SECRET_NAME') != 'null': + rok_secret_name = os.environ.get('ROK_SECRET_NAME') + +# Set both service_account and singleuser_service_account because +# singleuser_service_account will be deprecated in a future release +c.KubeSpawner.service_account = 'jupyter-notebook' +c.KubeSpawner.singleuser_service_account = 'jupyter-notebook' +# Authenticator +if os.environ.get('KF_AUTHENTICATOR') == 'iap': + c.JupyterHub.authenticator_class = RemoteUserAuthenticator + c.RemoteUserAuthenticator.header_name = 'x-goog-authenticated-user-email' +else: + c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator' + +if os.environ.get('DEFAULT_JUPYTERLAB').lower() == 'true': + c.KubeSpawner.default_url = '/lab' + +# Set extra spawner configuration variables +c.KubeSpawner.extra_spawner_config = { + 'gcp_secret_name': gcp_secret_name, + 'storage_class': storage_class, + 'rok_secret_name': rok_secret_name, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/localstorage.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/localstorage.libsonnet new file mode 100644 index 00000000..b0b56b7b --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/localstorage.libsonnet @@ -0,0 +1,64 @@ +{ + local pv = { + kind: 'PersistentVolume', + apiVersion: 'v1', + metadata: { + name: 'local-volume', + labels: { + type: 'local', + }, + }, + spec: { + persistentVolumeReclaimPolicy: 'Delete', + storageClassName: 'local-storage', + capacity: { + storage: '10Gi', + }, + accessModes: [ + 'ReadWriteOnce', + ], + 'local': { + path: '/mnt/local', + }, + nodeAffinity: { + required: { + nodeSelectorTerms: [ + { + matchExpressions: [ + { + key: 'kubernetes.io/hostname', + operator: 'In', + values: [ + 'minikube', + ], + }, + ], + }, + ], + }, + }, + }, + }, + pv:: pv, + + local pvclaim = { + kind: 'PersistentVolumeClaim', 
apiVersion: 'v1', + metadata: { + name: 'local-notebooks', + }, + spec: { + storageClassName: 'local-storage', + accessModes: [ + 'ReadWriteOnce', + ], + resources: { + requests: { + storage: '10Gi', + }, + }, + volumeName: 'local-volume', + }, + }, + pvclaim:: pvclaim, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/notebook_controller.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/notebook_controller.libsonnet new file mode 100644 index 00000000..c63751e6 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/notebook_controller.libsonnet @@ -0,0 +1,193 @@ +{ + local util = import "kubeflow/common/util.libsonnet", + + new(_env, _params):: { + local params = _env + _params, + + local notebooksCRD = { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "notebooks.kubeflow.org", + }, + spec: { + group: "kubeflow.org", + version: "v1alpha1", + scope: "Namespaced", + subresources: { + status: {}, + }, + names: { + plural: "notebooks", + singular: "notebook", + kind: "Notebook", + }, + }, + status: { + acceptedNames: { + kind: "", + plural: "", + }, + conditions: [], + storedVersions: [], + }, + }, + notebooksCRD:: notebooksCRD, + + local controllerService = { + apiVersion: "v1", + kind: "Service", + metadata: { + name: "notebooks-controller", + namespace: params.namespace, + }, + spec: { + selector: { + app: "notebooks-controller", + }, + ports: [ + { + port: 443, + }, + ], + }, + }, + controllerService:: controllerService, + + local controllerDeployment = { + apiVersion: "apps/v1beta1", + kind: "Deployment", + metadata: { + name: "notebooks-controller", + namespace: params.namespace, + }, + spec: { + selector: { + matchLabels: { + app: "notebooks-controller", + }, + }, + template: { + metadata: { + labels: { + app: "notebooks-controller", + }, + }, + spec: { + serviceAccountName: "notebook-controller", + containers: [ + { + name: "manager", + image: params.controllerImage, + imagePullPolicy: "Always", + command: [ + "/manager", + ], + env: if util.toBool(params.injectGcpCredentials) then [ + { + name: "POD_LABELS", + value: "gcp-cred-secret=user-gcp-sa,gcp-cred-secret-filename=user-gcp-sa.json", + }, + ] else [], + }, + ], + }, + }, + }, + }, + controllerDeployment:: controllerDeployment, + + local serviceAccount = { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + labels: { + app: "notebook-controller", + }, + name: "notebook-controller", + namespace: params.namespace, + }, + }, + serviceAccount:: serviceAccount, + + local role = { + apiVersion: "rbac.authorization.k8s.io/v1", + kind: "ClusterRole", + metadata: { + name: "notebooks-controller", + }, + rules: [ + { + apiGroups: [ + "apps", + ], + resources: [ + "statefulsets", + "deployments", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "", + ], + resources: [ + "services", + "pods", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "kubeflow.org", + ], + resources: [ + "notebooks", + "notebooks/status", + ], + verbs: [ + "*", + ], + }, + ], + }, + role:: role, + + local roleBinding = { + apiVersion: "rbac.authorization.k8s.io/v1", + kind: "ClusterRoleBinding", + metadata: { + name: "notebooks-controller", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "ClusterRole", + name: "notebooks-controller", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "notebook-controller", + namespace: params.namespace, + }, + ], + }, + roleBinding:: roleBinding, 
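+ // Convenience aggregate: "all" collects every object defined above, and + // list() wraps them via util.list so the prototype (see + // prototypes/notebook_controller.jsonnet) can emit them as a single list.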
+ + parts:: self, + all:: [ + self.notebooksCRD, + self.controllerService, + self.serviceAccount, + self.controllerDeployment, + self.role, + self.roleBinding, + ], + + list(obj=self.all):: util.list(obj), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/notebooks.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/notebooks.libsonnet new file mode 100644 index 00000000..2bbfcdd8 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/notebooks.libsonnet @@ -0,0 +1,157 @@ +{ + local util = import "kubeflow/common/util.libsonnet", + + new(_env, _params):: { + local params = _env + _params, + + local notebooksCRD = { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "notebooks.kubeflow.org", + }, + spec: { + group: "kubeflow.org", + version: "v1alpha1", + scope: "Namespaced", + names: { + plural: "notebooks", + singular: "notebook", + kind: "Notebook", + }, + validation: { + openAPIV3Schema: (import "notebooks.schema"), + }, + }, + }, + notebooksCRD:: notebooksCRD, + + local notebooksConfigMap = { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: "notebooks", + namespace: params.namespace, + }, + data: { + "sync-notebook.jsonnet": (importstr "sync-notebook.jsonnet"), + "util.libsonnet": (importstr "kubeflow/jupyter/util.libsonnet"), + }, + }, + notebooksConfigMap:: notebooksConfigMap, + + local notebooksService = { + apiVersion: "v1", + kind: "Service", + metadata: { + name: "notebooks", + namespace: params.namespace, + }, + spec: { + selector: { + app: "notebooks", + }, + ports: [ + { + port: 80, + targetPort: 8080, + }, + ], + }, + }, + notebooksService:: notebooksService, + + local notebooksDeployment = { + apiVersion: "apps/v1beta1", + kind: "Deployment", + metadata: { + name: "notebooks", + namespace: params.namespace, + }, + spec: { + selector: { + matchLabels: { + app: "notebooks", + }, + }, + template: { + metadata: { + labels: { + app: "notebooks", + }, + }, + spec: { + containers: [ + { + name: "hooks", + image: "metacontroller/jsonnetd@sha256:25c25f217ad030a0f67e37078c33194785b494569b0c088d8df4f00da8fd15a0", + imagePullPolicy: "Always", + workingDir: "/opt/notebooks/hooks", + volumeMounts: [ + { + name: "hooks", + mountPath: "/opt/notebooks/hooks", + }, + ], + }, + ], + volumes: [ + { + name: "hooks", + configMap: { + name: "notebooks", + }, + }, + ], + }, + }, + }, + }, + notebooksDeployment:: notebooksDeployment, + + local notebooksController = { + apiVersion: "metacontroller.k8s.io/v1alpha1", + kind: "CompositeController", + metadata: { + name: "notebook-controller", + annotations: params, + }, + spec: { + generateSelector: true, + parentResource: { + apiVersion: "kubeflow.org/v1alpha1", + resource: "notebooks", + }, + childResources: [ + { + apiVersion: "v1", + resource: "services", + }, + { + apiVersion: "extensions/v1beta1", + resource: "deployments", + }, + ], + hooks: { + sync: { + webhook: { + url: "http://notebooks." 
+ params.namespace + "/sync-notebook", + }, + }, + }, + }, + }, + notebooksController:: notebooksController, + + parts:: self, + all:: [ + self.notebooksCRD, + self.notebooksService, + self.notebooksConfigMap, + self.notebooksDeployment, + self.notebooksController, + ], + + list(obj=self.all):: util.list(obj), + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/notebooks.schema b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/notebooks.schema new file mode 100644 index 00000000..12232c5b --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/notebooks.schema @@ -0,0 +1,446 @@ +{ + properties: { + apiVersion: { + type: 'string', + }, + kind: { + type: 'string', + }, + metadata: { + properties: { + name: { + type: 'string', + }, + labels: { + type: 'object', + }, + namespace: { + type: 'string', + }, + annotations: { + type: 'object', + }, + }, + }, + spec: { + required: [ + 'template', + ], + properties: { + template: { + properties: { + spec: { + required: [ + 'containers', + 'ttlSecondsAfterFinished', + ], + properties: { + securityContext: { + properties: { + runAsNonRoot: { + type: 'boolean', + }, + fsGroup: { + type: 'integer', + format: 'int64', + }, + runAsUser: { + type: 'integer', + format: 'int64', + }, + supplementalGroups: { + items: { + type: 'integer', + format: 'int64', + }, + type: 'array', + }, + }, + }, + lifecycle: { + properties: { + preStop: { + properties: { + httpGet: { + required: [ + 'port', + ], + properties: { + path: { + type: 'string', + }, + host: { + type: 'string', + }, + scheme: { + type: 'string', + }, + httpHeaders: { + items: { + required: [ + 'name', + 'value', + ], + properties: { + name: { + type: 'string', + }, + value: { + type: 'string', + }, + }, + }, + type: 'array', + }, + port: { + oneOf: [ + { + type: 'string', + }, + { + type: 'integer', + }, + ], + }, + }, + }, + tcpSocket: { + required: [ + 'port', + ], + properties: { + host: { + type: 'string', + }, + port: { + oneOf: [ + { + type: 'string' + }, + { + type: 'integer', + } + ] + } + } + }, + exec: { + properties: { + command: { + items: { + type: 'string', + }, + type: 'array', + }, + }, + }, + }, + }, + postStart: { + properties: { + httpGet: { + required: [ + 'port', + ], + properties: { + path: { + type: 'string', + }, + host: { + type: 'string', + }, + scheme: { + type: 'string', + }, + httpHeaders: { + items: { + required: [ + 'name', + 'value', + ], + properties: { + name: { + type: 'string', + }, + value: { + type: 'string', + }, + }, + }, + type: 'array', + }, + port: { + oneOf: [ + { + type: 'string', + }, + { + type: 'integer', + }, + ], + }, + }, + }, + tcpSocket: { + required: [ + 'port', + ], + properties: { + host: { + type: 'string', + }, + port: { + oneOf: [ + { + type: 'string', + }, + { + type: 'integer' + }, + ], + }, + }, + }, + exec: { + properties: { + command: { + items: { + type: 'string', + }, + type: 'array', + }, + }, + }, + }, + }, + }, + }, + serviceAccountName: { + type: 'string', + }, + serviceAccount: { + type: 'string', + }, + ttlSecondsAfterFinished: { + type: 'integer', + format: 'int64', + }, + activeDeadlineSeconds: { + type: 'integer', + format: 'int64', + }, + terminationGracePeriodSeconds: { + type: 'integer', + format: 'int64', + }, + volumes: { + type: 'array', + }, + imagePullSecrets: { + items: { + properties: { + name: { + type: 'string', + }, + }, + }, + type: 'array', + }, + containers: { + items: { + required: [ + 'name', + ], + properties: { + args: { + 
items: { + type: 'string', + }, + type: 'array', + }, + name: { + type: 'string', + }, + workingDir: { + type: 'string', + }, + image: { + type: 'string', + }, + stdin: { + type: 'boolean', + }, + volumeMounts: { + items: { + required: [ + 'name', + 'mountPath', + ], + properties: { + readOnly: { + type: 'boolean', + }, + mountPath: { + type: 'string', + }, + subPath: { + type: 'string', + }, + name: { + type: 'string', + }, + }, + }, + type: 'array', + }, + command: { + items: { + type: 'string', + }, + type: 'array', + }, + env: { + type: 'array', + }, + imagePullPolicy: { + type: 'string', + }, + securityContext: { + properties: { + readOnlyRootFilesystem: { + type: 'boolean', + }, + runAsUser: { + type: 'integer', + format: 'int64', + }, + capabilities: { + properties: { + add: { + items: { + type: 'string', + }, + type: 'array', + }, + drop: { + items: { + type: 'string', + }, + type: 'array', + }, + }, + }, + runAsNonRoot: { + type: 'boolean', + }, + privileged: { + type: 'boolean', + } + } + }, + ports: { + items: { + properties: { + hostIP: { + type: 'string', + }, + protocol: { + type: 'string', + }, + containerPort: { + type: 'integer', + format: 'int32', + }, + name: { + type: 'string', + }, + hostPort: { + type: 'integer', + format: 'int32', + } + } + }, + type: 'array', + }, + resources: { + properties: { + requests: { + type: 'object', + }, + limits: { + type: 'object', + }, + }, + }, + stdinOnce: { + type: 'boolean', + }, + }, + }, + type: 'array', + }, + }, + }, + }, + }, + selector: { + properties: { + matchLabels: { + type: 'object', + }, + matchExpressions: { + items: { + required: [ + 'key', + 'operator', + ], + properties: { + operator: { + type: 'string', + }, + values: { + items: { + type: 'string', + }, + type: 'array', + }, + key: { + type: 'string', + }, + }, + }, + type: 'array', + }, + }, + }, + }, + }, + status: { + properties: { + observedGeneration: { + type: 'integer', + format: 'int64', + }, + conditions: { + items: { + properties: { + status: { + type: 'string', + }, + reason: { + type: 'string' + }, + message: { + type: 'string', + }, + type: { + type: 'string', + }, + }, + }, + type: 'array', + }, + }, + }, + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/parts.yaml b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/parts.yaml new file mode 100644 index 00000000..d8a590a6 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/parts.yaml @@ -0,0 +1,36 @@ +{ + "name": "jupyter", + "apiVersion": "0.0.1", + "kind": "ksonnet.io/parts", + "description": "Notebooks/Jupyter component of Kubeflow.\n", + "author": "kubeflow team ", + "contributors": [ + { + "name": "Jeremy Lewi", + "email": "jlewi@google.com" + } + ], + "repository": { + "type": "git", + "url": "https://github.com/kubeflow/kubeflow" + }, + "bugs": { + "url": "https://github.com/kubeflow/kubeflow/issues" + }, + "keywords": [ + "kubeflow", + "jupyter", + "notebooks" + ], + "quickStart": { + "prototype": "io.ksonnet.pkg.jupyter", + "componentName": "jupyter", + "flags": { + "name": "jupyter", + "namespace": "default", + "disks": "" + }, + "comment": "Jupyter Kubeflow component." 
+ }, + "license": "Apache 2.0" +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/README.md b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/README.md new file mode 100644 index 00000000..8620c717 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/README.md @@ -0,0 +1,137 @@ + + +**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* + +- [Goals](#goals) +- [Design](#design) + - [User Interaction](#user-interaction) + + + +## Goals + +- provide a k8s-native mechanism for spawning Jupyter notebooks for users + +- set a serviceAccountName within the spawning pod similar to jupyter-notebook + +- allow params similar to kubeflow/common/prototypes/jupyterhub.jsonnet, including PVCs, but use the kubernetes PodTemplateSpec + + +## Design + +The Notebooks component is an alternative to jupyterhub. The component defines a Notebook CRD and provides a notebook-controller (based on metacontroller's [CompositeController](https://metacontroller.app/api/compositecontroller/)). The Notebook CRD schema is similar to a kubernetes [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#creating-a-deployment) where it defines a [Pod Spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#podspec-v1-core) within its [template section](https://github.com/kubeflow/kubeflow/blob/111975f3886d058a112c7970dce209714ddcfb2e/kubeflow/notebooks/notebooks.schema#L32) that is used by the notebook-controller to create a Deployment. Launching a notebook is nothing more than submitting a Notebook yaml to the api-server using kubectl. An example Notebook yaml is shown below: + +```yaml +apiVersion: kubeflow.org/v1alpha1 +kind: Notebook +metadata: + name: training + namespace: resnet50 +spec: + template: + spec: + containers: + - name: notebook + image: gcr.io/kubeflow-images-public/tensorflow-1.10.1-notebook-cpu:v0.3.0 + resources: + requests: + cpu: 500m + memory: 1Gi + workingDir: "/home/jovyan" + ttlSecondsAfterFinished: 300 + securityContext: + fsGroup: 100 + runAsUser: 1000 +``` + +### User Interaction + +The user submits a Notebook yaml either through a UI or CLI (e.g. `kubectl apply -f notebook.yaml`) and the Notebook yaml is handled by the notebook-controller. The notebook-controller will create a Service and Deployment within the namespace set in the Notebook yaml. Note: the namespace must exist. The Service uses ambassador to create a reverse proxy that will route subsequent browser requests to the Pod.
An example Service is shown below: + +```yaml +apiVersion: v1 +kind: Service +metadata: + annotations: + getambassador.io/config: |- + --- + apiVersion: ambassador/v0 + kind: Mapping + name: resnet50_training_mapping + prefix: /resnet50/training + rewrite: /resnet50/training + timeout_ms: 300000 + service: training.resnet50 + labels: + controller-uid: 038a2a66-e3e1-11e8-b1ac-42010a8a01b5 + name: training + namespace: resnet50 + ownerReferences: + - apiVersion: kubeflow.org/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Notebook + name: training + uid: 038a2a66-e3e1-11e8-b1ac-42010a8a01b5 +spec: + clusterIP: 10.103.254.68 + ports: + - port: 80 + protocol: TCP + targetPort: 8888 + selector: + app: training + type: ClusterIP +``` + +Subsequent browser requests to `https://<host>/<namespace>/<notebook-name>` are routed to the Service and Pod as shown below: + +![Jupyter Notebook](./docs/jupyter_notebook.png "Jupyter Notebook") + +The notebook component can use the [profiles component](https://github.com/kubeflow/kubeflow/tree/master/kubeflow/profiles/prototypes) to provide a protected namespace for the user. It can also spawn the notebook within the kubeflow namespace. + +```yaml +apiVersion: kubeflow.org/v1alpha1 +kind: Profile +metadata: + name: resnet50 + namespace: kubeflow +spec: + template: + metadata: + namespace: resnet50 + spec: + owner: + kind: ServiceAccount + name: dean + namespace: kubeflow +``` + +If the user were a GKE IAM user, the Profile yaml would look like: + +```yaml +apiVersion: kubeflow.org/v1alpha1 +kind: Profile +metadata: + name: resnet50 + namespace: kubeflow +spec: + template: + metadata: + namespace: resnet50 + spec: + owner: + apiGroup: rbac.authorization.k8s.io + kind: User + name: fred@acme.com +status: + conditions: + - type: Ready + created: true + phase: Active +``` + +In this case the user is Fred and is identified within GKE's IAM by his email `fred@acme.com`. + + + diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/docs/jupyter_notebook.png b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/docs/jupyter_notebook.png new file mode 100644 index 00000000..b2628db2 Binary files /dev/null and b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/docs/jupyter_notebook.png differ diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/jupyter-web-app.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/jupyter-web-app.jsonnet new file mode 100644 index 00000000..4fa84f5b --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/jupyter-web-app.jsonnet @@ -0,0 +1,15 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.jupyter-web-app +// @shortDescription A WebApp that controls Jupyter Notebooks +// @param name string Name to give to the Jupyter UI +// @optionalParam image string gcr.io/kubeflow-images-public/jupyter-web-app:v0.5.0 Docker Image used for the Jupyter UI +// @optionalParam ui string default Choose the UI to use.
Supported: default | rok +// @optionalParam port string 80 Port to expose the UI's Service +// @optionalParam policy string Always imagePullPolicy for the UI's image +// @optionalParam prefix string jupyter The prefix under which the app is accessed +// @optionalParam rokSecretName string secret-rok-{username} The name of the secret containing user's credentials for Arrikto Rok + +local jupyter_ui = import "kubeflow/jupyter/jupyter-web-app.libsonnet"; + +local instance = jupyter_ui.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/jupyter.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/jupyter.jsonnet new file mode 100644 index 00000000..1b212778 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/jupyter.jsonnet @@ -0,0 +1,22 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.jupyter +// @description jupyter Component +// @shortDescription jupyter Component +// @param name string Name to give to each of the components +// @optionalParam platform string none supported platforms {none|gke|minikube} +// @optionalParam serviceType string ClusterIP The service type for Jupyter. +// @optionalParam image string gcr.io/kubeflow/jupyterhub-k8s:v20180531-3bb991b1 The image to use for Jupyter. +// @optionalParam jupyterHubAuthenticator string null The authenticator to use +// @optionalParam useJupyterLabAsDefault string false Set JupyterLab interface as the default +// @optionalParam gcpSecretName string user-gcp-sa The name of the secret containing service account credentials for GCP +// @optionalParam disks string null Comma separated list of Google persistent disks to attach to notebook environments. +// @optionalParam notebookUid string -1 UserId of the host user for minikube local fs mount +// @optionalParam notebookGid string -1 GroupID of the host user for minikube local fs mount +// @optionalParam accessLocalFs string false Set true if mounting a local fs directory that needs to be accessed by Jupyter Notebook in Minikube.
+// @optionalParam ui string default The JupyterHub Spawner User Interface +// @optionalParam storageClass string null The storageClass to use for PVC management +// @optionalParam rokSecretName string secret-rok-{username} The name of the secret containing user's credentials for Arrikto Rok + +local jupyter = import "kubeflow/jupyter/jupyter.libsonnet"; +local instance = jupyter.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/notebook_controller.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/notebook_controller.jsonnet new file mode 100644 index 00000000..f188c7cd --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/notebook_controller.jsonnet @@ -0,0 +1,11 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.notebook-controller +// @description notebook controller +// @shortDescription notebooks +// @param name string Name +// @optionalParam controllerImage string gcr.io/kubeflow-images-public/notebook-controller:v20190401-v0.4.0-rc.1-308-g33618cc9-e3b0c4 The image to use for the notebook controller +// @optionalParam injectGcpCredentials string true Whether to inject gcp credentials + +local notebooks = import "kubeflow/jupyter/notebook_controller.libsonnet"; +local instance = notebooks.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/notebooks.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/notebooks.jsonnet new file mode 100644 index 00000000..e818db4a --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/prototypes/notebooks.jsonnet @@ -0,0 +1,21 @@ +// @apiVersion 0.1 +// @name io.ksonnet.pkg.notebooks +// @description notebooks Component +// @shortDescription notebooks Component +// @param name string Name +// @optionalParam image string gcr.io/kubeflow/jupyterhub-k8s:v20180531-3bb991b1 The image to use for the notebook +// @optionalParam authenticator string null The authenticator to use +// @optionalParam pvcMount string /home/jovyan Mount path for PVC. Set empty to disable PVC +// @optionalParam registry string gcr.io The docker image registry for notebook +// @optionalParam repoName string kubeflow-images-public The repository name for the notebook +// @optionalParam disks string null Comma separated list of Google persistent disks to attach to notebook environments. +// @optionalParam uid string -1 UserId of the host user for minikube local fs mount +// @optionalParam gid string -1 GroupID of the host user for minikube local fs mount +// @optionalParam accessLocalFs string false Set true if mounting a local fs directory that needs to be accessed by the notebook in Minikube. 
+// @optionalParam serviceType string ClusterIP type of service {LoadBalancer, ClusterIP, NodePort} +// @optionalParam servicePort string 80 service port +// @optionalParam targetPort string 8888 container port + +local notebooks = import "kubeflow/jupyter/notebooks.libsonnet"; +local instance = notebooks.new(env, params); +instance.list(instance.all) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/sync-notebook.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/sync-notebook.jsonnet new file mode 100644 index 00000000..8790aaa7 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/sync-notebook.jsonnet @@ -0,0 +1,132 @@ +// Controller for resource: notebooks +// Creates 2 child resources +// - Service +// - Deployment +function(request) { + local util = import "util.libsonnet", + local sharedNamespace = request.controller.metadata.annotations.namespace, + local templateSpec = request.parent.spec.template.spec, + local podTemplateSpec = { + containers: [ + { + args: [ + "start.sh", + "jupyter", + "lab", + "--LabApp.token=''", + "--LabApp.allow_remote_access='True'", + "--LabApp.allow_root='True'", + "--LabApp.ip='*'", + "--LabApp.base_url=/" + request.parent.metadata.namespace + "/" + request.parent.metadata.name + "/", + "--port=8888", + "--no-browser", + ], + env: [ + { + name: "JUPYTER_ENABLE_LAB", + value: "true", + }, + ], + image: "gcr.io/kubeflow-images-public/tensorflow-1.10.1-notebook-cpu:v0.3.0", + imagePullPolicy: "IfNotPresent", + name: "notebook", + ports: [ + { + containerPort: 8888, + name: "notebook-port", + protocol: "TCP", + }, + ], + resources: { + requests: { + cpu: "500m", + memory: "1Gi", + }, + }, + workingDir: "/home/jovyan", + }, + ], + restartPolicy: "Always", + serviceAccount:: {}, + // TODO serviceAccount could be the user in the kubeflow namespace + // But should probably be similar to jupyter. + serviceAccountName:: {}, + automountServiceAccountToken: false, + }, + + local children = [ + { + apiVersion: "v1", + kind: "Service", + metadata: { + annotations: { + "getambassador.io/config": + std.join("\n", [ + "---", + "apiVersion: ambassador/v0", + "kind: Mapping", + "name: " + request.parent.metadata.namespace + "_" + request.parent.metadata.name + "_mapping", + "prefix: /" + request.parent.metadata.namespace + "/" + request.parent.metadata.name, + "rewrite: /" + request.parent.metadata.namespace + "/" + request.parent.metadata.name, + "timeout_ms: 300000", + "service: " + request.parent.metadata.name + "."
+ request.parent.metadata.namespace, + ]), + }, + name: request.parent.metadata.name, + namespace: request.parent.metadata.namespace, + }, + spec: { + selector: { + app: request.parent.metadata.name, + }, + ports: [ + { + port: 80, + protocol: "TCP", + targetPort: 8888, + }, + ], + type: "ClusterIP", + }, + }, + { + apiVersion: "apps/v1beta1", + kind: "Deployment", + metadata: { + name: request.parent.metadata.name, + namespace: request.parent.metadata.namespace, + labels: { + app: request.parent.metadata.name, + }, + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: request.parent.metadata.name, + }, + }, + spec: { + containers: [ + templateSpec.containers[0] + podTemplateSpec.containers[0], + ], + }, + }, + }, + }, + ], + local validatedChildren = util.sort(std.filter(util.validateResource, children), util.comparator), + local requestedChildren = std.flattenArrays(std.map(util.extractResources, util.extractGroups(request.children))), + local groupedRequestedChildren = util.groupByResource(requestedChildren), + local missingChildren = util.sort(std.filter(util.curryResources(groupedRequestedChildren, false), validatedChildren), util.comparator), + local desired = requestedChildren + missingChildren, + children: desired, + status: { + phase: "Active", + conditions: [{ + type: "Ready", + }], + created: true, + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/conftest.py b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/conftest.py new file mode 100644 index 00000000..3abc65be --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/conftest.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +import pytest + + +def pytest_addoption(parser): + parser.addoption( + "--namespace", action="store", default="", help="namespace to use") + + parser.addoption( + "--env", + action="store", + default="jupytertest", + help="ksonnet environment") + + +@pytest.fixture +def namespace(request): + return request.config.getoption("--namespace") + + +@pytest.fixture +def env(request): + return request.config.getoption("--env") diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/jupyter_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/jupyter_test.jsonnet new file mode 100644 index 00000000..87cd2b20 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/jupyter_test.jsonnet @@ -0,0 +1,433 @@ +local testSuite = import "kubeflow/common/testsuite.libsonnet"; +local jupyter = import "kubeflow/jupyter/jupyter.libsonnet"; + +local params = { + name: "jupyter", + platform: "gke", + serviceType: "ClusterIP", + gcpSecretName: "user-gcp-sa", + image: "gcr.io/kubeflow/jupyterhub-k8s:v20180531-3bb991b1", + jupyterHubAuthenticator: "iap", + useJupyterLabAsDefault: false, + notebookUid: "-1", + notebookGid: "-1", + accessLocalFs: "false", + ui: "default", + storageClass: "null", + rokSecretName: "secret-rok-{username}", +}; +local env = { + namespace: "foo", +}; + +local instance = jupyter.new(env, params); + +local testCases = [ + { + actual: instance.parts.kubeSpawnerConfig, + expected: { + apiVersion: "v1", + data: { + "jupyter_config.py": std.strReplace(importstr "kubeflow/jupyter/jupyter_config.py", "\\\n", ""), + "template.html": importstr "kubeflow/jupyter/ui/default/template.html", + "script.js": importstr "kubeflow/jupyter/ui/default/script.js", + "style.css": importstr 
"kubeflow/jupyter/ui/default/style.css", + "spawner.py": std.strReplace(importstr "kubeflow/jupyter/ui/default/spawner.py", "\\\n", ""), + "spawner_ui_config.yaml": importstr "kubeflow/jupyter/ui/default/config.yaml", + }, + kind: "ConfigMap", + metadata: { + name: "jupyter-config", + namespace: "foo", + }, + }, + }, + { + actual: instance.parts.notebookService, + expected: { + apiVersion: "v1", + kind: "Service", + metadata: { + annotations: { + "prometheus.io/scrape": "true", + }, + labels: { + app: "jupyter", + }, + name: "jupyter-0", + namespace: "foo", + }, + spec: { + clusterIP: "None", + ports: [ + { + name: "hub", + port: 8000, + }, + ], + selector: { + app: "jupyter", + }, + }, + }, + }, + { + actual: instance.parts.hubStatefulSet, + expected: { + apiVersion: "apps/v1beta1", + kind: "StatefulSet", + metadata: { + name: "jupyter", + namespace: "foo", + }, + spec: { + replicas: 1, + serviceName: "", + template: { + metadata: { + labels: { + app: "jupyter", + }, + }, + spec: { + containers: [ + { + command: [ + "jupyterhub", + "-f", + "/etc/config/jupyter_config.py", + ], + env: [ + { + name: "KF_AUTHENTICATOR", + value: "iap", + }, + { + name: "DEFAULT_JUPYTERLAB", + value: false, + }, + { + name: "STORAGE_CLASS", + value: "null", + }, + { + name: "ROK_SECRET_NAME", + value: "secret-rok-{username}", + }, + { + name: "GCP_SECRET_NAME", + value: "user-gcp-sa", + }, + ], + image: "gcr.io/kubeflow/jupyterhub-k8s:v20180531-3bb991b1", + name: "jupyter", + ports: [ + { + containerPort: 8000, + }, + { + containerPort: 8081, + }, + ], + volumeMounts: [ + { + mountPath: "/etc/config", + name: "config-volume", + }, + ], + }, + ], + serviceAccountName: "jupyter", + volumes: [ + { + configMap: { + name: "jupyter-config", + }, + name: "config-volume", + }, + ], + }, + }, + updateStrategy: { + type: "RollingUpdate", + }, + }, + }, + }, + { + actual: instance.parts.hubRole, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "Role", + metadata: { + name: "jupyter-role", + namespace: "foo", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "pods", + "persistentvolumeclaims", + ], + verbs: [ + "get", + "watch", + "list", + "create", + "delete", + ], + }, + { + apiGroups: [ + "", + ], + resources: [ + "events", + "secrets", + ], + verbs: [ + "get", + "watch", + "list", + ], + }, + ], + }, + }, + { + actual: instance.parts.notebookRole, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "Role", + metadata: { + name: "jupyter-notebook-role", + namespace: "foo", + }, + rules: [ + { + apiGroups: [ + "", + ], + resources: [ + "pods", + "pods/log", + "secrets", + "services", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "", + "apps", + "extensions", + ], + resources: [ + "deployments", + "replicasets", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "kubeflow.org", + ], + resources: [ + "*", + ], + verbs: [ + "*", + ], + }, + { + apiGroups: [ + "batch", + ], + resources: [ + "jobs", + ], + verbs: [ + "*", + ], + }, + ], + }, + }, + { + actual: instance.parts.hubService, + expected: { + apiVersion: "v1", + kind: "Service", + metadata: { + annotations: { + "getambassador.io/config": "---\napiVersion: ambassador/v0\nkind: Mapping\nname: jupyter-lb-hub-mapping\nprefix: /hub/\nrewrite: /hub/\ntimeout_ms: 300000\nservice: jupyter-lb.foo\nuse_websocket: true\n---\napiVersion: ambassador/v0\nkind: Mapping\nname: jupyter-lb-user-mapping\nprefix: /user/\nrewrite: /user/\ntimeout_ms: 300000\nservice: jupyter-lb.foo\nuse_websocket: true", + }, + 
labels: { + app: "jupyter-lb", + }, + name: "jupyter-lb", + namespace: "foo", + }, + spec: { + ports: [ + { + name: "hub", + port: 80, + targetPort: 8000, + }, + ], + selector: { + app: "jupyter", + }, + type: "ClusterIP", + }, + }, + }, + { + actual: instance.parts.hubServiceAccount, + expected: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + labels: { + app: "jupyter", + }, + name: "jupyter", + namespace: "foo", + }, + }, + }, + { + actual: instance.parts.notebookServiceAccount, + expected: { + apiVersion: "v1", + kind: "ServiceAccount", + metadata: { + name: "jupyter-notebook", + namespace: "foo", + }, + }, + }, + { + actual: instance.parts.hubRoleBinding, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "RoleBinding", + metadata: { + name: "jupyter-role", + namespace: "foo", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "Role", + name: "jupyter-role", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "jupyter", + namespace: "foo", + }, + ], + }, + }, + { + actual: instance.parts.notebookRoleBinding, + expected: { + apiVersion: "rbac.authorization.k8s.io/v1beta1", + kind: "RoleBinding", + metadata: { + name: "jupyter-notebook-role", + namespace: "foo", + }, + roleRef: { + apiGroup: "rbac.authorization.k8s.io", + kind: "Role", + name: "jupyter-notebook-role", + }, + subjects: [ + { + kind: "ServiceAccount", + name: "jupyter-notebook", + namespace: "foo", + }, + ], + }, + }, + { + actual: instance.parts.pv, + expected: + { + apiVersion: "v1", + kind: "PersistentVolume", + metadata: { + labels: { + type: "local", + }, + name: "local-volume", + }, + spec: { + accessModes: [ + "ReadWriteOnce", + ], + capacity: { + storage: "10Gi", + }, + "local": { + path: "/mnt/local", + }, + nodeAffinity: { + required: { + nodeSelectorTerms: [ + { + matchExpressions: [ + { + key: "kubernetes.io/hostname", + operator: "In", + values: [ + "minikube", + ], + }, + ], + }, + ], + }, + }, + persistentVolumeReclaimPolicy: "Delete", + storageClassName: "local-storage", + }, + }, + }, + { + actual: instance.parts.pvclaim, + expected: + { + apiVersion: "v1", + kind: "PersistentVolumeClaim", + metadata: { + name: "local-notebooks", + }, + spec: { + accessModes: [ + "ReadWriteOnce", + ], + resources: { + requests: { + storage: "10Gi", + }, + }, + storageClassName: "local-storage", + volumeName: "local-volume", + }, + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/jupyter_test.py b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/jupyter_test.py new file mode 100644 index 00000000..1ded1422 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/jupyter_test.py @@ -0,0 +1,157 @@ +"""Test jupyter custom resource. + +This file tests that we can create notebooks using the Jupyter custom resource. + +It is an integration test as it depends on having access to +a Kubeflow cluster with the custom resource test installed. + +We use the pytest framework because + 1. It can output results in junit format for prow/gubernator + 2. It has good support for configuring tests using command line arguments + (https://docs.pytest.org/en/latest/example/simple.html) +Python Path Requirements: + kubeflow/testing/py - https://github.com/kubeflow/testing/tree/master/py + * Provides utilities for testing + +Manually running the test + 1. 
Configure your KUBECONFIG file to point to the desired cluster +""" + +import logging +import os +import subprocess +import re +import requests +from retrying import retry +import six + +import pytest + +from kubernetes.config import kube_config +from kubernetes import client as k8s_client +from kubeflow.testing import ks_util +from kubeflow.testing import util + +GROUP = "kubeflow.org" +PLURAL = "notebooks" +KIND = "Notebook" +VERSION = "v1alpha1" + +logging.basicConfig( + level=logging.INFO, + format=('%(levelname)s|%(asctime)s' + '|%(pathname)s|%(lineno)d| %(message)s'), + datefmt='%Y-%m-%dT%H:%M:%S', +) +logging.getLogger().setLevel(logging.INFO) + + +def is_retryable_result(r): + if r.status_code in [requests.codes.NOT_FOUND, requests.codes.UNAVAILABLE]: + message = "Request to {0} returned {1}".format(r.url, r.status_code) + logging.error(message) + return True + + return False + + +@retry( + wait_exponential_multiplier=1000, + wait_exponential_max=10000, + stop_max_delay=5 * 60 * 1000, + retry_on_result=is_retryable_result) +def send_request(*args, **kwargs): + """Send a request to the Jupyter server. + + Sends a request to verify we can fetch the main page for the Jupyter + notebook. + """ + # We don't use util.run because that ends up including the access token + # in the logs + token = subprocess.check_output(["gcloud", "auth", "print-access-token"]) + if six.PY3 and hasattr(token, "decode"): + token = token.decode() + token = token.strip() + + headers = { + "Authorization": "Bearer " + token, + } + + if "headers" not in kwargs: + kwargs["headers"] = {} + + kwargs["headers"].update(headers) + + r = requests.get(*args, **kwargs) + + # TODO(https://github.com/kubeflow/testing/issues/288): Use selenium + # to create a proper test. Jupyter returns a 404 because the page is + # using javascript. If we use selenium we can properly fetch the page. + pattern = re.compile(".*Jupyter Notebook.*") + + content = r.content + if six.PY3 and hasattr(content, "decode"): + content = content.decode() + if r.status_code == requests.codes.NOT_FOUND and pattern.findall(content): + r.status_code = 200 + return r + + +def test_jupyter(env, namespace): + app_credentials = os.getenv("GOOGLE_APPLICATION_CREDENTIALS") + if app_credentials: + logging.info("Activate service account") + util.run([ + "gcloud", "auth", "activate-service-account", + "--key-file=" + app_credentials + ]) + + # util.load_kube_config appears to hang on python3 + kube_config.load_kube_config() + api_client = k8s_client.ApiClient() + host = api_client.configuration.host + logging.info("Kubernetes master: %s", host) + master = host.rsplit("/", 1)[-1] + + this_dir = os.path.dirname(__file__) + app_dir = os.path.join(this_dir, "test_app") + + ks_cmd = ks_util.get_ksonnet_cmd(app_dir) + + name = "jupyter-test" + service = "jupyter-test" + component = "jupyter" + params = "" + ks_util.setup_ks_app(app_dir, env, namespace, component, params) + + util.run([ks_cmd, "apply", env, "-c", component], cwd=app_dir) + conditions = ["Ready"] + results = util.wait_for_cr_condition(api_client, GROUP, PLURAL, VERSION, + namespace, name, conditions) + + logging.info("Result of CRD:\n%s", results) + + # We proxy the request through the APIServer so that we can connect + # from outside the cluster. 
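+ # The apiserver's built-in service proxy exposes an in-cluster Service at + # /api/v1/namespaces/<namespace>/services/<name>:<port>/proxy/..., so the + # test can reach the notebook UI without provisioning an external endpoint.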
+ url = ("https://{master}/api/v1/namespaces/{namespace}/services/{service}:80" + "/proxy/default/jupyter/lab?").format( + master=master, namespace=namespace, service=service) + logging.info("Request: %s", url) + r = send_request(url, verify=False) + + if r.status_code != requests.codes.OK: + msg = "Request to {0} exited with status code: {1} and content: {2}".format( + url, r.status_code, r.content) + logging.error(msg) + raise RuntimeError(msg) + + +if __name__ == "__main__": + logging.basicConfig( + level=logging.INFO, + format=('%(levelname)s|%(asctime)s' + '|%(pathname)s|%(lineno)d| %(message)s'), + datefmt='%Y-%m-%dT%H:%M:%S', + ) + logging.getLogger().setLevel(logging.INFO) + pytest.main() diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/notebooks_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/notebooks_test.jsonnet new file mode 100644 index 00000000..e512b992 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/notebooks_test.jsonnet @@ -0,0 +1,182 @@ +local testSuite = import "kubeflow/common/testsuite.libsonnet"; +local notebooks = import "kubeflow/jupyter/notebooks.libsonnet"; + +// TODO +// Most of these will go away since that can be set directly in Notebook.spec.template.spec +// +local params = { + image: "gcr.io/kubeflow/jupyterhub-k8s:v20180531-3bb991b1", + useJupyterLabAsDefault: true, + notebookPVCMount: "/home/jovyan", + registry: "gcr.io", + repoName: "kubeflow-images-public", + notebookUid: "-1", + notebookGid: "-1", + accessLocalFs: "true", +}; + +local env = { + namespace: "kf-100", +}; + +local instance = notebooks.new(env, params); + +local testCases = [ + { + actual: instance.parts.notebooksCRD, + expected: { + apiVersion: "apiextensions.k8s.io/v1beta1", + kind: "CustomResourceDefinition", + metadata: { + name: "notebooks.kubeflow.org", + }, + spec: { + group: "kubeflow.org", + names: { + kind: "Notebook", + plural: "notebooks", + singular: "notebook", + }, + scope: "Namespaced", + validation: { + openAPIV3Schema: (import "kubeflow/jupyter/notebooks.schema"), + }, + version: "v1alpha1", + }, + }, + }, + { + actual: instance.parts.notebooksService, + expected: { + apiVersion: "v1", + kind: "Service", + metadata: { + name: "notebooks", + namespace: "kf-100", + }, + spec: { + ports: [ + { + port: 80, + targetPort: 8080, + }, + ], + selector: { + app: "notebooks", + }, + }, + }, + }, + { + actual: instance.parts.notebooksConfigMap, + expected: { + apiVersion: "v1", + data: { + "sync-notebook.jsonnet": (importstr "../sync-notebook.jsonnet"), + "util.libsonnet": (importstr "kubeflow/jupyter/util.libsonnet"), + }, + kind: "ConfigMap", + metadata: { + name: "notebooks", + namespace: "kf-100", + }, + }, + }, + { + actual: instance.parts.notebooksDeployment, + expected: { + apiVersion: "apps/v1beta1", + kind: "Deployment", + metadata: { + name: "notebooks", + namespace: "kf-100", + }, + spec: { + selector: { + matchLabels: { + app: "notebooks", + }, + }, + template: { + metadata: { + labels: { + app: "notebooks", + }, + }, + spec: { + containers: [ + { + image: "metacontroller/jsonnetd@sha256:25c25f217ad030a0f67e37078c33194785b494569b0c088d8df4f00da8fd15a0", + imagePullPolicy: "Always", + name: "hooks", + volumeMounts: [ + { + mountPath: "/opt/notebooks/hooks", + name: "hooks", + }, + ], + workingDir: "/opt/notebooks/hooks", + }, + ], + volumes: [ + { + configMap: { + name: "notebooks", + }, + name: "hooks", + }, + ], + }, + }, + }, + }, + }, + { + actual: 
instance.parts.notebooksController, + expected: + { + apiVersion: "metacontroller.k8s.io/v1alpha1", + kind: "CompositeController", + metadata: { + annotations: { + accessLocalFs: "true", + image: "gcr.io/kubeflow/jupyterhub-k8s:v20180531-3bb991b1", + namespace: "kf-100", + notebookGid: "-1", + notebookPVCMount: "/home/jovyan", + notebookUid: "-1", + registry: "gcr.io", + repoName: "kubeflow-images-public", + useJupyterLabAsDefault: true, + }, + name: "notebook-controller", + }, + spec: { + childResources: [ + { + apiVersion: "v1", + resource: "services", + }, + { + apiVersion: "extensions/v1beta1", + resource: "deployments", + }, + ], + generateSelector: true, + hooks: { + sync: { + webhook: { + url: "http://notebooks.kf-100/sync-notebook", + }, + }, + }, + parentResource: { + apiVersion: "kubeflow.org/v1alpha1", + resource: "notebooks", + }, + }, + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/sync-notebook_test.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/sync-notebook_test.jsonnet new file mode 100644 index 00000000..c6192932 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/sync-notebook_test.jsonnet @@ -0,0 +1,202 @@ +local testSuite = import "kubeflow/common/testsuite.libsonnet"; +local params = { + // TODO + // Most of these will go away since that can be set directly in Notebook.spec.template.spec + // + image: "tensorflow-1.10.1-notebook-cpu:v0.3.0", + useJupyterLabAsDefault: true, + notebookPVCMount: "/home/jovyan", + registry: "gcr.io", + repoName: "kubeflow-images-public", + notebookUid: "-1", + notebookGid: "-1", + serviceType: "ClusterIP", + targetPort: "8888", + servicePort: "80", +}; + +local env = { + namespace: "kubeflow", +}; + +local syncNotebook = import "../sync-notebook.jsonnet"; + +local notebook = { + apiVersion: "kubeflow.org/v1alpha1", + kind: "Notebook", + metadata: { + name: "training", + namespace: "resnet50", + }, + spec: { + template: { + spec: { + ttlSecondsAfterFinished: 300, + containers: [ + { + image: params.registry + "/" + params.repoName + "/" + params.image, + ports: [ + { + containerPort: params.targetPort, + name: "notebook-port", + protocol: "TCP", + }, + ], + resources: { + requests: { + cpu: "500m", + memory: "1Gi", + }, + }, + volumeMounts: [ + { + mountPath: params.notebookPVCMount, + name: "volume-training", + }, + ], + workingDir: params.notebookPVCMount, + }, + ], + securityContext: { + fsGroup: params.notebookGid, + runAsUser: params.notebookUid, + }, + serviceAccountName: "system:serviceaccount:" + env.namespace + ":notebooks", + volumes: [ + { + name: "volume-training", + persistentVolumeClaim: { + claimName: "claim-training", + }, + }, + ], + }, + }, + }, +}; + +local request = { + controller: { + metadata: { + annotations: params + env, + }, + }, + parent: notebook, + children: { + "Pod.v1": {}, + "Service.v1": {}, + }, +}; + +local testCases = [ + { + actual: syncNotebook(request), + expected: { + children: [ + { + apiVersion: "v1", + kind: "Service", + metadata: { + annotations: { + "getambassador.io/config": "---\napiVersion: ambassador/v0\nkind: Mapping\nname: resnet50_training_mapping\nprefix: /resnet50/training\nrewrite: /resnet50/training\ntimeout_ms: 300000\nservice: training.resnet50", + }, + name: "training", + namespace: "resnet50", + }, + spec: { + ports: [ + { + port: 80, + protocol: "TCP", + targetPort: 8888, + }, + ], + selector: { + app: "training", + }, + type: 
"ClusterIP", + }, + }, + { + apiVersion: "apps/v1beta1", + kind: "Deployment", + metadata: { + labels: { + app: "training", + }, + name: "training", + namespace: "resnet50", + }, + spec: { + replicas: 1, + template: { + metadata: { + labels: { + app: "training", + }, + }, + spec: { + containers: [ + { + args: [ + "start.sh", + "jupyter", + "lab", + "--LabApp.token=''", + "--LabApp.allow_remote_access='True'", + "--LabApp.allow_root='True'", + "--LabApp.ip='*'", + "--LabApp.base_url=/resnet50/training/", + "--port=8888", + "--no-browser", + ], + env: [ + { + name: "JUPYTER_ENABLE_LAB", + value: "true", + }, + ], + image: "gcr.io/kubeflow-images-public/tensorflow-1.10.1-notebook-cpu:v0.3.0", + imagePullPolicy: "IfNotPresent", + name: "notebook", + ports: [ + { + containerPort: 8888, + name: "notebook-port", + protocol: "TCP", + }, + ], + resources: { + requests: { + cpu: "500m", + memory: "1Gi", + }, + }, + volumeMounts: [ + { + mountPath: "/home/jovyan", + name: "volume-training", + }, + ], + workingDir: "/home/jovyan", + }, + ], + }, + }, + }, + }, + ], + status: { + conditions: [ + { + type: "Ready", + }, + ], + created: true, + phase: "Active", + }, + }, + }, +]; + +testSuite.run(testCases) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/.gitignore b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/.gitignore new file mode 100644 index 00000000..f8714d3a --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/.gitignore @@ -0,0 +1,4 @@ +/lib +/.ksonnet/registries +/app.override.yaml +/.ks_environment diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/README.md b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/README.md new file mode 100644 index 00000000..35f6c451 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/README.md @@ -0,0 +1 @@ +This ksonnet app contains some manifests to be used in the E2E test. 
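The sync-notebook.jsonnet hook vendored above implements Metacontroller's CompositeController sync contract: the controller POSTs a JSON request carrying the parent Notebook and any existing children, and the hook answers with the desired children plus a status object. A minimal sketch of that contract in Python — illustrative only, with resource shapes mirroring the jsonnet above rather than any published client API:

def sync(request):
    """Return desired children + status for one Notebook parent.

    A sketch of the Metacontroller sync contract implemented by
    sync-notebook.jsonnet; the merge of controller-wide defaults into the
    pod template is elided here.
    """
    parent = request["parent"]
    name = parent["metadata"]["name"]
    namespace = parent["metadata"]["namespace"]

    service = {
        "apiVersion": "v1",
        "kind": "Service",
        "metadata": {"name": name, "namespace": namespace},
        "spec": {
            "selector": {"app": name},
            "ports": [{"port": 80, "protocol": "TCP", "targetPort": 8888}],
            "type": "ClusterIP",
        },
    }
    deployment = {
        "apiVersion": "apps/v1beta1",
        "kind": "Deployment",
        "metadata": {"name": name, "namespace": namespace,
                     "labels": {"app": name}},
        "spec": {
            "replicas": 1,
            # The notebook pod spec comes from the parent's own template;
            # the jsonnet merges in defaults such as image, args, and ports.
            "template": parent["spec"]["template"],
        },
    }
    return {
        "children": [service, deployment],
        "status": {
            "phase": "Active",
            "conditions": [{"type": "Ready"}],
            "created": True,
        },
    }

Metacontroller creates or updates whatever children the hook returns, which is why the jsonnet ends by merging requested and missing children into its children: field.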
diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/app.lock b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/app.lock new file mode 100644 index 00000000..e69de29b diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/app.yaml b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/app.yaml new file mode 100644 index 00000000..4e66c998 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/app.yaml @@ -0,0 +1,17 @@ +apiVersion: 0.3.0 +environments: + default: + destination: + namespace: kubeflow + server: https://35.196.210.94 + k8sVersion: v1.11.5 + path: default + jlewi: + destination: + namespace: jlewi + server: https://35.237.22.119 + k8sVersion: v1.11.6 + path: jlewi +kind: ksonnet.io/app +name: test_app +version: 0.0.1 diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/components/jupyter.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/components/jupyter.jsonnet new file mode 100644 index 00000000..5437282a --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/components/jupyter.jsonnet @@ -0,0 +1,56 @@ +// Component to launch a jupyter notebook +// +local k = import "k.libsonnet"; +local env = std.extVar("__ksonnet/environments"); +local params = std.extVar("__ksonnet/params").components.jupyter; + + +local jupyter = { + "apiVersion": "kubeflow.org/v1alpha1", + "kind": "Notebook", + "metadata": { + "name": params.name, + "namespace": env.namespace, + }, + "spec": { + "template": { + "spec": { + "containers": [ + { + "image": "gcr.io/kubeflow-images-public/tensorflow-1.10.1-notebook-cpu:v0.3.0", + "name": "notebook", + args: [ + "start.sh", + "jupyter", + "lab", + "--LabApp.token=''", + "--LabApp.allow_remote_access='True'", + "--LabApp.allow_root='True'", + "--LabApp.ip='*'", + "--LabApp.base_url=/" + env.namespace + "/" + params.name + "/", + "--port=8888", + "--no-browser", + ], + env: [ + { + name: "JUPYTER_ENABLE_LAB", + value: "true", + }, + ], + "resources": { + "requests": { + "cpu": "500m", + "memory": "1Gi" + } + }, + "workingDir": "/home/jovyan" + } + ], + } + } + } +}; + +k.core.v1.list.new([ + jupyter, +]) diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/components/params.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/components/params.libsonnet new file mode 100644 index 00000000..1eebb1ac --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/components/params.libsonnet @@ -0,0 +1,8 @@ +{ + global: {}, + components: { + jupyter: { + name: 'jupyter-test', + }, + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/base.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/base.libsonnet new file mode 100644 index 00000000..a129affb --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/base.libsonnet @@ -0,0 +1,4 @@ +local components = std.extVar("__ksonnet/components"); +components + { + // Insert user-specified overrides here. 
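+ // (These base/main/params files are standard ksonnet scaffolding: + // base.libsonnet exposes the app's generated components object, and each + // environment's main.jsonnet mixes per-component overrides into it.)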
+} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/default/globals.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/default/globals.libsonnet new file mode 100644 index 00000000..2c63c085 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/default/globals.libsonnet @@ -0,0 +1,2 @@ +{ +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/default/main.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/default/main.jsonnet new file mode 100644 index 00000000..1d4f6425 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/default/main.jsonnet @@ -0,0 +1,9 @@ +local base = import "base.libsonnet"; +// uncomment if you reference ksonnet-lib +// local k = import "k.libsonnet"; +// local deployment = k.apps.v1beta2.deployment; + +base + { + // Insert user-specified overrides here. For example, if a component is named "nginx-deployment", you might have something like: + // "nginx-deployment"+: deployment.mixin.metadata.withLabels({foo: "bar"}) +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/default/params.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/default/params.libsonnet new file mode 100644 index 00000000..b6eb32db --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/default/params.libsonnet @@ -0,0 +1,17 @@ +local params = std.extVar("__ksonnet/params"); +local globals = import "globals.libsonnet"; +local envParams = params + { + components +: { + // Insert component parameter overrides here. Ex: + // guestbook +: { + // name: "guestbook-dev", + // replicas: params.global.replicas, + // }, + }, +}; + +{ + components: { + [x]: envParams.components[x] + globals, for x in std.objectFields(envParams.components) + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/jlewi/globals.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/jlewi/globals.libsonnet new file mode 100644 index 00000000..2c63c085 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/jlewi/globals.libsonnet @@ -0,0 +1,2 @@ +{ +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/jlewi/main.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/jlewi/main.jsonnet new file mode 100644 index 00000000..1d4f6425 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/jlewi/main.jsonnet @@ -0,0 +1,9 @@ +local base = import "base.libsonnet"; +// uncomment if you reference ksonnet-lib +// local k = import "k.libsonnet"; +// local deployment = k.apps.v1beta2.deployment; + +base + { + // Insert user-specified overrides here. For example, if a component is named "nginx-deployment", you might have something like: + // "nginx-deployment"+: deployment.mixin.metadata.withLabels({foo: "bar"}) +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/jlewi/params.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/jlewi/params.libsonnet new file mode 100644 index 00000000..b6eb32db --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/jlewi/params.libsonnet @@ -0,0 +1,17 @@ +local params = std.extVar("__ksonnet/params"); +local globals = import "globals.libsonnet"; +local envParams = params + { + components +: { + // Insert component parameter overrides here. Ex: + // guestbook +: { + // name: "guestbook-dev", + // replicas: params.global.replicas, + // }, + }, +}; + +{ + components: { + [x]: envParams.components[x] + globals, for x in std.objectFields(envParams.components) + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/test2/globals.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/test2/globals.libsonnet new file mode 100644 index 00000000..2c63c085 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/test2/globals.libsonnet @@ -0,0 +1,2 @@ +{ +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/test2/main.jsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/test2/main.jsonnet new file mode 100644 index 00000000..1d4f6425 --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/test2/main.jsonnet @@ -0,0 +1,9 @@ +local base = import "base.libsonnet"; +// uncomment if you reference ksonnet-lib +// local k = import "k.libsonnet"; +// local deployment = k.apps.v1beta2.deployment; + +base + { + // Insert user-specified overrides here. For example, if a component is named "nginx-deployment", you might have something like: + // "nginx-deployment"+: deployment.mixin.metadata.withLabels({foo: "bar"}) +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/test2/params.libsonnet b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/test2/params.libsonnet new file mode 100644 index 00000000..b6eb32db --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/tests/test_app/environments/test2/params.libsonnet @@ -0,0 +1,17 @@ +local params = std.extVar("__ksonnet/params"); +local globals = import "globals.libsonnet"; +local envParams = params + { + components +: { + // Insert component parameter overrides here.
Ex: + // guestbook +: { + // name: "guestbook-dev", + // replicas: params.global.replicas, + // }, + }, +}; + +{ + components: { + [x]: envParams.components[x] + globals, for x in std.objectFields(envParams.components) + }, +} diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/ui/default/config.yaml b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/ui/default/config.yaml new file mode 100644 index 00000000..1408eafc --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/ui/default/config.yaml @@ -0,0 +1,118 @@ +# Configuration file for the default JupyterHub Spawner UI +# Each key corresponds to a JupyterHub Spawner UI option +# If a key is missing, the respective Spawner UI option will be left untouched +# +# Each Spawner UI option is configured by two keys: `value` and `readOnly` +# - The `value` key contains the default value +# - The `readOnly` key determines if the option will be available to users +# +# If the 'readOnly' key is present and set to 'true', the respective option +# will be disabled for users and only set by the admin +# If the 'readOnly' key is missing (defaults to 'false'), the respective option +# will be available for users +# +# Please note that some values (e.g. {servername}, {username}) may be templated +# and expanded according to KubeSpawner's rules +# +# For more information regarding JupyterHub KubeSpawner and its configuration: +# https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html + +spawnerFormDefaults: + image: + # The container Image for the user's Jupyter Notebook + # If readonly, this value must be a member of the list below + value: gcr.io/kubeflow-images-public/tensorflow-1.13.1-notebook-cpu:v0.5.0 + # The list of available standard container Images + options: + - gcr.io/kubeflow-images-public/tensorflow-1.4.1-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.4.1-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.5.1-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.5.1-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.6.0-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.6.0-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.7.0-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.7.0-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.8.0-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.8.0-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.9.0-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.9.0-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.10.1-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.10.1-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.11.0-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.11.0-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.12.0-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.12.0-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.13.1-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-1.13.1-notebook-gpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-2.0.0a-notebook-cpu:v0.5.0 + - gcr.io/kubeflow-images-public/tensorflow-2.0.0a-notebook-gpu:v0.5.0 + # By default, custom container Images are allowed + # Uncomment the following line to only enable standard container Images + #readOnly: true + cpu: + # CPU 
for user's Notebook + value: '1.0' + memory: + # Memory for user's Notebook + value: 1.0Gi + workspaceVolume: + # Workspace Volume to be attached to user's Notebook + # Each Workspace Volume is declared with the following attributes: + # Type, Name, Size, MountPath and Access Mode + value: + type: + # The Type of the Workspace Volume + # Supported values: 'New', 'Existing' + value: New + name: + # The Name of the Workspace Volume + # Note that this is a templated value + value: {username}{servername}-workspace + size: + # The Size of the Workspace Volume (in Gi) + value: '10' + mountPath: + # The Path that the Workspace Volume will be mounted + readOnly: true + value: /home/jovyan + accessModes: + # The Access Mode of the Workspace Volume + # Supported values: 'ReadWriteOnce', 'ReadWriteMany', 'ReadOnlyMany' + value: ReadWriteOnce + dataVolumes: + # List of additional Data Volumes to be attached to the user's Notebook + value: [] + # Each Data Volume is declared with the following attributes: + # Type, Name, Size, MountPath and Access Mode + # + # For example, a list with 2 Data Volumes: + #value: + # - value: + # type: + # value: New + # name: + # value: {username}{servername}-vol-1 + # size: + # value: '10' + # mountPath: + # value: /home/jovyan/{username}{servername}-vol-1 + # accessModes: + # value: ReadWriteOnce + # - value: + # type: + # value: New + # name: + # value: {username}{servername}-vol-2 + # size: + # value: '5' + # mountPath: + # value: /home/jovyan/{username}{servername}-vol-2 + # accessModes: + # value: ReadWriteOnce + # + # Uncomment the following line to make the Data Volumes list readonly + #readOnly: true + extraResources: + # Extra Resource Limits for user's Notebook + # Note that braces are escaped + value: "{{}}" diff --git a/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/ui/default/script.js b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/ui/default/script.js new file mode 100644 index 00000000..c674042e --- /dev/null +++ b/pipelines/azurepipeline/code/kflow/ks_app/vendor/kubeflow/jupyter/ui/default/script.js @@ -0,0 +1,653 @@ +// This function is executed when the document is ready +$(function() { + + // Toggle advanced options inside the Spawner form + $('#toggle_advanced_options').on('click', function(e) { + $('#advanced_fields').toggle(); + }); + + // Resize Spawner form to take up more page width + $('.row.col-sm-offset-2.col-sm-8').attr({ + 'class': 'row col-sm-offset-1 col-sm-10', + 'style': 'padding: 15px;' + }); + + // Update upper-right sign-out icon to FontAwesome 5 + $('.fa.fa-sign-out').attr('class', 'fas fa-sign-out-alt'); + + // Update Spawn button text upon form submission + if (formDefaults) { + $('#spawn_form').one('submit', function() { + $(this).find('input[type="submit"]') + .attr('disabled', true) + .val('Spawning...'); + }); + } else { + $("h1:contains('Spawner Options')" ).remove(); + $('#spawn_form').find('input[type="submit"]').remove(); + } + + // Configure Image input elements + setImageType(); + + // Dynamically change Workspace form fields behavior + setWorkspaceEventListeners(); + + // Fill the form with values defined in the YAML config file + setDefaultFormValues(); + + // Set tooltip to readOnly form fields + setTooltipsOnImmutable(); +}); + +// Dynamically update Image input field, based on radio button selection +function setImageType() { + imageType = $('#imageType').find('input:checked').val(); + if (imageType == 'standard') { + $('select[for=standardImages]') + .attr({'id': 'image', 
'name': 'image'}).css({'display': ''}); + $('input[for=customImage]') + .attr({'id': '', 'name': ''}).removeAttr('required').css({'display': 'none'}); + } else { + $('input[for=customImage]') + .attr({'id': 'image', 'name': 'image'}).css({'display': ''}); + $('select[for=standardImages]') + .attr({'id': '', 'name': ''}).removeAttr('required').css({'display': 'none'}); + } +} + +// Set default values to form fields +function setDefaultFormValues() { + + // If config.yaml is empty, no need to initialize anything + if (!formDefaults) { + return; + } + + if ('image' in formDefaults) { + // Set Container image dropdown list + if ('options' in formDefaults.image) { + formDefaults.image.options.forEach(function(item) { + $('#image').append($('