mirror of https://github.com/kubeflow/examples.git
Remove deprecated services and unnecessary folders
This commit is contained in:
parent 4dd4d1069a
commit 7b166d211f
@@ -1,87 +0,0 @@
# Kubeflow Container Build Pipeline

trigger:
- master

pr: none

stages:
- stage: ContainerConfig
  displayName: 'Configure and Register Containers'
  jobs:
  - job: Containers
    pool:
      name: default
    steps:
    - task: AzureCLI@1
      inputs:
        azureSubscription: 'Shared Data Platform - R+D (1308a130-d549-44e1-ba66-ce8c487d76e3)'
        scriptLocation: 'inlineScript'
        inlineScript: '
          sudo az acr login -n kubeflowregistry &&
          cd code &&
          cd preprocess &&
          sudo docker build -t kubeflowregistry.azurecr.io/kubeflow/preprocess:$BUILD_SOURCEVERSION . &&
          sudo docker push kubeflowregistry.azurecr.io/kubeflow/preprocess:$BUILD_SOURCEVERSION '
      displayName: 'Build & Push Preprocess Image'

    - task: AzureCLI@1
      inputs:
        azureSubscription: 'Shared Data Platform - R+D (1308a130-d549-44e1-ba66-ce8c487d76e3)'
        scriptLocation: 'inlineScript'
        inlineScript: '
          cd code &&
          cd training &&
          sudo docker build -t kubeflowregistry.azurecr.io/kubeflow/training:$BUILD_SOURCEVERSION . &&
          sudo docker push kubeflowregistry.azurecr.io/kubeflow/training:$BUILD_SOURCEVERSION '
      displayName: 'Build & Push Training Image'

    - task: AzureCLI@1
      inputs:
        azureSubscription: 'Shared Data Platform - R+D (1308a130-d549-44e1-ba66-ce8c487d76e3)'
        scriptLocation: 'inlineScript'
        inlineScript: '
          cd code &&
          cd register &&
          sudo docker build -t kubeflowregistry.azurecr.io/kubeflow/register:$BUILD_SOURCEVERSION . &&
          sudo docker push kubeflowregistry.azurecr.io/kubeflow/register:$BUILD_SOURCEVERSION '
      displayName: 'Build & Push Register Image'


# Moved KF step to build
- stage: KubeflowTrigger
  dependsOn: ContainerConfig
  displayName: 'Trigger Kubeflow Pipeline'
  variables:
  - group: kf-variables
  jobs:
  - job: Kubeflow
    pool:
      name: default
    steps:
    - task: AzureCLI@1
      env:
        KF_MAPPED_SERVICE_PRINCIPAL_PASSWORD: $(KF_SERVICE_PRINCIPAL_PASSWORD)
      inputs:
        azureSubscription: 'Shared Data Platform - R+D (1308a130-d549-44e1-ba66-ce8c487d76e3)'
        scriptLocation: 'inlineScript'
        inlineScript: |
          az aks get-credentials -g kubeflow-mlops-rg -n kubeflow-mlops-cluster
          kubectl port-forward --namespace kubeflow svc/ml-pipeline 8888:8888 &
          kubepid=$!

          sudo apt-get install python3-setuptools
          pip3 install wheel
          pip3 install kfp

          touch script.py
          echo "import kfp" >> script.py
          echo "client = kfp.Client(host='localhost:8888')" >> script.py
          echo "client.run_pipeline('$KF_EXPERIMENT_ID', 'Run ${BUILD_BUILDID}', params={'imagetag': '${BUILD_SOURCEVERSION}', 'tenant-id': '$KF_TENANT_ID', 'service-principal-id': '$KF_SERVICE_PRINCIPAL_ID', 'service-principal-password': '$KF_MAPPED_SERVICE_PRINCIPAL_PASSWORD', 'subscription-id': '$KF_SUBSCRIPTION_ID', 'resource-group': '$KF_RESOURCE_GROUP', 'workspace': '$KF_WORKSPACE', 'persistent-volume-name': '$KF_PERSISTENT_VOLUME_NAME', 'persistent-volume-path': '$KF_PERSISTENT_VOLUME_PATH', 'data-download': '$KF_DATA_DOWNLOAD', 'epochs': '$KF_EPOCHS', 'batch': '$KF_BATCH', 'learning-rate': '$KF_LEARNING_RATE', 'model-name': '$KF_MODEL_NAME'}, pipeline_id='$KF_PIPELINE_ID')" >> script.py

          cat script.py

          python3 script.py

          kill $kubepid
      displayName: 'Trigger Kubeflow Pipeline'
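The trigger step above assembles `script.py` one `echo` at a time. The same Kubeflow Pipelines call reads more naturally as a single heredoc; a minimal sketch, assuming the `kf-variables` group is mapped into the environment and the `ml-pipeline` port-forward is already running (the trimmed `params` dict is an illustration, the full set of `KF_*` parameters would be passed the same way):

```shell
# Sketch only: the same kfp.Client call as the pipeline step, as one heredoc.
python3 - <<EOF
import kfp

client = kfp.Client(host='localhost:8888')
client.run_pipeline(
    '$KF_EXPERIMENT_ID',
    'Run $BUILD_BUILDID',
    params={'imagetag': '$BUILD_SOURCEVERSION'},  # remaining KF_* params elided
    pipeline_id='$KF_PIPELINE_ID')
EOF
```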
@@ -1,9 +0,0 @@
FROM tensorflow/tensorflow:2.0.0a0-gpu-py3
RUN pip install azure-cli
RUN az extension add -n azure-cli-ml
RUN pip install --upgrade pip
COPY profile.sh /scripts/profile.sh
COPY inferenceconfig.json /scripts/inferenceconfig.json
COPY score.py /scripts/score.py
COPY environment.yml /scripts/environment.yml
ENTRYPOINT bash
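A hedged usage sketch for this image; the tag name and the interactive `az login` step are assumptions for illustration, not part of the original Dockerfile:

```shell
# Sketch: build the profiling/scoring image and poke at it interactively.
docker build -t kubeflow/profile .
docker run -it kubeflow/profile
# inside the container:
#   az login                   # authenticate before using the azure-cli-ml extension
#   bash /scripts/profile.sh   # the script copied in by the Dockerfile above
```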
@@ -1,45 +0,0 @@
apiVersion: kfdef.apps.kubeflow.org/v1alpha1
kind: KfDef
metadata:
  creationTimestamp: null
  name: kflow
  namespace: kubeflow
spec:
  appdir: /home/rebec/kubeflow-and-mlops/code/kflow
  componentParams:
    ambassador:
    - name: ambassadorServiceType
      value: NodePort
  components:
  - ambassador
  - argo
  - centraldashboard
  - jupyter-web-app
  - katib
  - metacontroller
  - notebook-controller
  - pipeline
  - pytorch-operator
  - tensorboard
  - tf-job-operator
  packages:
  - argo
  - common
  - examples
  - gcp
  - jupyter
  - katib
  - metacontroller
  - modeldb
  - mpi-job
  - pipeline
  - pytorch-job
  - seldon
  - tensorboard
  - tf-serving
  - tf-training
  repo: /home/rebec/kubeflow-and-mlops/code/kflow/.cache/v0.5.1/kubeflow
  useBasicAuth: false
  useIstio: false
  version: v0.5.1
status: {}
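A KfDef of this vintage (v0.5.1) was consumed by `kfctl`. A minimal sketch of how such a spec was generated and applied, assuming a v0.5-era `kfctl` binary on the path (exact flags varied between releases):

```shell
# Sketch, assuming kfctl v0.5.x command semantics.
kfctl init kflow --platform none   # writes app.yaml (a KfDef like the one above)
cd kflow
kfctl generate all -V              # expands the ksonnet components listed in spec.components
kfctl apply all -V                 # applies the expanded components to the cluster
```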
@@ -1,4 +0,0 @@
/lib
/.ksonnet/registries
/app.override.yaml
/.ks_environment
@@ -1,76 +0,0 @@
apiVersion: 0.3.0
environments:
  default:
    destination:
      namespace: kubeflow
      server: https://taco-cls-taco-rg-1308a1-e98d0802.hcp.eastus.azmk8s.io:443
    k8sVersion: v1.14.0
    path: default
kind: ksonnet.io/app
libraries:
  kubeflow/argo:
    name: argo
    registry: kubeflow
    version: ""
  kubeflow/common:
    name: common
    registry: kubeflow
    version: ""
  kubeflow/examples:
    name: examples
    registry: kubeflow
    version: ""
  kubeflow/gcp:
    name: gcp
    registry: kubeflow
    version: ""
  kubeflow/jupyter:
    name: jupyter
    registry: kubeflow
    version: ""
  kubeflow/katib:
    name: katib
    registry: kubeflow
    version: ""
  kubeflow/metacontroller:
    name: metacontroller
    registry: kubeflow
    version: ""
  kubeflow/modeldb:
    name: modeldb
    registry: kubeflow
    version: ""
  kubeflow/mpi-job:
    name: mpi-job
    registry: kubeflow
    version: ""
  kubeflow/pipeline:
    name: pipeline
    registry: kubeflow
    version: ""
  kubeflow/pytorch-job:
    name: pytorch-job
    registry: kubeflow
    version: ""
  kubeflow/seldon:
    name: seldon
    registry: kubeflow
    version: ""
  kubeflow/tensorboard:
    name: tensorboard
    registry: kubeflow
    version: ""
  kubeflow/tf-serving:
    name: tf-serving
    registry: kubeflow
    version: ""
  kubeflow/tf-training:
    name: tf-training
    registry: kubeflow
    version: ""
name: ks_app
registries:
  kubeflow:
    protocol: fs
    uri: /home/rebec/kubeflow-and-mlops/code/kflow/.cache/v0.5.1/kubeflow
version: 0.0.1
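This `app.yaml` is ksonnet's application manifest. Assuming the `ks` CLI is installed, the environment, packages, and components it declares can be inspected without applying anything:

```shell
ks env list                     # shows the "default" environment and its cluster server
ks pkg list                     # lists the kubeflow/* packages registered above
ks component list               # lists the generated components
ks show default -c ambassador   # renders a single component as YAML, no apply
```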
@@ -1,6 +0,0 @@
local env = std.extVar("__ksonnet/environments");
local params = std.extVar("__ksonnet/params").components.ambassador;

local ambassador = import "kubeflow/common/ambassador.libsonnet";
local instance = ambassador.new(env, params);
instance.list(instance.all)
@@ -1,6 +0,0 @@
local env = std.extVar("__ksonnet/environments");
local params = std.extVar("__ksonnet/params").components.argo;

local argo = import "kubeflow/argo/argo.libsonnet";
local instance = argo.new(env, params);
instance.list(instance.all)
@@ -1,6 +0,0 @@
local env = std.extVar("__ksonnet/environments");
local params = std.extVar("__ksonnet/params").components.centraldashboard;

local centraldashboard = import "kubeflow/common/centraldashboard.libsonnet";
local instance = centraldashboard.new(env, params);
instance.list(instance.all)
@@ -1,7 +0,0 @@
local env = std.extVar("__ksonnet/environments");
local params = std.extVar("__ksonnet/params").components["jupyter-web-app"];

local jupyter_ui = import "kubeflow/jupyter/jupyter-web-app.libsonnet";

local instance = jupyter_ui.new(env, params);
instance.list(instance.all)
@@ -1,16 +0,0 @@
local env = std.extVar("__ksonnet/environments");
local params = std.extVar("__ksonnet/params").components.katib;

local k = import "k.libsonnet";

local studyjobcontroller = import "kubeflow/katib/studyjobcontroller.libsonnet";
local suggestion = import "kubeflow/katib/suggestion.libsonnet";
local vizier = import "kubeflow/katib/vizier.libsonnet";

local namespace = env.namespace;

std.prune(
  k.core.v1.list.new(vizier.all(params, namespace))
  + k.core.v1.list.new(suggestion.all(params, namespace))
  + k.core.v1.list.new(studyjobcontroller.all(params, namespace))
)
@@ -1,6 +0,0 @@
local env = std.extVar("__ksonnet/environments");
local params = std.extVar("__ksonnet/params").components.metacontroller;

local metacontroller = import "kubeflow/metacontroller/metacontroller.libsonnet";
local instance = metacontroller.new(env, params);
instance.list(instance.all)
@@ -1,6 +0,0 @@
local env = std.extVar("__ksonnet/environments");
local params = std.extVar("__ksonnet/params").components["notebook-controller"];

local notebooks = import "kubeflow/jupyter/notebook_controller.libsonnet";
local instance = notebooks.new(env, params);
instance.list(instance.all)
@@ -1,93 +0,0 @@
{
  global: {},
  components: {
    // Component-level parameters, defined initially from 'ks prototype use ...'
    // Each object below should correspond to a component in the components/ directory
    ambassador: {
      ambassadorImage: 'quay.io/datawire/ambassador:0.37.0',
      ambassadorNodePort: 0,
      ambassadorServiceType: 'NodePort',
      name: 'ambassador',
      platform: 'none',
      replicas: 3,
    },
    argo: {
      artifactRepositoryAccessKeySecretKey: 'accesskey',
      artifactRepositoryAccessKeySecretName: 'mlpipeline-minio-artifact',
      artifactRepositoryBucket: 'mlpipeline',
      artifactRepositoryEndpoint: 'minio-service.kubeflow:9000',
      artifactRepositoryInsecure: 'true',
      artifactRepositoryKeyPrefix: 'artifacts',
      artifactRepositorySecretKeySecretKey: 'secretkey',
      artifactRepositorySecretKeySecretName: 'mlpipeline-minio-artifact',
      executorImage: 'argoproj/argoexec:v2.2.0',
      name: 'argo',
      uiImage: 'argoproj/argoui:v2.2.0',
      workflowControllerImage: 'argoproj/workflow-controller:v2.2.0',
    },
    centraldashboard: {
      image: 'gcr.io/kubeflow-images-public/centraldashboard:v0.5.0',
      name: 'centraldashboard',
    },
    "jupyter-web-app": {
      image: 'gcr.io/kubeflow-images-public/jupyter-web-app:v0.5.0',
      name: 'jupyter-web-app',
      policy: 'Always',
      port: '80',
      prefix: 'jupyter',
      rokSecretName: 'secret-rok-{username}',
      ui: 'default',
    },
    katib: {
      katibUIImage: 'gcr.io/kubeflow-images-public/katib/katib-ui:v0.1.2-alpha-156-g4ab3dbd',
      metricsCollectorImage: 'gcr.io/kubeflow-images-public/katib/metrics-collector:v0.1.2-alpha-156-g4ab3dbd',
      name: 'katib',
      studyJobControllerImage: 'gcr.io/kubeflow-images-public/katib/studyjob-controller:v0.1.2-alpha-156-g4ab3dbd',
      suggestionBayesianOptimizationImage: 'gcr.io/kubeflow-images-public/katib/suggestion-bayesianoptimization:v0.1.2-alpha-156-g4ab3dbd',
      suggestionGridImage: 'gcr.io/kubeflow-images-public/katib/suggestion-grid:v0.1.2-alpha-156-g4ab3dbd',
      suggestionHyperbandImage: 'gcr.io/kubeflow-images-public/katib/suggestion-hyperband:v0.1.2-alpha-156-g4ab3dbd',
      suggestionRandomImage: 'gcr.io/kubeflow-images-public/katib/suggestion-random:v0.1.2-alpha-156-g4ab3dbd',
      vizierCoreImage: 'gcr.io/kubeflow-images-public/katib/vizier-core:v0.1.2-alpha-156-g4ab3dbd',
      vizierCoreRestImage: 'gcr.io/kubeflow-images-public/katib/vizier-core-rest:v0.1.2-alpha-156-g4ab3dbd',
      vizierDbImage: 'mysql:8.0.3',
    },
    metacontroller: {
      image: 'metacontroller/metacontroller:v0.3.0',
      name: 'metacontroller',
    },
    "notebook-controller": {
      controllerImage: 'gcr.io/kubeflow-images-public/notebook-controller:v20190401-v0.4.0-rc.1-308-g33618cc9-e3b0c4',
      injectGcpCredentials: 'true',
      name: 'notebook-controller',
    },
    pipeline: {
      name: 'pipeline',
    },
    "pytorch-operator": {
      cloud: 'null',
      deploymentNamespace: 'null',
      deploymentScope: 'cluster',
      disks: 'null',
      name: 'pytorch-operator',
      pytorchDefaultImage: 'null',
      pytorchJobImage: 'gcr.io/kubeflow-images-public/pytorch-operator:v0.5.0',
    },
    tensorboard: {
      defaultTbImage: 'tensorflow/tensorflow:1.8.0',
      logDir: 'logs',
      name: 'tensorboard',
      servicePort: 9000,
      serviceType: 'ClusterIP',
      targetPort: 6006,
    },
    "tf-job-operator": {
      cloud: 'null',
      deploymentNamespace: 'null',
      deploymentScope: 'cluster',
      name: 'tf-job-operator',
      tfDefaultImage: 'null',
      tfJobImage: 'gcr.io/kubeflow-images-public/tf_operator:v0.5.0',
      tfJobUiServiceType: 'ClusterIP',
    },
  },
}
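Values in this `params.libsonnet` are normally edited with the `ks param` subcommands rather than by hand; a quick sketch, assuming the `ks` CLI:

```shell
ks param list ambassador                                     # show the ambassador params above
ks param set ambassador replicas 1                           # rewrites params.libsonnet in place
ks param set jupyter-web-app prefix jupyter --env default    # per-environment override
```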
@@ -1,14 +0,0 @@
local env = std.extVar("__ksonnet/environments");
local params = std.extVar("__ksonnet/params").components.pipeline;

local k = import "k.libsonnet";
local pipelineBase = import "kubeflow/pipeline/pipeline.libsonnet";

// updatedParams includes the namespace from env by default.
local updatedParams = params + env;

local pipeline = pipelineBase {
  params+: updatedParams,
};

std.prune(k.core.v1.list.new(pipeline.parts.all))
@@ -1,7 +0,0 @@
local env = std.extVar("__ksonnet/environments");
local params = std.extVar("__ksonnet/params").components["pytorch-operator"];

local k = import "k.libsonnet";
local operator = import "kubeflow/pytorch-job/pytorch-operator.libsonnet";

k.core.v1.list.new(operator.all(params, env))
@@ -1,6 +0,0 @@
local env = std.extVar("__ksonnet/environments");
local params = std.extVar("__ksonnet/params").components.tensorboard;

local tensorboard = import "kubeflow/tensorboard/tensorboard.libsonnet";
local instance = tensorboard.new(env, params);
instance.list(instance.all)
@@ -1,6 +0,0 @@
local env = std.extVar("__ksonnet/environments");
local params = std.extVar("__ksonnet/params").components["tf-job-operator"];

local tfJobOperator = import "kubeflow/tf-training/tf-job-operator.libsonnet";
local instance = tfJobOperator.new(env, params);
instance.list(instance.all)
@@ -1,4 +0,0 @@
local components = std.extVar("__ksonnet/components");
components + {
  // Insert user-specified overrides here.
}
@@ -1,2 +0,0 @@
{
}
@@ -1,9 +0,0 @@
local base = import "base.libsonnet";
// uncomment if you reference ksonnet-lib
// local k = import "k.libsonnet";
// local deployment = k.apps.v1beta2.deployment;

base + {
  // Insert user-specified overrides here. For example if a component is named \"nginx-deployment\", you might have something like:\n")
  // "nginx-deployment"+: deployment.mixin.metadata.withLabels({foo: "bar"})
}
@@ -1,17 +0,0 @@
local params = std.extVar("__ksonnet/params");
local globals = import "globals.libsonnet";
local envParams = params + {
  components +: {
    // Insert component parameter overrides here. Ex:
    // guestbook +: {
    //   name: "guestbook-dev",
    //   replicas: params.global.replicas,
    // },
  },
};

{
  components: {
    [x]: envParams.components[x] + globals, for x in std.objectFields(envParams.components)
  },
}
@@ -1,3 +0,0 @@
approvers:
- IronPan
reviewers:
@@ -1,35 +0,0 @@
# Argo

> Prototypes for deploying Argo and running Argo Workflows

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*

- [Quickstart](#quickstart)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

## Quickstart

*The following commands use the `io.ksonnet.pkg.argo` prototype to deploy the Argo Workflow operator on your Kubernetes cluster.*

First, create a cluster and install the ksonnet CLI (see the root-level [README.md](../../README.md)).

If you haven't yet created a [ksonnet application](https://ksonnet.io/docs/tutorial#1-initialize-your-app), do so using `ks init <app-name>`.

Finally, in the ksonnet application directory, run the following:

```shell
# Install the kubeflow argo package
$ ks pkg install kubeflow/argo

# Expand the prototype as a jsonnet file and place it in the
# `components/` directory. (YAML and JSON are also available.)
$ ks prototype use io.ksonnet.pkg.argo argo \
  --namespace default \
  --name argo

# Apply to the server.
$ ks apply default -c argo
```
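Once the prototype is applied, a quick smoke test is to submit a trivial Workflow against the CRD it registers. A sketch, assuming `kubectl` points at the same cluster; the `docker/whalesay` image is the stock Argo example, not part of this package:

```shell
# Sketch: submit a minimal Workflow and confirm the controller picks it up.
kubectl apply -n default -f - <<'EOF'
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: hello-
spec:
  entrypoint: say
  templates:
  - name: say
    container:
      image: docker/whalesay:latest
      command: [cowsay, "hello argo"]
EOF
kubectl get workflows -n default
```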
@@ -1,485 +0,0 @@
{
  // TODO(jlewi): Do we need to add parts corresponding to a service account and cluster binding role?
  // see https://github.com/argoproj/argo/blob/master/cmd/argo/commands/install.go
  local k = import "k.libsonnet",
  local util = import "kubeflow/common/util.libsonnet",
  new(_env, _params):: {
    local params = _params + _env,

    // CRD's are not namespace scoped; see
    // https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/
    local workflowCRD = {
      apiVersion: "apiextensions.k8s.io/v1beta1",
      kind: "CustomResourceDefinition",
      metadata: {
        name: "workflows.argoproj.io",
      },
      spec: {
        group: "argoproj.io",
        names: {
          kind: "Workflow",
          listKind: "WorkflowList",
          plural: "workflows",
          shortNames: [
            "wf",
          ],
          singular: "workflow",
        },
        scope: "Namespaced",
        version: "v1alpha1",
      },
    },  // crd
    workflowCRD:: workflowCRD,

    // Deploy the controller
    local workflowController = {
      apiVersion: "extensions/v1beta1",
      kind: "Deployment",
      metadata: {
        labels: {
          app: "workflow-controller",
        },
        name: "workflow-controller",
        namespace: params.namespace,
      },
      spec: {
        progressDeadlineSeconds: 600,
        replicas: 1,
        revisionHistoryLimit: 10,
        selector: {
          matchLabels: {
            app: "workflow-controller",
          },
        },
        strategy: {
          rollingUpdate: {
            maxSurge: "25%",
            maxUnavailable: "25%",
          },
          type: "RollingUpdate",
        },
        template: {
          metadata: {
            creationTimestamp: null,
            labels: {
              app: "workflow-controller",
            },
          },
          spec: {
            containers: [
              {
                args: [
                  "--configmap",
                  "workflow-controller-configmap",
                ],
                command: [
                  "workflow-controller",
                ],
                env: [
                  {
                    name: "ARGO_NAMESPACE",
                    valueFrom: {
                      fieldRef: {
                        apiVersion: "v1",
                        fieldPath: "metadata.namespace",
                      },
                    },
                  },
                ],
                image: params.workflowControllerImage,
                imagePullPolicy: "IfNotPresent",
                name: "workflow-controller",
                resources: {},
                terminationMessagePath: "/dev/termination-log",
                terminationMessagePolicy: "File",
              },
            ],
            dnsPolicy: "ClusterFirst",
            restartPolicy: "Always",
            schedulerName: "default-scheduler",
            securityContext: {},
            serviceAccount: "argo",
            serviceAccountName: "argo",
            terminationGracePeriodSeconds: 30,
          },
        },
      },
    },  // deploy
    workflowController:: workflowController,

    local argoUI = {
      apiVersion: "extensions/v1beta1",
      kind: "Deployment",
      metadata: {
        labels: {
          app: "argo-ui",
        },
        name: "argo-ui",
        namespace: params.namespace,
      },
      spec: {
        progressDeadlineSeconds: 600,
        replicas: 1,
        revisionHistoryLimit: 10,
        selector: {
          matchLabels: {
            app: "argo-ui",
          },
        },
        strategy: {
          rollingUpdate: {
            maxSurge: "25%",
            maxUnavailable: "25%",
          },
          type: "RollingUpdate",
        },
        template: {
          metadata: {
            creationTimestamp: null,
            labels: {
              app: "argo-ui",
            },
          },
          spec: {
            containers: [
              {
                env: [
                  {
                    name: "ARGO_NAMESPACE",
                    valueFrom: {
                      fieldRef: {
                        apiVersion: "v1",
                        fieldPath: "metadata.namespace",
                      },
                    },
                  },
                  {
                    name: "IN_CLUSTER",
                    value: "true",
                  },
                  {
                    name: "BASE_HREF",
                    value: "/argo/",
                  },
                ],
                image: params.uiImage,
                imagePullPolicy: "IfNotPresent",
                name: "argo-ui",
                resources: {},
                terminationMessagePath: "/dev/termination-log",
                terminationMessagePolicy: "File",
                readinessProbe: {
                  httpGet: {
                    path: "/",
                    port: 8001,
                  },
                },
              },
            ],
            dnsPolicy: "ClusterFirst",
            restartPolicy: "Always",
            schedulerName: "default-scheduler",
            securityContext: {},
            serviceAccount: "argo-ui",
            serviceAccountName: "argo-ui",
            terminationGracePeriodSeconds: 30,
          },
        },
      },
    },  // deployUi
    argoUI:: argoUI,

    local argUIService = {
      apiVersion: "v1",
      kind: "Service",
      metadata: {
        labels: {
          app: "argo-ui",
        },
        name: "argo-ui",
        namespace: params.namespace,
        annotations: {
          "getambassador.io/config":
            std.join("\n", [
              "---",
              "apiVersion: ambassador/v0",
              "kind: Mapping",
              "name: argo-ui-mapping",
              "prefix: /argo/",
              "service: argo-ui." + params.namespace,
            ]),
        },  //annotations
      },
      spec: {
        ports: [
          {
            port: 80,
            targetPort: 8001,
          },
        ],
        selector: {
          app: "argo-ui",
        },
        sessionAffinity: "None",
        type: "NodePort",
      },
    },
    argUIService:: argUIService,

    local workflowControllerConfigmap = {
      apiVersion: "v1",
      data: {
        config: std.format(|||
          {
          executorImage: %s,
          artifactRepository:
          {
              s3: {
                  bucket: %s,
                  keyPrefix: %s,
                  endpoint: %s,
                  insecure: %s,
                  accessKeySecret: {
                      name: %s,
                      key: %s
                  },
                  secretKeySecret: {
                      name: %s,
                      key: %s
                  }
              }
          }
          }
        |||,
        [
          params.executorImage,
          params.artifactRepositoryBucket,
          params.artifactRepositoryKeyPrefix,
          params.artifactRepositoryEndpoint,
          params.artifactRepositoryInsecure,
          params.artifactRepositoryAccessKeySecretName,
          params.artifactRepositoryAccessKeySecretKey,
          params.artifactRepositorySecretKeySecretName,
          params.artifactRepositorySecretKeySecretKey,
        ]),
      },
      kind: "ConfigMap",
      metadata: {
        name: "workflow-controller-configmap",
        namespace: params.namespace,
      },
    },
    workflowControllerConfigmap:: workflowControllerConfigmap,

    local argoServiceAccount = {
      apiVersion: "v1",
      kind: "ServiceAccount",
      metadata: {
        name: "argo",
        namespace: params.namespace,
      },
    },  // service account
    argoServiceAccount:: argoServiceAccount,

    // Keep in sync with https://github.com/argoproj/argo/blob/master/cmd/argo/commands/const.go#L20
    // Permissions need to be cluster wide for the workflow controller to be able to process workflows
    // in other namespaces. We could potentially use the ConfigMap of the workflow-controller to
    // scope it to a particular namespace in which case we might be able to restrict the permissions
    // to a particular namespace.
    local argoClusterRole = {
      apiVersion: "rbac.authorization.k8s.io/v1beta1",
      kind: "ClusterRole",
      metadata: {
        labels: {
          app: "argo",
        },
        name: "argo",
      },
      rules: [
        {
          apiGroups: [""],
          resources: [
            "pods",
            "pods/exec",
          ],
          verbs: [
            "create",
            "get",
            "list",
            "watch",
            "update",
            "patch",
          ],
        },
        {
          apiGroups: [""],
          resources: [
            "configmaps",
          ],
          verbs: [
            "get",
            "watch",
            "list",
          ],
        },
        {
          apiGroups: [
            "",
          ],
          resources: [
            "persistentvolumeclaims",
          ],
          verbs: [
            "create",
            "delete",
          ],
        },
        {
          apiGroups: [
            "argoproj.io",
          ],
          resources: [
            "workflows",
          ],
          verbs: [
            "get",
            "list",
            "watch",
            "update",
            "patch",
          ],
        },
      ],
    },  // operator-role
    argoClusterRole:: argoClusterRole,

    local argoClusterRoleBinding = {
      apiVersion: "rbac.authorization.k8s.io/v1beta1",
      kind: "ClusterRoleBinding",
      metadata: {
        labels: {
          app: "argo",
        },
        name: "argo",
      },
      roleRef: {
        apiGroup: "rbac.authorization.k8s.io",
        kind: "ClusterRole",
        name: "argo",
      },
      subjects: [
        {
          kind: "ServiceAccount",
          name: "argo",
          namespace: params.namespace,
        },
      ],
    },  // role binding
    argoClusterRoleBinding:: argoClusterRoleBinding,

    local argoUIServiceAccount = {
      apiVersion: "v1",
      kind: "ServiceAccount",
      metadata: {
        name: "argo-ui",
        namespace: params.namespace,
      },
    },  // service account
    argoUIServiceAccount:: argoUIServiceAccount,

    // Keep in sync with https://github.com/argoproj/argo/blob/master/cmd/argo/commands/const.go#L44
    // Permissions need to be cluster wide for the workflow controller to be able to process workflows
    // in other namespaces. We could potentially use the ConfigMap of the workflow-controller to
    // scope it to a particular namespace in which case we might be able to restrict the permissions
    // to a particular namespace.
    local argoUIRole = {
      apiVersion: "rbac.authorization.k8s.io/v1beta1",
      kind: "ClusterRole",
      metadata: {
        labels: {
          app: "argo",
        },
        name: "argo-ui",
      },
      rules: [
        {
          apiGroups: [""],
          resources: [
            "pods",
            "pods/exec",
            "pods/log",
          ],
          verbs: [
            "get",
            "list",
            "watch",
          ],
        },
        {
          apiGroups: [""],
          resources: [
            "secrets",
          ],
          verbs: [
            "get",
          ],
        },
        {
          apiGroups: [
            "argoproj.io",
          ],
          resources: [
            "workflows",
          ],
          verbs: [
            "get",
            "list",
            "watch",
          ],
        },
      ],
    },  // operator-role
    argoUIRole:: argoUIRole,

    local argUIClusterRoleBinding = {
      apiVersion: "rbac.authorization.k8s.io/v1beta1",
      kind: "ClusterRoleBinding",
      metadata: {
        labels: {
          app: "argo-ui",
        },
        name: "argo-ui",
      },
      roleRef: {
        apiGroup: "rbac.authorization.k8s.io",
        kind: "ClusterRole",
        name: "argo-ui",
      },
      subjects: [
        {
          kind: "ServiceAccount",
          name: "argo-ui",
          namespace: params.namespace,
        },
      ],
    },  // role binding
    argUIClusterRoleBinding:: argUIClusterRoleBinding,

    parts: self,
    all:: [
      self.workflowCRD,
      self.workflowController,
      self.argoUI,
      self.argUIService,
      self.workflowControllerConfigmap,
      self.argoServiceAccount,
      self.argoClusterRole,
      self.argoClusterRoleBinding,
      self.argoUIServiceAccount,
      self.argoUIRole,
      self.argUIClusterRoleBinding,
    ],

    list(obj=self.all):: util.list(obj),
  },
}
@@ -1,35 +0,0 @@
{
  "name": "argo",
  "apiVersion": "0.0.1",
  "kind": "ksonnet.io/parts",
  "description": "Prototypes for running Argo workflows.\n",
  "author": "kubeflow team <kubeflow-team@google.com>",
  "contributors": [
    {
      "name": "Jeremy Lewi",
      "email": "jlewi@google.com"
    }
  ],
  "repository": {
    "type": "git",
    "url": "https://github.com/kubeflow/kubeflow"
  },
  "bugs": {
    "url": "https://github.com/kubeflow/kubeflow/issues"
  },
  "keywords": [
    "kubeflow",
    "argo",
    "workflows"
  ],
  "quickStart": {
    "prototype": "io.ksonnet.pkg.argo",
    "componentName": "argo",
    "flags": {
      "name": "argo",
      "namespace": ""
    },
    "comment": "Deploy Argo"
  },
  "license": "Apache 2.0"
}
@@ -1,20 +0,0 @@
// @apiVersion 0.1
// @name io.ksonnet.pkg.argo
// @description Deploy Argo workflow engine
// @shortDescription Argo workflow engine
// @param name string Name to give to the component
// @optionalParam workflowControllerImage string argoproj/workflow-controller:v2.2.0 workflowControllerImage
// @optionalParam uiImage string argoproj/argoui:v2.2.0 uiImage
// @optionalParam executorImage string argoproj/argoexec:v2.2.0 executorImage
// @optionalParam artifactRepositoryKeyPrefix string artifacts artifactRepositoryKeyPrefix
// @optionalParam artifactRepositoryEndpoint string minio-service.kubeflow:9000 artifactRepositoryEndpoint
// @optionalParam artifactRepositoryBucket string mlpipeline artifactRepositoryBucket
// @optionalParam artifactRepositoryInsecure string true artifactRepositoryInsecure
// @optionalParam artifactRepositoryAccessKeySecretName string mlpipeline-minio-artifact artifactRepositoryAccessKeySecretName
// @optionalParam artifactRepositoryAccessKeySecretKey string accesskey artifactRepositoryAccessKeySecretKey
// @optionalParam artifactRepositorySecretKeySecretName string mlpipeline-minio-artifact artifactRepositorySecretKeySecretName
// @optionalParam artifactRepositorySecretKeySecretKey string secretkey artifactRepositorySecretKeySecretKey

local argo = import "kubeflow/argo/argo.libsonnet";
local instance = argo.new(env, params);
instance.list(instance.all)
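The `// @optionalParam` annotations above become CLI flags when the prototype is expanded; for example (the flag values here just restate the defaults):

```shell
ks prototype use io.ksonnet.pkg.argo argo \
  --name argo \
  --executorImage argoproj/argoexec:v2.2.0 \
  --artifactRepositoryBucket mlpipeline
```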
@@ -1,452 +0,0 @@
local argo = import "kubeflow/argo/argo.libsonnet";
local testSuite = import "kubeflow/common/testsuite.libsonnet";

local params = {
  name: "argo",
  workflowControllerImage: "argoproj/workflow-controller:v2.2.0",
  uiImage: "argoproj/argoui:v2.2.0",
  executorImage: "argoproj/argoexec:v2.2.0",
  artifactRepositoryKeyPrefix: "artifacts",
  artifactRepositoryEndpoint: "minio-service.kubeflow:9000",
  artifactRepositoryBucket: "mlpipeline",
  artifactRepositoryInsecure: "true",
  artifactRepositoryAccessKeySecretName: "mlpipeline-minio-artifact",
  artifactRepositoryAccessKeySecretKey: "accesskey",
  artifactRepositorySecretKeySecretName: "mlpipeline-minio-artifact",
  artifactRepositorySecretKeySecretKey: "secretkey",
};
local env = {
  namespace: "kubeflow",
};

local instance = argo.new(env, params);

local testCases = [
  {
    actual: instance.parts.workflowCRD,
    expected: {
      apiVersion: "apiextensions.k8s.io/v1beta1",
      kind: "CustomResourceDefinition",
      metadata: {
        name: "workflows.argoproj.io",
      },
      spec: {
        group: "argoproj.io",
        names: {
          kind: "Workflow",
          listKind: "WorkflowList",
          plural: "workflows",
          shortNames: [
            "wf",
          ],
          singular: "workflow",
        },
        scope: "Namespaced",
        version: "v1alpha1",
      },
    },
  },
  {
    actual: instance.parts.workflowController,
    expected: {
      apiVersion: "extensions/v1beta1",
      kind: "Deployment",
      metadata: {
        labels: {
          app: "workflow-controller",
        },
        name: "workflow-controller",
        namespace: "kubeflow",
      },
      spec: {
        progressDeadlineSeconds: 600,
        replicas: 1,
        revisionHistoryLimit: 10,
        selector: {
          matchLabels: {
            app: "workflow-controller",
          },
        },
        strategy: {
          rollingUpdate: {
            maxSurge: "25%",
            maxUnavailable: "25%",
          },
          type: "RollingUpdate",
        },
        template: {
          metadata: {
            creationTimestamp: null,
            labels: {
              app: "workflow-controller",
            },
          },
          spec: {
            containers: [
              {
                args: [
                  "--configmap",
                  "workflow-controller-configmap",
                ],
                command: [
                  "workflow-controller",
                ],
                env: [
                  {
                    name: "ARGO_NAMESPACE",
                    valueFrom: {
                      fieldRef: {
                        apiVersion: "v1",
                        fieldPath: "metadata.namespace",
                      },
                    },
                  },
                ],
                image: "argoproj/workflow-controller:v2.2.0",
                imagePullPolicy: "IfNotPresent",
                name: "workflow-controller",
                resources: {},
                terminationMessagePath: "/dev/termination-log",
                terminationMessagePolicy: "File",
              },
            ],
            dnsPolicy: "ClusterFirst",
            restartPolicy: "Always",
            schedulerName: "default-scheduler",
            securityContext: {},
            serviceAccount: "argo",
            serviceAccountName: "argo",
            terminationGracePeriodSeconds: 30,
          },
        },
      },
    },
  },
  {
    actual: instance.parts.argoUI,
    expected: {
      apiVersion: "extensions/v1beta1",
      kind: "Deployment",
      metadata: {
        labels: {
          app: "argo-ui",
        },
        name: "argo-ui",
        namespace: "kubeflow",
      },
      spec: {
        progressDeadlineSeconds: 600,
        replicas: 1,
        revisionHistoryLimit: 10,
        selector: {
          matchLabels: {
            app: "argo-ui",
          },
        },
        strategy: {
          rollingUpdate: {
            maxSurge: "25%",
            maxUnavailable: "25%",
          },
          type: "RollingUpdate",
        },
        template: {
          metadata: {
            creationTimestamp: null,
            labels: {
              app: "argo-ui",
            },
          },
          spec: {
            containers: [
              {
                env: [
                  {
                    name: "ARGO_NAMESPACE",
                    valueFrom: {
                      fieldRef: {
                        apiVersion: "v1",
                        fieldPath: "metadata.namespace",
                      },
                    },
                  },
                  {
                    name: "IN_CLUSTER",
                    value: "true",
                  },
                  {
                    name: "BASE_HREF",
                    value: "/argo/",
                  },
                ],
                image: "argoproj/argoui:v2.2.0",
                imagePullPolicy: "IfNotPresent",
                name: "argo-ui",
                readinessProbe: {
                  httpGet: {
                    path: "/",
                    port: 8001,
                  },
                },
                resources: {},
                terminationMessagePath: "/dev/termination-log",
                terminationMessagePolicy: "File",
              },
            ],
            dnsPolicy: "ClusterFirst",
            restartPolicy: "Always",
            schedulerName: "default-scheduler",
            securityContext: {},
            serviceAccount: "argo-ui",
            serviceAccountName: "argo-ui",
            terminationGracePeriodSeconds: 30,
          },
        },
      },
    },
  },
  {
    actual: instance.parts.argUIService,
    expected: {
      apiVersion: "v1",
      kind: "Service",
      metadata: {
        annotations: {
          "getambassador.io/config": "---\napiVersion: ambassador/v0\nkind: Mapping\nname: argo-ui-mapping\nprefix: /argo/\nservice: argo-ui.kubeflow",
        },
        labels: {
          app: "argo-ui",
        },
        name: "argo-ui",
        namespace: "kubeflow",
      },
      spec: {
        ports: [
          {
            port: 80,
            targetPort: 8001,
          },
        ],
        selector: {
          app: "argo-ui",
        },
        sessionAffinity: "None",
        type: "NodePort",
      },
    },
  },
  {
    actual: instance.parts.workflowControllerConfigmap,
    expected: {
      apiVersion: "v1",
      data: {
        config: "{\nexecutorImage: argoproj/argoexec:v2.2.0,\nartifactRepository:\n{\n s3: {\n bucket: mlpipeline,\n keyPrefix: artifacts,\n endpoint: minio-service.kubeflow:9000,\n insecure: true,\n accessKeySecret: {\n name: mlpipeline-minio-artifact,\n key: accesskey\n },\n secretKeySecret: {\n name: mlpipeline-minio-artifact,\n key: secretkey\n }\n }\n}\n}\n",
      },
      kind: "ConfigMap",
      metadata: {
        name: "workflow-controller-configmap",
        namespace: "kubeflow",
      },
    },
  },
  {
    actual: instance.parts.argoServiceAccount,
    expected: {
      apiVersion: "v1",
      kind: "ServiceAccount",
      metadata: {
        name: "argo",
        namespace: "kubeflow",
      },
    },
  },
  {
    actual: instance.parts.argoClusterRole,
    expected: {
      apiVersion: "rbac.authorization.k8s.io/v1beta1",
      kind: "ClusterRole",
      metadata: {
        labels: {
          app: "argo",
        },
        name: "argo",
      },
      rules: [
        {
          apiGroups: [
            "",
          ],
          resources: [
            "pods",
            "pods/exec",
          ],
          verbs: [
            "create",
            "get",
            "list",
            "watch",
            "update",
            "patch",
          ],
        },
        {
          apiGroups: [
            "",
          ],
          resources: [
            "configmaps",
          ],
          verbs: [
            "get",
            "watch",
            "list",
          ],
        },
        {
          apiGroups: [
            "",
          ],
          resources: [
            "persistentvolumeclaims",
          ],
          verbs: [
            "create",
            "delete",
          ],
        },
        {
          apiGroups: [
            "argoproj.io",
          ],
          resources: [
            "workflows",
          ],
          verbs: [
            "get",
            "list",
            "watch",
            "update",
            "patch",
          ],
        },
      ],
    },
  },
  {
    actual: instance.parts.argoClusterRoleBinding,
    expected: {
      apiVersion: "rbac.authorization.k8s.io/v1beta1",
      kind: "ClusterRoleBinding",
      metadata: {
        labels: {
          app: "argo",
        },
        name: "argo",
      },
      roleRef: {
        apiGroup: "rbac.authorization.k8s.io",
        kind: "ClusterRole",
        name: "argo",
      },
      subjects: [
        {
          kind: "ServiceAccount",
          name: "argo",
          namespace: "kubeflow",
        },
      ],
    },
  },
  {
    actual: instance.parts.argoUIServiceAccount,
    expected: {
      apiVersion: "v1",
      kind: "ServiceAccount",
      metadata: {
        name: "argo-ui",
        namespace: "kubeflow",
      },
    },
  },
  {
    actual: instance.parts.argoUIRole,
    expected: {
      apiVersion: "rbac.authorization.k8s.io/v1beta1",
      kind: "ClusterRole",
      metadata: {
        labels: {
          app: "argo",
        },
        name: "argo-ui",
      },
      rules: [
        {
          apiGroups: [
            "",
          ],
          resources: [
            "pods",
            "pods/exec",
            "pods/log",
          ],
          verbs: [
            "get",
            "list",
            "watch",
          ],
        },
        {
          apiGroups: [
            "",
          ],
          resources: [
            "secrets",
          ],
          verbs: [
            "get",
          ],
        },
        {
          apiGroups: [
            "argoproj.io",
          ],
          resources: [
            "workflows",
          ],
          verbs: [
            "get",
            "list",
            "watch",
          ],
        },
      ],
    },
  },
  {
    actual: instance.parts.argUIClusterRoleBinding,
    expected: {
      apiVersion: "rbac.authorization.k8s.io/v1beta1",
      kind: "ClusterRoleBinding",
      metadata: {
        labels: {
          app: "argo-ui",
        },
        name: "argo-ui",
      },
      roleRef: {
        apiGroup: "rbac.authorization.k8s.io",
        kind: "ClusterRole",
        name: "argo-ui",
      },
      subjects: [
        {
          kind: "ServiceAccount",
          name: "argo-ui",
          namespace: "kubeflow",
        },
      ],
    },
  },
];

testSuite.run(testCases)
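A sketch of evaluating this test file directly, assuming a `jsonnet` binary and the kubeflow ksonnet packages on the import path; the exact runner kubeflow used may have differed:

```shell
# Sketch: evaluate the test suite; testsuite.run reports the result per case.
# -J adds library search paths so the kubeflow/* imports resolve.
jsonnet -J kubeflow -J ks_app/vendor tests/argo_test.jsonnet
```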
@@ -1,3 +0,0 @@
approvers:
- gaocegege
reviewers:
@@ -1,11 +0,0 @@
# common

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*

- [common](#common)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

This ksonnet package contains kubeflow common prototypes such as ambassador, spartakus, etc. You can install it with `ks pkg install kubeflow/common`. `ks prototype list` lists the available prototypes, and `ks prototype describe <name>` describes a given prototype.
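For example, assuming the app was initialized with `ks init` and a registry URI or local path is at hand (the placeholder below is hypothetical):

```shell
ks registry add kubeflow <registry-uri-or-path>   # e.g. the local .cache path used in app.yaml above
ks pkg install kubeflow/common
ks prototype list
ks prototype describe io.ksonnet.pkg.kubeflow
```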
@@ -1,226 +0,0 @@
{
  local k = import "k.libsonnet",
  local util = import "kubeflow/common/util.libsonnet",
  new(_env, _params):: {
    local params = _params + _env,

    local ambassadorService = {
      apiVersion: "v1",
      kind: "Service",
      metadata: {
        labels: {
          service: "ambassador",
        },
        name: "ambassador",
        namespace: params.namespace,
      },
      spec: {
        ports: [
          {
            name: "ambassador",
            port: 80,
            targetPort: 80,
            [if (params.ambassadorServiceType == 'NodePort') &&
                (params.ambassadorNodePort >= 30000) &&
                (params.ambassadorNodePort <= 32767)
            then 'nodePort']: params.ambassadorNodePort,
          },
        ],
        selector: {
          service: "ambassador",
        },
        type: params.ambassadorServiceType,
      },
    },  // service
    ambassadorService:: ambassadorService,

    local adminService = {
      apiVersion: "v1",
      kind: "Service",
      metadata: {
        labels: {
          service: "ambassador-admin",
        },
        name: "ambassador-admin",
        namespace: params.namespace,
      },
      spec: {
        ports: [
          {
            name: "ambassador-admin",
            port: 8877,
            targetPort: 8877,
          },
        ],
        selector: {
          service: "ambassador",
        },
        type: "ClusterIP",
      },
    },  // adminService
    adminService:: adminService,

    local ambassadorRole = {
      apiVersion: "rbac.authorization.k8s.io/v1beta1",
      kind: "ClusterRole",
      metadata: {
        name: "ambassador",
      },
      rules: [
        {
          apiGroups: [
            "",
          ],
          resources: [
            "services",
          ],
          verbs: [
            "get",
            "list",
            "watch",
          ],
        },
        {
          apiGroups: [
            "",
          ],
          resources: [
            "configmaps",
          ],
          verbs: [
            "create",
            "update",
            "patch",
            "get",
            "list",
            "watch",
          ],
        },
        {
          apiGroups: [
            "",
          ],
          resources: [
            "secrets",
          ],
          verbs: [
            "get",
            "list",
            "watch",
          ],
        },
      ],
    },  // role
    ambassadorRole:: ambassadorRole,

    local ambassadorServiceAccount = {
      apiVersion: "v1",
      kind: "ServiceAccount",
      metadata: {
        name: "ambassador",
        namespace: params.namespace,
      },
    },  // serviceAccount
    ambassadorServiceAccount:: ambassadorServiceAccount,

    local ambassadorRoleBinding = {
      apiVersion: "rbac.authorization.k8s.io/v1beta1",
      kind: "ClusterRoleBinding",
      metadata: {
        name: "ambassador",
      },
      roleRef: {
        apiGroup: "rbac.authorization.k8s.io",
        kind: "ClusterRole",
        name: "ambassador",
      },
      subjects: [
        {
          kind: "ServiceAccount",
          name: "ambassador",
          namespace: params.namespace,
        },
      ],
    },  // roleBinding
    ambassadorRoleBinding:: ambassadorRoleBinding,

    local ambassadorDeployment = {
      apiVersion: "apps/v1beta1",
      kind: "Deployment",
      metadata: {
        name: "ambassador",
        namespace: params.namespace,
      },
      spec: {
        replicas: params.replicas,
        template: {
          metadata: {
            labels: {
              service: "ambassador",
            },
            namespace: params.namespace,
          },
          spec: {
            containers: [
              {
                env: [
                  {
                    name: "AMBASSADOR_NAMESPACE",
                    valueFrom: {
                      fieldRef: {
                        fieldPath: "metadata.namespace",
                      },
                    },
                  },
                ],
                image: params.ambassadorImage,
                name: "ambassador",
                resources: {
                  limits: {
                    cpu: 1,
                    memory: "400Mi",
                  },
                  requests: {
                    cpu: "200m",
                    memory: "100Mi",
                  },
                },
                readinessProbe: {
                  httpGet: {
                    path: "/ambassador/v0/check_ready",
                    port: 8877,
                  },
                  initialDelaySeconds: 30,
                  periodSeconds: 30,
                },
                livenessProbe: {
                  httpGet: {
                    path: "/ambassador/v0/check_alive",
                    port: 8877,
                  },
                  initialDelaySeconds: 30,
                  periodSeconds: 30,
                },
              },
            ],
            restartPolicy: "Always",
            serviceAccountName: "ambassador",
          },
        },
      },
    },  // deploy
    ambassadorDeployment:: ambassadorDeployment,

    parts:: self,
    all:: [
      self.ambassadorService,
      self.adminService,
      self.ambassadorRole,
      self.ambassadorServiceAccount,
      self.ambassadorRoleBinding,
      self.ambassadorDeployment,
    ],

    list(obj=self.all):: util.list(obj),
  },
}
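The `ambassadorService` above uses jsonnet's computed-field-name idiom, `[if cond then 'field']: value`, to emit `nodePort` only when the configured port is in the valid NodePort range. A standalone sketch of the same idiom, assuming a local `jsonnet` binary:

```shell
# Emits the nodePort field only when the value is in 30000-32767;
# change nodePort to 8080 and the field disappears from the output.
jsonnet -e '{
  local nodePort = 30080,
  ports: [{
    port: 80,
    targetPort: 80,
    [if nodePort >= 30000 && nodePort <= 32767 then "nodePort"]: nodePort,
  }],
}'
```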
@@ -1,197 +0,0 @@
{
  local util = import "kubeflow/common/util.libsonnet",
  new(_env, _params):: {
    local params = _params + _env,

    local ui_name = params.name + "-login",

    local authService = {
      apiVersion: "v1",
      kind: "Service",
      metadata: {
        labels: {
          app: params.name,
        },
        name: params.name,
        namespace: params.namespace,
        annotations: {
          "getambassador.io/config":
            std.join("\n", [
              "---",
              "apiVersion: ambassador/v0",
              "kind: AuthService",
              "name: " + params.name,
              "auth_service: " + params.name + "." + params.namespace + ":8085",
              'allowed_headers:\n- "x-from-login"',
            ]),
        },  //annotations
      },
      spec: {
        ports: [
          {
            port: 8085,
            targetPort: 8085,
          },
        ],
        selector: {
          app: params.name,
        },
        type: "ClusterIP",
      },
    },
    authService:: authService,

    local authDeployment = {
      apiVersion: "extensions/v1beta1",
      kind: "Deployment",
      metadata: {
        name: params.name,
        namespace: params.namespace,
      },
      spec: {
        // replicas here should always be 1:
        // we store auth cookies in memory and we don't support sharing them among pods.
        replicas: 1,
        strategy: {
          type: "RollingUpdate",
        },
        template: {
          metadata: {
            labels: {
              app: params.name,
            },
          },
          spec: {
            containers: [
              {
                image: params.image,
                name: "app",
                workingDir: "/opt/kubeflow",
                env: [
                  {
                    name: "USERNAME",
                    valueFrom: {
                      secretKeyRef: {
                        name: params.authSecretName,
                        key: "username",
                      },
                    },
                  },
                  {
                    name: "PASSWORDHASH",
                    valueFrom: {
                      secretKeyRef: {
                        name: params.authSecretName,
                        key: "passwordhash",
                      },
                    },
                  },
                ],
                command: [
                  "/opt/kubeflow/gatekeeper",
                ],
                args: [
                  "--username=$(USERNAME)",
                  "--pwhash=$(PASSWORDHASH)",
                ],
                ports: [
                  {
                    containerPort: 8085,
                  },
                ],
              },
            ],
          },
        },
      },
    },
    authDeployment:: authDeployment,

    local loginService = {
      apiVersion: "v1",
      kind: "Service",
      metadata: {
        labels: {
          app: ui_name,
        },
        name: ui_name,
        namespace: params.namespace,
        annotations: {
          "getambassador.io/config":
            std.join("\n", [
              "---",
              "apiVersion: ambassador/v0",
              "kind: Mapping",
              "name: kflogin-mapping",
              "prefix: /kflogin",
              "rewrite: /kflogin",
              "timeout_ms: 300000",
              "service: " + ui_name + "." + params.namespace,
              "use_websocket: true",
            ]),
        },  //annotations
      },
      spec: {
        ports: [
          {
            port: 80,
            targetPort: 5000,
          },
        ],
        selector: {
          app: ui_name,
        },
        type: "ClusterIP",
      },
    },
    loginService:: loginService,

    local loginDeployment = {
      apiVersion: "extensions/v1beta1",
      kind: "Deployment",
      metadata: {
        name: ui_name,
        namespace: params.namespace,
      },
      spec: {
        replicas: 1,
        strategy: {
          type: "RollingUpdate",
        },
        template: {
          metadata: {
            labels: {
              app: ui_name,
            },
          },
          spec: {
            containers: [
              {
                image: params.imageui,
                name: "app",
                ports: [
                  {
                    containerPort: 5000,
                  },
                ],
              },
            ],
          },
        },
      },
    },
    loginDeployment:: loginDeployment,

    parts:: self,
    all:: [
      self.authService,
      self.authDeployment,
      self.loginService,
      self.loginDeployment,
    ],

    list(obj=self.all):: util.list(obj),
  },
}
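The gatekeeper deployment above reads `username` and `passwordhash` keys from the secret named by `params.authSecretName`. A sketch of creating a compatible secret; the secret name `kubeflow-login` is an assumption, and the expected hash format depends on the gatekeeper binary (not shown here):

```shell
# Sketch: create the login secret the gatekeeper expects.
# Match the name to whatever authSecretName is set to in params.
kubectl -n kubeflow create secret generic kubeflow-login \
  --from-literal=username=admin \
  --from-literal=passwordhash="$PASSWORDHASH"
```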
@@ -1,221 +0,0 @@
{
  local k = import "k.libsonnet",
  local util = import "kubeflow/common/util.libsonnet",
  new(_env, _params):: {
    local params = _params + _env,

    local centralDashboardDeployment = {
      apiVersion: "extensions/v1beta1",
      kind: "Deployment",
      metadata: {
        labels: {
          app: "centraldashboard",
        },
        name: "centraldashboard",
        namespace: params.namespace,
      },
      spec: {
        template: {
          metadata: {
            labels: {
              app: "centraldashboard",
            },
          },
          spec: {
            containers: [
              {
                image: params.image,
                name: "centraldashboard",
                ports: [
                  {
                    containerPort: 8082,
                  },
                ],
              },
            ],
            serviceAccountName: "centraldashboard",
          },
        },
      },
    },  // deployUi
    centralDashboardDeployment:: centralDashboardDeployment,

    local centralDashboardService = {
      // Due to https://github.com/ksonnet/ksonnet/issues/670, escaped characters in
      // jsonnet files are not interpreted correctly by ksonnet, which causes runtime
      // parsing failures. This is fixed in ksonnet 0.12.0, so we can merge this back
      // to the jsonnet file when we take a dependency on ksonnet 0.12.0 or later.
      local annotations = function(namespace) {
        "getambassador.io/config":
          std.join("\n", [
            "---",
            "apiVersion: ambassador/v0",
            "kind: Mapping",
            "name: centralui-mapping",
            "prefix: /",
            "rewrite: /",
            "service: centraldashboard." + namespace,
          ]),
      },
      apiVersion: "v1",
      kind: "Service",
      metadata: {
        labels: {
          app: "centraldashboard",
        },
        name: "centraldashboard",
        namespace: params.namespace,
        annotations: annotations(params.namespace),
      },
      spec: {
        ports: [
          {
            port: 80,
            targetPort: 8082,
          },
        ],
        selector: {
          app: "centraldashboard",
        },
        sessionAffinity: "None",
        type: "ClusterIP",
      },
    },  //service
    centralDashboardService:: centralDashboardService,

    local centralDashboardServiceAccount = {
      apiVersion: "v1",
      kind: "ServiceAccount",
      metadata: {
        name: "centraldashboard",
        namespace: params.namespace,
      },
    },  // service account
    centralDashboardServiceAccount:: centralDashboardServiceAccount,

    local centralDashboardRole = {
      apiVersion: "rbac.authorization.k8s.io/v1beta1",
      kind: "Role",
      metadata: {
        labels: {
          app: "centraldashboard",
        },
        name: "centraldashboard",
        namespace: params.namespace,
      },
      rules: [
        {
          apiGroups: [""],
          resources: [
            "pods",
            "pods/exec",
            "pods/log",
          ],
          verbs: [
            "get",
            "list",
            "watch",
          ],
        },
        {
          apiGroups: [""],
          resources: [
            "secrets",
          ],
          verbs: [
            "get",
          ],
        },
      ],
    },  // role
    centralDashboardRole:: centralDashboardRole,

    local centralDashboardRoleBinding = {
      apiVersion: "rbac.authorization.k8s.io/v1beta1",
      kind: "RoleBinding",
      metadata: {
        labels: {
          app: "centraldashboard",
        },
        name: "centraldashboard",
        namespace: params.namespace,
      },
      roleRef: {
        apiGroup: "rbac.authorization.k8s.io",
        kind: "Role",
        name: "centraldashboard",
      },
      subjects: [
        {
          kind: "ServiceAccount",
          name: "centraldashboard",
          namespace: params.namespace,
        },
      ],
    },  // role binding
    centralDashboardRoleBinding:: centralDashboardRoleBinding,

    local centralDashboardClusterRole = {
      apiVersion: "rbac.authorization.k8s.io/v1",
      kind: "ClusterRole",
      metadata: {
        labels: {
          app: "centraldashboard",
        },
        name: "centraldashboard",
      },
      rules: [
        {
          apiGroups: [""],
          resources: [
            "namespaces",
            "events",
          ],
          verbs: [
            "get",
            "list",
            "watch",
          ],
        },
      ],
    },  // clusterrole
    centralDashboardClusterRole:: centralDashboardClusterRole,

    local centralDashboardClusterRoleBinding = {
      apiVersion: "rbac.authorization.k8s.io/v1",
      kind: "ClusterRoleBinding",
      metadata: {
        labels: {
          app: "centraldashboard",
        },
        name: "centraldashboard",
      },
      roleRef: {
        apiGroup: "rbac.authorization.k8s.io",
        kind: "ClusterRole",
        name: "centraldashboard",
      },
      subjects: [
        {
          kind: "ServiceAccount",
          name: "centraldashboard",
          namespace: params.namespace,
        },
      ],
    },  // clusterrolebinding
    centralDashboardClusterRoleBinding:: centralDashboardClusterRoleBinding,

    parts:: self,
    all:: [
      self.centralDashboardDeployment,
      self.centralDashboardService,
      self.centralDashboardServiceAccount,
      self.centralDashboardRole,
      self.centralDashboardRoleBinding,
      self.centralDashboardClusterRole,
      self.centralDashboardClusterRoleBinding,
    ],

    list(obj=self.all):: util.list(obj),
  },
}
@ -1,94 +0,0 @@
|
|||
{
|
||||
local util = import "kubeflow/common/util.libsonnet",
|
||||
new(_env, _params):: {
|
||||
local params = _params + _env,
|
||||
|
||||
local service = {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: params.name,
|
||||
},
|
||||
name: params.name,
|
||||
namespace: params.namespace,
|
||||
annotations: {
|
||||
"getambassador.io/config":
|
||||
std.join("\n", [
|
||||
"---",
|
||||
"apiVersion: ambassador/v0",
|
||||
"kind: Mapping",
|
||||
"name: " + params.name + "-mapping",
|
||||
"prefix: /" + params.name,
|
||||
"rewrite: /",
|
||||
"service: " + params.name + "." + params.namespace,
|
||||
]),
|
||||
}, //annotations
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
port: 80,
|
||||
targetPort: 8080,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: params.name,
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
},
|
||||
service:: service,
|
||||
|
||||
local deployment = {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: params.name,
|
||||
namespace: params.namespace,
|
||||
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: params.name,
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
image: params.image,
|
||||
name: "app",
|
||||
ports: [
|
||||
{
|
||||
containerPort: 8080,
|
||||
},
|
||||
],
|
||||
|
||||
readinessProbe: {
|
||||
httpGet: {
|
||||
path: "/headers",
|
||||
port: 8080,
|
||||
},
|
||||
initialDelaySeconds: 5,
|
||||
periodSeconds: 30,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
deployment:: deployment,
|
||||
|
||||
parts:: self,
|
||||
all:: [
|
||||
self.service,
|
||||
self.deployment,
|
||||
],
|
||||
|
||||
list(obj=self.all):: util.list(obj),
|
||||
},
|
||||
}
|
||||
|
|
@ -1,35 +0,0 @@
|
|||
{
|
||||
"name": "common",
|
||||
"apiVersion": "0.0.1",
|
||||
"kind": "ksonnet.io/parts",
|
||||
"description": "Common components of Kubeflow.\n",
|
||||
"author": "kubeflow team <kubeflow-team@google.com>",
|
||||
"contributors": [
|
||||
{
|
||||
"name": "Jeremy Lewi",
|
||||
"email": "jlewi@google.com"
|
||||
}
|
||||
],
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/kubeflow/kubeflow"
|
||||
},
|
||||
"bugs": {
|
||||
"url": "https://github.com/kubeflow/kubeflow/issues"
|
||||
},
|
||||
"keywords": [
|
||||
"kubeflow",
|
||||
"tensorflow"
|
||||
],
|
||||
"quickStart": {
|
||||
"prototype": "io.ksonnet.pkg.kubeflow",
|
||||
"componentName": "common",
|
||||
"flags": {
|
||||
"name": "common",
|
||||
"namespace": "default",
|
||||
"disks": ""
|
||||
},
|
||||
"comment": "Common Kubeflow components."
|
||||
},
|
||||
"license": "Apache 2.0"
|
||||
}
|
||||
|
|
@ -1,14 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.ambassador
|
||||
// @description Ambassador Component
|
||||
// @shortDescription Ambassador
|
||||
// @param name string Name
|
||||
// @optionalParam platform string none supported platforms {none|gke|minikube}
|
||||
// @optionalParam ambassadorServiceType string ClusterIP The service type for the API Gateway {ClusterIP|NodePort|LoadBalancer}.
|
||||
// @optionalParam ambassadorNodePort number 0 Optional nodePort to use when ambassadorServiceType is NodePort {30000-32767}.
|
||||
// @optionalParam ambassadorImage string quay.io/datawire/ambassador:0.37.0 The image for the API Gateway.
|
||||
// @optionalParam replicas number 3 The number of replicas.
|
||||
|
||||
local ambassador = import "kubeflow/common/ambassador.libsonnet";
|
||||
local instance = ambassador.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.basic-auth
|
||||
// @description Provides http basic auth for all ambassador traffic.
|
||||
// @shortDescription Http basic auth.
|
||||
// @param name string Name for the component
|
||||
// @optionalParam authSecretName string kubeflow-login Contains username and passwordhash for UI/API auth.
|
||||
// @optionalParam image string gcr.io/kubeflow-images-public/gatekeeper:v0.5.0 Auth service image to use.
|
||||
// @optionalParam imageui string gcr.io/kubeflow-images-public/kflogin-ui:v0.5.0 UI image to use.
|
||||
|
||||
local basicauth = import "kubeflow/common/basic-auth.libsonnet";
|
||||
local instance = basicauth.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.centraldashboard
|
||||
// @description centraldashboard Component
|
||||
// @shortDescription centraldashboard
|
||||
// @param name string Name
|
||||
// @optionalParam image string gcr.io/kubeflow-images-public/centraldashboard:v0.5.0 Image for the central dashboard
|
||||
|
||||
local centraldashboard = import "kubeflow/common/centraldashboard.libsonnet";
|
||||
local instance = centraldashboard.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.echo-server
|
||||
// @description Provides a simple server for testing connections; primarily IAP.
|
||||
// @shortDescription A simple echo server.
|
||||
// @param name string Name for the component
|
||||
// @optionalParam image string gcr.io/kubeflow-images-staging/echo-server:v20180628-44f08d31 The image to use.
|
||||
|
||||
local echoserver = import "kubeflow/common/echo-server.libsonnet";
|
||||
local instance = echoserver.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.spartakus
|
||||
// @description spartakus component for usage collection
|
||||
// @shortDescription spartakus component for usage collection
|
||||
// @param name string Name
|
||||
// @optionalParam usageId string unknown_cluster Optional id to use when reporting usage to kubeflow.org
|
||||
// @optionalParam reportUsage string false Whether or not to report Kubeflow usage to kubeflow.org.
|
||||
|
||||
local spartakus = import "kubeflow/common/spartakus.libsonnet";
|
||||
local instance = spartakus.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,122 +0,0 @@
|
|||
{
|
||||
local k = import "k.libsonnet",
|
||||
local util = import "kubeflow/common/util.libsonnet",
|
||||
new(_env, _params):: {
|
||||
local params = _params + _env {
|
||||
reportUsageBool: util.toBool(_params.reportUsage),
|
||||
},
|
||||
|
||||
// Spartakus needs to be able to get information about the cluster to create a report.
|
||||
local clusterRole = {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus",
|
||||
},
|
||||
name: "spartakus",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"nodes",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"list",
|
||||
],
|
||||
},
|
||||
],
|
||||
}, // cluster role
|
||||
clusterRole:: clusterRole,
|
||||
|
||||
local clusterRoleBinding = {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus",
|
||||
},
|
||||
name: "spartakus",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "spartakus",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "spartakus",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
],
|
||||
}, // cluster role binding
|
||||
clusterRoleBinding:: clusterRoleBinding,
|
||||
|
||||
local serviceAccount = {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus",
|
||||
},
|
||||
name: "spartakus",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
},
|
||||
serviceAccount:: serviceAccount,
|
||||
|
||||
local volunteer = {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "spartakus-volunteer",
|
||||
namespace: params.namespace,
|
||||
labels: {
|
||||
app: "spartakus",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus-volunteer",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
image: "gcr.io/google_containers/spartakus-amd64:v1.1.0",
|
||||
name: "volunteer",
|
||||
args: [
|
||||
"volunteer",
|
||||
"--cluster-id=" + params.usageId,
|
||||
"--database=https://stats-collector.kubeflow.org",
|
||||
],
|
||||
},
|
||||
],
|
||||
serviceAccountName: "spartakus",
|
||||
}, // spec
|
||||
},
|
||||
},
|
||||
}, // deployment
|
||||
volunteer:: volunteer,
|
||||
|
||||
parts:: self,
|
||||
all:: if params.reportUsageBool then (
|
||||
[
|
||||
self.clusterRole,
|
||||
self.clusterRoleBinding,
|
||||
self.serviceAccount,
|
||||
self.volunteer,
|
||||
]
|
||||
) else [],
|
||||
|
||||
list(obj=self.all):: util.list(obj),
|
||||
},
|
||||
}
|
||||
|
|
@ -1,231 +0,0 @@
|
|||
local ambassador = import "kubeflow/common/ambassador.libsonnet";
|
||||
local testSuite = import "kubeflow/common/testsuite.libsonnet";
|
||||
|
||||
local params = {
|
||||
name: "ambassador",
|
||||
platform: "gke",
|
||||
ambassadorServiceType: "ClusterIP",
|
||||
ambassadorImage: "quay.io/datawire/ambassador:0.37.0",
|
||||
replicas: 3,
|
||||
};
|
||||
local env = {
|
||||
namespace: "kubeflow",
|
||||
};
|
||||
|
||||
local instance = ambassador.new(env, params);
|
||||
|
||||
local testCases = [
|
||||
{
|
||||
actual: instance.parts.ambassadorService,
|
||||
expected:
|
||||
{
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "ambassador",
|
||||
},
|
||||
name: "ambassador",
|
||||
namespace: "kubeflow",
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "ambassador",
|
||||
port: 80,
|
||||
targetPort: 80,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
service: "ambassador",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.adminService,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "ambassador-admin",
|
||||
},
|
||||
name: "ambassador-admin",
|
||||
namespace: "kubeflow",
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "ambassador-admin",
|
||||
port: 8877,
|
||||
targetPort: 8877,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
service: "ambassador",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.ambassadorRole,
|
||||
expected: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
name: "ambassador",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"services",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"list",
|
||||
"watch",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"configmaps",
|
||||
],
|
||||
verbs: [
|
||||
"create",
|
||||
"update",
|
||||
"patch",
|
||||
"get",
|
||||
"list",
|
||||
"watch",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"secrets",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"list",
|
||||
"watch",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.ambassadorServiceAccount,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "ambassador",
|
||||
namespace: "kubeflow",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.ambassadorRoleBinding,
|
||||
expected: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
name: "ambassador",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "ambassador",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "ambassador",
|
||||
namespace: "kubeflow",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.ambassadorDeployment,
|
||||
expected:
|
||||
{
|
||||
apiVersion: "apps/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "ambassador",
|
||||
namespace: "kubeflow",
|
||||
},
|
||||
spec: {
|
||||
replicas: 3,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "ambassador",
|
||||
},
|
||||
namespace: "kubeflow",
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
env: [
|
||||
{
|
||||
name: "AMBASSADOR_NAMESPACE",
|
||||
valueFrom: {
|
||||
fieldRef: {
|
||||
fieldPath: "metadata.namespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
image: "quay.io/datawire/ambassador:0.37.0",
|
||||
livenessProbe: {
|
||||
httpGet: {
|
||||
path: "/ambassador/v0/check_alive",
|
||||
port: 8877,
|
||||
},
|
||||
initialDelaySeconds: 30,
|
||||
periodSeconds: 30,
|
||||
},
|
||||
name: "ambassador",
|
||||
readinessProbe: {
|
||||
httpGet: {
|
||||
path: "/ambassador/v0/check_ready",
|
||||
port: 8877,
|
||||
},
|
||||
initialDelaySeconds: 30,
|
||||
periodSeconds: 30,
|
||||
},
|
||||
resources: {
|
||||
limits: {
|
||||
cpu: 1,
|
||||
memory: "400Mi",
|
||||
},
|
||||
requests: {
|
||||
cpu: "200m",
|
||||
memory: "100Mi",
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
restartPolicy: "Always",
|
||||
serviceAccountName: "ambassador",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
testSuite.run(testCases)
|
||||
|
|
@ -1,167 +0,0 @@
|
|||
local centraldashboard = import "../centraldashboard.libsonnet";
|
||||
local testSuite = import "kubeflow/common/testsuite.libsonnet";
|
||||
|
||||
local params = {
|
||||
image: "gcr.io/kubeflow-images-public/centraldashboard:v0.3.0",
|
||||
};
|
||||
local env = {
|
||||
namespace: "kftest",
|
||||
};
|
||||
local centraldash = centraldashboard.new(env, params);
|
||||
|
||||
local testCases = [
|
||||
{
|
||||
actual: centraldash.centralDashboardDeployment,
|
||||
expected: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "centraldashboard",
|
||||
},
|
||||
name: "centraldashboard",
|
||||
namespace: "kftest",
|
||||
},
|
||||
spec: {
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "centraldashboard",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
image: "gcr.io/kubeflow-images-public/centraldashboard:v0.3.0",
|
||||
name: "centraldashboard",
|
||||
ports: [
|
||||
{
|
||||
containerPort: 8082,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
serviceAccountName: "centraldashboard",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: centraldash.centralDashboardService,
|
||||
expected: {
|
||||
local annotations = function(namespace) {
|
||||
"getambassador.io/config":
|
||||
std.join("\n", [
|
||||
"---",
|
||||
"apiVersion: ambassador/v0",
|
||||
"kind: Mapping",
|
||||
"name: centralui-mapping",
|
||||
"prefix: /",
|
||||
"rewrite: /",
|
||||
"service: centraldashboard." + namespace,
|
||||
]),
|
||||
},
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "centraldashboard",
|
||||
},
|
||||
name: "centraldashboard",
|
||||
namespace: "kftest",
|
||||
annotations: annotations("kftest"),
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
port: 80,
|
||||
targetPort: 8082,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "centraldashboard",
|
||||
},
|
||||
sessionAffinity: "None",
|
||||
type: "ClusterIP",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: centraldash.centralDashboardServiceAccount,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "centraldashboard",
|
||||
namespace: "kftest",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: centraldash.centralDashboardRole,
|
||||
expected: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "Role",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "centraldashboard",
|
||||
},
|
||||
name: "centraldashboard",
|
||||
namespace: "kftest",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [""],
|
||||
resources: [
|
||||
"pods",
|
||||
"pods/exec",
|
||||
"pods/log",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"list",
|
||||
"watch",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [""],
|
||||
resources: [
|
||||
"secrets",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: centraldash.centralDashboardRoleBinding,
|
||||
expected: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "RoleBinding",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "centraldashboard",
|
||||
},
|
||||
name: "centraldashboard",
|
||||
namespace: "kftest",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "Role",
|
||||
name: "centraldashboard",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "centraldashboard",
|
||||
namespace: "kftest",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
testSuite.run(testCases)
|
||||
|
|
@ -1,88 +0,0 @@
|
|||
local echoServer = import "kubeflow/common/echo-server.libsonnet";
|
||||
local testSuite = import "kubeflow/common/testsuite.libsonnet";
|
||||
|
||||
local params = {
|
||||
name: "echo-server",
|
||||
image: "gcr.io/kubeflow-images-staging/echo-server:v20180628-44f08d31",
|
||||
};
|
||||
local env = {
|
||||
namespace: "kubeflow",
|
||||
};
|
||||
|
||||
local instance = echoServer.new(env, params);
|
||||
|
||||
local testCases = [
|
||||
{
|
||||
actual: instance.parts.service,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
annotations: {
|
||||
"getambassador.io/config": "---\napiVersion: ambassador/v0\nkind: Mapping\nname: echo-server-mapping\nprefix: /echo-server\nrewrite: /\nservice: echo-server.kubeflow",
|
||||
},
|
||||
labels: {
|
||||
app: "echo-server",
|
||||
},
|
||||
name: "echo-server",
|
||||
namespace: "kubeflow",
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
port: 80,
|
||||
targetPort: 8080,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "echo-server",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.deployment,
|
||||
expected: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "echo-server",
|
||||
namespace: "kubeflow",
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "echo-server",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
image: "gcr.io/kubeflow-images-staging/echo-server:v20180628-44f08d31",
|
||||
name: "app",
|
||||
ports: [
|
||||
{
|
||||
containerPort: 8080,
|
||||
},
|
||||
],
|
||||
readinessProbe: {
|
||||
httpGet: {
|
||||
path: "/headers",
|
||||
port: 8080,
|
||||
},
|
||||
initialDelaySeconds: 5,
|
||||
periodSeconds: 30,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
testSuite.run(testCases)
|
||||
|
|
@ -1,122 +0,0 @@
|
|||
local spartakus = import "kubeflow/common/spartakus.libsonnet";
|
||||
local testSuite = import "kubeflow/common/testsuite.libsonnet";
|
||||
|
||||
local params = {
|
||||
name: "spartakus",
|
||||
usageId: "unknown_cluster",
|
||||
reportUsage: "false",
|
||||
};
|
||||
local env = {
|
||||
namespace: "kubeflow",
|
||||
};
|
||||
|
||||
local instance = spartakus.new(env, params);
|
||||
|
||||
local testCases = [
|
||||
{
|
||||
actual: instance.parts.clusterRole,
|
||||
expected: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus",
|
||||
},
|
||||
name: "spartakus",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"nodes",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"list",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.clusterRoleBinding,
|
||||
expected: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus",
|
||||
},
|
||||
name: "spartakus",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "spartakus",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "spartakus",
|
||||
namespace: "kubeflow",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.serviceAccount,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus",
|
||||
},
|
||||
name: "spartakus",
|
||||
namespace: "kubeflow",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.volunteer,
|
||||
expected: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus",
|
||||
},
|
||||
name: "spartakus-volunteer",
|
||||
namespace: "kubeflow",
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus-volunteer",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"volunteer",
|
||||
"--cluster-id=unknown_cluster",
|
||||
"--database=https://stats-collector.kubeflow.org",
|
||||
],
|
||||
image: "gcr.io/google_containers/spartakus-amd64:v1.1.0",
|
||||
name: "volunteer",
|
||||
},
|
||||
],
|
||||
serviceAccountName: "spartakus",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
testSuite.run(testCases)
|
||||
|
|
@ -1,56 +0,0 @@
|
|||
local util = import "../util.libsonnet";
|
||||
|
||||
std.assertEqual(util.lower("aTruez"), "atruez") &&
|
||||
std.assertEqual(util.lower("ATrUez"), "atruez") &&
|
||||
std.assertEqual(util.lower("atruez"), "atruez") &&
|
||||
std.assertEqual(util.lower("ATRUEZ"), "atruez") &&
|
||||
std.assertEqual(util.toBool(false), false) &&
|
||||
std.assertEqual(util.toBool(true), true) &&
|
||||
std.assertEqual(util.toBool("true"), true) &&
|
||||
std.assertEqual(util.toBool("True"), true) &&
|
||||
std.assertEqual(util.toBool("TRUE"), true) &&
|
||||
std.assertEqual(util.toBool("false"), false) &&
|
||||
std.assertEqual(util.toBool("False"), false) &&
|
||||
std.assertEqual(util.toBool("FALSE"), false) &&
|
||||
std.assertEqual(util.toBool("random string"), false) &&
|
||||
std.assertEqual(util.toBool(1), true) &&
|
||||
std.assertEqual(util.toBool(0), false) &&
|
||||
std.assertEqual(util.toBool(123), true) &&
|
||||
std.assertEqual(util.toArray("a,b,c,d"), ["a", "b", "c", "d"]) &&
|
||||
std.assertEqual(util.toArray("ca, or,fl, mo"), ["ca", "or", "fl", "mo"]) &&
|
||||
std.assertEqual(std.length(util.toArray(2)), 0) &&
|
||||
std.assertEqual(std.length(util.toArray("hello world")), 1) &&
|
||||
std.assertEqual(std.length(util.toArray([1, 2, 3, 4])), 0) &&
|
||||
std.assertEqual(util.sort(["Craydad", "CCall", "crayon"]), ["CCall", "Craydad", "crayon"]) &&
|
||||
std.assertEqual(
|
||||
{
|
||||
new():: self + {
|
||||
local configMap = {
|
||||
kind: "ConfigMap",
|
||||
},
|
||||
local service = {
|
||||
kind: "Service",
|
||||
},
|
||||
list:: util.list([configMap, service]),
|
||||
},
|
||||
}.new().list,
|
||||
{
|
||||
apiVersion: "v1",
|
||||
items: [
|
||||
{
|
||||
kind: "ConfigMap",
|
||||
},
|
||||
{
|
||||
kind: "Service",
|
||||
},
|
||||
],
|
||||
kind: "List",
|
||||
}
|
||||
) &&
|
||||
std.assertEqual(
|
||||
util.setDiff(
|
||||
util.sort(["CCall", "Craydad", "crayon", "fuzzball"]),
|
||||
util.sort(["CCall", "Craydad", "crayon"])
|
||||
),
|
||||
["fuzzball"]
|
||||
)
|
||||
|
|
@ -1,29 +0,0 @@
|
|||
// Some useful routines.
|
||||
{
|
||||
run:: function(testCases) {
|
||||
local testEqual(x) = x {
|
||||
pass: x.actual == x.expected,
|
||||
},
|
||||
local curry(testCases) = {
|
||||
// For each test case, determine whether actual equals expected.
|
||||
local testCasesWithResults = std.map(
|
||||
testEqual,
|
||||
testCases,
|
||||
),
|
||||
return::
|
||||
testCasesWithResults,
|
||||
}.return,
|
||||
// Compute test suite.
|
||||
local foldResults(left, right) = {
|
||||
pass: left.pass && right.pass,
|
||||
},
|
||||
local initResult = { pass: true },
|
||||
local suiteResult = std.foldl(foldResults, curry(testCases), initResult),
|
||||
local testSuite = suiteResult {
|
||||
testCases: curry(testCases),
|
||||
},
|
||||
result::
|
||||
testSuite,
|
||||
}.result,
|
||||
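// A sketch of expected usage, mirroring the *_test.jsonnet files: run
|
||||
// returns an object whose "pass" field folds together every per-case
|
||||
// result, e.g.
|
||||
//   local suite = (import "kubeflow/common/testsuite.libsonnet").run(
|
||||
//     [{ actual: 1 + 1, expected: 2 }],
|
||||
//   );
|
||||
//   suite.pass == true && suite.testCases[0].pass == true
|
||||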
|
||||
}
|
||||
|
|
@ -1,207 +0,0 @@
|
|||
// Some useful routines.
|
||||
{
|
||||
local k = import "k.libsonnet",
|
||||
local util = self,
|
||||
|
||||
// Convert a string to lower case.
|
||||
lower:: function(x) {
|
||||
local cp(c) = std.codepoint(c),
|
||||
local lowerLetter(c) = if cp(c) >= 65 && cp(c) < 91 then
|
||||
std.char(cp(c) + 32)
|
||||
else c,
|
||||
result:: std.join("", std.map(lowerLetter, std.stringChars(x))),
|
||||
}.result,
|
||||
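// Example, mirroring util_test.jsonnet: util.lower("ATrUez") == "atruez".
|
||||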
|
||||
// Convert non-boolean types like string or number to a boolean.
|
||||
// This is primarily intended for dealing with parameters that should be booleans.
|
||||
toBool:: function(x) {
|
||||
result::
|
||||
if std.type(x) == "boolean" then
|
||||
x
|
||||
else if std.type(x) == "string" then
|
||||
std.asciiUpper(x) == "TRUE"
|
||||
else if std.type(x) == "number" then
|
||||
x != 0
|
||||
else
|
||||
false,
|
||||
}.result,
|
||||
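// Examples, mirroring util_test.jsonnet: toBool("TRUE") == true,
|
||||
// toBool(0) == false, and any other type or unrecognized string is false.
|
||||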
|
||||
// Convert a comma-delimited string to an array.
|
||||
toArray:: function(str) {
|
||||
local trim(str) = {
|
||||
rest::
|
||||
if std.startsWith(str, " ") then
|
||||
std.substr(str, 1, std.length(str) - 1)
|
||||
else
|
||||
str,
|
||||
}.rest,
|
||||
result::
|
||||
if std.type(str) == "string" && str != "null" && std.length(str) > 0 then
|
||||
std.map(trim, std.split(str, ","))
|
||||
else [],
|
||||
}.result,
|
||||
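// Examples, mirroring util_test.jsonnet:
|
||||
//   toArray("ca, or,fl, mo") == ["ca", "or", "fl", "mo"]
|
||||
// and non-string inputs (numbers, arrays) yield [].
|
||||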
|
||||
foldl:: function(key, value, objs) {
|
||||
local aux(arr, i, running) =
|
||||
if i >= std.length(arr) then
|
||||
running
|
||||
else
|
||||
aux(arr, i + 1, running { [key(arr[i])]+: value(arr[i]) }) tailstrict,
|
||||
return:: aux(objs, 0, {}),
|
||||
}.return,
|
||||
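// A sketch of intended use (see groupByResource below): fold a list of
|
||||
// resources into an object keyed by key(r), merging the value(r) objects
|
||||
// that share a key, e.g.
|
||||
//   util.foldl(function(r) r.kind,
|
||||
//              function(r) { [r.metadata.name]+: r },
|
||||
//              resources)
|
||||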
|
||||
sort:: function(arr, compare=function(a, b) {
|
||||
return::
|
||||
if a == b then
|
||||
0
|
||||
else
|
||||
if a < b then
|
||||
-1
|
||||
else
|
||||
1,
|
||||
}.return) {
|
||||
local _sort(arr, compare) = {
|
||||
local l = std.length(arr),
|
||||
local f = {
|
||||
local pivot = arr[0],
|
||||
local rest = std.makeArray(l - 1, function(i) arr[i + 1]),
|
||||
local lessorequal(x) = compare(x, pivot) <= 0,
|
||||
local greater(x) = compare(x, pivot) > 0,
|
||||
local left = _sort(std.filter(lessorequal, rest), compare) tailstrict,
|
||||
local right = _sort(std.filter(greater, rest), compare) tailstrict,
|
||||
return:: left + [pivot] + right,
|
||||
}.return,
|
||||
return::
|
||||
if l == 0 then
|
||||
[]
|
||||
else
|
||||
f,
|
||||
}.return,
|
||||
return:: _sort(arr, compare),
|
||||
}.return,
|
||||
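// Example, mirroring util_test.jsonnet: under the default comparator
|
||||
//   util.sort(["Craydad", "CCall", "crayon"]) == ["CCall", "Craydad", "crayon"].
|
||||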
|
||||
setDiff:: function(a, b, compare=function(a, b) {
|
||||
return::
|
||||
if a == b then
|
||||
0
|
||||
else if a < b then
|
||||
-1
|
||||
else
|
||||
1,
|
||||
}.return) {
|
||||
local aux(a, b, i, j, acc) =
|
||||
if i >= std.length(a) then
|
||||
acc
|
||||
else
|
||||
if j >= std.length(b) then
|
||||
aux(a, b, i + 1, j, acc + [a[i]]) tailstrict
|
||||
else
|
||||
if compare(a[i], b[j]) == 0 then
|
||||
aux(a, b, i + 1, j + 1, acc) tailstrict
|
||||
else
|
||||
if compare(a[i], b[j]) == -1 then
|
||||
aux(a, b, i + 1, j, acc + [a[i]]) tailstrict
|
||||
else
|
||||
aux(a, b, i, j + 1, acc) tailstrict,
|
||||
return:: aux(a, b, 0, 0, []) tailstrict,
|
||||
}.return,
|
||||
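// Note that both inputs are consumed in a single merge pass, so they must
|
||||
// already be sorted, as in util_test.jsonnet:
|
||||
//   util.setDiff(util.sort(a), util.sort(b))
|
||||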
|
||||
getApiVersionKindAndMetadata(resource):: {
|
||||
return::
|
||||
if std.objectHas(resource.metadata, "resourceVersion") then {
|
||||
apiVersion: resource.apiVersion,
|
||||
kind: resource.kind,
|
||||
metadata: {
|
||||
labels: resource.metadata.labels,
|
||||
name: resource.metadata.name,
|
||||
namespace: resource.metadata.namespace,
|
||||
resourceVersion: resource.metadata.resourceVersion,
|
||||
}
|
||||
} else {
|
||||
apiVersion: resource.apiVersion,
|
||||
kind: resource.kind,
|
||||
metadata: {
|
||||
labels: resource.metadata.labels,
|
||||
name: resource.metadata.name,
|
||||
namespace: resource.metadata.namespace,
|
||||
},
|
||||
},
|
||||
}.return,
|
||||
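// In other words: strip a resource down to its identifying fields
|
||||
// (apiVersion, kind, labels, name, namespace), carrying resourceVersion
|
||||
// along only when the resource already has one.
|
||||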
|
||||
groupByResource(resources):: {
|
||||
local getKey(resource) = {
|
||||
return::
|
||||
resource.kind,
|
||||
}.return,
|
||||
local getValue(resource) = {
|
||||
return::
|
||||
{ [resource.metadata.name]+: resource },
|
||||
}.return,
|
||||
return:: util.foldl(getKey, getValue, resources),
|
||||
}.return,
|
||||
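// Example sketch: given a Deployment and a Service each named "app",
|
||||
// groupByResource(resources) yields
|
||||
//   { Deployment: { app: <deployment> }, Service: { app: <service> } }.
|
||||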
|
||||
comparator(a, b):: {
|
||||
return::
|
||||
if a.metadata.name == b.metadata.name then
|
||||
0
|
||||
else
|
||||
if a.metadata.name < b.metadata.name then
|
||||
-1
|
||||
else
|
||||
1,
|
||||
}.return,
|
||||
|
||||
validateResource(resource):: {
|
||||
return::
|
||||
if std.type(resource) == "object" &&
|
||||
std.objectHas(resource, "kind") &&
|
||||
std.objectHas(resource, "apiVersion") &&
|
||||
std.objectHas(resource, "metadata") &&
|
||||
std.objectHas(resource.metadata, "name") then
|
||||
true
|
||||
else
|
||||
false,
|
||||
}.return,
|
||||
|
||||
extractGroups(obj)::
|
||||
if std.type(obj) == "object" then
|
||||
[obj[key] for key in std.objectFields(obj)]
|
||||
else
|
||||
[],
|
||||
|
||||
extractResources(group)::
|
||||
if std.type(group) == "object" then
|
||||
[group[key] for key in std.objectFields(group)]
|
||||
else
|
||||
[],
|
||||
|
||||
curryResources(resources, exists):: {
|
||||
local existingResource(resource) = {
|
||||
local resourceExists(kind, name) = {
|
||||
return::
|
||||
if std.objectHas(resources, kind) &&
|
||||
std.objectHas(resources[kind], name) then
|
||||
true
|
||||
else
|
||||
false,
|
||||
}.return,
|
||||
return::
|
||||
if util.validateResource(resource) then
|
||||
resourceExists(resource.kind, resource.metadata.name)
|
||||
else
|
||||
false,
|
||||
}.return,
|
||||
local missingResource(resource) = {
|
||||
return::
|
||||
existingResource(resource) == false,
|
||||
}.return,
|
||||
return::
|
||||
if exists == true then
|
||||
existingResource
|
||||
else
|
||||
missingResource,
|
||||
}.return,
|
||||
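// A sketch of intended use: with resources grouped by groupByResource,
|
||||
// curryResources(resources, true) yields a predicate matching resources
|
||||
// that are already present, and curryResources(resources, false) its
|
||||
// complement, suitable for std.filter over a candidate list.
|
||||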
|
||||
// Produce a v1 List of manifests; obj must be an array of resources.
|
||||
list(obj):: k.core.v1.list.new(obj),
|
||||
}
|
||||
|
|
@ -1,8 +0,0 @@
|
|||
{
|
||||
"Major": "0",
|
||||
"Minor": "2",
|
||||
"Patch": "devel",
|
||||
"GitCommit": "",
|
||||
"BuildDate": "",
|
||||
"ksonnetVersion": "0.9.2",
|
||||
}
|
||||
|
|
@ -1,15 +0,0 @@
|
|||
{
|
||||
all(params):: [
|
||||
{
|
||||
apiVersion: "v1",
|
||||
kind: "ConfigMap",
|
||||
metadata: {
|
||||
name: "kubeflow-version",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
data: {
|
||||
"kubeflow-version": importstr "version-info.json",
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
{
|
||||
"name": "kubeflow examples",
|
||||
"apiVersion": "0.0.1",
|
||||
"kind": "ksonnet.io/parts",
|
||||
"description": "kubeflow examples.\n",
|
||||
"author": "kubeflow-team <kubeflow-discuss@googlegroups.com>",
|
||||
"contributors": [
|
||||
],
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/kubeflow/kubeflow"
|
||||
},
|
||||
"bugs": {
|
||||
"url": "https://github.com/kubeflow/kubeflow/issues"
|
||||
},
|
||||
"keywords": [
|
||||
"kubernetes",
|
||||
"kubeflow",
|
||||
"machine learning"
|
||||
],
|
||||
"license": "Apache 2.0",
|
||||
}
|
||||
|
|
@ -1,88 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.katib-studyjob-test-v1alpha1
|
||||
// @description katib-studyjob-test
|
||||
// @shortDescription A Katib StudyJob using random suggestion
|
||||
// @param name string Name for the job.
|
||||
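// A sketch of how this prototype would be instantiated with ksonnet
|
||||
// (component and parameter values assumed):
|
||||
//   ks generate katib-studyjob-test-v1alpha1 katib-study --name=katib-study
|
||||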
|
||||
local k = import "k.libsonnet";
|
||||
|
||||
local name = params.name;
|
||||
local namespace = env.namespace;
|
||||
|
||||
local studyjob = {
|
||||
apiVersion: "kubeflow.org/v1alpha1",
|
||||
kind: "StudyJob",
|
||||
metadata: {
|
||||
name: name,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
studyName: name,
|
||||
owner: "crd",
|
||||
optimizationtype: "maximize",
|
||||
objectivevaluename: "Validation-accuracy",
|
||||
optimizationgoal: 0.99,
|
||||
requestcount: 1,
|
||||
metricsnames: ["accuracy"],
|
||||
parameterconfigs: [
|
||||
{
|
||||
name: "--lr",
|
||||
parametertype: "double",
|
||||
feasible: {
|
||||
min: "0.01",
|
||||
max: "0.03",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "--num-layers",
|
||||
parametertype: "int",
|
||||
feasible: {
|
||||
min: "2",
|
||||
max: "5",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "--optimizer",
|
||||
parametertype: "categorical",
|
||||
feasible: {
|
||||
list: ["sgd", "adam", "ftrl"],
|
||||
},
|
||||
},
|
||||
],
|
||||
workerSpec: {
|
||||
goTemplate: {
|
||||
rawTemplate: |||
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: {{.WorkerID}}
|
||||
namespace: {{.NameSpace}}
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: {{.WorkerID}}
|
||||
image: katib/mxnet-mnist-example
|
||||
command:
|
||||
- "python"
|
||||
- "/mxnet/example/image-classification/train_mnist.py"
|
||||
- "--batch-size=64"
|
||||
{{- with .HyperParameters}}
|
||||
{{- range .}}
|
||||
- "{{.Name}}={{.Value}}"
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
restartPolicy: Never
|
||||
|||,
|
||||
},
|
||||
},
|
||||
suggestionSpec: {
|
||||
suggestionAlgorithm: "random",
|
||||
requestNumber: 1,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
k.core.v1.list.new([
|
||||
studyjob,
|
||||
])
|
||||
|
|
@ -1,14 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.tensorboard
|
||||
// @description Tensorboard components
|
||||
// @shortDescription ksonnet components for Tensorboard
|
||||
// @param name string Name to give to each of the components
|
||||
// @optionalParam logDir string logs Name of the log directory holding the TF events file
|
||||
// @optionalParam targetPort number 6006 Name of the targetPort
|
||||
// @optionalParam servicePort number 9000 Name of the servicePort
|
||||
// @optionalParam serviceType string ClusterIP The service type for tensorboard service
|
||||
// @optionalParam defaultTbImage string tensorflow/tensorflow:1.8.0 default tensorboard image to use
|
||||
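// A sketch of instantiation with ksonnet (component name and log
|
||||
// directory assumed):
|
||||
//   ks generate tensorboard tb-example --logDir=gs://my-bucket/logs
|
||||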
|
||||
local tensorboard = import "kubeflow/tensorboard/tensorboard.libsonnet";
|
||||
local instance = tensorboard.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,82 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.tf-job-simple-v1beta1
|
||||
// @description tf-job-simple
|
||||
// @shortDescription A simple TFJob to run CNN benchmark
|
||||
// @param name string Name for the job.
|
||||
|
||||
local k = import "k.libsonnet";
|
||||
|
||||
local name = params.name;
|
||||
local namespace = env.namespace;
|
||||
local image = "gcr.io/kubeflow/tf-benchmarks-cpu:v20171202-bdab599-dirty-284af3";
|
||||
|
||||
local tfjob = {
|
||||
apiVersion: "kubeflow.org/v1beta1",
|
||||
kind: "TFJob",
|
||||
metadata: {
|
||||
name: name,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
tfReplicaSpecs: {
|
||||
Worker: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"python",
|
||||
"tf_cnn_benchmarks.py",
|
||||
"--batch_size=32",
|
||||
"--model=resnet50",
|
||||
"--variable_update=parameter_server",
|
||||
"--flush_stdout=true",
|
||||
"--num_gpus=1",
|
||||
"--local_parameter_device=cpu",
|
||||
"--device=cpu",
|
||||
"--data_format=NHWC",
|
||||
],
|
||||
image: image,
|
||||
name: "tensorflow",
|
||||
workingDir: "/opt/tf-benchmarks/scripts/tf_cnn_benchmarks",
|
||||
},
|
||||
],
|
||||
restartPolicy: "OnFailure",
|
||||
},
|
||||
},
|
||||
},
|
||||
Ps: {
|
||||
template: {
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"python",
|
||||
"tf_cnn_benchmarks.py",
|
||||
"--batch_size=32",
|
||||
"--model=resnet50",
|
||||
"--variable_update=parameter_server",
|
||||
"--flush_stdout=true",
|
||||
"--num_gpus=1",
|
||||
"--local_parameter_device=cpu",
|
||||
"--device=cpu",
|
||||
"--data_format=NHWC",
|
||||
],
|
||||
image: image,
|
||||
name: "tensorflow",
|
||||
workingDir: "/opt/tf-benchmarks/scripts/tf_cnn_benchmarks",
|
||||
},
|
||||
],
|
||||
restartPolicy: "OnFailure",
|
||||
},
|
||||
},
|
||||
tfReplicaType: "PS",
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
k.core.v1.list.new([
|
||||
tfjob,
|
||||
])
|
||||
|
|
@ -1,82 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.tf-job-simple-v1beta2
|
||||
// @description tf-job-simple
|
||||
// @shortDescription A simple TFJob to run CNN benchmark
|
||||
// @param name string Name for the job.
|
||||
|
||||
local k = import "k.libsonnet";
|
||||
|
||||
local name = params.name;
|
||||
local namespace = env.namespace;
|
||||
local image = "gcr.io/kubeflow/tf-benchmarks-cpu:v20171202-bdab599-dirty-284af3";
|
||||
|
||||
local tfjob = {
|
||||
apiVersion: "kubeflow.org/v1beta2",
|
||||
kind: "TFJob",
|
||||
metadata: {
|
||||
name: name,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
tfReplicaSpecs: {
|
||||
Worker: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"python",
|
||||
"tf_cnn_benchmarks.py",
|
||||
"--batch_size=32",
|
||||
"--model=resnet50",
|
||||
"--variable_update=parameter_server",
|
||||
"--flush_stdout=true",
|
||||
"--num_gpus=1",
|
||||
"--local_parameter_device=cpu",
|
||||
"--device=cpu",
|
||||
"--data_format=NHWC",
|
||||
],
|
||||
image: image,
|
||||
name: "tensorflow",
|
||||
workingDir: "/opt/tf-benchmarks/scripts/tf_cnn_benchmarks",
|
||||
},
|
||||
],
|
||||
restartPolicy: "OnFailure",
|
||||
},
|
||||
},
|
||||
},
|
||||
Ps: {
|
||||
template: {
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"python",
|
||||
"tf_cnn_benchmarks.py",
|
||||
"--batch_size=32",
|
||||
"--model=resnet50",
|
||||
"--variable_update=parameter_server",
|
||||
"--flush_stdout=true",
|
||||
"--num_gpus=1",
|
||||
"--local_parameter_device=cpu",
|
||||
"--device=cpu",
|
||||
"--data_format=NHWC",
|
||||
],
|
||||
image: image,
|
||||
name: "tensorflow",
|
||||
workingDir: "/opt/tf-benchmarks/scripts/tf_cnn_benchmarks",
|
||||
},
|
||||
],
|
||||
restartPolicy: "OnFailure",
|
||||
},
|
||||
},
|
||||
tfReplicaType: "PS",
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
k.core.v1.list.new([
|
||||
tfjob,
|
||||
])
|
||||
|
|
@ -1,82 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.tf-job-simple
|
||||
// @description tf-job-simple
|
||||
// @shortDescription A simple TFJob to run CNN benchmark
|
||||
// @param name string Name for the job.
|
||||
|
||||
local k = import "k.libsonnet";
|
||||
|
||||
local name = params.name;
|
||||
local namespace = env.namespace;
|
||||
local image = "gcr.io/kubeflow/tf-benchmarks-cpu:v20171202-bdab599-dirty-284af3";
|
||||
|
||||
local tfjob = {
|
||||
apiVersion: "kubeflow.org/v1alpha2",
|
||||
kind: "TFJob",
|
||||
metadata: {
|
||||
name: name,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
tfReplicaSpecs: {
|
||||
Worker: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"python",
|
||||
"tf_cnn_benchmarks.py",
|
||||
"--batch_size=32",
|
||||
"--model=resnet50",
|
||||
"--variable_update=parameter_server",
|
||||
"--flush_stdout=true",
|
||||
"--num_gpus=1",
|
||||
"--local_parameter_device=cpu",
|
||||
"--device=cpu",
|
||||
"--data_format=NHWC",
|
||||
],
|
||||
image: image,
|
||||
name: "tensorflow",
|
||||
workingDir: "/opt/tf-benchmarks/scripts/tf_cnn_benchmarks",
|
||||
},
|
||||
],
|
||||
restartPolicy: "OnFailure",
|
||||
},
|
||||
},
|
||||
},
|
||||
Ps: {
|
||||
template: {
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"python",
|
||||
"tf_cnn_benchmarks.py",
|
||||
"--batch_size=32",
|
||||
"--model=resnet50",
|
||||
"--variable_update=parameter_server",
|
||||
"--flush_stdout=true",
|
||||
"--num_gpus=1",
|
||||
"--local_parameter_device=cpu",
|
||||
"--device=cpu",
|
||||
"--data_format=NHWC",
|
||||
],
|
||||
image: image,
|
||||
name: "tensorflow",
|
||||
workingDir: "/opt/tf-benchmarks/scripts/tf_cnn_benchmarks",
|
||||
},
|
||||
],
|
||||
restartPolicy: "OnFailure",
|
||||
},
|
||||
},
|
||||
tfReplicaType: "PS",
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
k.core.v1.list.new([
|
||||
tfjob,
|
||||
])
|
||||
|
|
@ -1,94 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.tf-serving-simple
|
||||
// @description tf-serving-simple
|
||||
// @shortDescription tf-serving-simple
|
||||
// @param name string Name to give to each of the components
|
||||
|
||||
local k = import "k.libsonnet";
|
||||
|
||||
local namespace = "default";
|
||||
local appName = import "param://name";
|
||||
local modelBasePath = "gs://kubeflow-models/inception";
|
||||
local modelName = "inception";
|
||||
local image = "gcr.io/kubeflow-images-public/tf-model-server-cpu:v20180327-995786ec";
|
||||
|
||||
local service = {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: appName,
|
||||
},
|
||||
name: appName,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "grpc-tf-serving",
|
||||
port: 9000,
|
||||
targetPort: 9000,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: appName,
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
};
|
||||
|
||||
local deployment = {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: appName,
|
||||
},
|
||||
name: appName,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: appName,
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"/usr/bin/tensorflow_model_server",
|
||||
"--port=9000",
|
||||
"--model_name=" + modelName,
|
||||
"--model_base_path=" + modelBasePath,
|
||||
],
|
||||
image: image,
|
||||
imagePullPolicy: "IfNotPresent",
|
||||
name: "inception",
|
||||
ports: [
|
||||
{
|
||||
containerPort: 9000,
|
||||
},
|
||||
],
|
||||
resources: {
|
||||
limits: {
|
||||
cpu: "4",
|
||||
memory: "4Gi",
|
||||
},
|
||||
requests: {
|
||||
cpu: "1",
|
||||
memory: "1Gi",
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
k.core.v1.list.new([
|
||||
service,
|
||||
deployment,
|
||||
])
|
||||
|
|
@ -1,179 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.tf-serving-with-istio
|
||||
// @description tf-serving-with-istio
|
||||
// @shortDescription tf-serving-with-istio
|
||||
// @param name string Name to give to each of the components
|
||||
|
||||
local k = import "k.libsonnet";
|
||||
|
||||
local namespace = "default";
|
||||
local appName = import "param://name";
|
||||
local modelBasePath = "gs://kubeflow-models/inception";
|
||||
local modelName = "inception";
|
||||
local image = "gcr.io/kubeflow-images-public/tf-model-server-cpu:v20180327-995786ec";
|
||||
local httpProxyImage = "gcr.io/kubeflow-images-public/tf-model-server-http-proxy:v20180327-995786ec";
|
||||
|
||||
local routeRule = {
|
||||
apiVersion: "config.istio.io/v1alpha2",
|
||||
kind: "RouteRule",
|
||||
metadata: {
|
||||
name: appName,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
destination: {
|
||||
name: "tf-serving",
|
||||
},
|
||||
precedence: 0,
|
||||
route: [
|
||||
{
|
||||
labels: {
|
||||
version: "v1",
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
local service = {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
annotations: {
|
||||
"getambassador.io/config":
|
||||
std.join("\n", [
|
||||
"---",
|
||||
"apiVersion: ambassador/v0",
|
||||
"kind: Mapping",
|
||||
"name: tfserving-mapping-tf-serving-get",
|
||||
"prefix: /models/tf-serving/",
|
||||
"rewrite: /",
|
||||
"method: GET",
|
||||
"service: tf-serving." + namespace + ":8000",
|
||||
"---",
|
||||
"apiVersion: ambassador/v0",
|
||||
"kind: Mapping",
|
||||
"name: tfserving-mapping-tf-serving-post",
|
||||
"prefix: /models/tf-serving/",
|
||||
"rewrite: /model/tf-serving:predict",
|
||||
"method: POST",
|
||||
"service: tf-serving." + namespace + ":8000",
|
||||
]),
|
||||
},
|
||||
labels: {
|
||||
app: appName,
|
||||
},
|
||||
name: appName,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "grpc-tf-serving",
|
||||
port: 9000,
|
||||
targetPort: 9000,
|
||||
},
|
||||
{
|
||||
name: "http-tf-serving-proxy",
|
||||
port: 8000,
|
||||
targetPort: 8000,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: appName,
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
};
|
||||
|
||||
local deployment = {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: appName,
|
||||
},
|
||||
name: appName,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: appName,
|
||||
},
|
||||
annotations: {
|
||||
"sidecar.istio.io/inject": "true",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"/usr/bin/tensorflow_model_server",
|
||||
"--port=9000",
|
||||
"--model_name=" + modelName,
|
||||
"--model_base_path=" + modelBasePath,
|
||||
],
|
||||
image: image,
|
||||
imagePullPolicy: "IfNotPresent",
|
||||
name: "inception",
|
||||
ports: [
|
||||
{
|
||||
containerPort: 9000,
|
||||
},
|
||||
],
|
||||
resources: {
|
||||
limits: {
|
||||
cpu: "4",
|
||||
memory: "4Gi",
|
||||
},
|
||||
requests: {
|
||||
cpu: "1",
|
||||
memory: "1Gi",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: appName + "-http-proxy",
|
||||
image: httpProxyImage,
|
||||
imagePullPolicy: "IfNotPresent",
|
||||
command: [
|
||||
"python",
|
||||
"/usr/src/app/server.py",
|
||||
"--port=8000",
|
||||
"--rpc_port=9000",
|
||||
"--rpc_timeout=10.0",
|
||||
],
|
||||
env: [],
|
||||
ports: [
|
||||
{
|
||||
containerPort: 8000,
|
||||
},
|
||||
],
|
||||
resources: {
|
||||
requests: {
|
||||
memory: "1Gi",
|
||||
cpu: "1",
|
||||
},
|
||||
limits: {
|
||||
memory: "4Gi",
|
||||
cpu: "4",
|
||||
},
|
||||
},
|
||||
securityContext: {
|
||||
runAsUser: 1000,
|
||||
fsGroup: 1000,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
k.core.v1.list.new([
|
||||
routeRule,
|
||||
service,
|
||||
deployment,
|
||||
])
|
||||
|
|
@ -1,102 +0,0 @@
|
|||
local tensorboard = import "kubeflow/tensorboard/tensorboard.libsonnet";
|
||||
|
||||
local params = {
|
||||
name: "tensorboard",
|
||||
logDir: "logs",
|
||||
targetPort: "6006",
|
||||
servicePort: "9000",
|
||||
serviceType: "LoadBalancer",
|
||||
defaultTbImage: "tensorflow/tensorflow:1.9.0",
|
||||
};
|
||||
local env = {
|
||||
namespace: "test-kf-001",
|
||||
};
|
||||
|
||||
local instance = tensorboard.new(env, params);
|
||||
|
||||
std.assertEqual(
|
||||
instance.tbService,
|
||||
{
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
annotations: {
|
||||
"getambassador.io/config": "---\napiVersion: ambassador/v0\nkind: Mapping\nname: tb-mapping-tensorboard-get\nprefix: /tensorboard/ tensorboard/\nrewrite: /\nmethod: GET\nservice: tensorboard.test-kf-001:9000",
|
||||
},
|
||||
labels: {
|
||||
app: "tensorboard",
|
||||
},
|
||||
name: "tensorboard",
|
||||
namespace: "test-kf-001",
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "tb",
|
||||
port: "9000",
|
||||
targetPort: "6006",
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "tensorboard",
|
||||
},
|
||||
type: "LoadBalancer",
|
||||
},
|
||||
}
|
||||
) &&
|
||||
|
||||
std.assertEqual(
|
||||
instance.tbDeployment,
|
||||
{
|
||||
apiVersion: "apps/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "tensorboard",
|
||||
},
|
||||
name: "tensorboard",
|
||||
namespace: "test-kf-001",
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "tensorboard",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"--logdir=logs",
|
||||
"--port=6006",
|
||||
],
|
||||
command: [
|
||||
"/usr/local/bin/tensorboard",
|
||||
],
|
||||
image: "tensorflow/tensorflow:1.9.0",
|
||||
imagePullPolicy: "IfNotPresent",
|
||||
name: "tensorboard",
|
||||
ports: [
|
||||
{
|
||||
containerPort: "6006",
|
||||
},
|
||||
],
|
||||
resources: {
|
||||
limits: {
|
||||
cpu: "4",
|
||||
memory: "4Gi",
|
||||
},
|
||||
requests: {
|
||||
cpu: "1",
|
||||
memory: "1Gi",
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
approvers:
|
||||
- abhi-g
|
||||
- jlewi
|
||||
- kunmingg
|
||||
- lluunn
|
||||
- r2d4
|
||||
- richardsliu
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
# gcp
|
||||
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
|
||||
|
||||
- [gcp](#gcp)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
|
||||
> This ksonnet package contains GCP specific prototypes.
|
||||
|
|
@ -1,451 +0,0 @@
|
|||
{
|
||||
local k = import "k.libsonnet",
|
||||
local util = import "kubeflow/common/util.libsonnet",
|
||||
new(_env, _params):: {
|
||||
local params = _params + _env {
|
||||
hostname: if std.objectHas(_params, "hostname") then _params.hostname else "null",
|
||||
ingressName: "envoy-ingress",
|
||||
},
|
||||
local namespace = params.namespace,
|
||||
|
||||
// Test if the given hostname is in the form of: "NAME.endpoints.PROJECT.cloud.goog"
|
||||
local isCloudEndpoint(str) = {
|
||||
local toks = if std.type(str) == "null" then [] else std.split(str, "."),
|
||||
result::
|
||||
(std.length(toks) == 5 && toks[1] == "endpoints" && toks[3] == "cloud" && toks[4] == "goog"),
|
||||
}.result,
|
||||
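// Example: isCloudEndpoint("myapp.endpoints.my-project.cloud.goog") is
|
||||
// true (five dot-separated tokens with "endpoints", "cloud", and "goog"
|
||||
// in the expected positions); any other shape, or a null hostname, is false.
|
||||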
|
||||
local initServiceAccount = {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "envoy",
|
||||
namespace: namespace,
|
||||
},
|
||||
}, // initServiceAccount
|
||||
initServiceAccount:: initServiceAccount,
|
||||
|
||||
local initClusterRoleBinding = {
|
||||
kind: "ClusterRoleBinding",
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
metadata: {
|
||||
name: "envoy",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "envoy",
|
||||
namespace: namespace,
|
||||
},
|
||||
],
|
||||
roleRef: {
|
||||
kind: "ClusterRole",
|
||||
name: "envoy",
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
},
|
||||
}, // initClusterRoleBinding
|
||||
initClusterRoleBinding:: initClusterRoleBinding,
|
||||
|
||||
local initClusterRole = {
|
||||
kind: "ClusterRole",
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
metadata: {
|
||||
name: "envoy",
|
||||
namespace: namespace,
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [""],
|
||||
resources: ["services", "configmaps", "secrets"],
|
||||
verbs: ["get", "list", "patch", "update"],
|
||||
},
|
||||
{
|
||||
apiGroups: ["extensions"],
|
||||
resources: ["ingresses"],
|
||||
verbs: ["get", "list", "update", "patch"],
|
||||
},
|
||||
],
|
||||
}, // initClusterRole
|
||||
initClusterRole:: initClusterRole,
|
||||
|
||||
local configMap = {
|
||||
apiVersion: "v1",
|
||||
kind: "ConfigMap",
|
||||
metadata: {
|
||||
name: "envoy-config",
|
||||
namespace: namespace,
|
||||
},
|
||||
data: {
|
||||
"update_backend.sh": importstr "update_backend.sh",
|
||||
},
|
||||
},
|
||||
configMap:: configMap,
|
||||
|
||||
local whoamiService = {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "whoami",
|
||||
},
|
||||
name: "whoami-app",
|
||||
namespace: params.namespace,
|
||||
annotations: {
|
||||
"getambassador.io/config":
|
||||
std.join("\n", [
|
||||
"---",
|
||||
"apiVersion: ambassador/v0",
|
||||
"kind: Mapping",
|
||||
"name: whoami-mapping",
|
||||
"prefix: /whoami",
|
||||
"rewrite: /whoami",
|
||||
"service: whoami-app." + namespace,
|
||||
]),
|
||||
}, //annotations
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
port: 80,
|
||||
targetPort: 8081,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "whoami",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
}, // whoamiService
|
||||
whoamiService:: whoamiService,
|
||||
|
||||
local whoamiApp = {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "whoami-app",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "whoami",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
env: [
|
||||
{
|
||||
name: "PORT",
|
||||
value: "8081",
|
||||
},
|
||||
],
|
||||
image: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0",
|
||||
name: "app",
|
||||
ports: [
|
||||
{
|
||||
containerPort: 8081,
|
||||
},
|
||||
],
|
||||
readinessProbe: {
|
||||
failureThreshold: 2,
|
||||
httpGet: {
|
||||
path: "/healthz",
|
||||
port: 8081,
|
||||
scheme: "HTTP",
|
||||
},
|
||||
periodSeconds: 10,
|
||||
successThreshold: 1,
|
||||
timeoutSeconds: 5,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
whoamiApp:: whoamiApp,
|
||||
|
||||
// Run the process to update the backend service
|
||||
local backendUpdater = {
|
||||
apiVersion: "apps/v1",
|
||||
kind: "StatefulSet",
|
||||
metadata: {
|
||||
name: "backend-updater",
|
||||
namespace: namespace,
|
||||
labels: {
|
||||
service: "backend-updater",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
selector: {
|
||||
matchLabels: {
|
||||
service: "backend-updater",
|
||||
},
|
||||
},
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "backend-updater",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
serviceAccountName: "envoy",
|
||||
containers: [
|
||||
{
|
||||
name: "backend-updater",
|
||||
image: params.ingressSetupImage,
|
||||
command: [
|
||||
"bash",
|
||||
"/var/envoy-config/update_backend.sh",
|
||||
],
|
||||
env: [
|
||||
{
|
||||
name: "NAMESPACE",
|
||||
value: namespace,
|
||||
},
|
||||
{
|
||||
name: "SERVICE",
|
||||
value: "ambassador",
|
||||
},
|
||||
{
|
||||
name: "GOOGLE_APPLICATION_CREDENTIALS",
|
||||
value: "/var/run/secrets/sa/admin-gcp-sa.json",
|
||||
},
|
||||
{
|
||||
name: "HEALTHCHECK_PATH",
|
||||
value: "/whoami",
|
||||
},
|
||||
{
|
||||
name: "INGRESS_NAME",
|
||||
value: params.ingressName,
|
||||
},
|
||||
],
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/var/envoy-config/",
|
||||
name: "config-volume",
|
||||
},
|
||||
{
|
||||
name: "sa-key",
|
||||
readOnly: true,
|
||||
mountPath: "/var/run/secrets/sa",
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
volumes: [
|
||||
{
|
||||
configMap: {
|
||||
name: "envoy-config",
|
||||
},
|
||||
name: "config-volume",
|
||||
},
|
||||
{
|
||||
name: "sa-key",
|
||||
secret: {
|
||||
secretName: "admin-gcp-sa",
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
}, // backendUpdater
|
||||
backendUpdater:: backendUpdater,
|
||||
|
||||
// TODO(danisla): Remove after https://github.com/kubernetes/ingress-gce/pull/388 is resolved per #1327.
|
||||
local ingressBootstrapConfigMap = {
|
||||
apiVersion: "v1",
|
||||
kind: "ConfigMap",
|
||||
metadata: {
|
||||
name: "ingress-bootstrap-config",
|
||||
namespace: namespace,
|
||||
},
|
||||
data: {
|
||||
"ingress_bootstrap.sh": importstr "ingress_bootstrap.sh",
|
||||
},
|
||||
},
|
||||
ingressBootstrapConfigMap:: ingressBootstrapConfigMap,
|
||||
|
||||
local ingressBootstrapJob = {
|
||||
apiVersion: "batch/v1",
|
||||
kind: "Job",
|
||||
metadata: {
|
||||
name: "ingress-bootstrap",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
template: {
|
||||
spec: {
|
||||
restartPolicy: "OnFailure",
|
||||
serviceAccountName: "envoy",
|
||||
containers: [
|
||||
{
|
||||
name: "bootstrap",
|
||||
image: params.ingressSetupImage,
|
||||
command: ["/var/ingress-config/ingress_bootstrap.sh"],
|
||||
env: [
|
||||
{
|
||||
name: "NAMESPACE",
|
||||
value: namespace,
|
||||
},
|
||||
{
|
||||
name: "TLS_SECRET_NAME",
|
||||
value: params.secretName,
|
||||
},
|
||||
{
|
||||
name: "TLS_HOST_NAME",
|
||||
value: params.hostname,
|
||||
},
|
||||
{
|
||||
name: "INGRESS_NAME",
|
||||
value: "envoy-ingress",
|
||||
},
|
||||
],
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/var/ingress-config/",
|
||||
name: "ingress-config",
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
volumes: [
|
||||
{
|
||||
configMap: {
|
||||
name: "ingress-bootstrap-config",
|
||||
// TODO(danisla): replace with std.parseOctal("0755") after upgrading to ksonnet 0.12
|
||||
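// 493 decimal == 0755 octal (rwxr-xr-x), so the mounted bootstrap script stays executable.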
defaultMode: 493,
|
||||
},
|
||||
name: "ingress-config",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
}, // ingressBootstrapJob
|
||||
ingressBootstrapJob:: ingressBootstrapJob,
|
||||
|
||||
local ingress = {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Ingress",
|
||||
metadata: {
|
||||
name: "envoy-ingress",
|
||||
namespace: namespace,
|
||||
annotations: {
|
||||
"kubernetes.io/tls-acme": "true",
|
||||
"ingress.kubernetes.io/ssl-redirect": "true",
|
||||
"kubernetes.io/ingress.global-static-ip-name": params.ipName,
|
||||
"certmanager.k8s.io/issuer": params.issuer,
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
rules: [
|
||||
{
|
||||
[if params.hostname != "null" then "host"]: params.hostname,
|
||||
http: {
|
||||
paths: [
|
||||
{
|
||||
backend: {
|
||||
// Due to https://github.com/kubernetes/contrib/blob/master/ingress/controllers/gce/examples/health_checks/README.md#limitations
|
||||
// Keep the servicePort the same as the port we are targeting on the backend so that servicePort will be the same as targetPort for the purpose of
|
||||
// health checking.
|
||||
serviceName: "ambassador",
|
||||
servicePort: 80,
|
||||
},
|
||||
path: "/*",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
}, // ingress
|
||||
ingress:: ingress,
|
||||
|
||||
local certificate = if params.privateGKECluster == "false" then (
|
||||
{
|
||||
apiVersion: "certmanager.k8s.io/v1alpha1",
|
||||
kind: "Certificate",
|
||||
metadata: {
|
||||
name: params.secretName,
|
||||
namespace: namespace,
|
||||
},
|
||||
|
||||
spec: {
|
||||
secretName: params.secretName,
|
||||
issuerRef: {
|
||||
name: params.issuer,
|
||||
kind: "ClusterIssuer",
|
||||
},
|
||||
commonName: params.hostname,
|
||||
dnsNames: [
|
||||
params.hostname,
|
||||
],
|
||||
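// Solve the ACME HTTP-01 challenge by reusing the existing envoy-ingress resource.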
acme: {
|
||||
config: [
|
||||
{
|
||||
http01: {
|
||||
ingress: "envoy-ingress",
|
||||
},
|
||||
domains: [
|
||||
params.hostname,
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
} // certificate
|
||||
),
|
||||
certificate:: certificate,
|
||||
|
||||
local cloudEndpoint = if isCloudEndpoint(params.hostname) then (
|
||||
{
|
||||
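// Assumes a Cloud Endpoints hostname of the form <name>.endpoints.<project>.cloud.goog.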
local makeEndpointParams(str) = {
|
||||
local toks = std.split(str, "."),
|
||||
result:: {
|
||||
name: toks[0],
|
||||
project: toks[2],
|
||||
},
|
||||
}.result,
|
||||
local endpointParams = makeEndpointParams(params.hostname),
|
||||
apiVersion: "ctl.isla.solutions/v1",
|
||||
kind: "CloudEndpoint",
|
||||
metadata: {
|
||||
name: endpointParams.name,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
project: endpointParams.project,
|
||||
targetIngress: {
|
||||
name: "envoy-ingress",
|
||||
namespace: namespace,
|
||||
},
|
||||
},
|
||||
} // cloudEndpoint
|
||||
),
|
||||
cloudEndpoint:: cloudEndpoint,
|
||||
|
||||
parts:: self,
|
||||
all:: [
|
||||
self.initServiceAccount,
|
||||
self.initClusterRoleBinding,
|
||||
self.initClusterRole,
|
||||
self.whoamiService,
|
||||
self.whoamiApp,
|
||||
self.backendUpdater,
|
||||
self.configMap,
|
||||
self.ingressBootstrapConfigMap,
|
||||
self.ingressBootstrapJob,
|
||||
self.ingress,
|
||||
self.certificate,
|
||||
self.cloudEndpoint,
|
||||
],
|
||||
|
||||
list(obj=self.all):: k.core.v1.list.new(obj,),
|
||||
},
|
||||
}
|
||||
|
|
@ -1,189 +0,0 @@
|
|||
{
|
||||
local k = import "k.libsonnet",
|
||||
new(_env, _params):: {
|
||||
local params = _params + _env,
|
||||
|
||||
local certificateCRD = {
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "certificates.certmanager.k8s.io",
|
||||
},
|
||||
spec: {
|
||||
group: "certmanager.k8s.io",
|
||||
version: "v1alpha1",
|
||||
names: {
|
||||
kind: "Certificate",
|
||||
plural: "certificates",
|
||||
},
|
||||
scope: "Namespaced",
|
||||
},
|
||||
},
|
||||
certificateCRD:: certificateCRD,
|
||||
|
||||
local clusterIssuerCRD = {
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "clusterissuers.certmanager.k8s.io",
|
||||
},
|
||||
|
||||
spec: {
|
||||
group: "certmanager.k8s.io",
|
||||
version: "v1alpha1",
|
||||
names: {
|
||||
kind: "ClusterIssuer",
|
||||
plural: "clusterissuers",
|
||||
},
|
||||
scope: "Cluster",
|
||||
},
|
||||
},
|
||||
clusterIssuerCRD:: clusterIssuerCRD,
|
||||
|
||||
local issuerCRD = {
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "issuers.certmanager.k8s.io",
|
||||
},
|
||||
spec: {
|
||||
group: "certmanager.k8s.io",
|
||||
version: "v1alpha1",
|
||||
names: {
|
||||
kind: "Issuer",
|
||||
plural: "issuers",
|
||||
},
|
||||
scope: "Namespaced",
|
||||
},
|
||||
},
|
||||
issuerCRD:: issuerCRD,
|
||||
|
||||
local serviceAccount = {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "cert-manager",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
},
|
||||
serviceAccount:: serviceAccount,
|
||||
|
||||
local clusterRole = {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
name: "cert-manager",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: ["certmanager.k8s.io"],
|
||||
resources: ["certificates", "issuers", "clusterissuers"],
|
||||
verbs: ["*"],
|
||||
},
|
||||
{
|
||||
apiGroups: [""],
|
||||
resources: ["secrets", "events", "endpoints", "services", "pods", "configmaps"],
|
||||
verbs: ["*"],
|
||||
},
|
||||
{
|
||||
apiGroups: ["extensions"],
|
||||
resources: ["ingresses"],
|
||||
verbs: ["*"],
|
||||
},
|
||||
],
|
||||
},
|
||||
clusterRole:: clusterRole,
|
||||
|
||||
local clusterRoleBinding = {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
name: "cert-manager",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "cert-manager",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
name: "cert-manager",
|
||||
namespace: params.namespace,
|
||||
kind: "ServiceAccount",
|
||||
},
|
||||
],
|
||||
},
|
||||
clusterRoleBinding:: clusterRoleBinding,
|
||||
|
||||
local deploy = {
|
||||
apiVersion: "apps/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "cert-manager",
|
||||
namespace: params.namespace,
|
||||
labels: {
|
||||
app: "cert-manager",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "cert-manager",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
serviceAccountName: "cert-manager",
|
||||
containers: [
|
||||
{
|
||||
name: "cert-manager",
|
||||
image: params.certManagerImage,
|
||||
imagePullPolicy: "IfNotPresent",
|
||||
args: [
|
||||
"--cluster-resource-namespace=" + params.namespace,
|
||||
"--leader-election-namespace=" + params.namespace,
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
deploy:: deploy,
|
||||
|
||||
local issuerLEProd = {
|
||||
apiVersion: "certmanager.k8s.io/v1alpha1",
|
||||
kind: "ClusterIssuer",
|
||||
metadata: {
|
||||
name: "letsencrypt-prod",
|
||||
},
|
||||
spec: {
|
||||
acme: {
|
||||
server: params.acmeUrl,
|
||||
email: params.acmeEmail,
|
||||
privateKeySecretRef: {
|
||||
name: "letsencrypt-prod-secret",
|
||||
},
|
||||
http01: {
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
issuerLEProd:: issuerLEProd,
|
||||
|
||||
parts:: self,
|
||||
all:: [
|
||||
self.certificateCRD,
|
||||
self.clusterIssuerCRD,
|
||||
self.issuerCRD,
|
||||
self.serviceAccount,
|
||||
self.clusterRole,
|
||||
self.clusterRoleBinding,
|
||||
self.deploy,
|
||||
self.issuerLEProd,
|
||||
],
|
||||
|
||||
list(obj=self.all):: k.core.v1.list.new(obj,),
|
||||
},
|
||||
}
|
||||
|
|
@ -1,214 +0,0 @@
|
|||
{
|
||||
local k = import "k.libsonnet",
|
||||
new(_env, _params):: {
|
||||
local params = {
|
||||
cloudEndpointsImage: "gcr.io/cloud-solutions-group/cloud-endpoints-controller:0.2.1",
|
||||
} + _params + _env,
|
||||
|
||||
local endpointsCRD = {
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "cloudendpoints.ctl.isla.solutions",
|
||||
},
|
||||
spec: {
|
||||
group: "ctl.isla.solutions",
|
||||
version: "v1",
|
||||
scope: "Namespaced",
|
||||
names: {
|
||||
plural: "cloudendpoints",
|
||||
singular: "cloudendpoint",
|
||||
kind: "CloudEndpoint",
|
||||
shortNames: [
|
||||
"cloudep",
|
||||
"ce",
|
||||
],
|
||||
},
|
||||
},
|
||||
}, // endpointsCRD
|
||||
endpointsCRD:: endpointsCRD,
|
||||
|
||||
local endpointsClusterRole = {
|
||||
kind: "ClusterRole",
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
metadata: {
|
||||
name: "cloud-endpoints-controller",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [""],
|
||||
resources: ["services", "configmaps"],
|
||||
verbs: ["get", "list"],
|
||||
},
|
||||
{
|
||||
apiGroups: ["extensions"],
|
||||
resources: ["ingresses"],
|
||||
verbs: ["get", "list"],
|
||||
},
|
||||
],
|
||||
},
|
||||
endpointsClusterRole:: endpointsClusterRole,
|
||||
|
||||
local endpointsClusterRoleBinding = {
|
||||
kind: "ClusterRoleBinding",
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
metadata: {
|
||||
name: "cloud-endpoints-controller",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "cloud-endpoints-controller",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
],
|
||||
roleRef: {
|
||||
kind: "ClusterRole",
|
||||
name: "cloud-endpoints-controller",
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
},
|
||||
},
|
||||
endpointsClusterRoleBinding:: endpointsClusterRoleBinding,
|
||||
|
||||
local endpointsService = {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
name: "cloud-endpoints-controller",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
spec: {
|
||||
type: "ClusterIP",
|
||||
ports: [
|
||||
{
|
||||
name: "http",
|
||||
port: 80,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "cloud-endpoints-controller",
|
||||
},
|
||||
},
|
||||
}, // endpointsService
|
||||
endpointsService:: endpointsService,
|
||||
|
||||
local endpointsServiceAccount = {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "cloud-endpoints-controller",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
}, // endpointsServiceAccount
|
||||
endpointsServiceAccount:: endpointsServiceAccount,
|
||||
|
||||
local endpointsDeploy = {
|
||||
apiVersion: "apps/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "cloud-endpoints-controller",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "cloud-endpoints-controller",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
serviceAccountName: "cloud-endpoints-controller",
|
||||
terminationGracePeriodSeconds: 5,
|
||||
containers: [
|
||||
{
|
||||
name: "cloud-endpoints-controller",
|
||||
image: params.cloudEndpointsImage,
|
||||
imagePullPolicy: "Always",
|
||||
env: [
|
||||
{
|
||||
name: "GOOGLE_APPLICATION_CREDENTIALS",
|
||||
value: "/var/run/secrets/sa/" + params.secretKey,
|
||||
},
|
||||
],
|
||||
volumeMounts: [
|
||||
{
|
||||
name: "sa-key",
|
||||
readOnly: true,
|
||||
mountPath: "/var/run/secrets/sa",
|
||||
},
|
||||
],
|
||||
readinessProbe: {
|
||||
httpGet: {
|
||||
path: "/healthz",
|
||||
port: 80,
|
||||
scheme: "HTTP",
|
||||
},
|
||||
periodSeconds: 5,
|
||||
timeoutSeconds: 5,
|
||||
successThreshold: 1,
|
||||
failureThreshold: 2,
|
||||
},
|
||||
},
|
||||
],
|
||||
volumes: [
|
||||
{
|
||||
name: "sa-key",
|
||||
secret: {
|
||||
secretName: params.secretName,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
}, // endpointsDeploy
|
||||
endpointsDeploy:: endpointsDeploy,
|
||||
|
||||
local endpointsCompositeController = {
|
||||
apiVersion: "metacontroller.k8s.io/v1alpha1",
|
||||
kind: "CompositeController",
|
||||
metadata: {
|
||||
name: "cloud-endpoints-controller",
|
||||
},
|
||||
spec: {
|
||||
generateSelector: true,
|
||||
resyncPeriodSeconds: 2,
|
||||
parentResource: {
|
||||
apiVersion: "ctl.isla.solutions/v1",
|
||||
resource: "cloudendpoints",
|
||||
},
|
||||
childResources: [],
|
||||
clientConfig: {
|
||||
service: {
|
||||
name: "cloud-endpoints-controller",
|
||||
namespace: params.namespace,
|
||||
caBundle: "...",
|
||||
},
|
||||
},
|
||||
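// Metacontroller calls this sync webhook to compute the desired state for each CloudEndpoint.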
hooks: {
|
||||
sync: {
|
||||
webhook: {
|
||||
url: "http://cloud-endpoints-controller." + params.namespace + "/sync",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}, // endpointsCompositeController
|
||||
endpointsCompositeController:: endpointsCompositeController,
|
||||
|
||||
parts:: self,
|
||||
local all = [
|
||||
self.endpointsCRD,
|
||||
self.endpointsClusterRole,
|
||||
self.endpointsClusterRoleBinding,
|
||||
self.endpointsService,
|
||||
self.endpointsServiceAccount,
|
||||
self.endpointsDeploy,
|
||||
self.endpointsCompositeController,
|
||||
],
|
||||
all:: all,
|
||||
|
||||
list(obj=self.all):: k.core.v1.list.new(obj,),
|
||||
},
|
||||
}
|
||||
|
|
@ -1,71 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
#
|
||||
# A script to modify envoy config to perform JWT validation
|
||||
# given the information for the service.
|
||||
# Script executed by the iap container to configure IAP. When finished, the envoy config is created with the JWT audience.
|
||||
|
||||
[ -z ${NAMESPACE} ] && echo Error NAMESPACE must be set && exit 1
|
||||
[ -z ${SERVICE} ] && echo Error SERVICE must be set && exit 1
|
||||
|
||||
PROJECT=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/project-id)
|
||||
if [ -z ${PROJECT} ]; then
|
||||
echo Error unable to fetch PROJECT from compute metadata
|
||||
exit 1
|
||||
fi
|
||||
|
||||
PROJECT_NUM=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/numeric-project-id)
|
||||
if [ -z ${PROJECT_NUM} ]; then
|
||||
echo Error unable to fetch PROJECT_NUM from compute metadata
|
||||
exit 1
|
||||
fi
|
||||
|
||||
checkIAP() {
|
||||
# created by init container.
|
||||
. /var/shared/healthz.env
|
||||
|
||||
# If the node port or backend id changes, so does the JWT audience.
|
||||
CURR_NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}')
|
||||
CURR_BACKEND_ID=$(gcloud compute --project=${PROJECT} backend-services list --filter=name~k8s-be-${CURR_NODE_PORT}- --format='value(id)')
|
||||
[ "$BACKEND_ID" == "$CURR_BACKEND_ID" ]
|
||||
}
|
||||
|
||||
# Activate the service account
|
||||
for i in $(seq 1 10); do
|
||||
gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS} && break || sleep 10
|
||||
done
|
||||
|
||||
# Print out the config for debugging
|
||||
gcloud config list
|
||||
|
||||
NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}')
|
||||
while [[ -z ${BACKEND_ID} ]]; do
|
||||
BACKEND_ID=$(gcloud compute --project=${PROJECT} backend-services list --filter=name~k8s-be-${NODE_PORT}- --format='value(id)')
|
||||
echo "Waiting for backend id PROJECT=${PROJECT} NAMESPACE=${NAMESPACE} SERVICE=${SERVICE} filter=name~k8s-be-${NODE_PORT}-..."
|
||||
sleep 2
|
||||
done
|
||||
echo BACKEND_ID=${BACKEND_ID}
|
||||
|
||||
NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}')
|
||||
BACKEND_SERVICE=$(gcloud --project=${PROJECT} compute backend-services list --filter=name~k8s-be-${NODE_PORT}- --uri)
|
||||
|
||||
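# IAP issues JWTs whose audience has the form /projects/<project number>/global/backendServices/<backend id>.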
JWT_AUDIENCE="/projects/${PROJECT_NUM}/global/backendServices/${BACKEND_ID}"
|
||||
|
||||
# For healthcheck compare.
|
||||
echo "JWT_AUDIENCE=${JWT_AUDIENCE}" > /var/shared/healthz.env
|
||||
echo "NODE_PORT=${NODE_PORT}" >> /var/shared/healthz.env
|
||||
echo "BACKEND_ID=${BACKEND_ID}" >> /var/shared/healthz.env
|
||||
|
||||
kubectl get configmap -n ${NAMESPACE} envoy-config -o jsonpath='{.data.envoy-config\.json}' |
|
||||
sed -e "s|{{JWT_AUDIENCE}}|${JWT_AUDIENCE}|g" >/var/shared/envoy-config.json
|
||||
|
||||
echo "Restarting envoy"
|
||||
curl -s ${ENVOY_ADMIN}/quitquitquit
|
||||
|
||||
# Verify IAP every 10 seconds.
|
||||
while true; do
|
||||
if ! checkIAP; then
|
||||
echo "$(date) WARN: IAP check failed, restarting container."
|
||||
exit 1
|
||||
fi
|
||||
sleep 10
|
||||
done
|
||||
|
|
@ -1,115 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
usage() {
|
||||
cat <<EOF
|
||||
Generate certificate suitable for use with a sidecar-injector webhook service.
|
||||
This script uses k8s' CertificateSigningRequest API to generate a
|
||||
certificate signed by k8s CA suitable for use with sidecar-injector webhook
|
||||
services. This requires permissions to create and approve CSR. See
|
||||
https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster for
|
||||
detailed explanation and additional instructions.
|
||||
The server key/cert and k8s CA cert are stored in a k8s secret.
|
||||
usage: ${0} [OPTIONS]
|
||||
The following flags are required.
|
||||
--service Service name of webhook.
|
||||
--namespace Namespace where webhook service and secret reside.
|
||||
--secret Secret name for CA certificate and server certificate/key pair.
|
||||
EOF
|
||||
exit 1
|
||||
}
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case ${1} in
|
||||
--service)
|
||||
service="$2"
|
||||
shift
|
||||
;;
|
||||
--secret)
|
||||
secret="$2"
|
||||
shift
|
||||
;;
|
||||
--namespace)
|
||||
namespace="$2"
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
[ -z ${service} ] && service=gcp-cred-webhook
|
||||
[ -z ${secret} ] && secret=gcp-cred-webhook-certs
|
||||
[ -z ${namespace} ] && namespace=${NAMESPACE}
|
||||
[ -z ${namespace} ] && namespace=default
|
||||
|
||||
echo ${service}
|
||||
echo ${namespace}
|
||||
echo ${secret}
|
||||
|
||||
if [ ! -x "$(command -v openssl)" ]; then
|
||||
echo "openssl not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
csrName=${service}.${namespace}
|
||||
tmpdir=$(mktemp -d)
|
||||
echo "creating certs in tmpdir ${tmpdir} "
|
||||
|
||||
# -x509 outputs a self-signed certificate instead of a certificate request; it is later used as the self-signed root CA
|
||||
openssl req -x509 -newkey rsa:2048 -keyout ${tmpdir}/self_ca.key -out ${tmpdir}/self_ca.crt -days 365 -nodes -subj /C=/ST=/L=/O=/OU=/CN=test-certificate-authority
|
||||
|
||||
cat <<EOF >> ${tmpdir}/csr.conf
|
||||
[req]
|
||||
req_extensions = v3_req
|
||||
distinguished_name = req_distinguished_name
|
||||
[req_distinguished_name]
|
||||
[ v3_req ]
|
||||
basicConstraints = CA:FALSE
|
||||
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
|
||||
extendedKeyUsage = serverAuth
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
DNS.1 = ${service}
|
||||
DNS.2 = ${service}.${namespace}
|
||||
DNS.3 = ${service}.${namespace}.svc
|
||||
EOF
|
||||
|
||||
openssl genrsa -out ${tmpdir}/server-key.pem 2048
|
||||
openssl req -new -key ${tmpdir}/server-key.pem -subj "/CN=${service}.${namespace}.svc" -out ${tmpdir}/server.csr -config ${tmpdir}/csr.conf
|
||||
|
||||
# Self sign
|
||||
openssl x509 -req -days 365 -in ${tmpdir}/server.csr -CA ${tmpdir}/self_ca.crt -CAkey ${tmpdir}/self_ca.key -CAcreateserial -out ${tmpdir}/server-cert.pem
|
||||
|
||||
# create the secret with CA cert and server cert/key
|
||||
kubectl create secret generic ${secret} \
|
||||
--from-file=key.pem=${tmpdir}/server-key.pem \
|
||||
--from-file=cert.pem=${tmpdir}/server-cert.pem \
|
||||
--dry-run -o yaml |
|
||||
kubectl -n ${namespace} apply -f -
|
||||
|
||||
cat ${tmpdir}/self_ca.crt
|
||||
# -a means base64 encode
|
||||
caBundle=`cat ${tmpdir}/self_ca.crt | openssl enc -a -A`
|
||||
echo ${caBundle}
|
||||
|
||||
patchString='[{"op": "replace", "path": "/webhooks/0/clientConfig/caBundle", "value":"{{CA_BUNDLE}}"}]'
|
||||
patchString=`echo ${patchString} | sed "s|{{CA_BUNDLE}}|${caBundle}|g"`
|
||||
echo ${patchString}
|
||||
|
||||
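# Compares the caBundle currently set on the mutating webhook with the one generated above.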
checkWebhookConfig() {
|
||||
currentBundle=$(kubectl get mutatingwebhookconfigurations -n ${namespace} gcp-cred-webhook -o jsonpath='{.webhooks[0].clientConfig.caBundle}')
|
||||
[[ "$currentBundle" == "$caBundle" ]]
|
||||
}
|
||||
|
||||
while true; do
|
||||
if ! checkWebhookConfig; then
|
||||
echo "patching ca bundle for webhook configuration..."
|
||||
kubectl patch mutatingwebhookconfiguration gcp-cred-webhook \
|
||||
--type='json' -p="${patchString}"
|
||||
fi
|
||||
sleep 10
|
||||
done
|
||||
|
|
@ -1,104 +0,0 @@
|
|||
{
|
||||
local k = import "k.libsonnet",
|
||||
new(_env, _params):: {
|
||||
local params = _params + _env,
|
||||
|
||||
local persistentVolume = {
|
||||
apiVersion: "v1",
|
||||
kind: "PersistentVolume",
|
||||
metadata: {
|
||||
name: params.name,
|
||||
namespace: params.namespace,
|
||||
},
|
||||
spec: {
|
||||
capacity: {
|
||||
storage: params.storageCapacity,
|
||||
},
|
||||
accessModes: [
|
||||
"ReadWriteMany",
|
||||
],
|
||||
nfs: {
|
||||
path: params.path,
|
||||
server: params.serverIP,
|
||||
},
|
||||
},
|
||||
},
|
||||
persistentVolume:: persistentVolume,
|
||||
|
||||
local persistentVolumeClaim = {
|
||||
apiVersion: "v1",
|
||||
kind: "PersistentVolumeClaim",
|
||||
metadata: {
|
||||
name: params.name,
|
||||
namespace: params.namespace,
|
||||
},
|
||||
spec: {
|
||||
accessModes: [
|
||||
"ReadWriteMany",
|
||||
],
|
||||
storageClassName: "nfs-storage",
|
||||
volumeName: params.name,
|
||||
resources: {
|
||||
requests: {
|
||||
storage: params.storageCapacity,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
persistentVolumeClaim:: persistentVolumeClaim,
|
||||
|
||||
// Set 777 permissions on the GCFS NFS so that non-root users
|
||||
// like jovyan can use that NFS share
|
||||
local gcfsPermissions = {
|
||||
apiVersion: "batch/v1",
|
||||
kind: "Job",
|
||||
metadata: {
|
||||
name: "set-gcfs-permissions",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
spec: {
|
||||
template: {
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
name: "set-gcfs-permissions",
|
||||
image: params.image,
|
||||
command: [
|
||||
"chmod",
|
||||
"777",
|
||||
"/kubeflow-gcfs",
|
||||
],
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/kubeflow-gcfs",
|
||||
name: params.name,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
restartPolicy: "OnFailure",
|
||||
volumes: [
|
||||
{
|
||||
name: params.name,
|
||||
persistentVolumeClaim: {
|
||||
claimName: params.name,
|
||||
readOnly: false,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
gcfsPermissions:: gcfsPermissions,
|
||||
|
||||
parts:: self,
|
||||
all:: [
|
||||
self.persistentVolume,
|
||||
self.persistentVolumeClaim,
|
||||
self.gcfsPermissions,
|
||||
],
|
||||
|
||||
list(obj=self.all):: k.core.v1.list.new(obj,),
|
||||
},
|
||||
}
|
||||
|
|
@ -1,130 +0,0 @@
|
|||
{
|
||||
local k = import "k.libsonnet",
|
||||
new(_env, _params):: {
|
||||
local params = _params + _env,
|
||||
|
||||
local daemonset = {
|
||||
"apiVersion": "extensions/v1beta1",
|
||||
"kind": "DaemonSet",
|
||||
"metadata": {
|
||||
"name": "nvidia-driver-installer",
|
||||
"namespace": "kube-system",
|
||||
"labels": {
|
||||
"k8s-app": "nvidia-driver-installer"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"template": {
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"name": "nvidia-driver-installer",
|
||||
"k8s-app": "nvidia-driver-installer"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
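// Schedule the installer only onto nodes that advertise a GKE accelerator.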
"affinity": {
|
||||
"nodeAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"nodeSelectorTerms": [
|
||||
{
|
||||
"matchExpressions": [
|
||||
{
|
||||
"key": "cloud.google.com/gke-accelerator",
|
||||
"operator": "Exists"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"tolerations": [
|
||||
{
|
||||
"operator": "Exists"
|
||||
}
|
||||
],
|
||||
"hostNetwork": true,
|
||||
"hostPID": true,
|
||||
"volumes": [
|
||||
{
|
||||
"name": "dev",
|
||||
"hostPath": {
|
||||
"path": "/dev"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "nvidia-install-dir-host",
|
||||
"hostPath": {
|
||||
"path": "/home/kubernetes/bin/nvidia"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "root-mount",
|
||||
"hostPath": {
|
||||
"path": "/"
|
||||
}
|
||||
}
|
||||
],
|
||||
"initContainers": [
|
||||
{
|
||||
"image": "cos-nvidia-installer:fixed",
|
||||
"imagePullPolicy": "Never",
|
||||
"name": "nvidia-driver-installer",
|
||||
"resources": {
|
||||
"requests": {
|
||||
"cpu": 0.15
|
||||
}
|
||||
},
|
||||
"securityContext": {
|
||||
"privileged": true
|
||||
},
|
||||
"env": [
|
||||
{
|
||||
"name": "NVIDIA_INSTALL_DIR_HOST",
|
||||
"value": "/home/kubernetes/bin/nvidia"
|
||||
},
|
||||
{
|
||||
"name": "NVIDIA_INSTALL_DIR_CONTAINER",
|
||||
"value": "/usr/local/nvidia"
|
||||
},
|
||||
{
|
||||
"name": "ROOT_MOUNT_DIR",
|
||||
"value": "/root"
|
||||
}
|
||||
],
|
||||
"volumeMounts": [
|
||||
{
|
||||
"name": "nvidia-install-dir-host",
|
||||
"mountPath": "/usr/local/nvidia"
|
||||
},
|
||||
{
|
||||
"name": "dev",
|
||||
"mountPath": "/dev"
|
||||
},
|
||||
{
|
||||
"name": "root-mount",
|
||||
"mountPath": "/root"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"containers": [
|
||||
{
|
||||
"image": "gcr.io/google-containers/pause:2.0",
|
||||
"name": "pause"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
daemonset:: daemonset,
|
||||
|
||||
parts:: self,
|
||||
all:: [
|
||||
self.daemonset,
|
||||
],
|
||||
|
||||
list(obj=self.all):: k.core.v1.list.new(obj,),
|
||||
},
|
||||
}
|
||||
|
|
@ -1,51 +0,0 @@
|
|||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: default-routes
|
||||
namespace: kubeflow
|
||||
spec:
|
||||
hosts:
|
||||
- "*"
|
||||
gateways:
|
||||
- kubeflow-gateway
|
||||
http:
|
||||
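# /healthz and /whoami go straight to the sample app; all other paths fall through to ambassador.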
- match:
|
||||
- uri:
|
||||
exact: /healthz
|
||||
route:
|
||||
- destination:
|
||||
port:
|
||||
number: 80
|
||||
host: whoami-app.kubeflow.svc.cluster.local
|
||||
- match:
|
||||
- uri:
|
||||
exact: /whoami
|
||||
route:
|
||||
- destination:
|
||||
port:
|
||||
number: 80
|
||||
host: whoami-app.kubeflow.svc.cluster.local
|
||||
- match:
|
||||
- uri:
|
||||
prefix: /
|
||||
route:
|
||||
- destination:
|
||||
port:
|
||||
number: 80
|
||||
host: ambassador.kubeflow.svc.cluster.local
|
||||
---
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: Gateway
|
||||
metadata:
|
||||
name: kubeflow-gateway
|
||||
namespace: kubeflow
|
||||
spec:
|
||||
selector:
|
||||
istio: ingressgateway
|
||||
servers:
|
||||
- port:
|
||||
number: 80
|
||||
name: http
|
||||
protocol: HTTP
|
||||
hosts:
|
||||
- "*"
|
||||
File diff suppressed because it is too large
|
|
@ -1,21 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set -x
|
||||
set -e
|
||||
|
||||
# This is a workaround until this is resolved: https://github.com/kubernetes/ingress-gce/pull/388
|
||||
# The long-term solution is to use a managed SSL certificate on GKE once the feature is GA.
|
||||
|
||||
# The ingress is initially created without a tls spec.
|
||||
# Wait until cert-manager generates the certificate using the http-01 challenge on the GCLB ingress.
|
||||
# After the certificate is obtained, patch the ingress with the tls spec to enable SSL on the GCLB.
|
||||
|
||||
# Wait for certificate.
|
||||
until kubectl -n ${NAMESPACE} get secret ${TLS_SECRET_NAME} 2>/dev/null; do
|
||||
echo "Waiting for certificate..."
|
||||
sleep 2
|
||||
done
|
||||
|
||||
kubectl -n ${NAMESPACE} patch ingress ${INGRESS_NAME} --type='json' -p '[{"op": "add", "path": "/spec/tls", "value": [{"secretName": "'${TLS_SECRET_NAME}'", "hosts":["'${TLS_HOST_NAME}'"]}]}]'
|
||||
|
||||
echo "Done"
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
apiVersion: authentication.istio.io/v1alpha1
|
||||
kind: Policy
|
||||
metadata:
|
||||
name: ingress-jwt
|
||||
namespace: istio-system
|
||||
spec:
|
||||
targets:
|
||||
- name: istio-ingressgateway
|
||||
ports:
|
||||
- number: 80
|
||||
origins:
|
||||
- jwt:
|
||||
issuer: https://cloud.google.com/iap
|
||||
jwksUri: https://www.gstatic.com/iap/verify/public_key-jwk
|
||||
audiences:
|
||||
- {{JWT_AUDIENCE}}
|
||||
jwtHeaders:
|
||||
- x-goog-iap-jwt-assertion
|
||||
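# Exempt the health check and the ACME HTTP-01 challenge path from JWT validation.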
trigger_rules:
|
||||
- excluded_paths:
|
||||
- exact: /healthz
|
||||
- prefix: /.well-known/acme-challenge
|
||||
principalBinding: USE_ORIGIN
|
||||
|
|
@ -1,189 +0,0 @@
|
|||
{
|
||||
local k = import "k.libsonnet",
|
||||
new(_env, _params):: {
|
||||
local params = _params + _env,
|
||||
|
||||
local metricServiceAccount = {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
name: "metric-collector",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
},
|
||||
metricServiceAccount:: metricServiceAccount,
|
||||
|
||||
local metricRole = {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
name: "metric-collector",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"services",
|
||||
"events",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
metricRole:: metricRole,
|
||||
|
||||
local metricRoleBinding = {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
name: "metric-collector",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "metric-collector",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "metric-collector",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
],
|
||||
},
|
||||
metricRoleBinding:: metricRoleBinding,
|
||||
|
||||
local service = {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "metric-collector",
|
||||
},
|
||||
name: "metric-collector",
|
||||
namespace: params.namespace,
|
||||
annotations: {
|
||||
"prometheus.io/scrape": "true",
|
||||
"prometheus.io/path": "/",
|
||||
"prometheus.io/port": "8000",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "metric-collector",
|
||||
port: 8000,
|
||||
targetPort: 8000,
|
||||
protocol: "TCP",
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
},
|
||||
service:: service,
|
||||
|
||||
local deploy = {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
name: "metric-collector",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
selector: {
|
||||
matchLabels: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
},
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
namespace: params.namespace,
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
env: [
|
||||
{
|
||||
name: "GOOGLE_APPLICATION_CREDENTIALS",
|
||||
value: "/var/run/secrets/sa/admin-gcp-sa.json",
|
||||
},
|
||||
{
|
||||
name: "CLIENT_ID",
|
||||
valueFrom: {
|
||||
secretKeyRef: {
|
||||
name: params.oauthSecretName,
|
||||
key: "client_id",
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
command: [
|
||||
"python3",
|
||||
"/opt/kubeflow-readiness.py",
|
||||
],
|
||||
args: [
|
||||
"--url=" + params.targetUrl,
|
||||
"--client_id=$(CLIENT_ID)",
|
||||
],
|
||||
volumeMounts: [
|
||||
{
|
||||
name: "sa-key",
|
||||
readOnly: true,
|
||||
mountPath: "/var/run/secrets/sa",
|
||||
},
|
||||
],
|
||||
image: params.metricImage,
|
||||
name: "exporter",
|
||||
},
|
||||
],
|
||||
serviceAccountName: "metric-collector",
|
||||
restartPolicy: "Always",
|
||||
volumes: [
|
||||
{
|
||||
name: "sa-key",
|
||||
secret: {
|
||||
secretName: "admin-gcp-sa",
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
}, // deploy
|
||||
deploy:: deploy,
|
||||
|
||||
parts:: self,
|
||||
all:: [
|
||||
self.metricServiceAccount,
|
||||
self.metricRole,
|
||||
self.metricRoleBinding,
|
||||
self.service,
|
||||
self.deploy,
|
||||
],
|
||||
|
||||
list(obj=self.all):: k.core.v1.list.new(obj,),
|
||||
},
|
||||
}
|
||||
|
|
@ -1,35 +0,0 @@
|
|||
{
|
||||
"name": "gcp",
|
||||
"apiVersion": "0.0.1",
|
||||
"kind": "ksonnet.io/parts",
|
||||
"description": "Core components of Kubeflow.\n",
|
||||
"author": "kubeflow team <kubeflow-team@google.com>",
|
||||
"contributors": [
|
||||
{
|
||||
"name": "Jeremy Lewi",
|
||||
"email": "jlewi@google.com"
|
||||
}
|
||||
],
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/kubeflow/kubeflow"
|
||||
},
|
||||
"bugs": {
|
||||
"url": "https://github.com/kubeflow/kubeflow/issues"
|
||||
},
|
||||
"keywords": [
|
||||
"kubeflow",
|
||||
"tensorflow"
|
||||
],
|
||||
"quickStart": {
|
||||
"prototype": "io.ksonnet.pkg.kubeflow",
|
||||
"componentName": "gcp",
|
||||
"flags": {
|
||||
"name": "gcp",
|
||||
"namespace": "default",
|
||||
"disks": ""
|
||||
},
|
||||
"comment": "GCP specific Kubeflow components."
|
||||
},
|
||||
"license": "Apache 2.0"
|
||||
}
|
||||
|
|
@ -1,221 +0,0 @@
|
|||
{
|
||||
local k = import "k.libsonnet",
|
||||
new(_env, _params):: {
|
||||
local params = _params + _env,
|
||||
|
||||
local namespace = {
|
||||
apiVersion: "v1",
|
||||
kind: "Namespace",
|
||||
metadata: {
|
||||
name: "stackdriver",
|
||||
},
|
||||
},
|
||||
namespace:: namespace,
|
||||
|
||||
local clusterRole = {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
name: "prometheus",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"nodes",
|
||||
"nodes/proxy",
|
||||
"services",
|
||||
"endpoints",
|
||||
"pods",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"list",
|
||||
"watch",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"extensions",
|
||||
],
|
||||
resources: [
|
||||
"ingresses",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"list",
|
||||
"watch",
|
||||
],
|
||||
},
|
||||
{
|
||||
nonResourceURLs: [
|
||||
"/metrics",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
clusterRole:: clusterRole,
|
||||
|
||||
local serviceAccount = {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "prometheus",
|
||||
namespace: "stackdriver",
|
||||
},
|
||||
},
|
||||
serviceAccount:: serviceAccount,
|
||||
|
||||
local clusterRoleBinding = {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
name: "prometheus-stackdriver",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "prometheus",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "prometheus",
|
||||
namespace: "stackdriver",
|
||||
},
|
||||
],
|
||||
},
|
||||
clusterRoleBinding:: clusterRoleBinding,
|
||||
|
||||
local service = {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
name: "prometheus",
|
||||
},
|
||||
name: "prometheus",
|
||||
namespace: "stackdriver",
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "prometheus",
|
||||
port: 9090,
|
||||
protocol: "TCP",
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "prometheus",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
},
|
||||
service:: service,
|
||||
|
||||
local configMap = {
|
||||
apiVersion: "v1",
|
||||
kind: "ConfigMap",
|
||||
metadata: {
|
||||
name: "prometheus",
|
||||
namespace: "stackdriver",
|
||||
},
|
||||
data: {
|
||||
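// jsonnet's % operator performs Python-style formatting, filling the %(...)s placeholders in prometheus.yml.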
"prometheus.yml": (importstr "prometheus.yml") % {
|
||||
"project-id-placeholder": params.projectId,
|
||||
"cluster-name-placeholder": params.clusterName,
|
||||
"zone-placeholder": params.zone,
|
||||
},
|
||||
},
|
||||
},
|
||||
configMap:: configMap,
|
||||
|
||||
local deployment = {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "prometheus",
|
||||
namespace: "stackdriver",
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
selector: {
|
||||
matchLabels: {
|
||||
app: "prometheus",
|
||||
},
|
||||
},
|
||||
template: {
|
||||
metadata: {
|
||||
annotations: {
|
||||
"prometheus.io/scrape": "true",
|
||||
},
|
||||
labels: {
|
||||
app: "prometheus",
|
||||
},
|
||||
name: "prometheus",
|
||||
namespace: "stackdriver",
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
image: "gcr.io/stackdriver-prometheus/stackdriver-prometheus:release-0.4.2",
|
||||
imagePullPolicy: "Always",
|
||||
name: "prometheus",
|
||||
ports: [
|
||||
{
|
||||
containerPort: 9090,
|
||||
name: "web",
|
||||
},
|
||||
],
|
||||
resources: {
|
||||
limits: {
|
||||
cpu: "400m",
|
||||
memory: "1000Mi",
|
||||
},
|
||||
requests: {
|
||||
cpu: "20m",
|
||||
memory: "50Mi",
|
||||
},
|
||||
},
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/etc/prometheus",
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
serviceAccountName: "prometheus",
|
||||
volumes: [
|
||||
{
|
||||
configMap: {
|
||||
name: "prometheus",
|
||||
},
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
deployment:: deployment,
|
||||
|
||||
parts:: self,
|
||||
all:: [
|
||||
self.namespace,
|
||||
self.clusterRole,
|
||||
self.serviceAccount,
|
||||
self.clusterRoleBinding,
|
||||
self.service,
|
||||
self.configMap,
|
||||
self.deployment,
|
||||
],
|
||||
|
||||
list(obj=self.all):: k.core.v1.list.new(obj,),
|
||||
},
|
||||
}
|
||||
|
|
@ -1,140 +0,0 @@
|
|||
# Source: https://github.com/stackdriver/prometheus/blob/master/documentation/examples/prometheus.yml
|
||||
global:
|
||||
external_labels:
|
||||
_stackdriver_project_id: %(project-id-placeholder)s
|
||||
_kubernetes_cluster_name: %(cluster-name-placeholder)s
|
||||
_kubernetes_location: %(zone-placeholder)s
|
||||
|
||||
# Scrape config for nodes (kubelet).
|
||||
#
|
||||
# Rather than connecting directly to the node, the scrape is proxied through the
|
||||
# Kubernetes apiserver. This means it will work if Prometheus is running out of
|
||||
# cluster, or can't connect to nodes for some other reason (e.g. because of
|
||||
# firewalling).
|
||||
scrape_configs:
|
||||
- job_name: 'kubernetes-nodes'
|
||||
|
||||
# Default to scraping over https. If required, just disable this or change to
|
||||
# `http`.
|
||||
scheme: https
|
||||
|
||||
# This TLS & bearer token file config is used to connect to the actual scrape
|
||||
# endpoints for cluster components. This is separate to discovery auth
|
||||
# configuration because discovery & scraping are two separate concerns in
|
||||
# Prometheus. The discovery auth config is automatic if Prometheus runs inside
|
||||
# the cluster. Otherwise, more config options have to be provided within the
|
||||
# <kubernetes_sd_config>.
|
||||
tls_config:
|
||||
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
|
||||
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: node
|
||||
|
||||
relabel_configs:
|
||||
- target_label: __address__
|
||||
replacement: kubernetes.default.svc:443
|
||||
- source_labels: [__meta_kubernetes_node_name]
|
||||
regex: (.+)
|
||||
target_label: __metrics_path__
|
||||
replacement: /api/v1/nodes/${1}/proxy/metrics
|
||||
|
||||
# Example scrape config for pods
|
||||
#
|
||||
# The relabeling allows the actual pod scrape endpoint to be configured via the
|
||||
# following annotations:
|
||||
#
|
||||
# * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
|
||||
# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
|
||||
# * `prometheus.io/port`: Scrape the pod on the indicated port instead of the
|
||||
# pod's declared ports (default is a port-free target if none are declared).
|
||||
- job_name: 'kubernetes-pods-containers'
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: pod
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
|
||||
action: keep
|
||||
regex: true
|
||||
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
|
||||
action: replace
|
||||
target_label: __metrics_path__
|
||||
regex: (.+)
|
||||
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
target_label: __address__
|
||||
|
||||
# Scrape config for service endpoints.
|
||||
#
|
||||
# The relabeling allows the actual service scrape endpoint to be configured
|
||||
# via the following annotations:
|
||||
#
|
||||
# * `prometheus.io/scrape`: Only scrape services that have a value of `true`
|
||||
# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
|
||||
# to set this to `https` & most likely set the `tls_config` of the scrape config.
|
||||
# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
|
||||
# * `prometheus.io/port`: If the metrics are exposed on a different port to the
|
||||
# service then set this appropriately.
|
||||
- job_name: 'kubernetes-service-endpoints'
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: endpoints
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
|
||||
action: keep
|
||||
regex: true
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
|
||||
action: replace
|
||||
target_label: __scheme__
|
||||
regex: (https?)
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
|
||||
action: replace
|
||||
target_label: __metrics_path__
|
||||
regex: (.+)
|
||||
- source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
target_label: __address__
|
||||
regex: ([^:]+)(?::\d+)?;(\d+)
|
||||
replacement: $1:$2
|
||||
|
||||
|
||||
# Scrape config for k8s services
|
||||
- job_name: 'kubernetes-services'
|
||||
|
||||
kubernetes_sd_configs:
|
||||
- role: service
|
||||
|
||||
relabel_configs:
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
|
||||
action: keep
|
||||
regex: true
|
||||
- action: labelmap
|
||||
regex: __meta_kubernetes_service_label_(.+)
|
||||
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
|
||||
action: replace
|
||||
target_label: __metrics_path__
|
||||
- source_labels: [__address__,__meta_kubernetes_service_annotation_prometheus_io_port]
|
||||
action: replace
|
||||
target_label: __address__
|
||||
regex: (.+)(?::\d+);(\d+)
|
||||
replacement: $1:$2
|
||||
|
||||
remote_write:
|
||||
- url: "https://monitoring.googleapis.com:443/"
|
||||
queue_config:
|
||||
# Capacity should be 2*max_samples_per_send.
|
||||
capacity: 2000
|
||||
max_samples_per_send: 1000
|
||||
max_shards: 10000
|
||||
write_relabel_configs:
|
||||
# These labels are generally redundant with the Stackdriver monitored resource labels.
|
||||
- source_labels: [job]
|
||||
target_label: job
|
||||
replacement: ""
|
||||
- source_labels: [instance]
|
||||
target_label: instance
|
||||
replacement: ""
|
||||
|
|
@ -1,15 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.basic-auth-ingress
|
||||
// @description Provides ingress prototypes for setting up basic auth on GKE.
|
||||
// @shortDescription Ingress for basic auth on GKE.
|
||||
// @param name string Name for the component
|
||||
// @param ipName string The name of the global ip address to use.
|
||||
// @optionalParam secretName string envoy-ingress-tls The name of the secret containing the SSL certificates.
|
||||
// @optionalParam hostname string null The hostname associated with this ingress. Eg: mykubeflow.example.com
|
||||
// @optionalParam issuer string letsencrypt-prod The cert-manager issuer name.
|
||||
// @optionalParam ingressSetupImage string gcr.io/kubeflow-images-public/ingress-setup:latest The image for setting up ingress.
|
||||
// @optionalParam privateGKECluster string false Is the k8s cluster a private GKE cluster
|
||||
|
||||
local basicauth = import "kubeflow/gcp/basic-auth-ingress.libsonnet";
|
||||
local instance = basicauth.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.cert-manager
|
||||
// @description Provides cert-manager prototypes for generating SSL certificates.
|
||||
// @shortDescription Certificate generation on GKE.
|
||||
// @param name string Name for the component
|
||||
// @param acmeEmail string The Lets Encrypt account email address
|
||||
// @optionalParam acmeUrl string https://acme-v02.api.letsencrypt.org/directory The ACME server URL, set to https://acme-staging-v02.api.letsencrypt.org/directory for staging API.
|
||||
// @optionalParam certManagerImage string quay.io/jetstack/cert-manager-controller:v0.4.0 certManagerImage
|
||||
|
||||
local certManager = import "kubeflow/gcp/cert-manager.libsonnet";
|
||||
local instance = certManager.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.cloud-endpoints
|
||||
// @description Provides cloud-endpoints prototypes for creating Cloud Endpoints services and DNS records.
|
||||
// @shortDescription Cloud Endpoint domain creation.
|
||||
// @param name string Name for the component
|
||||
// @optionalParam secretName string admin-gcp-sa Name of secret containing the json service account key.
|
||||
// @optionalParam secretKey string admin-gcp-sa.json Name of the key in the secret containing the JSON service account key.
|
||||
|
||||
local cloudEndpoints = import "kubeflow/gcp/cloud-endpoints.libsonnet";
|
||||
local instance = cloudEndpoints.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.google-cloud-filestore-pv
|
||||
// @description Creates PV and PVC based on Google Cloud Filestore NFS
|
||||
// @shortDescription Creates PV and PVC based on Google Cloud Filestore NFS
|
||||
// @param name string Name for the component
|
||||
// @optionalParam storageCapacity string 1T Storage Capacity
|
||||
// @optionalParam path string /kubeflow Path in NFS server
|
||||
// @param serverIP string Google Cloud Filestore Server IP
|
||||
// @optionalParam image string gcr.io/kubeflow-images-public/ubuntu:18.04 The docker image to use
|
||||
|
||||
local google_cloud_file_store_pv = import "kubeflow/gcp/google-cloud-filestore-pv.libsonnet";
|
||||
local instance = google_cloud_file_store_pv.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.gpu-driver
|
||||
// @description Provides gpu-driver prototype in kubeflow gcp package
|
||||
// @shortDescription Gpu Driver.
|
||||
// @param name string Name for the component
|
||||
|
||||
local gpuDriver = import "kubeflow/gcp/gpu-driver.libsonnet";
|
||||
local instance = gpuDriver.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,21 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.iap-ingress
|
||||
// @description Provides ingress prototypes for setting up IAP on GKE.
|
||||
// @shortDescription Ingress for IAP on GKE.
|
||||
// @param name string Name for the component
|
||||
// @param ipName string The name of the global ip address to use.
|
||||
// @optionalParam secretName string envoy-ingress-tls The name of the secret containing the SSL certificates.
|
||||
// @optionalParam hostname string null The hostname associated with this ingress. Eg: mykubeflow.example.com
|
||||
// @optionalParam issuer string letsencrypt-prod The cert-manager issuer name.
|
||||
// @optionalParam envoyImage string gcr.io/kubeflow-images-public/envoy:v20180309-0fb4886b463698702b6a08955045731903a18738 The image for envoy.
|
||||
// @optionalParam ingressSetupImage string gcr.io/kubeflow-images-public/ingress-setup:latest The image for setting up ingress.
|
||||
// @optionalParam disableJwtChecking string false Disable JWT checking.
|
||||
// @optionalParam oauthSecretName string kubeflow-oauth The name of the secret containing the OAuth client_id and client_secret.
|
||||
// @optionalParam privateGKECluster string false Is the k8s cluster a private GKE cluster
|
||||
// @optionalParam useIstio string false Whether to route ingress traffic through Istio
|
||||
// @optionalParam istioNamespace string istio-system The namespace where Istio is installed
|
||||
// @optionalParam espSampleAppImage string gcr.io/cloud-solutions-group/esp-sample-app:1.0.0 The sample app used with IAP
|
||||
|
||||
local iap = import "kubeflow/gcp/iap.libsonnet";
|
||||
local instance = iap.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.metric-collector
|
||||
// @description Provides metric-collector prototypes for monitoring kubeflow availability on GCP.
|
||||
// @shortDescription Service monitor for kubeflow on GCP.
|
||||
// @param name string Name for the component
|
||||
// @param targetUrl string HTTPS URL of the Kubeflow service on GCP; the target of monitoring.
|
||||
// @optionalParam namespace string null Namespace to use for the components. It is automatically inherited from the environment if not set.
|
||||
// @optionalParam metricImage string gcr.io/kubeflow-images-public/metric-collector:latest Image for running metric exporter of kubeflow availability.
|
||||
// @optionalParam oauthSecretName string kubeflow-oauth The name of the secret containing the OAuth client_id and client_secret.
|
||||
|
||||
local metricCollector = import "kubeflow/gcp/metric-collector.libsonnet";
|
||||
local instance = metricCollector.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.prometheus
|
||||
// @description Provides prometheus prototype in kubeflow gcp package
|
||||
// @shortDescription Prometheus Service.
|
||||
// @param name string Name for the component
|
||||
// @param projectId string GCP project id.
|
||||
// @param clusterName string GKE cluster name.
|
||||
// @param zone string GKE cluster zone.
|
||||
|
||||
local prometheus = import "kubeflow/gcp/prometheus.libsonnet";
|
||||
local instance = prometheus.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.gcp-credentials-admission-webhook
|
||||
// @description This prototype creates an admission controller which injects credentials into pods
|
||||
// @shortDescription This prototype creates an admission controller which injects credentials into pods
|
||||
// @param name string Name to give to each of the components
|
||||
// @optionalParam image string gcr.io/kubeflow-images-public/gcp-admission-webhook:v20190401-v0.4.0-rc.1-309-g4014fa2e-dirty-be6212 The image for the webhook.
|
||||
// @optionalParam webhookSetupImage string gcr.io/kubeflow-images-public/ingress-setup:latest The image for setting up ingress.
|
||||
|
||||
local webhook = import "kubeflow/gcp/webhook.libsonnet";
|
||||
local instance = webhook.new(env, params);
|
||||
instance.list(instance.all)
|
||||
|
|
@ -1,84 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
#
|
||||
# A simple shell script to configure the backend timeouts and health checks by using gcloud.
|
||||
[ -z ${NAMESPACE} ] && echo Error NAMESPACE must be set && exit 1
|
||||
[ -z ${SERVICE} ] && echo Error SERVICE must be set && exit 1
|
||||
[ -z ${INGRESS_NAME} ] && echo Error INGRESS_NAME must be set && exit 1
|
||||
|
||||
PROJECT=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/project-id)
|
||||
if [ -z ${PROJECT} ]; then
|
||||
echo Error unable to fetch PROJECT from compute metadata
|
||||
exit 1
|
||||
fi
|
||||
|
||||
PROJECT_NUM=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/numeric-project-id)
|
||||
if [ -z ${PROJECT_NUM} ]; then
|
||||
echo Error unable to fetch PROJECT_NUM from compute metadata
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Activate the service account
|
||||
gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS}
|
||||
# Print out the config for debugging
|
||||
gcloud config list
|
||||
|
||||
NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}')
|
||||
echo "node port is ${NODE_PORT}"
|
||||
|
||||
while [[ -z ${BACKEND_NAME} ]]; do
|
||||
BACKENDS=$(kubectl --namespace=${NAMESPACE} get ingress ${INGRESS_NAME} -o jsonpath='{.metadata.annotations.ingress\.kubernetes\.io/backends}')
|
||||
echo "fetching backends info with ${INGRESS_NAME}: ${BACKENDS}"
|
||||
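# The GCLB backend created for the ingress is named k8s-be-<nodePort>--<suffix>; pick ours out by node port.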
BACKEND_NAME=$(echo $BACKENDS | grep -o "k8s-be-${NODE_PORT}--[0-9a-z]\+")
|
||||
echo "backend name is ${BACKEND_NAME}"
|
||||
sleep 2
|
||||
done
|
||||
|
||||
while [[ -z ${BACKEND_ID} ]]; do
|
||||
BACKEND_ID=$(gcloud compute --project=${PROJECT} backend-services list --filter=name~${BACKEND_NAME} --format='value(id)')
|
||||
echo "Waiting for backend id PROJECT=${PROJECT} NAMESPACE=${NAMESPACE} SERVICE=${SERVICE} filter=name~${BACKEND_NAME}"
|
||||
sleep 2
|
||||
done
|
||||
echo BACKEND_ID=${BACKEND_ID}
|
||||
|
||||
JWT_AUDIENCE="/projects/${PROJECT_NUM}/global/backendServices/${BACKEND_ID}"
|
||||
|
||||
# For healthcheck compare.
|
||||
mkdir -p /var/shared
|
||||
echo "JWT_AUDIENCE=${JWT_AUDIENCE}" > /var/shared/healthz.env
|
||||
echo "NODE_PORT=${NODE_PORT}" >> /var/shared/healthz.env
|
||||
echo "BACKEND_ID=${BACKEND_ID}" >> /var/shared/healthz.env
|
||||
|
||||
if [[ -z ${USE_ISTIO} ]]; then
|
||||
# TODO(https://github.com/kubeflow/kubeflow/issues/942): We should publish the modified envoy
|
||||
# config as a config map and use that in the envoy sidecars.
|
||||
kubectl get configmap -n ${NAMESPACE} envoy-config -o jsonpath='{.data.envoy-config\.json}' |
|
||||
sed -e "s|{{JWT_AUDIENCE}}|${JWT_AUDIENCE}|g" > /var/shared/envoy-config.json
|
||||
else
|
||||
# Apply the jwt validation policy
|
||||
cat /var/envoy-config/jwt-policy-template.yaml | sed -e "s|{{JWT_AUDIENCE}}|${JWT_AUDIENCE}|g" > /var/shared/jwt-policy.yaml
|
||||
kubectl apply -f /var/shared/jwt-policy.yaml
|
||||
fi
|
||||
|
||||
echo "Clearing lock on service annotation"
|
||||
kubectl patch svc "${SERVICE}" -p "{\"metadata\": { \"annotations\": {\"backendlock\": \"\" }}}"
|
||||
|
||||
checkBackend() {
|
||||
# created by init container.
|
||||
. /var/shared/healthz.env
|
||||
|
||||
# If the node port or backend id changes, so does the JWT audience.
|
||||
CURR_NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}')
|
||||
read -ra toks <<<"$(gcloud compute --project=${PROJECT} backend-services list --filter=name~k8s-be-${CURR_NODE_PORT}- --format='value(id,timeoutSec)')"
|
||||
CURR_BACKEND_ID="${toks[0]}"
|
||||
CURR_BACKEND_TIMEOUT="${toks[1]}"
|
||||
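# Healthy only while the backend id is unchanged and its timeout is still the expected 3600s.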
[[ "$BACKEND_ID" == "$CURR_BACKEND_ID" && "${CURR_BACKEND_TIMEOUT}" -eq 3600 ]]
|
||||
}
|
||||
|
||||
# Verify configuration every 10 seconds.
|
||||
while true; do
|
||||
if ! checkBackend; then
|
||||
echo "$(date) WARN: Backend check failed, restarting container."
|
||||
exit 1
|
||||
fi
|
||||
sleep 10
|
||||
done
|
||||
|
|
@ -1,219 +0,0 @@
|
|||
local testSuite = import "kubeflow/common/testsuite.libsonnet";
|
||||
local certManager = import "kubeflow/gcp/cert-manager.libsonnet";
|
||||
|
||||
local params = {
|
||||
name: "cert-manager",
|
||||
acmeEmail: "joe@acme.com",
|
||||
acmeUrl: "https://acme-v02.api.letsencrypt.org/directory",
|
||||
certManagerImage: "quay.io/jetstack/cert-manager-controller:v0.4.0",
|
||||
};
|
||||
local env = {
|
||||
namespace: "kf-001",
|
||||
};
|
||||
|
||||
local instance = certManager.new(env, params);
|
||||
|
||||
local testCases = [
|
||||
{
|
||||
actual: instance.parts.certificateCRD,
|
||||
expected: {
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "certificates.certmanager.k8s.io",
|
||||
},
|
||||
spec: {
|
||||
group: "certmanager.k8s.io",
|
||||
names: {
|
||||
kind: "Certificate",
|
||||
plural: "certificates",
|
||||
},
|
||||
scope: "Namespaced",
|
||||
version: "v1alpha1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.clusterIssuerCRD,
|
||||
expected: {
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "clusterissuers.certmanager.k8s.io",
|
||||
},
|
||||
spec: {
|
||||
group: "certmanager.k8s.io",
|
||||
names: {
|
||||
kind: "ClusterIssuer",
|
||||
plural: "clusterissuers",
|
||||
},
|
||||
scope: "Cluster",
|
||||
version: "v1alpha1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.issuerCRD,
|
||||
expected: {
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "issuers.certmanager.k8s.io",
|
||||
},
|
||||
spec: {
|
||||
group: "certmanager.k8s.io",
|
||||
names: {
|
||||
kind: "Issuer",
|
||||
plural: "issuers",
|
||||
},
|
||||
scope: "Namespaced",
|
||||
version: "v1alpha1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.serviceAccount,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "cert-manager",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.clusterRole,
|
||||
expected: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
name: "cert-manager",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"certmanager.k8s.io",
|
||||
],
|
||||
resources: [
|
||||
"certificates",
|
||||
"issuers",
|
||||
"clusterissuers",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"secrets",
|
||||
"events",
|
||||
"endpoints",
|
||||
"services",
|
||||
"pods",
|
||||
"configmaps",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"extensions",
|
||||
],
|
||||
resources: [
|
||||
"ingresses",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.clusterRoleBinding,
|
||||
expected: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
name: "cert-manager",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "cert-manager",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "cert-manager",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.deploy,
|
||||
expected: {
|
||||
apiVersion: "apps/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "cert-manager",
|
||||
},
|
||||
name: "cert-manager",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "cert-manager",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"--cluster-resource-namespace=kf-001",
|
||||
"--leader-election-namespace=kf-001",
|
||||
],
|
||||
image: "quay.io/jetstack/cert-manager-controller:v0.4.0",
|
||||
imagePullPolicy: "IfNotPresent",
|
||||
name: "cert-manager",
|
||||
},
|
||||
],
|
||||
serviceAccountName: "cert-manager",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.issuerLEProd,
|
||||
expected: {
|
||||
apiVersion: "certmanager.k8s.io/v1alpha1",
|
||||
kind: "ClusterIssuer",
|
||||
metadata: {
|
||||
name: "letsencrypt-prod",
|
||||
},
|
||||
spec: {
|
||||
acme: {
|
||||
email: "joe@acme.com",
|
||||
http01: {},
|
||||
privateKeySecretRef: {
|
||||
name: "letsencrypt-prod-secret",
|
||||
},
|
||||
server: "https://acme-v02.api.letsencrypt.org/directory",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
testSuite.run(testCases)
|
||||
|
|
@ -1,232 +0,0 @@
|
|||
local testSuite = import "kubeflow/common/testsuite.libsonnet";
|
||||
local cloudEndpoints = import "kubeflow/gcp/cloud-endpoints.libsonnet";
|
||||
|
||||
local params = {
|
||||
name: "cloud-endpoints",
|
||||
secretName: "admin-gcp-sa",
|
||||
secretKey: "admin-gcp-sa.json",
|
||||
};
|
||||
local env = {
|
||||
namespace: "kf-001",
|
||||
};
|
||||
|
||||
local instance = cloudEndpoints.new(env, params);
|
||||
|
||||
local testCases = [
|
||||
{
|
||||
actual: instance.parts.endpointsCRD,
|
||||
expected: {
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "cloudendpoints.ctl.isla.solutions",
|
||||
},
|
||||
spec: {
|
||||
group: "ctl.isla.solutions",
|
||||
names: {
|
||||
kind: "CloudEndpoint",
|
||||
plural: "cloudendpoints",
|
||||
shortNames: [
|
||||
"cloudep",
|
||||
"ce",
|
||||
],
|
||||
singular: "cloudendpoint",
|
||||
},
|
||||
scope: "Namespaced",
|
||||
version: "v1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.endpointsClusterRole,
|
||||
expected: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
name: "cloud-endpoints-controller",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"services",
|
||||
"configmaps",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"list",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"extensions",
|
||||
],
|
||||
resources: [
|
||||
"ingresses",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"list",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.endpointsClusterRoleBinding,
|
||||
expected: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
name: "cloud-endpoints-controller",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "cloud-endpoints-controller",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "cloud-endpoints-controller",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.endpointsService,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
name: "cloud-endpoints-controller",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "http",
|
||||
port: 80,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "cloud-endpoints-controller",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.endpointsServiceAccount,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "cloud-endpoints-controller",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.endpointsDeploy,
|
||||
expected: {
|
||||
apiVersion: "apps/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "cloud-endpoints-controller",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "cloud-endpoints-controller",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
env: [
|
||||
{
|
||||
name: "GOOGLE_APPLICATION_CREDENTIALS",
|
||||
value: "/var/run/secrets/sa/admin-gcp-sa.json",
|
||||
},
|
||||
],
|
||||
image: "gcr.io/cloud-solutions-group/cloud-endpoints-controller:0.2.1",
|
||||
imagePullPolicy: "Always",
|
||||
name: "cloud-endpoints-controller",
|
||||
readinessProbe: {
|
||||
failureThreshold: 2,
|
||||
httpGet: {
|
||||
path: "/healthz",
|
||||
port: 80,
|
||||
scheme: "HTTP",
|
||||
},
|
||||
periodSeconds: 5,
|
||||
successThreshold: 1,
|
||||
timeoutSeconds: 5,
|
||||
},
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/var/run/secrets/sa",
|
||||
name: "sa-key",
|
||||
readOnly: true,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
serviceAccountName: "cloud-endpoints-controller",
|
||||
terminationGracePeriodSeconds: 5,
|
||||
volumes: [
|
||||
{
|
||||
name: "sa-key",
|
||||
secret: {
|
||||
secretName: "admin-gcp-sa",
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.endpointsCompositeController,
|
||||
expected: {
|
||||
apiVersion: "metacontroller.k8s.io/v1alpha1",
|
||||
kind: "CompositeController",
|
||||
metadata: {
|
||||
name: "cloud-endpoints-controller",
|
||||
},
|
||||
spec: {
|
||||
childResources: [],
|
||||
clientConfig: {
|
||||
service: {
|
||||
caBundle: "...",
|
||||
name: "cloud-endpoints-controller",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
},
|
||||
generateSelector: true,
|
||||
resyncPeriodSeconds: 2,
|
||||
hooks: {
|
||||
sync: {
|
||||
webhook: {
|
||||
url: "http://cloud-endpoints-controller.kf-001/sync",
|
||||
},
|
||||
},
|
||||
},
|
||||
parentResource: {
|
||||
apiVersion: "ctl.isla.solutions/v1",
|
||||
resource: "cloudendpoints",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
testSuite.run(testCases)
|
||||
|
|
@ -1,110 +0,0 @@
|
|||
local testSuite = import "kubeflow/common/testsuite.libsonnet";
|
||||
local googleCloudFilestorePv = import "kubeflow/gcp/google-cloud-filestore-pv.libsonnet";
|
||||
|
||||
local params = {
|
||||
name: "google-cloud-filestore-pv",
|
||||
storageCapacity: "1T",
|
||||
path: "/kubeflow",
|
||||
serverIP: "10.10.10.10",
|
||||
image: "gcr.io/kubeflow-images-public/ubuntu:18.04",
|
||||
};
|
||||
local env = {
|
||||
namespace: "kf-001",
|
||||
};
|
||||
|
||||
local instance = googleCloudFilestorePv.new(env, params);
|
||||
|
||||
local testCases = [
|
||||
{
|
||||
actual: instance.parts.persistentVolume,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "PersistentVolume",
|
||||
metadata: {
|
||||
name: "google-cloud-filestore-pv",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
spec: {
|
||||
accessModes: [
|
||||
"ReadWriteMany",
|
||||
],
|
||||
capacity: {
|
||||
storage: "1T",
|
||||
},
|
||||
nfs: {
|
||||
path: "/kubeflow",
|
||||
server: "10.10.10.10",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.persistentVolumeClaim,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "PersistentVolumeClaim",
|
||||
metadata: {
|
||||
name: "google-cloud-filestore-pv",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
spec: {
|
||||
accessModes: [
|
||||
"ReadWriteMany",
|
||||
],
|
||||
resources: {
|
||||
requests: {
|
||||
storage: "1T",
|
||||
},
|
||||
},
|
||||
storageClassName: "nfs-storage",
|
||||
volumeName: "google-cloud-filestore-pv",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.gcfsPersmissions,
|
||||
expected: {
|
||||
apiVersion: "batch/v1",
|
||||
kind: "Job",
|
||||
metadata: {
|
||||
name: "set-gcfs-permissions",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
spec: {
|
||||
template: {
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
command: [
|
||||
"chmod",
|
||||
"777",
|
||||
"/kubeflow-gcfs",
|
||||
],
|
||||
image: "gcr.io/kubeflow-images-public/ubuntu:18.04",
|
||||
name: "set-gcfs-permissions",
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/kubeflow-gcfs",
|
||||
name: "google-cloud-filestore-pv",
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
restartPolicy: "OnFailure",
|
||||
volumes: [
|
||||
{
|
||||
name: "google-cloud-filestore-pv",
|
||||
persistentVolumeClaim: {
|
||||
claimName: "google-cloud-filestore-pv",
|
||||
readOnly: false,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
testSuite.run(testCases)
|
||||
|
|
@ -1,275 +0,0 @@
|
|||
local iap = import "../iap.libsonnet";
|
||||
local testSuite = import "kubeflow/common/testsuite.libsonnet";
|
||||
|
||||
local testCases = [
|
||||
{
|
||||
actual: iap.new(
|
||||
{ namespace: "namespace" },
|
||||
{
|
||||
envoyPort: 8080,
|
||||
useIstio: "false",
|
||||
espSampleAppImage: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0",
|
||||
}
|
||||
).service,
|
||||
expected: {
|
||||
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "envoy",
|
||||
},
|
||||
annotations: {
|
||||
"beta.cloud.google.com/backend-config": '{"ports": {"envoy":"iap-backendconfig"}}',
|
||||
},
|
||||
name: "envoy",
|
||||
namespace: "namespace",
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "envoy",
|
||||
port: 8080,
|
||||
targetPort: 8080,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
service: "envoy",
|
||||
},
|
||||
type: "NodePort",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: iap.new(
|
||||
{ namespace: "namespace" },
|
||||
{
|
||||
envoyPort: 8080,
|
||||
ipName: "ipName",
|
||||
hostname: "hostname",
|
||||
issuer: "issuer",
|
||||
useIstio: "false",
|
||||
espSampleAppImage: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0",
|
||||
}
|
||||
).ingress,
|
||||
expected: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Ingress",
|
||||
metadata: {
|
||||
name: "envoy-ingress",
|
||||
namespace: "namespace",
|
||||
annotations: {
|
||||
"kubernetes.io/tls-acme": "true",
|
||||
"ingress.kubernetes.io/ssl-redirect": "true",
|
||||
"kubernetes.io/ingress.global-static-ip-name": "ipName",
|
||||
"certmanager.k8s.io/issuer": "issuer",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
rules: [
|
||||
{
|
||||
host: "hostname",
|
||||
http: {
|
||||
paths: [
|
||||
{
|
||||
backend: {
|
||||
serviceName: "envoy",
|
||||
servicePort: 8080,
|
||||
},
|
||||
path: "/*",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: iap.new(
|
||||
{
|
||||
namespace: "namespace",
|
||||
},
|
||||
{
|
||||
envoyPort: 8080,
|
||||
ipName: "ipName",
|
||||
hostname: "null",
|
||||
issuer: "issuer",
|
||||
useIstio: "false",
|
||||
espSampleAppImage: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0",
|
||||
}
|
||||
).ingress,
|
||||
expected: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Ingress",
|
||||
metadata: {
|
||||
name: "envoy-ingress",
|
||||
namespace: "namespace",
|
||||
annotations: {
|
||||
"kubernetes.io/tls-acme": "true",
|
||||
"ingress.kubernetes.io/ssl-redirect": "true",
|
||||
"kubernetes.io/ingress.global-static-ip-name": "ipName",
|
||||
"certmanager.k8s.io/issuer": "issuer",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
rules: [
|
||||
{
|
||||
http: {
|
||||
paths: [
|
||||
{
|
||||
backend: {
|
||||
serviceName: "envoy",
|
||||
servicePort: 8080,
|
||||
},
|
||||
path: "/*",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: iap.new(
|
||||
{
|
||||
namespace: "namespace",
|
||||
},
|
||||
{
|
||||
secretName: "secretName",
|
||||
hostname: "hostname",
|
||||
issuer: "issuer",
|
||||
privateGKECluster: "false",
|
||||
useIstio: "false",
|
||||
espSampleAppImage: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0",
|
||||
}
|
||||
).certificate,
|
||||
expected: {
|
||||
apiVersion: "certmanager.k8s.io/v1alpha1",
|
||||
kind: "Certificate",
|
||||
metadata: {
|
||||
name: "secretName",
|
||||
namespace: "namespace",
|
||||
},
|
||||
spec: {
|
||||
secretName: "secretName",
|
||||
issuerRef: {
|
||||
name: "issuer",
|
||||
kind: "ClusterIssuer",
|
||||
},
|
||||
commonName: "hostname",
|
||||
dnsNames: [
|
||||
"hostname",
|
||||
],
|
||||
acme: {
|
||||
config: [
|
||||
{
|
||||
http01: {
|
||||
ingress: "envoy-ingress",
|
||||
},
|
||||
domains: [
|
||||
"hostname",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: iap.new(
|
||||
{
|
||||
namespace: "namespace",
|
||||
},
|
||||
{
|
||||
useIstio: "false",
|
||||
espSampleAppImage: "cloud-solutions-group/esp-sample-app:5.0.0",
|
||||
}
|
||||
).whoamiApp,
|
||||
expected: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "whoami-app",
|
||||
namespace: "namespace",
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "whoami",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
env: [
|
||||
{
|
||||
name: "PORT",
|
||||
value: "8081",
|
||||
},
|
||||
],
|
||||
image: "cloud-solutions-group/esp-sample-app:5.0.0",
|
||||
name: "app",
|
||||
ports: [
|
||||
{
|
||||
containerPort: 8081,
|
||||
},
|
||||
],
|
||||
readinessProbe: {
|
||||
failureThreshold: 2,
|
||||
httpGet: {
|
||||
path: "/healthz",
|
||||
port: 8081,
|
||||
scheme: "HTTP",
|
||||
},
|
||||
periodSeconds: 10,
|
||||
successThreshold: 1,
|
||||
timeoutSeconds: 5,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: iap.new(
|
||||
{
|
||||
namespace: "namespace",
|
||||
},
|
||||
{
|
||||
useIstio: "false",
|
||||
espSampleAppImage: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0",
|
||||
}
|
||||
).whoamiService,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "whoami",
|
||||
},
|
||||
name: "whoami-app",
|
||||
namespace: "namespace",
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
port: 80,
|
||||
targetPort: 8081,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "whoami",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
testSuite.run(testCases)
|
||||
|
|
@ -1,196 +0,0 @@
|
|||
local testSuite = import "kubeflow/common/testsuite.libsonnet";
|
||||
local metricCollector = import "kubeflow/gcp/metric-collector.libsonnet";
|
||||
|
||||
local params = {
|
||||
name: "metric-collector",
|
||||
targetUrl: "https://foo.com",
|
||||
metricImage: "gcr.io/kubeflow-images-public/metric-collector:latest",
|
||||
oauthSecretName: "bar",
|
||||
};
|
||||
local env = {
|
||||
namespace: "kf-001",
|
||||
};
|
||||
|
||||
local instance = metricCollector.new(env, params);
|
||||
|
||||
local testCases = [
|
||||
{
|
||||
actual: instance.parts.metricServiceAccount,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
name: "metric-collector",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.metricRole,
|
||||
expected: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
name: "metric-collector",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"services",
|
||||
"events",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.metricRoleBinding,
|
||||
expected: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
name: "metric-collector",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "metric-collector",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "metric-collector",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.service,
|
||||
expected: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
annotations: {
|
||||
"prometheus.io/path": "/",
|
||||
"prometheus.io/port": "8000",
|
||||
"prometheus.io/scrape": "true",
|
||||
},
|
||||
labels: {
|
||||
service: "metric-collector",
|
||||
},
|
||||
name: "metric-collector",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "metric-collector",
|
||||
port: 8000,
|
||||
protocol: "TCP",
|
||||
targetPort: 8000,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
actual: instance.parts.deploy,
|
||||
expected: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
name: "metric-collector",
|
||||
namespace: "kf-001",
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
selector: {
|
||||
matchLabels: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
},
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "metric-collector",
|
||||
},
|
||||
namespace: "kf-001",
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"--url=https://foo.com",
|
||||
"--client_id=$(CLIENT_ID)",
|
||||
],
|
||||
command: [
|
||||
"python3",
|
||||
"/opt/kubeflow-readiness.py",
|
||||
],
|
||||
env: [
|
||||
{
|
||||
name: "GOOGLE_APPLICATION_CREDENTIALS",
|
||||
value: "/var/run/secrets/sa/admin-gcp-sa.json",
|
||||
},
|
||||
{
|
||||
name: "CLIENT_ID",
|
||||
valueFrom: {
|
||||
secretKeyRef: {
|
||||
key: "client_id",
|
||||
name: "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
image: "gcr.io/kubeflow-images-public/metric-collector:latest",
|
||||
name: "exporter",
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/var/run/secrets/sa",
|
||||
name: "sa-key",
|
||||
readOnly: true,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
restartPolicy: "Always",
|
||||
serviceAccountName: "metric-collector",
|
||||
volumes: [
|
||||
{
|
||||
name: "sa-key",
|
||||
secret: {
|
||||
secretName: "admin-gcp-sa",
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
testSuite.run(testCases)
|
||||
|
|
@ -1,51 +0,0 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# A simple shell script that configures the backend timeouts and health checks using gcloud.
|
||||
|
||||
[ -z "${NAMESPACE}" ] && echo Error NAMESPACE must be set && exit 1
|
||||
[ -z "${SERVICE}" ] && echo Error SERVICE must be set && exit 1
|
||||
|
||||
PROJECT=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/project-id)
|
||||
if [ -z "${PROJECT}" ]; then
|
||||
echo Error unable to fetch PROJECT from compute metadata
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Activate the service account, allow 5 retries
|
||||
for i in {1..5}; do gcloud auth activate-service-account --key-file=${GOOGLE_APPLICATION_CREDENTIALS} && break || sleep 10; done
|
||||
|
||||
NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}')
|
||||
while [[ -z ${BACKEND_SERVICE} ]];
|
||||
do BACKEND_SERVICE=$(gcloud --project=${PROJECT} compute backend-services list --filter=name~k8s-be-${NODE_PORT}- --uri);
|
||||
echo "Waiting for the backend-services resource PROJECT=${PROJECT} NODEPORT=${NODE_PORT} SERVICE=${SERVICE}...";
|
||||
sleep 2;
|
||||
done
|
||||
|
||||
while [[ -z ${HEALTH_CHECK_URI} ]];
|
||||
do HEALTH_CHECK_URI=$(gcloud compute --project=${PROJECT} health-checks list --filter=name~k8s-be-${NODE_PORT}- --uri);
|
||||
echo "Waiting for the healthcheck resource PROJECT=${PROJECT} NODEPORT=${NODE_PORT} SERVICE=${SERVICE}...";
|
||||
sleep 2;
|
||||
done
|
||||
|
||||
# Since we create the envoy-ingress ingress object before creating the envoy
|
||||
# deployment object, the healthcheck will not be configured correctly in the GCP
|
||||
# load balancer. It will default the healthcheck request path to a value of
|
||||
# / instead of the intended /healthz.
|
||||
# Manually update the healthcheck request path to /healthz
|
||||
if [[ ${HEALTHCHECK_PATH} ]]; then
|
||||
gcloud --project=${PROJECT} compute health-checks update http ${HEALTH_CHECK_URI} --request-path=${HEALTHCHECK_PATH}
|
||||
else
|
||||
gcloud --project=${PROJECT} compute health-checks update http ${HEALTH_CHECK_URI} --request-path=/healthz
|
||||
fi
|
||||
|
||||
if [[ ${USE_ISTIO} ]]; then
|
||||
# Create the route so the healthcheck can pass
|
||||
kubectl apply -f /var/envoy-config/healthcheck_route.yaml
|
||||
fi
|
||||
|
||||
# Since JupyterHub uses websockets, we want to increase the backend timeout
|
||||
echo Increasing backend timeout for JupyterHub
|
||||
gcloud --project=${PROJECT} compute backend-services update --global ${BACKEND_SERVICE} --timeout=3600
|
||||
|
||||
echo "Backend updated successfully. Waiting 1 hour before updating again."
|
||||
sleep 3600
|
||||
|
|
@ -1,243 +0,0 @@
|
|||
{
|
||||
local k = import "k.libsonnet",
|
||||
local util = import "kubeflow/common/util.libsonnet",
|
||||
new(_env, _params):: {
|
||||
local params = _params + _env {
|
||||
hostname: if std.objectHas(_params, "hostname") then _params.hostname else "null",
|
||||
},
|
||||
local namespace = params.namespace,
|
||||
|
||||
local deployment = {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "gcp-cred-webhook",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "gcp-cred-webhook"
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
name: "gcp-cred-webhook",
|
||||
image: params.image,
|
||||
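// Mount the webhook's TLS certificates from the gcp-cred-webhook-certs secret.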
volumeMounts: [{
|
||||
name: "webhook-cert",
|
||||
mountPath: "/etc/webhook/certs",
|
||||
readOnly: true,
|
||||
}],
|
||||
},
|
||||
],
|
||||
volumes: [
|
||||
{
|
||||
name: "webhook-cert",
|
||||
secret: {
|
||||
secretName: "gcp-cred-webhook-certs",
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
}, // deployment
|
||||
deployment:: deployment,
|
||||
|
||||
local service = {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "gcp-cred-webhook",
|
||||
},
|
||||
name: "gcp-cred-webhook",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
selector: {
|
||||
app: "gcp-cred-webhook",
|
||||
},
|
||||
ports: [
|
||||
{
|
||||
port: 443,
|
||||
targetPort: 443,
|
||||
},
|
||||
],
|
||||
},
|
||||
}, // service
|
||||
service:: service,
|
||||
|
||||
local webhookConfig = {
|
||||
apiVersion: "admissionregistration.k8s.io/v1beta1",
|
||||
kind: "MutatingWebhookConfiguration",
|
||||
metadata: {
|
||||
name: "gcp-cred-webhook",
|
||||
// This is cluster scope.
|
||||
},
|
||||
webhooks: [
|
||||
{
|
||||
// name has to be fully qualified X.X.X
|
||||
name: "gcp-cred-webhook.kubeflow.org",
|
||||
clientConfig: {
|
||||
service: {
|
||||
name: "gcp-cred-webhook",
|
||||
namespace: namespace,
|
||||
path: "/add-cred"
|
||||
},
|
||||
// To be patched.
|
||||
caBundle: "",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
operations: ["CREATE"],
|
||||
apiGroups: [""],
|
||||
apiVersions: ["v1"],
|
||||
resources: ["pods"],
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
}, // webhookConfig
|
||||
webhookConfig:: webhookConfig,
|
||||
|
||||
local webhookBootstrapJob = {
|
||||
apiVersion: "apps/v1",
|
||||
kind: "StatefulSet",
|
||||
metadata: {
|
||||
name: "webhook-bootstrap",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
selector: {
|
||||
matchLabels: {
|
||||
service: "webhook-bootstrap",
|
||||
},
|
||||
},
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "webhook-bootstrap",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
restartPolicy: "Always",
|
||||
serviceAccountName: "webhook-bootstrap",
|
||||
containers: [
|
||||
{
|
||||
name: "bootstrap",
|
||||
image: params.webhookSetupImage,
|
||||
command: [
|
||||
"sh",
|
||||
"/var/webhook-config/create_ca.sh",
|
||||
],
|
||||
env: [
|
||||
{
|
||||
name: "NAMESPACE",
|
||||
value: namespace,
|
||||
},
|
||||
],
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/var/webhook-config/",
|
||||
name: "webhook-config",
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
volumes: [
|
||||
{
|
||||
configMap: {
|
||||
name: "webhook-bootstrap-config",
|
||||
},
|
||||
name: "webhook-config",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
}, // webhookBootstrapJob
|
||||
webhookBootstrapJob:: webhookBootstrapJob,
|
||||
|
||||
local initServiceAccount = {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "webhook-bootstrap",
|
||||
namespace: namespace,
|
||||
},
|
||||
}, // initServiceAccount
|
||||
initServiceAccount:: initServiceAccount,
|
||||
|
||||
local initClusterRoleBinding = {
|
||||
kind: "ClusterRoleBinding",
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
metadata: {
|
||||
name: "webhook-bootstrap",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "webhook-bootstrap",
|
||||
namespace: namespace,
|
||||
},
|
||||
],
|
||||
roleRef: {
|
||||
kind: "ClusterRole",
|
||||
name: "webhook-bootstrap",
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
},
|
||||
}, // initClusterRoleBinding
|
||||
initClusterRoleBinding:: initClusterRoleBinding,
|
||||
|
||||
local initClusterRole = {
|
||||
kind: "ClusterRole",
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
metadata: {
|
||||
name: "webhook-bootstrap",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: ["admissionregistration.k8s.io"],
|
||||
resources: ["mutatingwebhookconfigurations"],
|
||||
verbs: ["*"],
|
||||
},
|
||||
{
|
||||
apiGroups: [""],
|
||||
resources: ["secrets"],
|
||||
verbs: ["*"],
|
||||
},
|
||||
],
|
||||
}, // initClusterRole
|
||||
initClusterRole:: initClusterRole,
|
||||
|
||||
local webhookConfigmap = {
|
||||
apiVersion: "v1",
|
||||
kind: "ConfigMap",
|
||||
metadata: {
|
||||
name: "webhook-bootstrap-config",
|
||||
namespace: namespace,
|
||||
},
|
||||
data: {
|
||||
"create_ca.sh": importstr "create_ca.sh",
|
||||
}
|
||||
}, // webhookConfigmap
|
||||
webhookConfigmap:: webhookConfigmap,
|
||||
|
||||
all:: [
|
||||
self.deployment,
|
||||
self.service,
|
||||
self.webhookBootstrapJob,
|
||||
self.webhookConfigmap,
|
||||
self.webhookConfig,
|
||||
self.initServiceAccount,
|
||||
self.initClusterRole,
|
||||
self.initClusterRoleBinding,
|
||||
],
|
||||
|
||||
list(obj=self.all):: k.core.v1.list.new(obj,),
|
||||
}
|
||||
}
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
approvers:
|
||||
- ioandr
|
||||
- kkasravi
|
||||
- pdmack
|
||||
reviewers:
|
||||
- vkoukis
|
||||
|
|
@ -1,118 +0,0 @@
|
|||
# Configuration file for the default JupyterHub Spawner UI
|
||||
# Each key corresponds to a JupyterHub Spawner UI option
|
||||
# If a key is missing, the respective Spawner UI option will be left untouched
|
||||
#
|
||||
# Each Spawner UI option is configured by two keys: `value` and `readOnly`
|
||||
# - The `value` key contains the default value
|
||||
# - The `readOnly` key determines if the option will be available to users
|
||||
#
|
||||
# If the 'readOnly' key is present and set to 'true', the respective option
|
||||
# will be disabled for users and only set by the admin
|
||||
# If the 'readOnly' key is missing (defaults to 'false'), the respective option
|
||||
# will be available for users
|
||||
#
|
||||
# Please note that some values (e.g. {username}) may be templated
|
||||
# and expanded according to KubeSpawner's rules
|
||||
#
|
||||
# For more information regarding JupyterHub KubeSpawner and its configuration:
|
||||
# https://jupyterhub-kubespawner.readthedocs.io/en/latest/spawner.html
|
||||
|
||||
spawnerFormDefaults:
|
||||
image:
|
||||
# The container Image for the user's Jupyter Notebook
|
||||
# If readonly, this value must be a member of the list below
|
||||
value: gcr.io/kubeflow-images-public/tensorflow-1.13.1-notebook-cpu:v0.5.0
|
||||
# The list of available standard container Images
|
||||
options:
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.5.1-notebook-cpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.5.1-notebook-gpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.6.0-notebook-cpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.6.0-notebook-gpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.7.0-notebook-cpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.7.0-notebook-gpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.8.0-notebook-cpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.8.0-notebook-gpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.9.0-notebook-cpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.9.0-notebook-gpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.10.1-notebook-cpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.10.1-notebook-gpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.11.0-notebook-cpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.11.0-notebook-gpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.12.0-notebook-cpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.12.0-notebook-gpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.13.1-notebook-cpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-1.13.1-notebook-gpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-2.0.0a-notebook-cpu:v0.5.0
|
||||
- gcr.io/kubeflow-images-public/tensorflow-2.0.0a-notebook-gpu:v0.5.0
|
||||
# By default, custom container Images are allowed
|
||||
# Set 'readOnly' to 'true' to only enable the standard container Images above
|
||||
readOnly: false
|
||||
cpu:
|
||||
# CPU for user's Notebook
|
||||
value: '0.5'
|
||||
# readOnly: true
|
||||
memory:
|
||||
# Memory for user's Notebook
|
||||
value: 1.0Gi
|
||||
workspaceVolume:
|
||||
# Workspace Volume to be attached to user's Notebook
|
||||
# Each Workspace Volume is declared with the following attributes:
|
||||
# Type, Name, Size, MountPath and Access Mode
|
||||
value:
|
||||
type:
|
||||
# The Type of the Workspace Volume
|
||||
# Supported values: 'New', 'Existing'
|
||||
value: New
|
||||
name:
|
||||
# The Name of the Workspace Volume
|
||||
# Note that this is a templated value
|
||||
# value: {username}-workspace
|
||||
value: '{username}-workspace'
|
||||
size:
|
||||
# The Size of the Workspace Volume (in Gi)
|
||||
value: '10'
|
||||
mountPath:
|
||||
# The Path that the Workspace Volume will be mounted
|
||||
readOnly: true
|
||||
value: /home/jovyan
|
||||
accessModes:
|
||||
# The Access Mode of the Workspace Volume
|
||||
# Supported values: 'ReadWriteOnce', 'ReadWriteMany', 'ReadOnlyMany'
|
||||
value: ReadWriteOnce
|
||||
dataVolumes:
|
||||
# List of additional Data Volumes to be attached to the user's Notebook
|
||||
value: []
|
||||
# Each Data Volume is declared with the following attributes:
|
||||
# Type, Name, Size, MountPath and Access Mode
|
||||
#
|
||||
# For example, a list with 2 Data Volumes:
|
||||
#value:
|
||||
# - value:
|
||||
# type:
|
||||
# value: New
|
||||
# name:
|
||||
# value: {username}-vol-1
|
||||
# size:
|
||||
# value: '10'
|
||||
# mountPath:
|
||||
# value: /home/jovyan/{username}-vol-1
|
||||
# accessModes:
|
||||
# value: ReadWriteOnce
|
||||
# - value:
|
||||
# type:
|
||||
# value: New
|
||||
# name:
|
||||
# value: {username}-vol-2
|
||||
# size:
|
||||
# value: '5'
|
||||
# mountPath:
|
||||
# value: /home/jovyan/{username}-vol-2
|
||||
# accessModes:
|
||||
# value: ReadWriteOnce
|
||||
#
|
||||
# Uncomment the following line to make the Data Volumes list readonly
|
||||
#readOnly: true
|
||||
extraResources:
|
||||
# Extra Resource Limits for user's Notebook
|
||||
# Note that braces are escaped
|
||||
value: "{{}}"
|
||||
|
|
@ -1,296 +0,0 @@
|
|||
{
|
||||
local k = import "k.libsonnet",
|
||||
local util = import "kubeflow/common/util.libsonnet",
|
||||
|
||||
new(_env, _params):: {
|
||||
local params = _env + _params,
|
||||
|
||||
local defaultSpawnerData = {
|
||||
// Default JH Spawner UI files
|
||||
"spawner_ui_config.yaml": importstr "./config.yaml",
|
||||
},
|
||||
|
||||
local jupyterConfig = {
|
||||
apiVersion: "v1",
|
||||
kind: "ConfigMap",
|
||||
metadata: {
|
||||
name: params.name + "-config",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
data: defaultSpawnerData,
|
||||
},
|
||||
jupyterConfig:: jupyterConfig,
|
||||
|
||||
serviceAccount:: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: params.name,
|
||||
},
|
||||
name: params.name,
|
||||
namespace: params.namespace,
|
||||
},
|
||||
},
|
||||
|
||||
clusterRole:: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
name: params.name + "-cluster-role",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [""],
|
||||
resources: ["namespaces"],
|
||||
verbs: ["get", "list", "create", "delete"]
|
||||
},
|
||||
{
|
||||
apiGroups: ["kubeflow.org"],
|
||||
resources: ["notebooks"],
|
||||
verbs: ["get", "list", "create", "delete"],
|
||||
},
|
||||
{
|
||||
apiGroups: [""],
|
||||
resources: ["persistentvolumeclaims"],
|
||||
verbs: ["create", "delete", "get", "list"],
|
||||
},
|
||||
{
|
||||
apiGroups: ["storage.k8s.io"],
|
||||
resources: ["storageclasses"],
|
||||
verbs: ["get", "list"],
|
||||
},
|
||||
{
|
||||
apiGroups: [""],
|
||||
resources: ["secrets"],
|
||||
verbs: ["get", "list"],
|
||||
},
|
||||
]
|
||||
},
|
||||
|
||||
clusterRoleBinding:: {
|
||||
kind: "ClusterRoleBinding",
|
||||
apiVersion: "rbac.authorization.k8s.io/v1",
|
||||
metadata: {
|
||||
name: params.name + "-binding"
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: params.name,
|
||||
namespace: params.namespace
|
||||
},
|
||||
],
|
||||
roleRef: {
|
||||
kind: "ClusterRole",
|
||||
name: params.name + "-cluster-role",
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
},
|
||||
},
|
||||
|
||||
notebookRole:: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "Role",
|
||||
metadata: {
|
||||
name: "jupyter-notebook-role",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"pods",
|
||||
"pods/log",
|
||||
"secrets",
|
||||
"services",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
"apps",
|
||||
"extensions",
|
||||
],
|
||||
resources: [
|
||||
"deployments",
|
||||
"replicasets",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"kubeflow.org",
|
||||
],
|
||||
resources: [
|
||||
"*",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"batch",
|
||||
],
|
||||
resources: [
|
||||
"jobs",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
notebookServiceAccount:: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "jupyter-notebook",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
},
|
||||
|
||||
notebookRoleBinding:: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "RoleBinding",
|
||||
metadata: {
|
||||
name: "jupyter-notebook-role-binding",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "Role",
|
||||
name: "jupyter-notebook-role",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "jupyter-notebook",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
svc:: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
name: params.name,
|
||||
namespace: params.namespace,
|
||||
labels: {
|
||||
run: params.name
|
||||
},
|
||||
annotations:{
|
||||
"getambassador.io/config":
|
||||
std.join("\n", [
|
||||
"---",
|
||||
"apiVersion: ambassador/v0",
|
||||
"kind: Mapping",
|
||||
"name: webapp_mapping",
|
||||
"prefix: /" + params.prefix + "/",
|
||||
"service: " + params.name + "." + params.namespace,
|
||||
"add_request_headers: ",
|
||||
" x-forwarded-prefix: /" + params.prefix
|
||||
]),
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
type: "ClusterIP",
|
||||
ports: [{
|
||||
port: 80,
|
||||
targetPort: 5000,
|
||||
protocol: "TCP",
|
||||
name: "http",
|
||||
}],
|
||||
selector: {
|
||||
app: params.name
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
depl :: {
|
||||
apiVersion: "apps/v1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: params.name,
|
||||
namespace: params.namespace,
|
||||
labels: {
|
||||
app: params.name,
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
selector: {
|
||||
matchLabels: {
|
||||
app: params.name,
|
||||
},
|
||||
},
|
||||
template: {
|
||||
metadata:{
|
||||
labels: {
|
||||
app: params.name,
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
serviceAccountName: params.name,
|
||||
containers: [{
|
||||
name: params.name,
|
||||
image: params.image,
|
||||
env: std.prune([
|
||||
{
|
||||
name: "ROK_SECRET_NAME",
|
||||
value: params.rokSecretName,
|
||||
},
|
||||
{
|
||||
name: "UI",
|
||||
value: params.ui,
|
||||
},
|
||||
]),
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/etc/config",
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
ports: [{
|
||||
containerPort: 5000,
|
||||
}],
|
||||
imagePullPolicy: params.policy,
|
||||
}],
|
||||
volumes: [
|
||||
{
|
||||
configMap: {
|
||||
name: params.name + "-config",
|
||||
},
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
parts:: self,
|
||||
all:: [
|
||||
self.svc,
|
||||
self.depl,
|
||||
self.jupyterConfig,
|
||||
self.serviceAccount,
|
||||
self.clusterRoleBinding,
|
||||
self.clusterRole,
|
||||
self.notebookServiceAccount,
|
||||
self.notebookRole,
|
||||
self.notebookRoleBinding,
|
||||
],
|
||||
|
||||
list(obj=self.all):: util.list(obj),
|
||||
},
|
||||
}
|
||||
|
|
@ -1,423 +0,0 @@
|
|||
{
|
||||
local k = import "k.libsonnet",
|
||||
local util = import "kubeflow/common/util.libsonnet",
|
||||
new(_env, _params):: {
|
||||
local params = _params + _env,
|
||||
|
||||
local defaultSpawnerData = {
|
||||
// Default JH Spawner UI files
|
||||
"template.html": importstr "ui/default/template.html",
|
||||
"script.js": importstr "ui/default/script.js",
|
||||
"style.css": importstr "ui/default/style.css",
|
||||
"spawner.py": std.strReplace(importstr "ui/default/spawner.py", "\\\n", ""),
|
||||
"spawner_ui_config.yaml": importstr "ui/default/config.yaml",
|
||||
},
|
||||
|
||||
local rokSpawnerData = {
|
||||
// Base files that Rok UI extends or overrides
|
||||
"default_template.html": importstr "ui/default/template.html",
|
||||
"default_style.css": importstr "ui/default/style.css",
|
||||
"default_spawner.py": importstr "ui/default/spawner.py",
|
||||
|
||||
// Rok UI files
|
||||
"template.html": importstr "ui/rok/template.html",
|
||||
"script.js": importstr "ui/rok/script.js",
|
||||
"style.css": importstr "ui/rok/style.css",
|
||||
"spawner.py": std.strReplace(importstr "ui/rok/spawner.py", "\\\n", ""),
|
||||
"spawner_ui_config.yaml": importstr "ui/rok/config.yaml",
|
||||
},
|
||||
|
||||
local kubeSpawnerConfig = {
|
||||
apiVersion: "v1",
|
||||
kind: "ConfigMap",
|
||||
metadata: {
|
||||
name: "jupyter-config",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
// JH config file
|
||||
local config = {
|
||||
"jupyter_config.py": std.strReplace(importstr "jupyter_config.py", "\\\n", ""),
|
||||
},
|
||||
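// Pick the spawner UI bundle (default or rok) that matches params.ui.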
data: config +
|
||||
if params.ui == "rok" then rokSpawnerData
|
||||
else if params.ui == "default" then defaultSpawnerData,
|
||||
},
|
||||
kubeSpawnerConfig:: kubeSpawnerConfig,
|
||||
|
||||
local notebookService = {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "jupyter",
|
||||
},
|
||||
name: "jupyter-0",
|
||||
namespace: params.namespace,
|
||||
annotations: {
|
||||
"prometheus.io/scrape": "true",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
// We want a headless service so we set the ClusterIP to be None.
|
||||
// This headless service is used by individual Jupyter pods to connect back to the Hub.
|
||||
clusterIP: "None",
|
||||
ports: [
|
||||
{
|
||||
name: "hub",
|
||||
port: 8000,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "jupyter",
|
||||
},
|
||||
},
|
||||
},
|
||||
notebookService:: notebookService,
|
||||
|
||||
local hubService = {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "jupyter-lb",
|
||||
},
|
||||
name: "jupyter-lb",
|
||||
namespace: params.namespace,
|
||||
annotations: {
|
||||
"getambassador.io/config":
|
||||
std.join("\n", [
|
||||
"---",
|
||||
"apiVersion: ambassador/v0",
|
||||
"kind: Mapping",
|
||||
"name: jupyter-lb-hub-mapping",
|
||||
"prefix: /hub/",
|
||||
"rewrite: /hub/",
|
||||
"timeout_ms: 300000",
|
||||
"service: jupyter-lb." + params.namespace,
|
||||
"use_websocket: true",
|
||||
"---",
|
||||
"apiVersion: ambassador/v0",
|
||||
"kind: Mapping",
|
||||
"name: jupyter-lb-user-mapping",
|
||||
"prefix: /user/",
|
||||
"rewrite: /user/",
|
||||
"timeout_ms: 300000",
|
||||
"service: jupyter-lb." + params.namespace,
|
||||
"use_websocket: true",
|
||||
]),
|
||||
}, //annotations
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "hub",
|
||||
port: 80,
|
||||
targetPort: 8000,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "jupyter",
|
||||
},
|
||||
type: params.serviceType,
|
||||
},
|
||||
},
|
||||
hubService:: hubService,
|
||||
|
||||
local hubStatefulSet = {
|
||||
apiVersion: "apps/v1beta1",
|
||||
kind: "StatefulSet",
|
||||
metadata: {
|
||||
name: "jupyter",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
serviceName: "",
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "jupyter",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
command: [
|
||||
"jupyterhub",
|
||||
"-f",
|
||||
"/etc/config/jupyter_config.py",
|
||||
],
|
||||
image: params.image,
|
||||
name: "jupyter",
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/etc/config",
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
ports: [
|
||||
// Port 8000 is used by the hub to accept incoming requests.
|
||||
{
|
||||
containerPort: 8000,
|
||||
},
|
||||
// Port 8081 accepts callbacks from the individual Jupyter pods.
|
||||
{
|
||||
containerPort: 8081,
|
||||
},
|
||||
],
|
||||
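// std.prune drops the null entries produced by the platform conditionals below.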
env: std.prune([
|
||||
{
|
||||
name: "KF_AUTHENTICATOR",
|
||||
value: params.jupyterHubAuthenticator,
|
||||
},
|
||||
{
|
||||
name: "DEFAULT_JUPYTERLAB",
|
||||
value: params.useJupyterLabAsDefault,
|
||||
},
|
||||
{
|
||||
name: "STORAGE_CLASS",
|
||||
value: params.storageClass,
|
||||
},
|
||||
{
|
||||
name: "ROK_SECRET_NAME",
|
||||
value: params.rokSecretName,
|
||||
},
|
||||
if params.platform == "gke" then
|
||||
{
|
||||
name: "GCP_SECRET_NAME",
|
||||
value: params.gcpSecretName,
|
||||
},
|
||||
if params.platform == "minikube" && std.toString(params.notebookUid) != "-1" then
|
||||
{
|
||||
name: "NOTEBOOK_UID",
|
||||
value: std.toString(params.notebookUid),
|
||||
},
|
||||
if params.platform == "minikube" && std.toString(params.notebookGid) != "-1" then
|
||||
{
|
||||
name: "NOTEBOOK_GID",
|
||||
value: std.toString(params.notebookGid),
|
||||
},
|
||||
if params.platform == "minikube" then
|
||||
{
|
||||
name: "ACCESS_LOCAL_FS",
|
||||
value: std.toString(params.accessLocalFs),
|
||||
},
|
||||
]),
|
||||
}, // jupyter container
|
||||
],
|
||||
serviceAccountName: "jupyter",
|
||||
volumes: [
|
||||
{
|
||||
configMap: {
|
||||
name: "jupyter-config",
|
||||
},
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
updateStrategy: {
|
||||
type: "RollingUpdate",
|
||||
},
|
||||
},
|
||||
},
|
||||
hubStatefulSet:: hubStatefulSet,
|
||||
|
||||
// contents based on https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/jupyterhub/templates/hub/rbac.yaml
|
||||
local hubRole = {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "Role",
|
||||
metadata: {
|
||||
name: "jupyter-role",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"pods",
|
||||
"persistentvolumeclaims",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"watch",
|
||||
"list",
|
||||
"create",
|
||||
"delete",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"events",
|
||||
"secrets",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"watch",
|
||||
"list",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
hubRole:: hubRole,
|
||||
|
||||
local notebookRole = {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "Role",
|
||||
metadata: {
|
||||
name: "jupyter-notebook-role",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"pods",
|
||||
"pods/log",
|
||||
"secrets",
|
||||
"services",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
"apps",
|
||||
"extensions",
|
||||
],
|
||||
resources: [
|
||||
"deployments",
|
||||
"replicasets",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"kubeflow.org",
|
||||
],
|
||||
resources: [
|
||||
"*",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"batch",
|
||||
],
|
||||
resources: [
|
||||
"jobs",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
notebookRole:: notebookRole,
|
||||
|
||||
local hubServiceAccount = {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "jupyter",
|
||||
},
|
||||
name: "jupyter",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
},
|
||||
hubServiceAccount:: hubServiceAccount,
|
||||
|
||||
local notebookServiceAccount = {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "jupyter-notebook",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
},
|
||||
notebookServiceAccount:: notebookServiceAccount,
|
||||
|
||||
local hubRoleBinding = {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "RoleBinding",
|
||||
metadata: {
|
||||
name: "jupyter-role",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "Role",
|
||||
name: "jupyter-role",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "jupyter",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
],
|
||||
},
|
||||
hubRoleBinding:: hubRoleBinding,
|
||||
|
||||
local notebookRoleBinding = {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "RoleBinding",
|
||||
metadata: {
|
||||
name: "jupyter-notebook-role",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "Role",
|
||||
name: "jupyter-notebook-role",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "jupyter-notebook",
|
||||
namespace: params.namespace,
|
||||
},
|
||||
],
|
||||
},
|
||||
notebookRoleBinding:: notebookRoleBinding,
|
||||
|
||||
local localstorage = (import "localstorage.libsonnet"),
|
||||
pv:: localstorage.pv,
|
||||
pvclaim:: localstorage.pvclaim,
|
||||
|
||||
parts:: self,
|
||||
all:: [
|
||||
self.kubeSpawnerConfig,
|
||||
self.notebookService,
|
||||
self.hubStatefulSet,
|
||||
self.hubRole,
|
||||
self.notebookRole,
|
||||
self.hubService,
|
||||
self.hubServiceAccount,
|
||||
self.notebookServiceAccount,
|
||||
self.hubRoleBinding,
|
||||
self.notebookRoleBinding,
|
||||
] + std.flattenArrays([
|
||||
if params.accessLocalFs == "true" then [
|
||||
self.pv,
|
||||
self.pvclaim,
|
||||
] else [],
|
||||
]),
|
||||
|
||||
list(obj=self.all):: util.list(obj),
|
||||
},
|
||||
}
|
||||
|
|
@ -1,136 +0,0 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Configuration file for JupyterHub.
|
||||
|
||||
Kubeflow uses this file as the configuration file for JupyterHub. It contains
|
||||
all glue code necessary to integrate JupyterHub with the remaining Kubeflow
|
||||
components.
|
||||
|
||||
Note that this file is also responsible for importing the UI-specific Spawner
|
||||
class from <ui-dir>/spawner.py, and setting the `spawner_class` configuration
|
||||
option.
|
||||
"""
|
||||
|
||||
import os
|
||||
from importlib.util import spec_from_file_location, module_from_spec
|
||||
from jhub_remote_user_authenticator.remote_user_auth import \
|
||||
RemoteUserAuthenticator
|
||||
|
||||
SERVICE_ACCOUNT_SECRET_MOUNT = '/var/run/secrets/sa'
|
||||
|
||||
# Import the UI-specific Spawner
|
||||
spec = spec_from_file_location('spawner', '/etc/config/spawner.py')
|
||||
spawner = module_from_spec(spec)
|
||||
spec.loader.exec_module(spawner)
|
||||
|
||||
###################################################
|
||||
# JupyterHub Options
|
||||
###################################################
|
||||
c.JupyterHub.ip = '0.0.0.0'
|
||||
c.JupyterHub.hub_ip = '0.0.0.0'
|
||||
# Don't try to cleanup servers on exit - since in general for k8s, we want
|
||||
# the hub to be able to restart without losing user containers
|
||||
c.JupyterHub.cleanup_servers = False
|
||||
###################################################
|
||||
|
||||
###################################################
|
||||
# Spawner Options
|
||||
###################################################
|
||||
c.JupyterHub.spawner_class = spawner.KubeFormSpawner
|
||||
|
||||
c.KubeSpawner.cmd = 'start-singleuser.sh'
|
||||
c.KubeSpawner.args = ['--allow-root']
|
||||
# GPU images are very large (~15GB), so we need a large timeout.
|
||||
c.KubeSpawner.start_timeout = 60 * 30
|
||||
# Increase timeout to 5 minutes to avoid HTTP 500 errors on JupyterHub
|
||||
c.KubeSpawner.http_timeout = 60 * 5
|
||||
|
||||
# Volume setup
|
||||
c.KubeSpawner.singleuser_uid = 1000
|
||||
c.KubeSpawner.singleuser_fs_gid = 100
|
||||
c.KubeSpawner.singleuser_working_dir = '/home/jovyan'
|
||||
|
||||
# Allow environment vars to override uid and gid.
|
||||
# This allows local host path mounts to be read/writable
|
||||
env_uid = os.environ.get('NOTEBOOK_UID')
|
||||
if env_uid:
|
||||
c.KubeSpawner.singleuser_uid = int(env_uid)
|
||||
env_gid = os.environ.get('NOTEBOOK_GID')
|
||||
if env_gid:
|
||||
c.KubeSpawner.singleuser_fs_gid = int(env_gid)
|
||||
access_local_fs = os.environ.get('ACCESS_LOCAL_FS')
|
||||
if access_local_fs == 'true':
|
||||
|
||||
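    # Post-start hook: symlink the local-notebooks mount into the user's home directory.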
def modify_pod_hook(spawner, pod):
|
||||
pod.spec.containers[0].lifecycle = {
|
||||
'postStart': {
|
||||
'exec': {
|
||||
'command': [
|
||||
'ln', '-s', '/mnt/local-notebooks',
|
||||
'/home/jovyan/local-notebooks'
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
return pod
|
||||
|
||||
c.KubeSpawner.modify_pod_hook = modify_pod_hook
|
||||
|
||||
###################################################
|
||||
# Persistent volume options
|
||||
###################################################
|
||||
|
||||
# Set user_storage_pvc_ensure to False to prevent KubeSpawner from handling PVCs
|
||||
# We natively handle PVCs via KubeFormSpawner and its dedicated methods
|
||||
|
||||
# NOTE: user_storage_pvc_ensure will be deprecated in a future release
|
||||
c.KubeSpawner.storage_pvc_ensure = False
|
||||
c.KubeSpawner.user_storage_pvc_ensure = False
|
||||
|
||||
volumes = []
|
||||
volume_mounts = []
|
||||
|
||||
gcp_secret_name = os.environ.get('GCP_SECRET_NAME')
|
||||
if gcp_secret_name:
|
||||
volumes.append({
|
||||
'name': gcp_secret_name,
|
||||
'secret': {
|
||||
'secretName': gcp_secret_name,
|
||||
}
|
||||
})
|
||||
volume_mounts.append({
|
||||
'name': gcp_secret_name,
|
||||
'mountPath': SERVICE_ACCOUNT_SECRET_MOUNT
|
||||
})
|
||||
|
||||
c.KubeSpawner.volumes = volumes
|
||||
c.KubeSpawner.volume_mounts = volume_mounts
|
||||
|
||||
storage_class = None
|
||||
if os.environ.get('STORAGE_CLASS') != 'null':
|
||||
storage_class = os.environ.get('STORAGE_CLASS')
|
||||
|
||||
rok_secret_name = ''
|
||||
if os.environ.get('ROK_SECRET_NAME') != 'null':
|
||||
rok_secret_name = os.environ.get('ROK_SECRET_NAME')
|
||||
|
||||
# Set both service_account and singleuser_service_account because
|
||||
# singleuser_service_account has been deprecated in a future release
|
||||
c.KubeSpawner.service_account = 'jupyter-notebook'
|
||||
c.KubeSpawner.singleuser_service_account = 'jupyter-notebook'
|
||||
# Authenticator
|
||||
if os.environ.get('KF_AUTHENTICATOR') == 'iap':
|
||||
c.JupyterHub.authenticator_class = RemoteUserAuthenticator
|
||||
c.RemoteUserAuthenticator.header_name = 'x-goog-authenticated-user-email'
|
||||
else:
|
||||
c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'
|
||||
|
||||
if os.environ.get('DEFAULT_JUPYTERLAB').lower() == 'true':
|
||||
c.KubeSpawner.default_url = '/lab'
|
||||
|
||||
# Set extra spawner configuration variables
|
||||
c.KubeSpawner.extra_spawner_config = {
|
||||
'gcp_secret_name': gcp_secret_name,
|
||||
'storage_class': storage_class,
|
||||
'rok_secret_name': rok_secret_name,
|
||||
}
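extra_spawner_config is the stock KubeSpawner trait for handing arbitrary
values to a spawner subclass. A minimal sketch of reading it from a
KubeFormSpawner-style subclass (the class and method below are illustrative,
not the actual <ui-dir>/spawner.py code):

from kubespawner import KubeSpawner


class KubeFormSpawnerSketch(KubeSpawner):
    """Illustrative subclass; the real KubeFormSpawner lives in spawner.py."""

    def _storage_class(self):
        # Values set in jupyterhub_config.py arrive on the instance as the
        # extra_spawner_config dict.
        return self.extra_spawner_config.get('storage_class')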

@@ -1,64 +0,0 @@

{
  local pv = {
    kind: 'PersistentVolume',
    apiVersion: 'v1',
    metadata: {
      name: 'local-volume',
      labels: {
        type: 'local',
      },
    },
    spec: {
      persistentVolumeReclaimPolicy: 'Delete',
      storageClassName: 'local-storage',
      capacity: {
        storage: '10Gi',
      },
      accessModes: [
        'ReadWriteOnce',
      ],
      'local': {
        path: '/mnt/local',
      },
      nodeAffinity: {
        required: {
          nodeSelectorTerms: [
            {
              matchExpressions: [
                {
                  key: 'kubernetes.io/hostname',
                  operator: 'In',
                  values: [
                    'minikube',
                  ],
                },
              ],
            },
          ],
        },
      },
    },
  },
  pv:: pv,

  local pvclaim = {
    kind: 'PersistentVolumeClaim',
    apiVersion: 'v1',
    metadata: {
      name: 'local-notebooks',
    },
    spec: {
      storageClassName: 'local-storage',
      accessModes: [
        'ReadWriteOnce',
      ],
      resources: {
        requests: {
          storage: '10Gi',
        },
      },
      volumeName: 'local-volume',
    },
  },
  pvclaim:: pvclaim,
}
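The hidden pv:: and pvclaim:: fields above evaluate to ordinary v1 manifests
once the jsonnet is rendered. A minimal sketch of applying the rendered pair
with the official kubernetes Python client (a local kubeconfig is assumed, and
the claim's namespace below is illustrative):

from kubernetes import client, config

config.load_kube_config()  # assumes a local kubeconfig, e.g. minikube
core = client.CoreV1Api()

# Rendered equivalents of the pv/pvclaim objects above.
pv = {
    'apiVersion': 'v1', 'kind': 'PersistentVolume',
    'metadata': {'name': 'local-volume', 'labels': {'type': 'local'}},
    'spec': {
        'persistentVolumeReclaimPolicy': 'Delete',
        'storageClassName': 'local-storage',
        'capacity': {'storage': '10Gi'},
        'accessModes': ['ReadWriteOnce'],
        'local': {'path': '/mnt/local'},
        'nodeAffinity': {'required': {'nodeSelectorTerms': [{
            'matchExpressions': [{'key': 'kubernetes.io/hostname',
                                  'operator': 'In',
                                  'values': ['minikube']}]}]}},
    },
}
pvc = {
    'apiVersion': 'v1', 'kind': 'PersistentVolumeClaim',
    'metadata': {'name': 'local-notebooks'},
    'spec': {
        'storageClassName': 'local-storage',
        'accessModes': ['ReadWriteOnce'],
        'resources': {'requests': {'storage': '10Gi'}},
        'volumeName': 'local-volume',
    },
}

core.create_persistent_volume(body=pv)
core.create_namespaced_persistent_volume_claim(namespace='default', body=pvc)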

@@ -1,193 +0,0 @@

{
  local util = import "kubeflow/common/util.libsonnet",

  new(_env, _params):: {
    local params = _env + _params,

    local notebooksCRD = {
      apiVersion: "apiextensions.k8s.io/v1beta1",
      kind: "CustomResourceDefinition",
      metadata: {
        name: "notebooks.kubeflow.org",
      },
      spec: {
        group: "kubeflow.org",
        version: "v1alpha1",
        scope: "Namespaced",
        subresources: {
          status: {},
        },
        names: {
          plural: "notebooks",
          singular: "notebook",
          kind: "Notebook",
        },
      },
      status: {
        acceptedNames: {
          kind: "",
          plural: "",
        },
        conditions: [],
        storedVersions: [],
      },
    },
    notebooksCRD:: notebooksCRD,

    local controllerService = {
      apiVersion: "v1",
      kind: "Service",
      metadata: {
        name: "notebooks-controller",
        namespace: params.namespace,
      },
      spec: {
        selector: {
          app: "notebooks-controller",
        },
        ports: [
          {
            port: 443,
          },
        ],
      },
    },
    controllerService:: controllerService,

    local controllerDeployment = {
      apiVersion: "apps/v1beta1",
      kind: "Deployment",
      metadata: {
        name: "notebooks-controller",
        namespace: params.namespace,
      },
      spec: {
        selector: {
          matchLabels: {
            app: "notebooks-controller",
          },
        },
        template: {
          metadata: {
            labels: {
              app: "notebooks-controller",
            },
          },
          spec: {
            serviceAccountName: "notebook-controller",
            containers: [
              {
                name: "manager",
                image: params.controllerImage,
                imagePullPolicy: "Always",
                command: [
                  "/manager",
                ],
                env: if util.toBool(params.injectGcpCredentials) then [
                  {
                    name: "POD_LABELS",
                    value: "gcp-cred-secret=user-gcp-sa,gcp-cred-secret-filename=user-gcp-sa.json",
                  },
                ] else [],
              },
            ],
          },
        },
      },
    },
    controllerDeployment:: controllerDeployment,

    local serviceAccount = {
      apiVersion: "v1",
      kind: "ServiceAccount",
      metadata: {
        labels: {
          app: "notebook-controller",
        },
        name: "notebook-controller",
        namespace: params.namespace,
      },
    },
    serviceAccount:: serviceAccount,

    local role = {
      apiVersion: "rbac.authorization.k8s.io/v1",
      kind: "ClusterRole",
      metadata: {
        name: "notebooks-controller",
      },
      rules: [
        {
          apiGroups: [
            "apps",
          ],
          resources: [
            "statefulsets",
            "deployments",
          ],
          verbs: [
            "*",
          ],
        },
        {
          apiGroups: [
            "",
          ],
          resources: [
            "services",
            "pods",
          ],
          verbs: [
            "*",
          ],
        },
        {
          apiGroups: [
            "kubeflow.org",
          ],
          resources: [
            "notebooks",
            "notebooks/status",
          ],
          verbs: [
            "*",
          ],
        },
      ],
    },
    role:: role,

    local roleBinding = {
      apiVersion: "rbac.authorization.k8s.io/v1",
      kind: "ClusterRoleBinding",
      metadata: {
        name: "notebooks-controller",
      },
      roleRef: {
        apiGroup: "rbac.authorization.k8s.io",
        kind: "ClusterRole",
        name: "notebooks-controller",
      },
      subjects: [
        {
          kind: "ServiceAccount",
          name: "notebook-controller",
          namespace: params.namespace,
        },
      ],
    },
    roleBinding:: roleBinding,

    parts:: self,
    all:: [
      self.notebooksCRD,
      self.controllerService,
      self.serviceAccount,
      self.controllerDeployment,
      self.role,
      self.roleBinding,
    ],

    list(obj=self.all):: util.list(obj),
  },
}
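Once the CRD above is registered, Notebook objects are ordinary namespaced
custom resources. A minimal sketch of creating one with the official
kubernetes Python client (the name, namespace, and empty spec below are
illustrative; the v1alpha1 CRD declares no schema, so the controller defines
what it accepts):

from kubernetes import client, config

config.load_kube_config()
crd_api = client.CustomObjectsApi()

# Group/version/plural match the CRD above; the spec payload is illustrative.
notebook = {
    'apiVersion': 'kubeflow.org/v1alpha1',
    'kind': 'Notebook',
    'metadata': {'name': 'my-notebook'},
    'spec': {},
}

crd_api.create_namespaced_custom_object(
    group='kubeflow.org',
    version='v1alpha1',
    namespace='kubeflow',
    plural='notebooks',
    body=notebook,
)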