Adding Openshift stack (#1567)

* Adding owners files

* adding required openshift scc

* Adding istio

* Adding istio

* Fixing kustomize

* adding cert-manager

* Adding Argo

* Adding cert manager

* Adding jupyter web app

* adding profiles and nb controller

* Adding components

* Adding Seldon

* Adding kfdef for openshift

* Fixing the fsgrp issue with nb controller

* Adding a patch for jnakfour in metadatadb

* moving openshift-scc to top

* Adding namespace in profiles

* Disabling tls in destination rule for pipeline ui

* Adding route

* Moving profiles and pipeline to top kustomize

* Adding new line

* Adding custom profile image to disable istio injection

* Updating profile image and istio scc

* Switching pipeline to generic and pulling out to application

Co-authored-by: Juana Nakfour <nakfour>
This commit is contained in:
Juana Nakfour 2020-11-06 16:05:38 -06:00 committed by GitHub
parent b0fd34c2ba
commit 3440e7bf0c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
42 changed files with 1209 additions and 0 deletions

View File

@ -0,0 +1,103 @@
apiVersion: kfdef.apps.kubeflow.org/v1
kind: KfDef
metadata:
  name: kubeflow
  namespace: kubeflow
spec:
  applications:
  # This needs to be first to set up permissions
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/openshift/openshift-scc
    name: openshift-scc
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/istio-stack
    name: istio-stack
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/istio
    name: istio
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: application/v3
    name: application
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: metacontroller/base
    name: metacontroller
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/cert-manager-crds
    name: cert-manager-crds
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/cert-manager-kube-system-resources
    name: cert-manager-kube-system-resources
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/cert-manager
    name: cert-manager
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/argo
    name: argo
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/jupyter-web-app
    name: jupyter-web-app
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/notebook-controller
    name: notebook-controller
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/metadata
    name: metadata
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/tf-job
    name: tf-job
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/pytorch-job
    name: pytorch-job
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/katib
    name: katib
  # Pipeline is installed as its own generic application (the multi-user
  # install is disabled until OpenShift OAuth integration is in place).
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/pipeline
    name: pipeline
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift/application/seldon
    name: seldon
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: stacks/openshift
    name: kubeflow-apps
  repos:
  - name: manifests
    uri: https://github.com/kubeflow/manifests/archive/v1.1-branch.tar.gz
    # Fix: version aligned with the branch the uri archive actually points
    # at (was v1.0-branch while the tarball came from v1.1-branch).
    version: v1.1-branch

4
stacks/openshift/OWNERS Normal file
View File

@ -0,0 +1,4 @@
approvers:
- nakfour
- vpavlin
- crobby

View File

@ -0,0 +1,32 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- ../../../../argo/base_v3
configMapGenerator:
- name: workflow-controller-parameters
behavior: merge
literals:
- containerRuntimeExecutor=k8sapi
- name: kubeflow-config
envs:
- ../../config/params.env
# We need to define vars at the top level otherwise we will get
# conflicts.
vars:
- fieldref:
fieldPath: data.clusterDomain
name: clusterDomain
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config
- fieldref:
fieldPath: metadata.namespace
name: namespace
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config

View File

@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: cert-manager
resources:
- ../../../../cert-manager/cert-manager-crds/base

View File

@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kube-system
resources:
- ../../../../cert-manager/cert-manager-kube-system-resources/base

View File

@ -0,0 +1,13 @@
apiVersion: kustomize.config.k8s.io/v1beta1
commonLabels:
app.kubernetes.io/component: cert-manager
app.kubernetes.io/name: cert-manager
kustomize.component: cert-manager
kind: Kustomization
namespace: cert-manager
resources:
- ../../../../cert-manager/cert-manager/base
- ../../../../cert-manager/cert-manager/overlays/application/application.yaml
- ../../../../cert-manager/cert-manager/overlays/self-signed/cluster-issuer.yaml
configurations:
- ../../../../cert-manager/cert-manager/overlays/application/params.yaml

View File

@ -0,0 +1,17 @@
# OpenShift Route exposing the Istio ingress gateway outside the cluster.
kind: Route
apiVersion: route.openshift.io/v1
metadata:
  name: istio-ingressgateway
  namespace: istio-system
  labels:
    app: istio-ingressgateway
    istio: ingressgateway
    release: istio
spec:
  to:
    kind: Service
    name: istio-ingressgateway
    weight: 100
  port:
    # Forwards to the gateway's http2 (plain-HTTP) service port; no TLS
    # termination is configured on this Route.
    # NOTE(review): confirm whether edge TLS termination is desired here.
    targetPort: http2
  wildcardPolicy: None

View File

@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: istio-system
resources:
- ../../../../istio/istio-crds/base
- ../../../../istio/istio-install/base
- ingressgateway.route.yaml

View File

@ -0,0 +1,13 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- ../../../../istio/istio/base
configMapGenerator:
- name: istio-parameters
behavior: merge
envs:
- params.env
configurations:
- params.yaml

View File

@ -0,0 +1 @@
clusterRbacConfig=OFF

View File

@ -0,0 +1,3 @@
varReference:
- path: spec/mode
kind: ClusterRbacConfig

View File

@ -0,0 +1,125 @@
# Configuration file for the Jupyter UI.
#
# Each Jupyter UI option is configured by two keys: 'value' and 'readOnly'
# - The 'value' key contains the default value
# - The 'readOnly' key determines if the option will be available to users
#
# If the 'readOnly' key is present and set to 'true', the respective option
# will be disabled for users and only set by the admin. Also when a
# Notebook is POSTED to the API if a necessary field is not present then
# the value from the config will be used.
#
# If the 'readOnly' key is missing (defaults to 'false'), the respective option
# will be available for users to edit.
#
# Note that some values can be templated. Such values are the names of the
# Volumes as well as their StorageClass
spawnerFormDefaults:
image:
# The container Image for the user's Jupyter Notebook
# If readonly, this value must be a member of the list below
value: quay.io/kubeflow/tf-notebook-image:v0.7.0
# The list of available standard container Images
options:
- quay.io/kubeflow/tf-notebook-image:v0.7.0
# By default, custom container Images are allowed
# Uncomment the following line to only enable standard container Images
readOnly: false
cpu:
# CPU for user's Notebook
value: '0.5'
readOnly: false
memory:
# Memory for user's Notebook
value: 1.0Gi
readOnly: false
workspaceVolume:
# Workspace Volume to be attached to user's Notebook
# Each Workspace Volume is declared with the following attributes:
# Type, Name, Size, MountPath and Access Mode
value:
type:
# The Type of the Workspace Volume
# Supported values: 'New', 'Existing'
value: New
name:
# The Name of the Workspace Volume
# Note that this is a templated value. Special values:
# {notebook-name}: Replaced with the name of the Notebook. The frontend
# will replace this value as the user types the name
value: 'workspace-{notebook-name}'
size:
# The Size of the Workspace Volume (in Gi)
value: '10Gi'
mountPath:
# The Path that the Workspace Volume will be mounted
value: /home/jovyan
accessModes:
# The Access Mode of the Workspace Volume
# Supported values: 'ReadWriteOnce', 'ReadWriteMany', 'ReadOnlyMany'
value: ReadWriteOnce
class:
# The StorageClass the PVC will use if type is New. Special values are:
# {none}: default StorageClass
# {empty}: empty string ""
value: '{none}'
readOnly: false
dataVolumes:
# List of additional Data Volumes to be attached to the user's Notebook
value: []
# Each Data Volume is declared with the following attributes:
# Type, Name, Size, MountPath and Access Mode
#
# For example, a list with 2 Data Volumes:
# value:
# - value:
# type:
# value: New
# name:
# value: '{notebook-name}-vol-1'
# size:
# value: '10Gi'
# class:
# value: standard
# mountPath:
# value: /home/jovyan/vol-1
# accessModes:
# value: ReadWriteOnce
# class:
# value: {none}
# - value:
# type:
# value: New
# name:
# value: '{notebook-name}-vol-2'
# size:
# value: '10Gi'
# mountPath:
# value: /home/jovyan/vol-2
# accessModes:
# value: ReadWriteMany
# class:
# value: {none}
readOnly: false
gpus:
# Number of GPUs to be assigned to the Notebook Container
value:
# values: "none", "1", "2", "4", "8"
num: "none"
# Determines what the UI will show and send to the backend
vendors:
- limitsKey: "nvidia.com/gpu"
uiName: "NVIDIA"
# Values: "" or a `limits-key` from the vendors list
vendor: ""
readOnly: false
shm:
value: true
readOnly: false
configurations:
# List of labels to be selected, these are the labels from PodDefaults
# value:
# - add-gcp-secret
# - default-editor
value: []
readOnly: false

View File

@ -0,0 +1,36 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- ../../../../jupyter/jupyter-web-app/base_v3
configMapGenerator:
- name: jupyter-web-app-config
behavior: merge
files:
- ./configs/spawner_ui_config.yaml
- name: kubeflow-config
envs:
- ../../config/params.env
# We need to define vars at the top level otherwise we will get
# conflicts.
vars:
- fieldref:
fieldPath: data.clusterDomain
name: clusterDomain
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config
- fieldref:
fieldPath: metadata.namespace
name: namespace
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config
images:
- name: gcr.io/kubeflow-images-public/jupyter-web-app
newTag: v1.0.0
newName: quay.io/kubeflow/jupyter-web-app

View File

@ -0,0 +1,3 @@
- op: add
path: /spec/template/spec/containers/0/args/-
value: '--webhook-inject-securitycontext=true'

View File

@ -0,0 +1,22 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: katib-mysql
spec:
template:
spec:
containers:
- name: katib-mysql
env:
- name: MYSQL_USER
valueFrom:
secretKeyRef:
name: katib-mysql-secrets
key: MYSQL_USER
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: katib-mysql-secrets
key: MYSQL_PASSWORD
- name: MYSQL_LOWER_CASE_TABLE_NAMES
value: '1'

View File

@ -0,0 +1,12 @@
- op: replace
path: /spec/template/spec/containers/0/livenessProbe/exec/command/2
value: 'mysqladmin ping -uroot'
- op: replace
path: /spec/template/spec/containers/0/readinessProbe/exec/command/2
value: 'mysql -D ${MYSQL_DATABASE} -uroot -e ''SELECT 1'''
- op: remove
path: /spec/template/spec/containers/0/args
- op: replace
path: /spec/template/spec/containers/0/volumeMounts/0/mountPath
value: /var/lib/mysql/data

View File

@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: katib-mysql-secrets
data:
  # WARNING: base64 is an encoding, not encryption — these are default
  # development credentials committed to version control; override them for
  # any real deployment.
  MYSQL_ROOT_PASSWORD: dGVzdA== # "test"
  MYSQL_USER: dGVzdA== # "test"
  MYSQL_PASSWORD: dGVzdC9wYXNzd29yZA== # "test/password"

View File

@ -0,0 +1,59 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- ../../../../katib/installs/katib-standalone
patchesStrategicMerge:
- katib-mysql-secret.yaml
- katib-mysql-deployment-patch.yaml
patchesJson6902:
- target:
    group: apps
    version: v1
    kind: Deployment
    name: katib-controller
  path: katib-controller-deployment.yaml
- target:
    group: apps
    version: v1
    kind: Deployment
    name: katib-mysql
  path: katib-mysql-deployment.yaml
configMapGenerator:
- envs:
  - ../../config/params.env
  name: kubeflow-config
# We need to define vars at the top level otherwise we will get
# conflicts.
vars:
- fieldref:
    fieldPath: data.clusterDomain
  name: clusterDomain
  objref:
    apiVersion: v1
    kind: ConfigMap
    name: kubeflow-config
- fieldref:
    fieldPath: metadata.namespace
  name: namespace
  objref:
    apiVersion: v1
    kind: ConfigMap
    name: kubeflow-config
- fieldref:
    # Fix: key casing normalized to "fieldPath" to match every other var in
    # this stack (was lowercase "fieldpath").
    fieldPath: metadata.namespace
  name: katib-ui-namespace
  objref:
    kind: Service
    name: katib-ui
    apiVersion: v1
images:
- name: mysql
  newTag: "latest"
  newName: registry.redhat.io/rhscl/mysql-80-rhel7

View File

@ -0,0 +1,41 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- ../../../../metadata/v3
- metadata-db-serviceaccount.yaml
patchesStrategicMerge:
- metadata-db-deployment.yaml
patches:
- target:
kind: Deployment
version: v1
group: apps
name: metadata-db
path: patchdb.yaml
configMapGenerator:
- name: kubeflow-config
envs:
- ../../config/params.env
# We need to define vars at the top level otherwise we will get
# conflicts.
vars:
- fieldref:
fieldPath: data.clusterDomain
name: clusterDomain
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config
- fieldref:
fieldPath: metadata.namespace
name: namespace
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config

View File

@ -0,0 +1,8 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: metadata-db
spec:
template:
spec:
serviceAccountName: metadatadb

View File

@ -0,0 +1,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
component: db
name: metadatadb
namespace: kubeflow

View File

@ -0,0 +1,3 @@
- op: replace
path: /spec/template/spec/containers/0/readinessProbe/exec/command/2
value: "mysql -D $$MYSQL_DATABASE -u$$MYSQL_USER_NAME -p$$MYSQL_ROOT_PASSWORD -e 'SELECT 1'"

View File

@ -0,0 +1,12 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: deployment
spec:
template:
spec:
containers:
- name: manager
env:
- name: ADD_FSGROUP
value: "false"

View File

@ -0,0 +1,31 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- ../../../../jupyter/notebook-controller/base_v3
patchesStrategicMerge:
- ./configs/addfsgroup-env.yaml
configMapGenerator:
- name: kubeflow-config
envs:
- ../../config/params.env
# We need to define vars at the top level otherwise we will get
# conflicts.
vars:
- fieldref:
fieldPath: data.clusterDomain
name: clusterDomain
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config
- fieldref:
fieldPath: metadata.namespace
name: namespace
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config

View File

@ -0,0 +1,52 @@
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
  annotations:
    kubernetes.io/description: kubeflow-anyuid provides all features of the restricted SCC
      but allows users to run with any UID and any GID.
  name: kubeflow-anyuid-istio
allowHostDirVolumePlugin: false
allowHostIPC: false
allowHostNetwork: false
allowHostPID: false
allowHostPorts: false
allowPrivilegeEscalation: true
allowPrivilegedContainer: false
allowedCapabilities: null
defaultAddCapabilities: null
fsGroup:
  type: RunAsAny
groups:
- system:cluster-admins
priority: 10
readOnlyRootFilesystem: false
requiredDropCapabilities:
- MKNOD
runAsUser:
  type: RunAsAny
seLinuxContext:
  type: MustRunAs
supplementalGroups:
  type: RunAsAny
users:
# Component: istio/istio-install
- system:serviceaccount:istio-system:istio-egressgateway-service-account
- system:serviceaccount:istio-system:istio-citadel-service-account
- system:serviceaccount:istio-system:istio-ingressgateway-service-account
- system:serviceaccount:istio-system:istio-cleanup-old-ca-service-account
- system:serviceaccount:istio-system:istio-mixer-post-install-account
- system:serviceaccount:istio-system:istio-mixer-service-account
- system:serviceaccount:istio-system:istio-pilot-service-account
# Fix: istio-sidecar-injector-service-account was listed twice; duplicate
# entry removed.
- system:serviceaccount:istio-system:istio-sidecar-injector-service-account
- system:serviceaccount:istio-system:istio-galley-service-account
- system:serviceaccount:istio-system:prometheus
# Component: istio/cluster-local-gateway
- system:serviceaccount:istio-system:cluster-local-gateway-service-account
volumes:
- configMap
- downwardAPI
- emptyDir
- persistentVolumeClaim
- projected
- secret

View File

@ -0,0 +1,44 @@
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
annotations:
kubernetes.io/description: kubeflow-anyuid provides all features of the restricted SCC
but allows users to run with any UID and any GID.
name: kubeflow-anyuid-$(NAMESPACE)
allowHostDirVolumePlugin: false
allowHostIPC: false
allowHostNetwork: false
allowHostPID: false
allowHostPorts: false
allowPrivilegeEscalation: true
allowPrivilegedContainer: false
allowedCapabilities: null
defaultAddCapabilities: null
fsGroup:
type: RunAsAny
groups:
- system:cluster-admins
priority: 10
readOnlyRootFilesystem: false
requiredDropCapabilities:
- MKNOD
runAsUser:
type: RunAsAny
seLinuxContext:
type: MustRunAs
supplementalGroups:
type: RunAsAny
users:
#Metadata DB accesses files owned by root
- system:serviceaccount:$(NAMESPACE):metadatadb
#Minio accesses files owned by root
- system:serviceaccount:$(NAMESPACE):minio
# Katib injects a sidecar container into pods that does not run as a non-root user; trying to find the Dockerfile for that image and fix it
#- system:serviceaccount:kubeflow:default
volumes:
- configMap
- downwardAPI
- emptyDir
- persistentVolumeClaim
- projected
- secret

View File

@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- kubeflow-anyuid-scc-istio.yaml
- kubeflow-anyuid-scc.yaml
vars:
# NAMESPACE resolves to the namespace of the generated scc-namespace-check
# ConfigMap and is substituted into the SCCs via params.yaml.
- name: NAMESPACE
  objref:
    apiVersion: v1
    kind: ConfigMap
    name: scc-namespace-check
  fieldref:
    # Fix: key casing normalized to "fieldPath" to match the other
    # kustomizations in this stack (was lowercase "fieldpath").
    fieldPath: metadata.namespace
configMapGenerator:
# Intentionally empty ConfigMap: it exists only so the var above can read
# the effective namespace.
- name: scc-namespace-check
configurations:
- params.yaml

View File

@ -0,0 +1 @@
namespace=kubeflow

View File

@ -0,0 +1,5 @@
varReference:
- path: users
kind: SecurityContextConstraints
- path: metadata/name
kind: SecurityContextConstraints

View File

@ -0,0 +1,45 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- ../../../../pipeline/minio/installs/generic
- ../../../../pipeline/mysql/installs/generic
# For now we don't have authentication, and istio injection is disabled
# TODO tie into OCP oauth
#- ../../../../pipeline/installs/multi-user
- ../../../../pipeline/installs/generic
# Disabling tls traffic in Destination Rule because of issue: https://github.com/kubeflow/kubeflow/issues/5271
patches:
- target:
kind: DestinationRule
version: v1alpha3
group: networking.istio.io
name: ml-pipeline-ui
path: patchuirule.yaml
#Only for multi-user
configMapGenerator:
#- name: kubeflow-pipelines-profile-controller-code
# behavior: replace
# files:
# - sync.py
#commenting these out for now since pipelines has to be installed in main kustomize because of
# the dependency on profiles
- name: kubeflow-config
envs:
- ../../config/params.env
vars:
- fieldref:
fieldPath: data.clusterDomain
name: clusterDomain
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config
- fieldref:
fieldPath: metadata.namespace
name: namespace
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config

View File

@ -0,0 +1,3 @@
- op: replace
path: /spec/trafficPolicy/tls/mode
value: DISABLE

View File

@ -0,0 +1,281 @@
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import os
import base64
# Pipeline version used to pin the visualization-server/frontend images.
# Required: raises KeyError if KFP_VERSION is unset.
kfp_version = os.environ["KFP_VERSION"]
# Only the exact string "true" disables sidecar injection; anything else
# (including unset) leaves injection enabled.
disable_istio_sidecar = os.environ.get("DISABLE_ISTIO_SIDECAR") == "true"
# NOTE(review): os.environ.get() returns None when the variable is unset and
# bytes(None, 'utf-8') raises TypeError — so these two are effectively
# required env vars despite the .get(); consider os.environ[...] for a
# clearer failure.
mlpipeline_minio_access_key = base64.b64encode(
    bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8')
mlpipeline_minio_secret_key = base64.b64encode(
    bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8')
class Controller(BaseHTTPRequestHandler):
    """Metacontroller sync webhook.

    For each parent namespace it returns the set of per-namespace Kubeflow
    Pipelines children to create: visualization server, artifact-fetcher UI,
    their istio/rbac resources, a metadata-grpc ConfigMap, and the minio
    credentials Secret.
    """

    def sync(self, parent, children):
        """Compute desired children and status for one parent namespace.

        parent: the observed namespace object (a dict).
        children: observed child objects grouped by "Kind.apiVersion" keys.
        Returns the metacontroller response dict {"status": ..., "children": ...}.
        """
        # HACK: Currently using serving.kubeflow.org/inferenceservice to identify
        # kubeflow user namespaces.
        # TODO: let Kubeflow profile controller add a pipeline specific label to
        # user namespaces and use that label instead.
        pipeline_enabled = parent.get("metadata", {}).get(
            "labels", {}).get("serving.kubeflow.org/inferenceservice")

        # Namespaces without the label get no children and empty status.
        if not pipeline_enabled:
            return {"status": {}, "children": []}

        # Compute status based on observed state: "True" only when every
        # expected child kind has been observed with the expected count.
        # (Status values are strings, not booleans.)
        desired_status = {
            "kubeflow-pipelines-ready": \
                len(children["Secret.v1"]) == 1 and \
                len(children["ConfigMap.v1"]) == 1 and \
                len(children["Deployment.apps/v1"]) == 2 and \
                len(children["Service.v1"]) == 2 and \
                len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and \
                len(children["ServiceRole.rbac.istio.io/v1alpha1"]) == 1 and \
                len(children["ServiceRoleBinding.rbac.istio.io/v1alpha1"]) == 1 and \
                "True" or "False"
        }

        # Generate the desired child object(s).
        # parent is a namespace
        namespace = parent.get("metadata", {}).get("name")
        desired_resources = [
            {
                "apiVersion": "v1",
                "kind": "ConfigMap",
                "metadata": {
                    "name": "metadata-grpc-configmap",
                    "namespace": namespace,
                },
                "data": {
                    "METADATA_GRPC_SERVICE_HOST":
                    "metadata-grpc-service.kubeflow",
                    "METADATA_GRPC_SERVICE_PORT": "8080",
                },
            },
            # Visualization server related manifests below
            {
                "apiVersion": "apps/v1",
                "kind": "Deployment",
                "metadata": {
                    "labels": {
                        "app": "ml-pipeline-visualizationserver"
                    },
                    "name": "ml-pipeline-visualizationserver",
                    "namespace": namespace,
                },
                "spec": {
                    "selector": {
                        "matchLabels": {
                            "app": "ml-pipeline-visualizationserver"
                        },
                    },
                    "template": {
                        "metadata": {
                            "labels": {
                                "app": "ml-pipeline-visualizationserver"
                            },
                            # Opt the pod out of istio injection when the
                            # DISABLE_ISTIO_SIDECAR env flag is set.
                            "annotations": disable_istio_sidecar and {
                                "sidecar.istio.io/inject": "false"
                            } or {},
                        },
                        "spec": {
                            "containers": [{
                                "image":
                                "gcr.io/ml-pipeline/visualization-server:" +
                                kfp_version,
                                "imagePullPolicy":
                                "IfNotPresent",
                                "name":
                                "ml-pipeline-visualizationserver",
                                "ports": [{
                                    "containerPort": 8888
                                }],
                            }],
                            "serviceAccountName":
                            "default-editor",
                        },
                    },
                },
            },
            {
                "apiVersion": "networking.istio.io/v1alpha3",
                "kind": "DestinationRule",
                "metadata": {
                    "name": "ml-pipeline-visualizationserver",
                    "namespace": namespace,
                },
                "spec": {
                    "host": "ml-pipeline-visualizationserver",
                    "trafficPolicy": {
                        "tls": {
                            "mode": "ISTIO_MUTUAL"
                        }
                    }
                }
            },
            {
                "apiVersion": "rbac.istio.io/v1alpha1",
                "kind": "ServiceRole",
                "metadata": {
                    "name": "ml-pipeline-visualizationserver",
                    "namespace": namespace,
                },
                "spec": {
                    "rules": [{
                        "services": ["ml-pipeline-visualizationserver.*"]
                    }]
                }
            },
            {
                "apiVersion": "rbac.istio.io/v1alpha1",
                "kind": "ServiceRoleBinding",
                "metadata": {
                    "name": "ml-pipeline-visualizationserver",
                    "namespace": namespace,
                },
                "spec": {
                    # Only the kubeflow ml-pipeline service account may call
                    # the visualization server.
                    "subjects": [{
                        "properties": {
                            "source.principal":
                            "cluster.local/ns/kubeflow/sa/ml-pipeline"
                        }
                    }],
                    "roleRef": {
                        "kind": "ServiceRole",
                        "name": "ml-pipeline-visualizationserver"
                    }
                }
            },
            {
                "apiVersion": "v1",
                "kind": "Service",
                "metadata": {
                    "name": "ml-pipeline-visualizationserver",
                    "namespace": namespace,
                },
                "spec": {
                    "ports": [{
                        "name": "http",
                        "port": 8888,
                        "protocol": "TCP",
                        "targetPort": 8888,
                    }],
                    "selector": {
                        "app": "ml-pipeline-visualizationserver",
                    },
                },
            },
            # Artifact fetcher related resources below.
            {
                "apiVersion": "apps/v1",
                "kind": "Deployment",
                "metadata": {
                    "labels": {
                        "app": "ml-pipeline-ui-artifact"
                    },
                    "name": "ml-pipeline-ui-artifact",
                    "namespace": namespace,
                },
                "spec": {
                    "selector": {
                        "matchLabels": {
                            "app": "ml-pipeline-ui-artifact"
                        }
                    },
                    "template": {
                        "metadata": {
                            "labels": {
                                "app": "ml-pipeline-ui-artifact"
                            },
                            "annotations": disable_istio_sidecar and {
                                "sidecar.istio.io/inject": "false"
                            } or {},
                        },
                        "spec": {
                            "containers": [{
                                "name":
                                "ml-pipeline-ui-artifact",
                                "image":
                                "gcr.io/ml-pipeline/frontend:" + kfp_version,
                                "imagePullPolicy":
                                "IfNotPresent",
                                "ports": [{
                                    "containerPort": 3000
                                }]
                            }],
                            "serviceAccountName":
                            "default-editor"
                        }
                    }
                }
            },
            {
                "apiVersion": "v1",
                "kind": "Service",
                "metadata": {
                    "name": "ml-pipeline-ui-artifact",
                    "namespace": namespace,
                    "labels": {
                        "app": "ml-pipeline-ui-artifact"
                    }
                },
                "spec": {
                    "ports": [{
                        "name":
                        "http",  # name is required to let istio understand request protocol
                        "port": 80,
                        "protocol": "TCP",
                        "targetPort": 3000
                    }],
                    "selector": {
                        "app": "ml-pipeline-ui-artifact"
                    }
                }
            },
        ]
        print('Received request:', parent)
        print('Desired resources except secrets:', desired_resources)
        # Moved after the print argument because this is sensitive data.
        desired_resources.append({
            "apiVersion": "v1",
            "kind": "Secret",
            "metadata": {
                "name": "mlpipeline-minio-artifact",
                "namespace": namespace,
            },
            "data": {
                "accesskey": mlpipeline_minio_access_key,
                "secretkey": mlpipeline_minio_secret_key,
            },
        })
        return {"status": desired_status, "children": desired_resources}

    def do_POST(self):
        # Serve the sync() function as a JSON webhook.
        observed = json.loads(
            self.rfile.read(int(self.headers.get("content-length"))))
        desired = self.sync(observed["parent"], observed["children"])
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
        self.wfile.write(bytes(json.dumps(desired), 'utf-8'))
# Listen on all interfaces, port 8080; blocks forever serving sync requests.
HTTPServer(("", 8080), Controller).serve_forever()

View File

@ -0,0 +1,32 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- ../../../../profiles/base_v3
configMapGenerator:
- name: kubeflow-config
envs:
- ../../config/params.env
# We need to define vars at the top level otherwise we will get
# conflicts.
vars:
- fieldref:
fieldPath: data.clusterDomain
name: clusterDomain
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config
- fieldref:
fieldPath: metadata.namespace
name: namespace
objref:
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
name: kfam
#This image we created to disable sidecar injection, keeping it out for now
#images:
#- name: gcr.io/kubeflow-images-public/profile-controller
# newName: quay.io/kubeflow/profile-controller
# newTag: v0.7.0

View File

@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- ../../../../pytorch-job/pytorch-job-crds/overlays/application
- ../../../../pytorch-job/pytorch-operator/overlays/application

View File

@ -0,0 +1,6 @@
- op: replace
path: /spec/template/spec/containers/0/args/1
value: --webhook-port=8443
- op: replace
path: /spec/template/spec/containers/0/ports/0/containerPort
value: 8443

View File

@ -0,0 +1,57 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- ../../../../seldon/seldon-core-operator/overlays/application
#patchesJson6902:
patches:
- target:
kind: MutatingWebhookConfiguration
version: v1beta1
group: admissionregistration.k8s.io
name: seldon-mutating-webhook-configuration-kubeflow
path: patchport.yaml
- target:
kind: ValidatingWebhookConfiguration
version: v1beta1
group: admissionregistration.k8s.io
name: seldon-validating-webhook-configuration-kubeflow
path: vpatchport.yaml
- target:
kind: Service
version: v1
name: seldon-webhook-service
path: resourcepatchport.yaml
- target:
kind: Deployment
group: apps
version: v1
name: seldon-controller-manager
path: deploymentpatchport.yaml
configMapGenerator:
- envs:
- ../../config/params.env
name: kubeflow-config
vars:
# We need to define vars at the top level otherwise we will get
# conflicts.
- fieldref:
fieldPath: data.clusterDomain
name: clusterDomain
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config
- fieldref:
fieldPath: metadata.namespace
name: namespace
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config

View File

@ -0,0 +1,18 @@
- op: add
path: /webhooks/0/clientConfig/service/port
value: 8443
- op: replace
path: /webhooks/0/name
value: v1.mseldondeployment.kb.io
- op: add
path: /webhooks/1/clientConfig/service/port
value: 8443
- op: replace
path: /webhooks/1/name
value: v1alpha2.mseldondeployment.kb.io
- op: add
path: /webhooks/2/clientConfig/service/port
value: 8443
- op: replace
path: /webhooks/2/name
value: v1alpha3.mseldondeployment.kb.io

View File

@ -0,0 +1,6 @@
# JSON6902 patch: move the seldon webhook Service onto port 8443.
- op: replace
  path: /spec/ports/0/port
  value: 8443
# Fix: field is "targetPort" — the original "targePort" typo pointed the
# replace op at a nonexistent path, so the patch would fail (JSON6902
# "replace" requires the target path to exist).
- op: replace
  path: /spec/ports/0/targetPort
  value: 8443

View File

@ -0,0 +1,18 @@
- op: add
path: /webhooks/0/clientConfig/service/port
value: 8443
- op: replace
path: /webhooks/0/name
value: v1.vseldondeployment.kb.io
- op: add
path: /webhooks/1/clientConfig/service/port
value: 8443
- op: replace
path: /webhooks/1/name
value: v1alpha2.vseldondeployment.kb.io
- op: add
path: /webhooks/2/clientConfig/service/port
value: 8443
- op: replace
path: /webhooks/2/name
value: v1alpha3.vseldondeployment.kb.io

View File

@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- ../../../../tf-training/tf-job-crds/overlays/application
- ../../../../tf-training/tf-job-operator/overlays/application

View File

@ -0,0 +1,5 @@
clusterDomain=cluster.local
userid-header=kubeflow-userid
userid-prefix=
namespace=
clusterdomain=

View File

@ -0,0 +1,33 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: kubeflow
resources:
- ../../common/centraldashboard/overlays/stacks
#profiles is here since namespace was not being replaced for the virtual service and pipeline depends on it
- ../../profiles/base_v3
configMapGenerator:
- name: kubeflow-config
envs:
- ./config/params.env
# We need to define vars at the top level otherwise we will get
# conflicts.
vars:
- fieldref:
fieldPath: data.clusterDomain
name: clusterDomain
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config
- fieldref:
fieldPath: metadata.namespace
name: namespace
objref:
apiVersion: v1
kind: ConfigMap
name: kubeflow-config
images:
- name: gcr.io/kubeflow-images-public/profile-controller
newName: quay.io/kubeflow/profile-controller
newTag: v1.1.0