mirror of https://github.com/kubeflow/examples.git

Remove vendor from .gitignore (#94)

* Remove vendor from .gitignore
* Tell pylint to ignore generated file

parent a5d808cc88
commit 1a4f4dc1ea
@@ -1,7 +1,6 @@
 # pkg and bin directories currently contain build artifacts
 # only so we exclude them.
 bin/
-vendor/
 
 .vscode/
@@ -0,0 +1,59 @@
# core

> Core components of Kubeflow.

* [Quickstart](#quickstart)
* [Using Prototypes](#using-prototypes)
* [io.ksonnet.pkg.kubeflow-core](#io.ksonnet.pkg.kubeflow-core)

## Quickstart

*The following commands use the `io.ksonnet.pkg.kubeflow-core` prototype to generate Kubernetes YAML for core and then deploy it to your Kubernetes cluster.*

First, create a cluster and install the ksonnet CLI (see the root-level [README.md](rootReadme)).

If you haven't yet created a [ksonnet application](linkToSomewhere), do so using `ks init <app-name>`.

Finally, in the ksonnet application directory, run the following:

```shell
# Expand the prototype as a Jsonnet file and place it in a file in the
# `components/` directory. (YAML and JSON are also available.)
$ ks prototype use io.ksonnet.pkg.kubeflow-core \
  --name core \
  --namespace default \
  --disks

# Apply to the server.
$ ks apply default -c core
```
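
Once the component has been generated, you can still adjust its parameters before re-applying. A minimal sketch, assuming the component is named `core` as above and the app has the usual `default` environment; the two optional parameters shown are declared by the prototype (see `prototypes/all.jsonnet` later in this diff for the full list):

```shell
# Override optional prototype parameters on the generated component,
# then push the updated manifests to the cluster.
ks param set core namespace kubeflow
ks param set core jupyterHubServiceType LoadBalancer
ks apply default -c core
```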

## Using the library

The library files for core define a set of relevant *parts* (_e.g._, deployments, services, secrets, and so on) that can be combined to configure core for a wide variety of scenarios. For example, a database like Redis may need a secret to hold the user password, or it may have no password if it's acting as a cache.

This library provides a set of pre-fabricated "flavors" (or "distributions") of core, each of which is configured for a different use case. These are captured as ksonnet *prototypes*, which allow users to interactively customize these distributions for their specific needs.

These prototypes, as well as how to use them, are enumerated below.
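
You can also discover and inspect the prototypes straight from the CLI; a minimal sketch using standard ksonnet commands (nothing here is specific to this package):

```shell
# List every prototype visible to the app, then show the parameters
# accepted by the kubeflow-core prototype documented below.
ks prototype list
ks prototype describe io.ksonnet.pkg.kubeflow-core
```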

### io.ksonnet.pkg.kubeflow-core

Kubeflow core components.

#### Example

```shell
# Expand the prototype as a Jsonnet file and place it in a file in the
# `components/` directory. (YAML and JSON are also available.)
$ ks prototype use io.ksonnet.pkg.kubeflow-core core \
  --name YOUR_NAME_HERE
```

#### Parameters

The available options to pass to the prototype are:

* `--name=<name>`: Name to give to each of the components [string]

[rootReadme]: https://github.com/ksonnet/mixins

@@ -0,0 +1,15 @@
{
  parts(params):: {
    local ambassador = import "kubeflow/core/ambassador.libsonnet",
    local jupyter = import "kubeflow/core/jupyterhub.libsonnet",
    local nfs = import "kubeflow/core/nfs.libsonnet",
    local tfjob = import "kubeflow/core/tf-job.libsonnet",
    local spartakus = import "kubeflow/core/spartakus.libsonnet",

    all:: jupyter.all(params)
          + tfjob.all(params)
          + ambassador.all(params)
          + nfs.all(params)
          + spartakus.all(params),
  },
}

267  github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/ambassador.libsonnet  (vendored, new file)

@@ -0,0 +1,267 @@
|
|||
{
|
||||
all(params):: [
|
||||
$.parts(params.namespace).service(params.tfAmbassadorServiceType),
|
||||
$.parts(params.namespace).adminService,
|
||||
$.parts(params.namespace).role,
|
||||
$.parts(params.namespace).serviceAccount,
|
||||
$.parts(params.namespace).roleBinding,
|
||||
$.parts(params.namespace).deploy,
|
||||
$.parts(params.namespace).k8sDashboard(params.cloud),
|
||||
],
|
||||
|
||||
parts(namespace):: {
|
||||
local ambassadorImage = "quay.io/datawire/ambassador:0.30.1",
|
||||
service(serviceType):: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "ambassador",
|
||||
},
|
||||
name: "ambassador",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "ambassador",
|
||||
port: 80,
|
||||
targetPort: 80,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
service: "ambassador",
|
||||
},
|
||||
type: serviceType,
|
||||
},
|
||||
}, // service
|
||||
|
||||
adminService:: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "ambassador-admin",
|
||||
},
|
||||
name: "ambassador-admin",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "ambassador-admin",
|
||||
port: 8877,
|
||||
targetPort: 8877,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
service: "ambassador",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
}, // adminService
|
||||
|
||||
role:: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "Role",
|
||||
metadata: {
|
||||
name: "ambassador",
|
||||
namespace: namespace,
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"services",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"list",
|
||||
"watch",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"configmaps",
|
||||
],
|
||||
verbs: [
|
||||
"create",
|
||||
"update",
|
||||
"patch",
|
||||
"get",
|
||||
"list",
|
||||
"watch",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"secrets",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"list",
|
||||
"watch",
|
||||
],
|
||||
},
|
||||
],
|
||||
}, // role
|
||||
|
||||
serviceAccount:: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "ambassador",
|
||||
namespace: namespace,
|
||||
},
|
||||
}, // serviceAccount
|
||||
|
||||
roleBinding:: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "RoleBinding",
|
||||
metadata: {
|
||||
name: "ambassador",
|
||||
namespace: namespace,
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "Role",
|
||||
name: "ambassador",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "ambassador",
|
||||
namespace: namespace,
|
||||
},
|
||||
],
|
||||
}, // roleBinding
|
||||
|
||||
deploy:: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "ambassador",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
replicas: 3,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "ambassador",
|
||||
},
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
env: [
|
||||
{
|
||||
name: "AMBASSADOR_NAMESPACE",
|
||||
valueFrom: {
|
||||
fieldRef: {
|
||||
fieldPath: "metadata.namespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "AMBASSADOR_SINGLE_NAMESPACE",
|
||||
value: "true",
|
||||
},
|
||||
],
|
||||
image: ambassadorImage,
|
||||
livenessProbe: {
|
||||
httpGet: {
|
||||
path: "/ambassador/v0/check_alive",
|
||||
port: 8877,
|
||||
},
|
||||
initialDelaySeconds: 30,
|
||||
periodSeconds: 30,
|
||||
},
|
||||
name: "ambassador",
|
||||
readinessProbe: {
|
||||
httpGet: {
|
||||
path: "/ambassador/v0/check_ready",
|
||||
port: 8877,
|
||||
},
|
||||
initialDelaySeconds: 30,
|
||||
periodSeconds: 30,
|
||||
},
|
||||
resources: {
|
||||
limits: {
|
||||
cpu: 1,
|
||||
memory: "400Mi",
|
||||
},
|
||||
requests: {
|
||||
cpu: "200m",
|
||||
memory: "100Mi",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
image: "quay.io/datawire/statsd:0.30.1",
|
||||
name: "statsd",
|
||||
},
|
||||
],
|
||||
restartPolicy: "Always",
|
||||
serviceAccountName: "ambassador",
|
||||
},
|
||||
},
|
||||
},
|
||||
}, // deploy
|
||||
|
||||
isDashboardTls(cloud)::
|
||||
if cloud == "acsengine" || cloud == "aks" then
|
||||
"false"
|
||||
else
|
||||
"true",
|
||||
// This service adds a rule to our reverse proxy for accessing the K8s dashboard.
|
||||
k8sDashboard(cloud):: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
name: "k8s-dashboard",
|
||||
namespace: namespace,
|
||||
|
||||
annotations: {
|
||||
"getambassador.io/config":
|
||||
std.join("\n", [
|
||||
"---",
|
||||
"apiVersion: ambassador/v0",
|
||||
"kind: Mapping",
|
||||
"name: k8s-dashboard-ui-mapping",
|
||||
"prefix: /k8s/ui/",
|
||||
"rewrite: /",
|
||||
"tls: " + $.parts(namespace).isDashboardTls(cloud),
|
||||
// We redirect to the K8s service created for the dashboard
|
||||
// in namespace kube-system. We don't use the k8s-dashboard service
|
||||
// because that isn't in the kube-system namespace and I don't think
|
||||
// it can select pods in a different namespace.
|
||||
"service: kubernetes-dashboard.kube-system",
|
||||
]),
|
||||
}, //annotations
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
port: 443,
|
||||
targetPort: 8443,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
"k8s-app": "kubernetes-dashboard",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
}, // k8sDashboard
|
||||
|
||||
}, // parts
|
||||
}
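
The `k8sDashboard` service above registers an Ambassador mapping that exposes the cluster dashboard under the `/k8s/ui/` prefix of the reverse proxy. One quick way to sanity-check the routes without an external load balancer is to port-forward the `ambassador` service; a minimal sketch, assuming the component was deployed into the `default` namespace and a kubectl version that can forward to a Service:

```shell
# Forward a local port to Ambassador, then hit the dashboard mapping.
kubectl -n default port-forward svc/ambassador 8080:80 &
curl -L http://localhost:8080/k8s/ui/
```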

182  github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/cert-manager.libsonnet  (vendored, new file)

@@ -0,0 +1,182 @@
|
|||
{
|
||||
parts(namespace):: {
|
||||
local k = import "k.libsonnet",
|
||||
local certManagerImage = "quay.io/jetstack/cert-manager-controller:v0.2.3",
|
||||
local certManagerIngressShimImage = "quay.io/jetstack/cert-manager-ingress-shim:v0.2.3",
|
||||
|
||||
// Note, not using std.prune to preserve required empty http01 map in the Issuer spec.
|
||||
certManagerParts(acmeEmail, acmeUrl):: k.core.v1.list.new([
|
||||
$.parts(namespace).certificateCRD,
|
||||
$.parts(namespace).clusterIssuerCRD,
|
||||
$.parts(namespace).issuerCRD,
|
||||
$.parts(namespace).serviceAccount,
|
||||
$.parts(namespace).clusterRole,
|
||||
$.parts(namespace).clusterRoleBinding,
|
||||
$.parts(namespace).deploy,
|
||||
$.parts(namespace).issuerLEProd(acmeEmail, acmeUrl),
|
||||
]),
|
||||
|
||||
certificateCRD:: {
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "certificates.certmanager.k8s.io"
|
||||
},
|
||||
spec: {
|
||||
group: "certmanager.k8s.io",
|
||||
version: "v1alpha1",
|
||||
names: {
|
||||
kind: "Certificate",
|
||||
plural: "certificates",
|
||||
},
|
||||
scope: "Namespaced",
|
||||
},
|
||||
},
|
||||
|
||||
clusterIssuerCRD:: {
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "clusterissuers.certmanager.k8s.io",
|
||||
},
|
||||
|
||||
spec: {
|
||||
group: "certmanager.k8s.io",
|
||||
version: "v1alpha1",
|
||||
names: {
|
||||
kind: "ClusterIssuer",
|
||||
plural: "clusterissuers",
|
||||
},
|
||||
scope: "Cluster",
|
||||
},
|
||||
},
|
||||
|
||||
issuerCRD:: {
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "issuers.certmanager.k8s.io"
|
||||
},
|
||||
spec: {
|
||||
group: "certmanager.k8s.io",
|
||||
version: "v1alpha1",
|
||||
names: {
|
||||
kind: "Issuer",
|
||||
plural: "issuers",
|
||||
},
|
||||
scope: "Namespaced",
|
||||
},
|
||||
},
|
||||
|
||||
serviceAccount:: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "cert-manager",
|
||||
namespace: namespace,
|
||||
}
|
||||
},
|
||||
|
||||
clusterRole:: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
name: "cert-manager",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: ["certmanager.k8s.io"],
|
||||
resources: ["certificates", "issuers", "clusterissuers"],
|
||||
verbs: ["*"],
|
||||
},
|
||||
{
|
||||
apiGroups: [""],
|
||||
resources: ["secrets", "events", "endpoints", "services", "pods"],
|
||||
verbs: ["*"],
|
||||
},
|
||||
{
|
||||
apiGroups: ["extensions"],
|
||||
resources: ["ingresses"],
|
||||
verbs: ["*"],
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
clusterRoleBinding:: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
name: "cert-manager",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "cert-manager",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
name: "cert-manager",
|
||||
namespace: namespace,
|
||||
kind: "ServiceAccount",
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
deploy:: {
|
||||
apiVersion: "apps/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "cert-manager",
|
||||
namespace: namespace,
|
||||
labels: {
|
||||
app: "cert-manager",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "cert-manager",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
serviceAccountName: "cert-manager",
|
||||
containers: [
|
||||
{
|
||||
name: "cert-manager",
|
||||
image: certManagerImage,
|
||||
imagePullPolicy: "IfNotPresent",
|
||||
},
|
||||
{
|
||||
name: "ingress-shim",
|
||||
image: certManagerIngressShimImage,
|
||||
imagePullPolicy: "IfNotPresent",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
issuerLEProd(acmeEmail, acmeUrl):: {
|
||||
apiVersion: "certmanager.k8s.io/v1alpha1",
|
||||
kind: "Issuer",
|
||||
metadata: {
|
||||
name: "letsencrypt-prod",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
acme: {
|
||||
server: acmeUrl,
|
||||
email: acmeEmail,
|
||||
privateKeySecretRef: {
|
||||
name: "letsencrypt-prod-secret",
|
||||
},
|
||||
http01: {
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}

@@ -0,0 +1,742 @@
|
|||
{
|
||||
parts(namespace):: {
|
||||
local k = import "k.libsonnet",
|
||||
|
||||
ingressParts(secretName, ipName, hostname, issuer, envoyImage, disableJwt, oauthSecretName):: std.prune(k.core.v1.list.new([
|
||||
$.parts(namespace).service,
|
||||
$.parts(namespace).ingress(secretName, ipName, hostname),
|
||||
$.parts(namespace).certificate(secretName, hostname, issuer),
|
||||
$.parts(namespace).initServiceAccount,
|
||||
$.parts(namespace).initClusterRoleBinding,
|
||||
$.parts(namespace).initClusterRole,
|
||||
$.parts(namespace).deploy(envoyImage, oauthSecretName),
|
||||
$.parts(namespace).configMap(disableJwt),
|
||||
$.parts(namespace).whoamiService,
|
||||
$.parts(namespace).whoamiApp,
|
||||
])),
|
||||
|
||||
service:: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "envoy",
|
||||
},
|
||||
name: "envoy",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "envoy",
|
||||
port: envoyPort,
|
||||
targetPort: envoyPort,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
service: "envoy",
|
||||
},
|
||||
// NodePort because this will be the backend for our ingress.
|
||||
type: "NodePort",
|
||||
},
|
||||
}, // service
|
||||
|
||||
initServiceAccount:: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
name: "envoy",
|
||||
namespace: namespace,
|
||||
},
|
||||
}, // initServiceAccount
|
||||
|
||||
initClusterRoleBinding:: {
|
||||
kind: "ClusterRoleBinding",
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
metadata: {
|
||||
name: "envoy",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "envoy",
|
||||
namespace: namespace,
|
||||
},
|
||||
],
|
||||
roleRef: {
|
||||
kind: "ClusterRole",
|
||||
name: "envoy",
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
},
|
||||
}, // initClusterRoleBinding
|
||||
|
||||
initClusterRole:: {
|
||||
kind: "ClusterRole",
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
metadata: {
|
||||
name: "envoy",
|
||||
namespace: namespace,
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [""],
|
||||
resources: ["services", "configmaps"],
|
||||
verbs: ["get", "list", "patch", "update"],
|
||||
},
|
||||
],
|
||||
}, // initClusterRole
|
||||
|
||||
envoyContainer(params):: {
|
||||
image: params.image,
|
||||
command: [
|
||||
"/usr/local/bin/envoy",
|
||||
"-c",
|
||||
params.configPath,
|
||||
"--log-level",
|
||||
"info",
|
||||
// Since we are running multiple instances of envoy on the same host we need to set a unique baseId
|
||||
"--base-id",
|
||||
params.baseId,
|
||||
],
|
||||
imagePullPolicy: "Always",
|
||||
name: params.name,
|
||||
livenessProbe: {
|
||||
httpGet: {
|
||||
path: params.healthPath,
|
||||
port: params.healthPort,
|
||||
},
|
||||
initialDelaySeconds: 30,
|
||||
periodSeconds: 30,
|
||||
},
|
||||
readinessProbe: {
|
||||
httpGet: {
|
||||
path: params.healthPath,
|
||||
port: params.healthPort,
|
||||
},
|
||||
initialDelaySeconds: 30,
|
||||
periodSeconds: 30,
|
||||
},
|
||||
ports: std.map(function(p)
|
||||
{
|
||||
containerPort: p,
|
||||
}
|
||||
, params.ports),
|
||||
resources: {
|
||||
limits: {
|
||||
cpu: 1,
|
||||
memory: "400Mi",
|
||||
},
|
||||
requests: {
|
||||
cpu: "200m",
|
||||
memory: "100Mi",
|
||||
},
|
||||
},
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/etc/envoy",
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
}, // envoyContainer
|
||||
|
||||
deploy(image, oauthSecretName):: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "envoy",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
replicas: 3,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "envoy",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
serviceAccountName: "envoy",
|
||||
initContainers: [
|
||||
{
|
||||
name: "iap",
|
||||
image: "google/cloud-sdk:alpine",
|
||||
command: [
|
||||
"sh",
|
||||
"/var/envoy-config/iap-init.sh",
|
||||
],
|
||||
env: [
|
||||
{
|
||||
name: "NAMESPACE",
|
||||
value: namespace,
|
||||
},
|
||||
{
|
||||
name: "CLIENT_ID",
|
||||
valueFrom: {
|
||||
secretKeyRef: {
|
||||
name: oauthSecretName,
|
||||
key: "CLIENT_ID",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "CLIENT_SECRET",
|
||||
valueFrom: {
|
||||
secretKeyRef: {
|
||||
name: oauthSecretName,
|
||||
key: "CLIENT_SECRET",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "SERVICE",
|
||||
value: "envoy",
|
||||
},
|
||||
],
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/var/envoy-config/",
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
containers: [
|
||||
$.parts(namespace).envoyContainer({
|
||||
image: image,
|
||||
name: "envoy",
|
||||
// We use the admin port for the health and readiness checks because the main port requires a valid JWT.
|
||||
healthPath: "/server_info",
|
||||
healthPort: envoyAdminPort,
|
||||
configPath: "/etc/envoy/envoy-config.json",
|
||||
baseId: "27000",
|
||||
ports: [envoyPort, envoyAdminPort, envoyStatsPort],
|
||||
}),
|
||||
],
|
||||
restartPolicy: "Always",
|
||||
volumes: [
|
||||
{
|
||||
configMap: {
|
||||
name: "envoy-config",
|
||||
},
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
}, // deploy
|
||||
|
||||
configMap(disableJwt):: {
|
||||
apiVersion: "v1",
|
||||
kind: "ConfigMap",
|
||||
metadata: {
|
||||
name: "envoy-config",
|
||||
namespace: namespace,
|
||||
},
|
||||
data: {
|
||||
"envoy-config.json": std.manifestJson($.parts(namespace).envoyConfig(disableJwt)),
|
||||
// Script executed by init container to enable IAP. When finished, the configmap is patched with the JWT audience.
|
||||
"iap-init.sh": |||
|
||||
[ -z ${CLIENT_ID} ] && echo Error CLIENT_ID must be set && exit 1
|
||||
[ -z ${CLIENT_SECRET} ] && echo Error CLIENT_SECRET must be set && exit 1
|
||||
[ -z ${NAMESPACE} ] && echo Error NAMESPACE must be set && exit 1
|
||||
[ -z ${SERVICE} ] && echo Error SERVICE must be set && exit 1
|
||||
|
||||
apk add --update jq
|
||||
curl https://storage.googleapis.com/kubernetes-release/release/v1.9.4/bin/linux/amd64/kubectl > /usr/local/bin/kubectl && chmod +x /usr/local/bin/kubectl
|
||||
|
||||
# Stagger init of replicas when acquiring lock
|
||||
sleep $(( $RANDOM % 5 + 1 ))
|
||||
|
||||
kubectl get svc ${SERVICE} -o json > service.json
|
||||
LOCK=$(jq -r ".metadata.annotations.iaplock" service.json)
|
||||
|
||||
NOW=$(date -u +'%s')
|
||||
if [[ -z "${LOCK}" || "${LOCK}" == "null" ]]; then
|
||||
LOCK_T=$NOW
|
||||
else
|
||||
LOCK_T=$(echo "${LOCK}" | cut -d' ' -f2)
|
||||
fi
|
||||
LOCK_AGE=$(( $NOW - $LOCK_T ))
|
||||
LOCK_TTL=120
|
||||
if [[ -z "${LOCK}" || "${LOCK}" == "null" || "${LOCK_AGE}" -gt "${LOCK_TTL}" ]]; then
|
||||
jq -r ".metadata.annotations.iaplock=\"$(hostname -s) ${NOW}\"" service.json > service_lock.json
|
||||
kubectl apply -f service_lock.json 2>/dev/null
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "Acquired lock on service annotation to update IAP."
|
||||
else
|
||||
echo "WARN: Failed to acquire lock on service annotation."
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "WARN: Lock on service annotation already acquired by: $LOCK, age: $LOCK_AGE, TTL: $LOCK_TTL"
|
||||
sleep 20
|
||||
exit 1
|
||||
fi
|
||||
|
||||
PROJECT=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/project-id)
|
||||
if [ -z ${PROJECT} ]; then
|
||||
echo Error unable to fetch PROJECT from compute metadata
|
||||
exit 1
|
||||
fi
|
||||
|
||||
PROJECT_NUM=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/project/numeric-project-id)
|
||||
if [ -z ${PROJECT_NUM} ]; then
|
||||
echo Error unable to fetch PROJECT_NUM from compute metadata
|
||||
exit 1
|
||||
fi
|
||||
|
||||
NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}')
|
||||
while [[ -z ${BACKEND_ID} ]];
|
||||
do BACKEND_ID=$(gcloud compute --project=${PROJECT} backend-services list --filter=name~k8s-be-${NODE_PORT}- --format='value(id)');
|
||||
echo "Waiting for backend id PROJECT=${PROJECT} NAMESPACE=${NAMESPACE} SERVICE=${SERVICE}...";
|
||||
sleep 2;
|
||||
done
|
||||
echo BACKEND_ID=${BACKEND_ID}
|
||||
|
||||
NODE_PORT=$(kubectl --namespace=${NAMESPACE} get svc ${SERVICE} -o jsonpath='{.spec.ports[0].nodePort}')
|
||||
BACKEND_SERVICE=$(gcloud --project=${PROJECT} compute backend-services list --filter=name~k8s-be-${NODE_PORT}- --uri)
|
||||
# Enable IAP on the backend service:
|
||||
gcloud --project=${PROJECT} compute backend-services update ${BACKEND_SERVICE} \
|
||||
--global \
|
||||
--iap=enabled,oauth2-client-id=${CLIENT_ID},oauth2-client-secret=${CLIENT_SECRET}
|
||||
|
||||
while [[ -z ${HEALTH_CHECK_URI} ]];
|
||||
do HEALTH_CHECK_URI=$(gcloud compute --project=${PROJECT} health-checks list --filter=name~k8s-be-${NODE_PORT}- --uri);
|
||||
echo "Waiting for the healthcheck resource PROJECT=${PROJECT} NODEPORT=${NODE_PORT} SERVICE=${SERVICE}...";
|
||||
sleep 2;
|
||||
done
|
||||
|
||||
# Since we create the envoy-ingress ingress object before creating the envoy
|
||||
# deployment object, healthcheck will not be configured correctly in the GCP
|
||||
# load balancer. It will default the healthcheck request path to a value of
|
||||
# / instead of the intended /healthz.
|
||||
# Manually update the healthcheck request path to /healthz
|
||||
gcloud --project=${PROJECT} compute health-checks update http ${HEALTH_CHECK_URI} --request-path=/healthz
|
||||
|
||||
# Since JupyterHub uses websockets we want to increase the backend timeout
|
||||
echo Increasing backend timeout for JupyterHub
|
||||
gcloud --project=${PROJECT} compute backend-services update --global ${BACKEND_SERVICE} --timeout=3600
|
||||
|
||||
JWT_AUDIENCE="/projects/${PROJECT_NUM}/global/backendServices/${BACKEND_ID}"
|
||||
|
||||
echo JWT_AUDIENCE=${JWT_AUDIENCE}
|
||||
|
||||
kubectl get configmap -n ${NAMESPACE} envoy-config -o json | \
|
||||
sed -e "s|{{JWT_AUDIENCE}}|${JWT_AUDIENCE}|g" | kubectl apply -f -
|
||||
|
||||
echo "Clearing lock on service annotation"
|
||||
kubectl patch svc "${SERVICE}" -p "{\"metadata\": { \"annotations\": {\"iaplock\": \"\" }}}"
|
||||
|||,
|
||||
},
|
||||
},
|
||||
|
||||
local envoyPort = 8080,
|
||||
local envoyAdminPort = 8001,
|
||||
local envoyStatsPort = 8025,
|
||||
|
||||
// This is the config for the secondary envoy proxy which does JWT verification
|
||||
// and actually routes requests to the appropriate backend.
|
||||
envoyConfig(disableJwt):: {
|
||||
listeners: [
|
||||
{
|
||||
address: "tcp://0.0.0.0:" + envoyPort,
|
||||
filters: [
|
||||
{
|
||||
type: "read",
|
||||
name: "http_connection_manager",
|
||||
config: {
|
||||
codec_type: "auto",
|
||||
stat_prefix: "ingress_http",
|
||||
access_log: [
|
||||
{
|
||||
format: 'ACCESS [%START_TIME%] "%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% "%REQ(X-FORWARDED-FOR)%" "%REQ(USER-AGENT)%" "%REQ(X-REQUEST-ID)%" "%REQ(:AUTHORITY)%" "%UPSTREAM_HOST%"\n',
|
||||
path: "/dev/fd/1",
|
||||
},
|
||||
],
|
||||
route_config: {
|
||||
virtual_hosts: [
|
||||
{
|
||||
name: "backend",
|
||||
domains: ["*"],
|
||||
routes: [
|
||||
// First route that matches is picked.
|
||||
{
|
||||
timeout_ms: 10000,
|
||||
path: "/healthz",
|
||||
prefix_rewrite: "/server_info",
|
||||
weighted_clusters: {
|
||||
clusters: [
|
||||
|
||||
{ name: "cluster_healthz", weight: 100.0 },
|
||||
|
||||
],
|
||||
},
|
||||
},
|
||||
// Provide access to the whoami app skipping JWT verification.
|
||||
// this is useful for debugging.
|
||||
{
|
||||
timeout_ms: 10000,
|
||||
prefix: "/noiap/whoami",
|
||||
prefix_rewrite: "/",
|
||||
weighted_clusters: {
|
||||
clusters: [
|
||||
{
|
||||
name: "cluster_iap_app",
|
||||
weight: 100.0,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
timeout_ms: 10000,
|
||||
prefix: "/whoami",
|
||||
prefix_rewrite: "/",
|
||||
weighted_clusters: {
|
||||
clusters: [
|
||||
{
|
||||
name: "cluster_iap_app",
|
||||
weight: 100.0,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
// Jupyter uses the prefixes /hub & /user
|
||||
{
|
||||
// JupyterHub requires the prefix /hub
|
||||
// Use a 10 minute timeout because downloading
|
||||
// images for jupyter notebook can take a while
|
||||
timeout_ms: 600000,
|
||||
prefix: "/hub",
|
||||
prefix_rewrite: "/hub",
|
||||
use_websocket: true,
|
||||
weighted_clusters: {
|
||||
clusters: [
|
||||
{
|
||||
name: "cluster_jupyterhub",
|
||||
weight: 100.0,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
// JupyterHub requires the prefix /user
|
||||
// Use a 10 minute timeout because downloading
|
||||
// images for jupyter notebook can take a while
|
||||
timeout_ms: 600000,
|
||||
prefix: "/user",
|
||||
prefix_rewrite: "/user",
|
||||
use_websocket: true,
|
||||
weighted_clusters: {
|
||||
clusters: [
|
||||
{
|
||||
name: "cluster_jupyterhub",
|
||||
weight: 100.0,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
// TODO(ankushagarwal): We should eventually
|
||||
// redirect to the central UI once its ready
|
||||
// See https://github.com/kubeflow/kubeflow/pull/146
|
||||
// Redirect to jupyterhub when visiting /
|
||||
{
|
||||
timeout_ms: 600000,
|
||||
path: "/",
|
||||
prefix_rewrite: "/hub",
|
||||
use_websocket: true,
|
||||
weighted_clusters: {
|
||||
clusters: [
|
||||
{
|
||||
name: "cluster_jupyterhub",
|
||||
weight: 100.0,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
// Route remaining traffic to Ambassador which supports dynamically adding
|
||||
// routes based on service annotations.
|
||||
timeout_ms: 10000,
|
||||
prefix: "/",
|
||||
prefix_rewrite: "/",
|
||||
use_websocket: true,
|
||||
weighted_clusters: {
|
||||
clusters: [
|
||||
{
|
||||
name: "cluster_ambassador",
|
||||
weight: 100.0,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
local authFilter = if disableJwt then
|
||||
[]
|
||||
else [{
|
||||
type: "decoder",
|
||||
name: "jwt-auth",
|
||||
config: {
|
||||
jwts: [
|
||||
{
|
||||
issuer: "https://cloud.google.com/iap",
|
||||
audiences: "{{JWT_AUDIENCE}}",
|
||||
jwks_uri: "https://www.gstatic.com/iap/verify/public_key-jwk",
|
||||
jwks_uri_envoy_cluster: "iap_issuer",
|
||||
jwt_headers: ["x-goog-iap-jwt-assertion"],
|
||||
},
|
||||
],
|
||||
bypass_jwt: [
|
||||
{
|
||||
http_method: "GET",
|
||||
path_exact: "/healthz",
|
||||
},
|
||||
{
|
||||
http_method: "GET",
|
||||
path_exact: "/noiap/whoami",
|
||||
},
|
||||
],
|
||||
},
|
||||
}],
|
||||
filters:
|
||||
authFilter +
|
||||
[
|
||||
{
|
||||
type: "decoder",
|
||||
name: "router",
|
||||
config: {},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
admin: {
|
||||
// We use 0.0.0.0 and not 127.0.0.1 because we want the admin server to be available on all devices
|
||||
// so that it can be used for health checking.
|
||||
address: "tcp://0.0.0.0:" + envoyAdminPort,
|
||||
access_log_path: "/tmp/admin_access_log",
|
||||
},
|
||||
cluster_manager: {
|
||||
clusters: [
|
||||
{
|
||||
name: "cluster_healthz",
|
||||
connect_timeout_ms: 3000,
|
||||
type: "strict_dns",
|
||||
lb_type: "round_robin",
|
||||
hosts: [
|
||||
{
|
||||
// We just use the admin server for the health check
|
||||
url: "tcp://127.0.0.1:" + envoyAdminPort,
|
||||
},
|
||||
|
||||
],
|
||||
},
|
||||
{
|
||||
name: "iap_issuer",
|
||||
connect_timeout_ms: 5000,
|
||||
type: "strict_dns",
|
||||
circuit_breakers: {
|
||||
default: {
|
||||
max_pending_requests: 10000,
|
||||
max_requests: 10000,
|
||||
},
|
||||
},
|
||||
lb_type: "round_robin",
|
||||
hosts: [
|
||||
{
|
||||
url: "tcp://www.gstatic.com:80",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
name: "cluster_iap_app",
|
||||
connect_timeout_ms: 3000,
|
||||
type: "strict_dns",
|
||||
lb_type: "round_robin",
|
||||
hosts: [
|
||||
{
|
||||
url: "tcp://whoami-app." + namespace + ":80",
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
name: "cluster_jupyterhub",
|
||||
connect_timeout_ms: 3000,
|
||||
type: "strict_dns",
|
||||
lb_type: "round_robin",
|
||||
hosts: [
|
||||
{
|
||||
url: "tcp://tf-hub-lb." + namespace + ":80",
|
||||
},
|
||||
|
||||
],
|
||||
},
|
||||
{
|
||||
name: "cluster_ambassador",
|
||||
connect_timeout_ms: 3000,
|
||||
type: "strict_dns",
|
||||
lb_type: "round_robin",
|
||||
hosts: [
|
||||
{
|
||||
url: "tcp://ambassador." + namespace + ":80",
|
||||
},
|
||||
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
statsd_udp_ip_address: "127.0.0.1:" + envoyStatsPort,
|
||||
stats_flush_interval_ms: 1000,
|
||||
}, // envoyConfig
|
||||
|
||||
whoamiService:: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "whoami",
|
||||
},
|
||||
name: "whoami-app",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
port: 80,
|
||||
targetPort: 8081,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "whoami",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
}, // whoamiService
|
||||
|
||||
whoamiApp:: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "whoami-app",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "whoami",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
env: [
|
||||
{
|
||||
name: "PORT",
|
||||
value: "8081",
|
||||
},
|
||||
],
|
||||
image: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0",
|
||||
name: "app",
|
||||
ports: [
|
||||
{
|
||||
containerPort: 8081,
|
||||
},
|
||||
],
|
||||
readinessProbe: {
|
||||
failureThreshold: 2,
|
||||
httpGet: {
|
||||
path: "/healthz",
|
||||
port: 8081,
|
||||
scheme: "HTTP",
|
||||
},
|
||||
periodSeconds: 10,
|
||||
successThreshold: 1,
|
||||
timeoutSeconds: 5,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
ingress(secretName, ipName, hostname):: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Ingress",
|
||||
metadata: {
|
||||
name: "envoy-ingress",
|
||||
namespace: namespace,
|
||||
annotations: {
|
||||
"kubernetes.io/tls-acme": "true",
|
||||
"ingress.kubernetes.io/ssl-redirect": "true",
|
||||
"kubernetes.io/ingress.global-static-ip-name": ipName,
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
rules: [
|
||||
{
|
||||
[if hostname != "null" then "host"]: hostname,
|
||||
http: {
|
||||
paths: [
|
||||
{
|
||||
backend: {
|
||||
// Due to https://github.com/kubernetes/contrib/blob/master/ingress/controllers/gce/examples/health_checks/README.md#limitations
|
||||
// Keep servicePort the same as the port we are targeting on the backend so that servicePort matches targetPort for the purpose of
|
||||
// health checking.
|
||||
serviceName: "envoy",
|
||||
servicePort: envoyPort,
|
||||
},
|
||||
path: "/*",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
],
|
||||
tls: [
|
||||
{
|
||||
secretName: secretName,
|
||||
},
|
||||
],
|
||||
},
|
||||
}, // iapIngress
|
||||
|
||||
certificate(secretName, hostname, issuer):: {
|
||||
apiVersion: "certmanager.k8s.io/v1alpha1",
|
||||
kind: "Certificate",
|
||||
metadata: {
|
||||
name: secretName,
|
||||
namespace: namespace,
|
||||
},
|
||||
|
||||
spec: {
|
||||
secretName: secretName,
|
||||
issuerRef: {
|
||||
name: issuer,
|
||||
},
|
||||
commonName: hostname,
|
||||
dnsNames: [
|
||||
hostname,
|
||||
],
|
||||
acme: {
|
||||
config: [
|
||||
{
|
||||
http01: {
|
||||
ingress: "envoy-ingress",
|
||||
},
|
||||
domains: [
|
||||
hostname,
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
}, // certificate
|
||||
}, // parts
|
||||
}
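
The Envoy route table above exposes the test app both behind IAP (`/whoami`) and with JWT verification bypassed (`/noiap/whoami`), which is useful for checking the ingress end to end. A minimal sketch, reusing the example hostname from the iap-ingress prototype docs below:

```shell
# Served without IAP credentials: both the route and the jwt-auth filter
# bypass verification for this exact path.
curl https://mykubeflow.example.com/noiap/whoami

# Requires a valid IAP JWT in the x-goog-iap-jwt-assertion header;
# an unauthenticated request should be redirected or rejected.
curl -i https://mykubeflow.example.com/whoami
```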

256  github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/jupyterhub.libsonnet  (vendored, new file)

@@ -0,0 +1,256 @@
|
|||
{
|
||||
all(params):: [
|
||||
$.parts(params.namespace).jupyterHubConfigMap(params.jupyterHubAuthenticator, params.disks),
|
||||
$.parts(params.namespace).jupyterHubService,
|
||||
$.parts(params.namespace).jupyterHubLoadBalancer(params.jupyterHubServiceType),
|
||||
$.parts(params.namespace).jupyterHub(params.jupyterHubImage),
|
||||
$.parts(params.namespace).jupyterHubRole,
|
||||
$.parts(params.namespace).jupyterHubServiceAccount,
|
||||
$.parts(params.namespace).jupyterHubRoleBinding,
|
||||
],
|
||||
|
||||
parts(namespace):: {
|
||||
jupyterHubConfigMap(jupyterHubAuthenticator, disks): {
|
||||
local util = import "kubeflow/core/util.libsonnet",
|
||||
local diskNames = util.toArray(disks),
|
||||
local kubeSpawner = $.parts(namespace).kubeSpawner(jupyterHubAuthenticator, diskNames),
|
||||
result:: $.parts(namespace).jupyterHubConfigMapWithSpawner(kubeSpawner),
|
||||
}.result,
|
||||
|
||||
kubeSpawner(authenticator, volumeClaims=[]): {
|
||||
// TODO(jlewi): We should make whether we use PVC configurable.
|
||||
local baseKubeConfigSpawner = importstr "jupyterhub_spawner.py",
|
||||
|
||||
authenticatorOptions:: {
|
||||
|
||||
//## Authenticator Options
|
||||
local kubeConfigDummyAuthenticator = "c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'",
|
||||
|
||||
// This configuration allows us to use the id provided by IAP.
|
||||
local kubeConfigIAPAuthenticator = @"c.JupyterHub.authenticator_class ='jhub_remote_user_authenticator.remote_user_auth.RemoteUserAuthenticator'
|
||||
c.RemoteUserAuthenticator.header_name = 'x-goog-authenticated-user-email'",
|
||||
|
||||
options:: std.join("\n", std.prune([
|
||||
"######## Authenticator ######",
|
||||
if authenticator == "iap" then
|
||||
kubeConfigIAPAuthenticator else
|
||||
kubeConfigDummyAuthenticator,
|
||||
])),
|
||||
}.options, // authenticatorOptions
|
||||
|
||||
volumeOptions:: {
|
||||
local volumes = std.map(function(v)
|
||||
{
|
||||
name: v,
|
||||
persistentVolumeClaim: {
|
||||
claimName: v,
|
||||
},
|
||||
}, volumeClaims),
|
||||
|
||||
|
||||
local volumeMounts = std.map(function(v)
|
||||
{
|
||||
mountPath: "/mnt/" + v,
|
||||
name: v,
|
||||
}, volumeClaims),
|
||||
|
||||
options::
|
||||
if std.length(volumeClaims) > 0 then
|
||||
std.join("\n",
|
||||
[
|
||||
"###### Volumes #######",
|
||||
"c.KubeSpawner.volumes = " + std.manifestPython(volumes),
|
||||
"c.KubeSpawner.volume_mounts = " + std.manifestPython(volumeMounts),
|
||||
])
|
||||
else "",
|
||||
|
||||
}.options, // volumeOptions
|
||||
|
||||
spawner:: std.join("\n", std.prune([baseKubeConfigSpawner, self.authenticatorOptions, self.volumeOptions])),
|
||||
}.spawner, // kubeSpawner
|
||||
|
||||
local baseJupyterHubConfigMap = {
|
||||
apiVersion: "v1",
|
||||
kind: "ConfigMap",
|
||||
metadata: {
|
||||
name: "jupyterhub-config",
|
||||
namespace: namespace,
|
||||
},
|
||||
},
|
||||
|
||||
jupyterHubConfigMapWithSpawner(spawner): baseJupyterHubConfigMap {
|
||||
data: {
|
||||
"jupyterhub_config.py": spawner,
|
||||
},
|
||||
},
|
||||
|
||||
jupyterHubService: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "tf-hub",
|
||||
},
|
||||
name: "tf-hub-0",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
// We want a headless service so we set the ClusterIP to be None.
|
||||
// This headless service is used by individual Jupyter pods to connect back to the Hub.
|
||||
clusterIP: "None",
|
||||
ports: [
|
||||
{
|
||||
name: "hub",
|
||||
port: 8000,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "tf-hub",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
jupyterHubLoadBalancer(serviceType): {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "tf-hub-lb",
|
||||
},
|
||||
name: "tf-hub-lb",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "hub",
|
||||
port: 80,
|
||||
targetPort: 8000,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "tf-hub",
|
||||
},
|
||||
type: serviceType,
|
||||
},
|
||||
},
|
||||
|
||||
// image: Image for JupyterHub
|
||||
jupyterHub(image): {
|
||||
apiVersion: "apps/v1beta1",
|
||||
kind: "StatefulSet",
|
||||
metadata: {
|
||||
name: "tf-hub",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
serviceName: "",
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "tf-hub",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
command: [
|
||||
"jupyterhub",
|
||||
"-f",
|
||||
"/etc/config/jupyterhub_config.py",
|
||||
],
|
||||
image: image,
|
||||
name: "tf-hub",
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/etc/config",
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
ports: [
|
||||
// Port 8000 is used by the hub to accept incoming requests.
|
||||
{
|
||||
containerPort: 8000,
|
||||
},
|
||||
// Port 8081 accepts callbacks from the individual Jupyter pods.
|
||||
{
|
||||
containerPort: 8081,
|
||||
},
|
||||
],
|
||||
}, // jupyterHub container
|
||||
],
|
||||
serviceAccountName: "jupyter-hub",
|
||||
volumes: [
|
||||
{
|
||||
configMap: {
|
||||
name: "jupyterhub-config",
|
||||
},
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
updateStrategy: {
|
||||
type: "RollingUpdate",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
jupyterHubRole: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "Role",
|
||||
metadata: {
|
||||
name: "jupyter-role",
|
||||
namespace: namespace,
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"*",
|
||||
],
|
||||
// TODO(jlewi): This is very permissive so we may want to lock this down.
|
||||
resources: [
|
||||
"*",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
jupyterHubServiceAccount: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "jupyter-hub",
|
||||
},
|
||||
name: "jupyter-hub",
|
||||
namespace: namespace,
|
||||
},
|
||||
},
|
||||
|
||||
jupyterHubRoleBinding: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "RoleBinding",
|
||||
metadata: {
|
||||
name: "jupyter-role",
|
||||
namespace: namespace,
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "Role",
|
||||
name: "jupyter-role",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "jupyter-hub",
|
||||
namespace: namespace,
|
||||
},
|
||||
],
|
||||
},
|
||||
}, // parts
|
||||
}
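
JupyterHub itself listens on port 8000 inside the `tf-hub` StatefulSet, and the `tf-hub-lb` service fronts it on port 80 (ClusterIP by default). A minimal sketch for reaching the spawner form locally, assuming the component runs in the `default` namespace and the single replica is therefore named `tf-hub-0`:

```shell
# Forward the hub port from the StatefulSet pod and open http://localhost:8000.
kubectl -n default port-forward tf-hub-0 8000:8000
```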

122  github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/jupyterhub_spawner.py  (vendored, new file)

@@ -0,0 +1,122 @@
|
|||
# pylint: skip-file
|
||||
import json
|
||||
import os
|
||||
from kubespawner.spawner import KubeSpawner
|
||||
from jhub_remote_user_authenticator.remote_user_auth import RemoteUserAuthenticator
|
||||
from oauthenticator.github import GitHubOAuthenticator
|
||||
|
||||
class KubeFormSpawner(KubeSpawner):
|
||||
|
||||
# relies on HTML5 for image datalist
|
||||
def _options_form_default(self):
|
||||
return '''
|
||||
<label for='image'>Image</label>
|
||||
<input list="image" name="image" placeholder='repo/image:tag'>
|
||||
<datalist id="image">
|
||||
<option value="gcr.io/kubeflow-images-staging/tensorflow-1.4.1-notebook-cpu:v20180403-1f854c44">
|
||||
<option value="gcr.io/kubeflow-images-staging/tensorflow-1.4.1-notebook-gpu:v20180403-1f854c44">
|
||||
<option value="gcr.io/kubeflow-images-staging/tensorflow-1.5.1-notebook-cpu:v20180403-1f854c44">
|
||||
<option value="gcr.io/kubeflow-images-staging/tensorflow-1.5.1-notebook-gpu:v20180403-1f854c44">
|
||||
<option value="gcr.io/kubeflow-images-staging/tensorflow-1.6.0-notebook-cpu:v20180403-1f854c44">
|
||||
<option value="gcr.io/kubeflow-images-staging/tensorflow-1.6.0-notebook-gpu:v20180403-1f854c44">
|
||||
<option value="gcr.io/kubeflow-images-staging/tensorflow-1.7.0-notebook-cpu:v20180403-1f854c44">
|
||||
<option value="gcr.io/kubeflow-images-staging/tensorflow-1.7.0-notebook-gpu:v20180403-1f854c44">
|
||||
</datalist>
|
||||
<br/><br/>
|
||||
|
||||
<label for='cpu_guarantee'>CPU</label>
|
||||
<input name='cpu_guarantee' placeholder='200m, 1.0, 2.5, etc'></input>
|
||||
<br/><br/>
|
||||
|
||||
<label for='mem_guarantee'>Memory</label>
|
||||
<input name='mem_guarantee' placeholder='100Mi, 1.5Gi'></input>
|
||||
<br/><br/>
|
||||
|
||||
<label for='extra_resource_limits'>Extra Resource Limits</label>
|
||||
<input name='extra_resource_limits' placeholder='{"nvidia.com/gpu": 3}'></input>
|
||||
<br/><br/>
|
||||
'''
|
||||
|
||||
def options_from_form(self, formdata):
|
||||
options = {}
|
||||
options['image'] = formdata.get('image', [''])[0].strip()
|
||||
options['cpu_guarantee'] = formdata.get('cpu_guarantee', [''])[0].strip()
|
||||
options['mem_guarantee'] = formdata.get('mem_guarantee', [''])[0].strip()
|
||||
options['extra_resource_limits'] = formdata.get('extra_resource_limits', [''])[0].strip()
|
||||
return options
|
||||
|
||||
@property
|
||||
def singleuser_image_spec(self):
|
||||
image = 'gcr.io/kubeflow/tensorflow-notebook-cpu'
|
||||
if self.user_options.get('image'):
|
||||
image = self.user_options['image']
|
||||
return image
|
||||
|
||||
@property
|
||||
def cpu_guarantee(self):
|
||||
cpu = '500m'
|
||||
if self.user_options.get('cpu_guarantee'):
|
||||
cpu = self.user_options['cpu_guarantee']
|
||||
return cpu
|
||||
|
||||
@property
|
||||
def mem_guarantee(self):
|
||||
mem = '1Gi'
|
||||
if self.user_options.get('mem_guarantee'):
|
||||
mem = self.user_options['mem_guarantee']
|
||||
return mem
|
||||
|
||||
@property
|
||||
def extra_resource_limits(self):
|
||||
extra = ''
|
||||
if self.user_options.get('extra_resource_limits'):
|
||||
extra = json.loads(self.user_options['extra_resource_limits'])
|
||||
return extra
|
||||
|
||||
###################################################
|
||||
### JupyterHub Options
|
||||
###################################################
|
||||
c.JupyterHub.ip = '0.0.0.0'
|
||||
c.JupyterHub.hub_ip = '0.0.0.0'
|
||||
# Don't try to cleanup servers on exit - since in general for k8s, we want
|
||||
# the hub to be able to restart without losing user containers
|
||||
c.JupyterHub.cleanup_servers = False
|
||||
###################################################
|
||||
|
||||
###################################################
|
||||
### Spawner Options
|
||||
###################################################
|
||||
c.JupyterHub.spawner_class = KubeFormSpawner
|
||||
c.KubeSpawner.singleuser_image_spec = 'gcr.io/kubeflow/tensorflow-notebook'
|
||||
c.KubeSpawner.cmd = 'start-singleuser.sh'
|
||||
c.KubeSpawner.args = ['--allow-root']
|
||||
# gpu images are very large ~15GB. need a large timeout.
|
||||
c.KubeSpawner.start_timeout = 60 * 30
|
||||
|
||||
###################################################
|
||||
### Persistent volume options
|
||||
###################################################
|
||||
# Using persistent storage requires a default storage class.
|
||||
# TODO(jlewi): Verify this works on minikube.
|
||||
# TODO(jlewi): Should we set c.KubeSpawner.singleuser_fs_gid = 1000
|
||||
# see https://github.com/kubeflow/kubeflow/pull/22#issuecomment-350500944
|
||||
pvc_mount = os.environ.get('NOTEBOOK_PVC_MOUNT')
|
||||
if pvc_mount and pvc_mount != 'null':
|
||||
c.KubeSpawner.user_storage_pvc_ensure = True
|
||||
# How much disk space do we want?
|
||||
c.KubeSpawner.user_storage_capacity = '10Gi'
|
||||
c.KubeSpawner.pvc_name_template = 'claim-{username}{servername}'
|
||||
c.KubeSpawner.volumes = [
|
||||
{
|
||||
'name': 'volume-{username}{servername}',
|
||||
'persistentVolumeClaim': {
|
||||
'claimName': 'claim-{username}{servername}'
|
||||
}
|
||||
}
|
||||
]
|
||||
c.KubeSpawner.volume_mounts = [
|
||||
{
|
||||
'mountPath': pvc_mount,
|
||||
'name': 'volume-{username}{servername}'
|
||||
}
|
||||
]
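
The persistent-volume block above is gated entirely on the `NOTEBOOK_PVC_MOUNT` environment variable; the ksonnet prototype declares a matching `jupyterNotebookPVCMount` parameter (see prototypes/all.jsonnet below), though the wiring between the two is not shown in this diff. A minimal sketch of toggling it, assuming the component is named `core`; the `/home/jovyan` mount path is only an illustration:

```shell
# Mount a per-user PVC in each notebook pod (set the parameter to the
# empty string to disable PVC provisioning), then re-apply.
ks param set core jupyterNotebookPVCMount /home/jovyan
ks apply default -c core
```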

@@ -0,0 +1,302 @@
|
|||
// A ksonnet prototype/component for using NFS.
|
||||
|
||||
{
|
||||
// TODO(https://github.com/ksonnet/ksonnet/issues/222): Taking namespace as an argument is a work around for the fact that ksonnet
|
||||
// doesn't support automatically piping in the namespace from the environment to prototypes.
|
||||
//
|
||||
// Return a list of components needed if you want to mount some disks using NFS.
|
||||
// diskNames should be a list of PDs.
|
||||
all(params):: {
|
||||
local namespace = params.namespace,
|
||||
local name = params.name,
|
||||
local disks = params.disks,
|
||||
|
||||
// Create a list of the resources needed for a particular disk
|
||||
local diskToList = function(diskName) [
|
||||
$.parts(namespace, name,).diskResources(diskName).storageClass,
|
||||
$.parts(namespace, name,).diskResources(diskName).volumeClaim,
|
||||
$.parts(namespace, name,).diskResources(diskName).service,
|
||||
$.parts(namespace, name,).diskResources(diskName).provisioner,
|
||||
],
|
||||
local util = import "kubeflow/core/util.libsonnet",
|
||||
local allDisks = std.flattenArrays(std.map(diskToList, util.toArray(disks))),
|
||||
|
||||
items::
|
||||
if std.length(allDisks) > 0 then
|
||||
[
|
||||
$.parts(namespace, name).serviceAccount,
|
||||
$.parts(namespace, name).role,
|
||||
$.parts(namespace, name).roleBinding,
|
||||
$.parts(namespace, name).clusterRoleBinding,
|
||||
] + allDisks
|
||||
else
|
||||
[],
|
||||
|
||||
}.items,
|
||||
|
||||
// Create a provisioner with the specified name.
|
||||
// disks should be a list GCP persistent disk names; these disks should be in the
|
||||
// same zone as your cluster.
|
||||
// TODO(jlewi):
|
||||
parts(namespace, name):: {
|
||||
|
||||
local serviceAccountName = name,
|
||||
local serviceAccountRoleName = name,
|
||||
|
||||
|
||||
// Create the resources for a specific disk.
|
||||
// Each NFS Provisioner can only manage 1 PD so we need to create one for each disk.
|
||||
diskResources(diskName): {
|
||||
|
||||
local storageClassName = diskName + "-nfs",
|
||||
local provisionerName = diskName + "-provisioner",
|
||||
local storageClassProvisioner = diskName + "/nfs",
|
||||
local serviceName = diskName + "-service",
|
||||
|
||||
volumeClaim: {
|
||||
apiVersion: "v1",
|
||||
kind: "PersistentVolumeClaim",
|
||||
metadata: {
|
||||
annotations: {
|
||||
"volume.beta.kubernetes.io/storage-class": storageClassName,
|
||||
},
|
||||
name: diskName,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
accessModes: [
|
||||
"ReadWriteMany",
|
||||
],
|
||||
resources: {
|
||||
requests: {
|
||||
storage: "1Mi",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
// TODO(jlewi): Is StorageClass actually namespace scoped? It seems to show up in the default namespace as well.
|
||||
// TODO(jlewi): Could we just use the default cluster storage class?
|
||||
storageClass: {
|
||||
apiVersion: "storage.k8s.io/v1beta1",
|
||||
kind: "StorageClass",
|
||||
metadata: {
|
||||
name: storageClassName,
|
||||
namespace: namespace,
|
||||
},
|
||||
// This value must be the same as passed as argument --provisioner to the provisioner
|
||||
provisioner: storageClassProvisioner,
|
||||
},
|
||||
|
||||
service: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: provisionerName,
|
||||
},
|
||||
name: serviceName,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "nfs",
|
||||
port: 2049,
|
||||
},
|
||||
{
|
||||
name: "mountd",
|
||||
port: 20048,
|
||||
},
|
||||
{
|
||||
name: "rpcbind",
|
||||
port: 111,
|
||||
},
|
||||
{
|
||||
name: "rpcbind-udp",
|
||||
port: 111,
|
||||
protocol: "UDP",
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: provisionerName,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
provisioner: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: provisionerName,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
strategy: {
|
||||
type: "Recreate",
|
||||
},
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: provisionerName,
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
args: [
|
||||
"-provisioner=" + storageClassProvisioner,
|
||||
],
|
||||
env: [
|
||||
{
|
||||
name: "POD_IP",
|
||||
valueFrom: {
|
||||
fieldRef: {
|
||||
fieldPath: "status.podIP",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "SERVICE_NAME",
|
||||
value: serviceName,
|
||||
},
|
||||
{
|
||||
name: "POD_NAMESPACE",
|
||||
valueFrom: {
|
||||
fieldRef: {
|
||||
fieldPath: "metadata.namespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
image: "quay.io/kubernetes_incubator/nfs-provisioner:v1.0.8",
|
||||
imagePullPolicy: "IfNotPresent",
|
||||
name: "nfs-provisioner",
|
||||
ports: [
|
||||
{
|
||||
containerPort: 2049,
|
||||
name: "nfs",
|
||||
},
|
||||
{
|
||||
containerPort: 20048,
|
||||
name: "mountd",
|
||||
},
|
||||
{
|
||||
containerPort: 111,
|
||||
name: "rpcbind",
|
||||
},
|
||||
{
|
||||
containerPort: 111,
|
||||
name: "rpcbind-udp",
|
||||
protocol: "UDP",
|
||||
},
|
||||
],
|
||||
securityContext: {
|
||||
capabilities: {
|
||||
add: [
|
||||
"DAC_READ_SEARCH",
|
||||
],
|
||||
},
|
||||
},
|
||||
volumeMounts: [{
|
||||
// Needs to be mounted under /export because /export is what is exported for NFS.
|
||||
// https://github.com/kubernetes-incubator/external-storage/tree/master/nfs#quickstart
|
||||
mountPath: "/export",
|
||||
name: diskName,
|
||||
}],
|
||||
},
|
||||
],
|
||||
volumes: [{
|
||||
name: diskName,
|
||||
gcePersistentDisk: {
|
||||
pdName: diskName,
|
||||
},
|
||||
}],
|
||||
serviceAccountName: serviceAccountName,
|
||||
},
|
||||
},
|
||||
},
|
||||
}, // provisioner
|
||||
},
|
||||
|
||||
serviceAccount: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: name + "nfs-provisioner",
|
||||
},
|
||||
name: serviceAccountName,
|
||||
namespace: namespace,
|
||||
},
|
||||
},
|
||||
|
||||
role: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "Role",
|
||||
metadata: {
|
||||
name: serviceAccountRoleName,
|
||||
namespace: namespace,
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"*",
|
||||
],
|
||||
// TODO(jlewi): This is very permissive so we may want to lock this down.
|
||||
resources: [
|
||||
"*",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
roleBinding: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "RoleBinding",
|
||||
metadata: {
|
||||
name: name + "-nfs-role",
|
||||
namespace: namespace,
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "Role",
|
||||
name: serviceAccountName,
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: serviceAccountRoleName,
|
||||
namespace: namespace,
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
// see https://github.com/kubernetes-incubator/external-storage/tree/master/docs#authorizing-provisioners-for-rbac-or-openshift
|
||||
clusterRoleBinding: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
name: name + "-nfs-role",
|
||||
namespace: namespace,
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "system:persistent-volume-provisioner",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: serviceAccountRoleName,
|
||||
namespace: namespace,
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
}, // parts
|
||||
}
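
The NFS provisioner above expects the `disks` parameter to name pre-existing GCE persistent disks in the same zone as the cluster; each disk gets its own storage class, claim, service, and provisioner deployment. A minimal sketch of preparing one disk and handing it to the kubeflow-core component (disk name and zone are illustrative):

```shell
# Create a GCE persistent disk in the cluster's zone, then let the
# kubeflow-core component manage it over NFS.
gcloud compute disks create my-kubeflow-disk --size=200GB --zone=us-central1-a
ks param set core disks my-kubeflow-disk
ks apply default -c core
```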

@@ -0,0 +1,35 @@
{
  "name": "core",
  "apiVersion": "0.0.1",
  "kind": "ksonnet.io/parts",
  "description": "Core components of Kubeflow.\n",
  "author": "kubeflow team <kubeflow-team@google.com>",
  "contributors": [
    {
      "name": "Jeremy Lewi",
      "email": "jlewi@google.com"
    }
  ],
  "repository": {
    "type": "git",
    "url": "https://github.com/kubeflow/kubeflow"
  },
  "bugs": {
    "url": "https://github.com/kubeflow/kubeflow/issues"
  },
  "keywords": [
    "kubeflow",
    "tensorflow"
  ],
  "quickStart": {
    "prototype": "io.ksonnet.pkg.kubeflow",
    "componentName": "core",
    "flags": {
      "name": "core",
      "namespace": "default",
      "disks": ""
    },
    "comment": "Core Kubeflow components."
  },
  "license": "Apache 2.0"
}

29  github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/prototypes/all.jsonnet  (vendored, new file)

@@ -0,0 +1,29 @@
// @apiVersion 0.1
// @name io.ksonnet.pkg.kubeflow-core
// @description Kubeflow core components
// @shortDescription Kubeflow core components. This currently includes JupyterHub and the TfJob controller.
// @param name string Name to give to each of the components
// @optionalParam namespace string null Namespace to use for the components. It is automatically inherited from the environment if not set.
// @optionalParam disks string null Comma separated list of Google persistent disks to attach to jupyter environments.
// @optionalParam cloud string null String identifying the cloud to customize the deployment for.
// @optionalParam tfAmbassadorServiceType string ClusterIP The service type for the API Gateway.
// @optionalParam tfJobImage string gcr.io/kubeflow-images-staging/tf_operator:v20180329-a7511ff The image for the TfJob controller.
// @optionalParam tfDefaultImage string null The default image to use for TensorFlow.
// @optionalParam tfJobUiServiceType string ClusterIP The service type for the UI.
// @optionalParam jupyterHubServiceType string ClusterIP The service type for Jupyterhub.
// @optionalParam jupyterHubImage string gcr.io/kubeflow/jupyterhub-k8s:1.0.1 The image to use for JupyterHub.
// @optionalParam jupyterHubAuthenticator string null The authenticator to use
// @optionalParam jupyterNotebookPVCMount string null Mount path for PVC. Set empty to disable PVC
// @optionalParam reportUsage string false Whether or not to report Kubeflow usage to kubeflow.org.
// @optionalParam usageId string unknown_cluster Optional id to use when reporting usage to kubeflow.org

local k = import "k.libsonnet";
local all = import "kubeflow/core/all.libsonnet";

// updatedParams uses the environment namespace if
// the namespace parameter is not explicitly set
local updatedParams = params {
  namespace: if params.namespace == "null" then env.namespace else params.namespace
};

std.prune(k.core.v1.list.new(all.parts(updatedParams).all))
||||
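The namespace-defaulting pattern used by this prototype is easy to check in isolation. The following is a minimal sketch, not part of the vendored package: the `params` and `env` objects here are stand-ins for the ones ksonnet injects when it expands the prototype, and the namespace value is made up for illustration.

```
// Minimal sketch of the "null" namespace fallback used by the prototypes.
// params and env below are stand-ins for the objects ksonnet injects.
local params = {
  name: "core",
  namespace: "null",  // the optionalParam default, i.e. "not set by the user"
};
local env = {
  namespace: "kubeflow-test",
};

local updatedParams = params {
  namespace: if params.namespace == "null" then env.namespace else params.namespace,
};

updatedParams.namespace  // evaluates to "kubeflow-test"
```

Keeping `"null"` as a sentinel string (rather than a real null) is what lets the prototype distinguish "not set" from an explicitly chosen namespace.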
26 github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/prototypes/cert-manager.jsonnet vendored Normal file
@@ -0,0 +1,26 @@
// @apiVersion 0.1
// @name io.ksonnet.pkg.cert-manager
// @description Provides cert-manager prototypes for generating SSL certificates.
// @shortDescription Certificate generation on GKE.
// @param name string Name for the component
// @optionalParam namespace string null Namespace to use for the components. It is automatically inherited from the environment if not set.
// @param acmeEmail string The Lets Encrypt account email address
// @optionalParam acmeUrl string https://acme-v01.api.letsencrypt.org/directory The ACME server URL, set to https://acme-staging.api.letsencrypt.org/directory for staging API.

// TODO(https://github.com/ksonnet/ksonnet/issues/222): We have to add namespace as an explicit parameter
// because ksonnet doesn't support inheriting it from the environment yet.

local k = import "k.libsonnet";
local certManager = import "kubeflow/core/cert-manager.libsonnet";

local name = import "param://name";
local acmeEmail = import "param://acmeEmail";
local acmeUrl = import "param://acmeUrl";

// updatedParams uses the environment namespace if
// the namespace parameter is not explicitly set
local updatedParams = params {
  namespace: if params.namespace == "null" then env.namespace else params.namespace
};

certManager.parts(updatedParams.namespace).certManagerParts(acmeEmail, acmeUrl)
36 github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/prototypes/iap-ingress.jsonnet vendored Normal file
@@ -0,0 +1,36 @@
// @apiVersion 0.1
// @name io.ksonnet.pkg.iap-ingress
// @description Provides ingress prototypes for setting up IAP on GKE.
// @shortDescription Ingress for IAP on GKE.
// @param name string Name for the component
// @param ipName string The name of the global ip address to use.
// @optionalParam namespace string null Namespace to use for the components. It is automatically inherited from the environment if not set.
// @optionalParam secretName string envoy-ingress-tls The name of the secret containing the SSL certificates.
// @optionalParam hostname string null The hostname associated with this ingress. Eg: mykubeflow.example.com
// @optionalParam issuer string letsencrypt-prod The cert-manager issuer name.
// @optionalParam envoyImage string gcr.io/kubeflow-images-staging/envoy:v20180309-0fb4886b463698702b6a08955045731903a18738 The image for envoy.
// @optionalParam disableJwtChecking string false Disable JWT checking.
// @optionalParam oauthSecretName string kubeflow-oauth The name of the secret containing the OAuth CLIENT_ID and CLIENT_SECRET.

local k = import "k.libsonnet";
local iap = import "kubeflow/core/iap.libsonnet";
local util = import "kubeflow/core/util.libsonnet";

// updatedParams uses the environment namespace if
// the namespace parameter is not explicitly set
local updatedParams = params {
  namespace: if params.namespace == "null" then env.namespace else params.namespace
};

local name = import "param://name";
local namespace = updatedParams.namespace;
local secretName = import "param://secretName";
local ipName = import "param://ipName";
local hostname = import "param://hostname";
local issuer = import "param://issuer";
local envoyImage = import "param://envoyImage";
local disableJwtCheckingParam = import "param://disableJwtChecking";
local disableJwtChecking = util.toBool(disableJwtCheckingParam);
local oauthSecretName = import "param://oauthSecretName";

iap.parts(namespace).ingressParts(secretName, ipName, hostname, issuer, envoyImage, disableJwtChecking, oauthSecretName)
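Because every prototype parameter arrives as a string, boolean-style flags such as `disableJwtChecking` are converted with `util.toBool` before use. A small sketch of that conversion, assuming the vendored `kubeflow/core` package is on the jsonnet search path (e.g. `jsonnet -J vendor`):

```
// Sketch of the string-to-bool conversion applied to flags like disableJwtChecking.
// Assumes kubeflow/core/util.libsonnet resolves via the -J search path.
local util = import "kubeflow/core/util.libsonnet";

{
  fromDefault: util.toBool("false"),  // the optionalParam default -> false
  fromUser: util.toBool("True"),      // comparison is case-insensitive -> true
}
```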
113 github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/spartakus.libsonnet vendored Normal file
@ -0,0 +1,113 @@
|
|||
{
|
||||
local util = import "kubeflow/core/util.libsonnet",
|
||||
|
||||
all(params):: {
|
||||
local reportUsageBool = util.toBool(params.reportUsage),
|
||||
result:: if reportUsageBool then
|
||||
[
|
||||
$.parts(params.namespace).role,
|
||||
$.parts(params.namespace).roleBinding,
|
||||
$.parts(params.namespace).serviceAccount,
|
||||
$.parts(params.namespace).deployment(params.usageId),
|
||||
]
|
||||
else [],
|
||||
}.result,
|
||||
|
||||
parts(namespace):: {
|
||||
|
||||
// Spartakus needs to be able to get information about the cluster in order to create a report.
|
||||
role: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus",
|
||||
},
|
||||
name: "spartakus",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"nodes",
|
||||
],
|
||||
verbs: [
|
||||
"get",
|
||||
"list",
|
||||
],
|
||||
},
|
||||
],
|
||||
}, // role
|
||||
|
||||
roleBinding:: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus",
|
||||
},
|
||||
name: "spartakus",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "spartakus",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "spartakus",
|
||||
namespace: namespace,
|
||||
},
|
||||
],
|
||||
}, // roleBinding
|
||||
|
||||
|
||||
serviceAccount: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus",
|
||||
},
|
||||
name: "spartakus",
|
||||
namespace: namespace,
|
||||
},
|
||||
},
|
||||
|
||||
deployment(usageId):: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "spartakus-volunteer",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "spartakus-volunteer",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
image: "gcr.io/google_containers/spartakus-amd64:v1.0.0",
|
||||
name: "volunteer",
|
||||
args: [
|
||||
"volunteer",
|
||||
"--cluster-id=" + usageId,
|
||||
"--database=https://stats-collector.kubeflow.org",
|
||||
],
|
||||
},
|
||||
],
|
||||
serviceAccountName: "spartakus",
|
||||
}, // spec
|
||||
},
|
||||
},
|
||||
}, // deployment
|
||||
},
|
||||
}
|
||||
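The `result` pattern above means the spartakus component disappears entirely when usage reporting is turned off. A short sketch of both cases, assuming the vendored `kubeflow/core` package is on the jsonnet search path; the namespace and usageId values are placeholders.

```
// Sketch: spartakus parts are emitted only when reportUsage is truthy.
// Assumes kubeflow/core/spartakus.libsonnet resolves via the -J search path.
local spartakus = import "kubeflow/core/spartakus.libsonnet";

{
  // "false" (the prototype default) -> empty list, nothing is deployed.
  optedOut: spartakus.all({ namespace: "kubeflow", reportUsage: "false", usageId: "unknown_cluster" }),

  // "true" -> cluster role, binding, service account and deployment (4 objects).
  optedInCount: std.length(spartakus.all({ namespace: "kubeflow", reportUsage: "true", usageId: "my-cluster" })),
}
```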
250 github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/tests/ambassador_test.jsonnet vendored Normal file
@ -0,0 +1,250 @@
|
|||
local ambassador = import "../ambassador.libsonnet";
|
||||
local params = {
|
||||
namespace:: "test-kf-001",
|
||||
tfAmbassadorServiceType:: "ClusterIP",
|
||||
};
|
||||
|
||||
std.assertEqual(
|
||||
ambassador.parts(params.namespace).service(params.tfAmbassadorServiceType),
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Service",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"service": "ambassador"
|
||||
},
|
||||
"name": "ambassador",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"spec": {
|
||||
"ports": [
|
||||
{
|
||||
"name": "ambassador",
|
||||
"port": 80,
|
||||
"targetPort": 80
|
||||
}
|
||||
],
|
||||
"selector": {
|
||||
"service": "ambassador"
|
||||
},
|
||||
"type": "ClusterIP"
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
ambassador.parts(params.namespace).adminService,
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Service",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"service": "ambassador-admin"
|
||||
},
|
||||
"name": "ambassador-admin",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"spec": {
|
||||
"ports": [
|
||||
{
|
||||
"name": "ambassador-admin",
|
||||
"port": 8877,
|
||||
"targetPort": 8877
|
||||
}
|
||||
],
|
||||
"selector": {
|
||||
"service": "ambassador"
|
||||
},
|
||||
"type": "ClusterIP"
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
ambassador.parts(params.namespace).role,
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
|
||||
"kind": "Role",
|
||||
"metadata": {
|
||||
"name": "ambassador",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"rules": [
|
||||
{
|
||||
"apiGroups": [
|
||||
""
|
||||
],
|
||||
"resources": [
|
||||
"services"
|
||||
],
|
||||
"verbs": [
|
||||
"get",
|
||||
"list",
|
||||
"watch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"apiGroups": [
|
||||
""
|
||||
],
|
||||
"resources": [
|
||||
"configmaps"
|
||||
],
|
||||
"verbs": [
|
||||
"create",
|
||||
"update",
|
||||
"patch",
|
||||
"get",
|
||||
"list",
|
||||
"watch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"apiGroups": [
|
||||
""
|
||||
],
|
||||
"resources": [
|
||||
"secrets"
|
||||
],
|
||||
"verbs": [
|
||||
"get",
|
||||
"list",
|
||||
"watch"
|
||||
]
|
||||
}
|
||||
]
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
ambassador.parts(params.namespace).serviceAccount,
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ServiceAccount",
|
||||
"metadata": {
|
||||
"name": "ambassador",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
ambassador.parts(params.namespace).roleBinding,
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
|
||||
"kind": "RoleBinding",
|
||||
"metadata": {
|
||||
"name": "ambassador",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"roleRef": {
|
||||
"apiGroup": "rbac.authorization.k8s.io",
|
||||
"kind": "Role",
|
||||
"name": "ambassador"
|
||||
},
|
||||
"subjects": [
|
||||
{
|
||||
"kind": "ServiceAccount",
|
||||
"name": "ambassador",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
]
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
ambassador.parts(params.namespace).deploy,
|
||||
{
|
||||
"apiVersion": "extensions/v1beta1",
|
||||
"kind": "Deployment",
|
||||
"metadata": {
|
||||
"name": "ambassador",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"spec": {
|
||||
"replicas": 3,
|
||||
"template": {
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"service": "ambassador"
|
||||
},
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"spec": {
|
||||
"containers": [
|
||||
{
|
||||
"env": [
|
||||
{
|
||||
"name": "AMBASSADOR_NAMESPACE",
|
||||
"valueFrom": {
|
||||
"fieldRef": {
|
||||
"fieldPath": "metadata.namespace"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "AMBASSADOR_SINGLE_NAMESPACE",
|
||||
"value": "true"
|
||||
}
|
||||
],
|
||||
"image": "quay.io/datawire/ambassador:0.30.1",
|
||||
"livenessProbe": {
|
||||
"httpGet": {
|
||||
"path": "/ambassador/v0/check_alive",
|
||||
"port": 8877
|
||||
},
|
||||
"initialDelaySeconds": 30,
|
||||
"periodSeconds": 30
|
||||
},
|
||||
"name": "ambassador",
|
||||
"readinessProbe": {
|
||||
"httpGet": {
|
||||
"path": "/ambassador/v0/check_ready",
|
||||
"port": 8877
|
||||
},
|
||||
"initialDelaySeconds": 30,
|
||||
"periodSeconds": 30
|
||||
},
|
||||
"resources": {
|
||||
"limits": {
|
||||
"cpu": 1,
|
||||
"memory": "400Mi"
|
||||
},
|
||||
"requests": {
|
||||
"cpu": "200m",
|
||||
"memory": "100Mi"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"image": "quay.io/datawire/statsd:0.30.1",
|
||||
"name": "statsd"
|
||||
}
|
||||
],
|
||||
"restartPolicy": "Always",
|
||||
"serviceAccountName": "ambassador"
|
||||
}
|
||||
}
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
ambassador.parts(params.namespace).k8sDashboard("cloud"),
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Service",
|
||||
"metadata": {
|
||||
"annotations": {
|
||||
"getambassador.io/config": "---\napiVersion: ambassador/v0\nkind: Mapping\nname: k8s-dashboard-ui-mapping\nprefix: /k8s/ui/\nrewrite: /\ntls: true\nservice: kubernetes-dashboard.kube-system"
|
||||
},
|
||||
"name": "k8s-dashboard",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"spec": {
|
||||
"ports": [
|
||||
{
|
||||
"port": 443,
|
||||
"targetPort": 8443
|
||||
}
|
||||
],
|
||||
"selector": {
|
||||
"k8s-app": "kubernetes-dashboard"
|
||||
},
|
||||
"type": "ClusterIP"
|
||||
}
|
||||
})
|
||||
203 github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/tests/iap_test.jsonnet vendored Normal file
@ -0,0 +1,203 @@
|
|||
local iap = import "../iap.libsonnet";
|
||||
|
||||
std.assertEqual(iap.parts("namespace").service, {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
service: "envoy",
|
||||
},
|
||||
name: "envoy",
|
||||
namespace: "namespace",
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
name: "envoy",
|
||||
port: 8080,
|
||||
targetPort: 8080,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
service: "envoy",
|
||||
},
|
||||
type: "NodePort",
|
||||
},
|
||||
}) &&
|
||||
|
||||
std.assertEqual(iap.parts("namespace").ingress("secretName", "ipName", "hostname"), {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Ingress",
|
||||
metadata: {
|
||||
name: "envoy-ingress",
|
||||
namespace: "namespace",
|
||||
annotations: {
|
||||
"kubernetes.io/tls-acme": "true",
|
||||
"ingress.kubernetes.io/ssl-redirect": "true",
|
||||
"kubernetes.io/ingress.global-static-ip-name": "ipName",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
rules: [
|
||||
{
|
||||
host: "hostname",
|
||||
http: {
|
||||
paths: [
|
||||
{
|
||||
backend: {
|
||||
serviceName: "envoy",
|
||||
servicePort: 8080,
|
||||
},
|
||||
path: "/*",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
],
|
||||
tls: [
|
||||
{
|
||||
secretName: "secretName",
|
||||
},
|
||||
],
|
||||
},
|
||||
}) &&
|
||||
|
||||
std.assertEqual(iap.parts("namespace").ingress("secretName", "ipName", "null"), {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Ingress",
|
||||
metadata: {
|
||||
name: "envoy-ingress",
|
||||
namespace: "namespace",
|
||||
annotations: {
|
||||
"kubernetes.io/tls-acme": "true",
|
||||
"ingress.kubernetes.io/ssl-redirect": "true",
|
||||
"kubernetes.io/ingress.global-static-ip-name": "ipName",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
rules: [
|
||||
{
|
||||
http: {
|
||||
paths: [
|
||||
{
|
||||
backend: {
|
||||
serviceName: "envoy",
|
||||
servicePort: 8080,
|
||||
},
|
||||
path: "/*",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
],
|
||||
tls: [
|
||||
{
|
||||
secretName: "secretName",
|
||||
},
|
||||
],
|
||||
},
|
||||
}) &&
|
||||
|
||||
std.assertEqual(iap.parts("namespace").certificate("secretName", "hostname", "issuer"), {
|
||||
apiVersion: "certmanager.k8s.io/v1alpha1",
|
||||
kind: "Certificate",
|
||||
metadata: {
|
||||
name: "secretName",
|
||||
namespace: "namespace",
|
||||
},
|
||||
spec: {
|
||||
secretName: "secretName",
|
||||
issuerRef: {
|
||||
name: "issuer",
|
||||
},
|
||||
commonName: "hostname",
|
||||
dnsNames: [
|
||||
"hostname",
|
||||
],
|
||||
acme: {
|
||||
config: [
|
||||
{
|
||||
http01: {
|
||||
ingress: "envoy-ingress",
|
||||
},
|
||||
domains: [
|
||||
"hostname",
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
}) &&
|
||||
|
||||
std.assertEqual(iap.parts("namespace").whoamiApp, {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "whoami-app",
|
||||
namespace: "namespace",
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "whoami",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
env: [
|
||||
{
|
||||
name: "PORT",
|
||||
value: "8081",
|
||||
},
|
||||
],
|
||||
image: "gcr.io/cloud-solutions-group/esp-sample-app:1.0.0",
|
||||
name: "app",
|
||||
ports: [
|
||||
{
|
||||
containerPort: 8081,
|
||||
},
|
||||
],
|
||||
readinessProbe: {
|
||||
failureThreshold: 2,
|
||||
httpGet: {
|
||||
path: "/healthz",
|
||||
port: 8081,
|
||||
scheme: "HTTP",
|
||||
},
|
||||
periodSeconds: 10,
|
||||
successThreshold: 1,
|
||||
timeoutSeconds: 5,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
}) &&
|
||||
|
||||
std.assertEqual(iap.parts("namespace").whoamiService, {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "whoami",
|
||||
},
|
||||
name: "whoami-app",
|
||||
namespace: "namespace",
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
port: 80,
|
||||
targetPort: 8081,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
app: "whoami",
|
||||
},
|
||||
type: "ClusterIP",
|
||||
},
|
||||
})
|
||||
224 github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/tests/jupyterhub_test.jsonnet vendored Normal file
@ -0,0 +1,224 @@
|
|||
local jupyterhub = import "../jupyterhub.libsonnet";
|
||||
local params = {
|
||||
namespace:: "test-kf-001",
|
||||
disks:: "disk01,disk02",
|
||||
jupyterHubAuthenticator:: null,
|
||||
jupyterHubServiceType:: "ClusterIP",
|
||||
jupyterHubImage: "gcr.io/kubeflow/jupyterhub-k8s:1.0.1",
|
||||
};
|
||||
|
||||
local baseSpawner = importstr "../jupyterhub_spawner.py";
|
||||
|
||||
// TODO(jlewi): We should be able to use std.startsWith in later versions of jsonnet.
|
||||
//
|
||||
local config = jupyterhub.parts(params.namespace).jupyterHubConfigMap(params.jupyterHubAuthenticator, params.disks)["data"]["jupyterhub_config.py"];
|
||||
local configPrefix = std.substr(config, 0, std.length(baseSpawner));
|
||||
local configSuffix = std.substr(config, std.length(baseSpawner), std.length(config) - std.length(baseSpawner));
|
||||
local configSuffixLines = std.split(configSuffix, "\n");
|
||||
|
||||
// This assertion verifies the config map is the same after zeroing the actual data.
|
||||
// The data will be compared in subsequent steps.
|
||||
std.assertEqual(jupyterhub.parts(params.namespace).jupyterHubConfigMap(params.jupyterHubAuthenticator, params.disks) + {
|
||||
"data": {
|
||||
"jupyterhub_config.py": "",
|
||||
},
|
||||
}
|
||||
, {
|
||||
"apiVersion": "v1",
|
||||
"data": {
|
||||
"jupyterhub_config.py": "",
|
||||
},
|
||||
"kind": "ConfigMap",
|
||||
"metadata": {
|
||||
"name": "jupyterhub-config",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
}) &&
|
||||
|
||||
// This step verifies that the start of the spawner config is the raw file.
|
||||
std.assertEqual(configPrefix, baseSpawner)
|
||||
|
||||
&&
|
||||
|
||||
// These steps verify the suffix.
|
||||
// Verifying each line makes it much easier to debug test failures because if you just compare to a big blob
|
||||
// of text it's much harder to know where they differ.
|
||||
std.assertEqual(configSuffixLines[1], "######## Authenticator ######")
|
||||
&&
|
||||
std.assertEqual(configSuffixLines[2], "c.JupyterHub.authenticator_class = 'dummyauthenticator.DummyAuthenticator'")
|
||||
&&
|
||||
std.assertEqual(configSuffixLines[3], "###### Volumes #######")
|
||||
&&
|
||||
std.assertEqual(configSuffixLines[4], 'c.KubeSpawner.volumes = [{"name": "disk01", "persistentVolumeClaim": {"claimName": "disk01"}}, {"name": "disk02", "persistentVolumeClaim": {"claimName": "disk02"}}]')
|
||||
&&
|
||||
std.assertEqual(configSuffixLines[5], 'c.KubeSpawner.volume_mounts = [{"mountPath": "/mnt/disk01", "name": "disk01"}, {"mountPath": "/mnt/disk02", "name": "disk02"}]')
|
||||
&&
|
||||
|
||||
std.assertEqual(jupyterhub.parts(params.namespace).jupyterHubService,
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Service",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "tf-hub"
|
||||
},
|
||||
"name": "tf-hub-0",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"spec": {
|
||||
"clusterIP": "None",
|
||||
"ports": [
|
||||
{
|
||||
"name": "hub",
|
||||
"port": 8000
|
||||
}
|
||||
],
|
||||
"selector": {
|
||||
"app": "tf-hub"
|
||||
}
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(jupyterhub.parts(params.namespace).jupyterHubLoadBalancer(params.jupyterHubServiceType),
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Service",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "tf-hub-lb"
|
||||
},
|
||||
"name": "tf-hub-lb",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"spec": {
|
||||
"ports": [
|
||||
{
|
||||
"name": "hub",
|
||||
"port": 80,
|
||||
"targetPort": 8000
|
||||
}
|
||||
],
|
||||
"selector": {
|
||||
"app": "tf-hub"
|
||||
},
|
||||
"type": "ClusterIP"
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(jupyterhub.parts(params.namespace).jupyterHub(params.jupyterHubImage),
|
||||
{
|
||||
"apiVersion": "apps/v1beta1",
|
||||
"kind": "StatefulSet",
|
||||
"metadata": {
|
||||
"name": "tf-hub",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"spec": {
|
||||
"replicas": 1,
|
||||
"serviceName": "",
|
||||
"template": {
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "tf-hub"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"containers": [
|
||||
{
|
||||
"command": [
|
||||
"jupyterhub",
|
||||
"-f",
|
||||
"/etc/config/jupyterhub_config.py"
|
||||
],
|
||||
"image": "gcr.io/kubeflow/jupyterhub-k8s:1.0.1",
|
||||
"name": "tf-hub",
|
||||
"ports": [
|
||||
{
|
||||
"containerPort": 8000
|
||||
},
|
||||
{
|
||||
"containerPort": 8081
|
||||
}
|
||||
],
|
||||
"volumeMounts": [
|
||||
{
|
||||
"mountPath": "/etc/config",
|
||||
"name": "config-volume"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"serviceAccountName": "jupyter-hub",
|
||||
"volumes": [
|
||||
{
|
||||
"configMap": {
|
||||
"name": "jupyterhub-config"
|
||||
},
|
||||
"name": "config-volume"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"updateStrategy": {
|
||||
"type": "RollingUpdate"
|
||||
}
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(jupyterhub.parts(params.namespace).jupyterHubRole,
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
|
||||
"kind": "Role",
|
||||
"metadata": {
|
||||
"name": "jupyter-role",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"rules": [
|
||||
{
|
||||
"apiGroups": [
|
||||
"*"
|
||||
],
|
||||
"resources": [
|
||||
"*"
|
||||
],
|
||||
"verbs": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}) &&
|
||||
|
||||
std.assertEqual(jupyterhub.parts(params.namespace).jupyterHubServiceAccount,
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ServiceAccount",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "jupyter-hub"
|
||||
},
|
||||
"name": "jupyter-hub",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(jupyterhub.parts(params.namespace).jupyterHubRoleBinding,
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
|
||||
"kind": "RoleBinding",
|
||||
"metadata": {
|
||||
"name": "jupyter-role",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"roleRef": {
|
||||
"apiGroup": "rbac.authorization.k8s.io",
|
||||
"kind": "Role",
|
||||
"name": "jupyter-role"
|
||||
},
|
||||
"subjects": [
|
||||
{
|
||||
"kind": "ServiceAccount",
|
||||
"name": "jupyter-hub",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
]
|
||||
})
|
||||
89 github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/tests/nfs_test.jsonnet vendored Normal file
@ -0,0 +1,89 @@
|
|||
local nfs = import "../nfs.libsonnet";
|
||||
local params = {
|
||||
namespace:: "test-kf-001",
|
||||
name:: "nfs",
|
||||
};
|
||||
|
||||
std.assertEqual(
|
||||
nfs.parts(params.namespace, params.name).serviceAccount,
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ServiceAccount",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "nfsnfs-provisioner"
|
||||
},
|
||||
"name": "nfs",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
nfs.parts(params.namespace, params.name).role,
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
|
||||
"kind": "Role",
|
||||
"metadata": {
|
||||
"name": "nfs",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"rules": [
|
||||
{
|
||||
"apiGroups": [
|
||||
"*"
|
||||
],
|
||||
"resources": [
|
||||
"*"
|
||||
],
|
||||
"verbs": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
nfs.parts(params.namespace, params.name).roleBinding,
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
|
||||
"kind": "RoleBinding",
|
||||
"metadata": {
|
||||
"name": "nfs-nfs-role",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"roleRef": {
|
||||
"apiGroup": "rbac.authorization.k8s.io",
|
||||
"kind": "Role",
|
||||
"name": "nfs"
|
||||
},
|
||||
"subjects": [
|
||||
{
|
||||
"kind": "ServiceAccount",
|
||||
"name": "nfs",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
]
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
nfs.parts(params.namespace, params.name).clusterRoleBinding,
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
|
||||
"kind": "ClusterRoleBinding",
|
||||
"metadata": {
|
||||
"name": "nfs-nfs-role",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"roleRef": {
|
||||
"apiGroup": "rbac.authorization.k8s.io",
|
||||
"kind": "ClusterRole",
|
||||
"name": "system:persistent-volume-provisioner"
|
||||
},
|
||||
"subjects": [
|
||||
{
|
||||
"kind": "ServiceAccount",
|
||||
"name": "nfs",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
]
|
||||
})
|
||||
106 github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/tests/spartakus_test.jsonnet vendored Normal file
@ -0,0 +1,106 @@
|
|||
local spartakus = import "../spartakus.libsonnet";
|
||||
local params = {
|
||||
namespace:: "test-kf-001",
|
||||
usageId:: "unknown_cluster",
|
||||
};
|
||||
|
||||
std.assertEqual(
|
||||
spartakus.parts(params.namespace).role,
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
|
||||
"kind": "ClusterRole",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "spartakus"
|
||||
},
|
||||
"name": "spartakus"
|
||||
},
|
||||
"rules": [
|
||||
{
|
||||
"apiGroups": [
|
||||
""
|
||||
],
|
||||
"resources": [
|
||||
"nodes"
|
||||
],
|
||||
"verbs": [
|
||||
"get",
|
||||
"list"
|
||||
]
|
||||
}
|
||||
]
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
spartakus.parts(params.namespace).roleBinding,
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
|
||||
"kind": "ClusterRoleBinding",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "spartakus"
|
||||
},
|
||||
"name": "spartakus"
|
||||
},
|
||||
"roleRef": {
|
||||
"apiGroup": "rbac.authorization.k8s.io",
|
||||
"kind": "ClusterRole",
|
||||
"name": "spartakus"
|
||||
},
|
||||
"subjects": [
|
||||
{
|
||||
"kind": "ServiceAccount",
|
||||
"name": "spartakus",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
]
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
spartakus.parts(params.namespace).serviceAccount,
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ServiceAccount",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "spartakus"
|
||||
},
|
||||
"name": "spartakus",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
spartakus.parts(params.namespace).deployment(params.usageId),
|
||||
{
|
||||
"apiVersion": "extensions/v1beta1",
|
||||
"kind": "Deployment",
|
||||
"metadata": {
|
||||
"name": "spartakus-volunteer",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"spec": {
|
||||
"replicas": 1,
|
||||
"template": {
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "spartakus-volunteer"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"containers": [
|
||||
{
|
||||
"args": [
|
||||
"volunteer",
|
||||
"--cluster-id=unknown_cluster",
|
||||
"--database=https://stats-collector.kubeflow.org"
|
||||
],
|
||||
"image": "gcr.io/google_containers/spartakus-amd64:v1.0.0",
|
||||
"name": "volunteer"
|
||||
}
|
||||
],
|
||||
"serviceAccountName": "spartakus"
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
235 github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/tests/tf-job_test.jsonnet vendored Normal file
@ -0,0 +1,235 @@
|
|||
local tfjob = import "../tf-job.libsonnet";
|
||||
local params = {
|
||||
namespace:: "test-kf-001",
|
||||
cloud:: "azure",
|
||||
tfJobImage:: "gcr.io/kubeflow-images-staging/tf_operator:v20180226-403",
|
||||
tfDefaultImage:: "null",
|
||||
};
|
||||
|
||||
std.assertEqual(
|
||||
tfjob.parts(params.namespace).tfJobDeploy(params.tfJobImage),
|
||||
{
|
||||
"apiVersion": "extensions/v1beta1",
|
||||
"kind": "Deployment",
|
||||
"metadata": {
|
||||
"name": "tf-job-operator",
|
||||
"namespace": "test-kf-001"
|
||||
},
|
||||
"spec": {
|
||||
"replicas": 1,
|
||||
"template": {
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"name": "tf-job-operator"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"containers": [
|
||||
{
|
||||
"command": [
|
||||
"/opt/mlkube/tf-operator",
|
||||
"--controller-config-file=/etc/config/controller_config_file.yaml",
|
||||
"--alsologtostderr",
|
||||
"-v=1"
|
||||
],
|
||||
"env": [
|
||||
{
|
||||
"name": "MY_POD_NAMESPACE",
|
||||
"valueFrom": {
|
||||
"fieldRef": {
|
||||
"fieldPath": "metadata.namespace"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "MY_POD_NAME",
|
||||
"valueFrom": {
|
||||
"fieldRef": {
|
||||
"fieldPath": "metadata.name"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"image": "gcr.io/kubeflow-images-staging/tf_operator:v20180226-403",
|
||||
"name": "tf-job-operator",
|
||||
"volumeMounts": [
|
||||
{
|
||||
"mountPath": "/etc/config",
|
||||
"name": "config-volume"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"serviceAccountName": "tf-job-operator",
|
||||
"volumes": [
|
||||
{
|
||||
"configMap": {
|
||||
"name": "tf-job-operator-config"
|
||||
},
|
||||
"name": "config-volume"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
tfjob.parts(params.namespace).configMap(params.cloud, params.tfDefaultImage),
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"data": {
|
||||
"controller_config_file.yaml": "{\n \"grpcServerFilePath\": \"/opt/mlkube/grpc_tensorflow_server/grpc_tensorflow_server.py\"\n}"
|
||||
},
|
||||
"kind": "ConfigMap",
|
||||
"metadata": {
|
||||
"name": "tf-job-operator-config",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
tfjob.parts(params.namespace).serviceAccount,
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ServiceAccount",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "tf-job-operator"
|
||||
},
|
||||
"name": "tf-job-operator",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
tfjob.parts(params.namespace).operatorRole,
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
|
||||
"kind": "ClusterRole",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "tf-job-operator"
|
||||
},
|
||||
"name": "tf-job-operator"
|
||||
},
|
||||
"rules": [
|
||||
{
|
||||
"apiGroups": [
|
||||
"tensorflow.org",
|
||||
"kubeflow.org"
|
||||
],
|
||||
"resources": [
|
||||
"tfjobs"
|
||||
],
|
||||
"verbs": [
|
||||
"*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"apiGroups": [
|
||||
"apiextensions.k8s.io"
|
||||
],
|
||||
"resources": [
|
||||
"customresourcedefinitions"
|
||||
],
|
||||
"verbs": [
|
||||
"*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"apiGroups": [
|
||||
"storage.k8s.io"
|
||||
],
|
||||
"resources": [
|
||||
"storageclasses"
|
||||
],
|
||||
"verbs": [
|
||||
"*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"apiGroups": [
|
||||
"batch"
|
||||
],
|
||||
"resources": [
|
||||
"jobs"
|
||||
],
|
||||
"verbs": [
|
||||
"*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"apiGroups": [
|
||||
""
|
||||
],
|
||||
"resources": [
|
||||
"configmaps",
|
||||
"pods",
|
||||
"services",
|
||||
"endpoints",
|
||||
"persistentvolumeclaims",
|
||||
"events"
|
||||
],
|
||||
"verbs": [
|
||||
"*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"apiGroups": [
|
||||
"apps",
|
||||
"extensions"
|
||||
],
|
||||
"resources": [
|
||||
"deployments"
|
||||
],
|
||||
"verbs": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
tfjob.parts(params.namespace).operatorRoleBinding,
|
||||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1beta1",
|
||||
"kind": "ClusterRoleBinding",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "tf-job-operator"
|
||||
},
|
||||
"name": "tf-job-operator"
|
||||
},
|
||||
"roleRef": {
|
||||
"apiGroup": "rbac.authorization.k8s.io",
|
||||
"kind": "ClusterRole",
|
||||
"name": "tf-job-operator"
|
||||
},
|
||||
"subjects": [
|
||||
{
|
||||
"kind": "ServiceAccount",
|
||||
"name": "tf-job-operator",
|
||||
"namespace": "test-kf-001"
|
||||
}
|
||||
]
|
||||
}) &&
|
||||
|
||||
std.assertEqual(
|
||||
tfjob.parts(params.namespace).crd,
|
||||
{
|
||||
"apiVersion": "apiextensions.k8s.io/v1beta1",
|
||||
"kind": "CustomResourceDefinition",
|
||||
"metadata": {
|
||||
"name": "tfjobs.kubeflow.org"
|
||||
},
|
||||
"spec": {
|
||||
"group": "kubeflow.org",
|
||||
"names": {
|
||||
"kind": "TFJob",
|
||||
"plural": "tfjobs",
|
||||
"singular": "tfjob"
|
||||
},
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
})
|
||||
22 github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/tests/util_test.jsonnet vendored Normal file
@@ -0,0 +1,22 @@
local util = import "../util.libsonnet";

std.assertEqual(util.upper("True"), "TRUE") &&
std.assertEqual(util.upper("TrUe"), "TRUE") &&
std.assertEqual(util.upper("true"), "TRUE") &&
std.assertEqual(util.upper("TRUE"), "TRUE") &&
std.assertEqual(util.toBool(false), false) &&
std.assertEqual(util.toBool(true), true) &&
std.assertEqual(util.toBool("true"), true) &&
std.assertEqual(util.toBool("True"), true) &&
std.assertEqual(util.toBool("TRUE"), true) &&
std.assertEqual(util.toBool("false"), false) &&
std.assertEqual(util.toBool("False"), false) &&
std.assertEqual(util.toBool("FALSE"), false) &&
std.assertEqual(util.toBool("random string"), false) &&
std.assertEqual(util.toBool(1), true) &&
std.assertEqual(util.toBool(0), false) &&
std.assertEqual(util.toBool(123), true) &&
std.assertEqual(std.length(util.toArray("a,b,c,d")), 4) &&
std.assertEqual(std.length(util.toArray(2)), 0) &&
std.assertEqual(std.length(util.toArray("hello world")), 1) &&
std.assertEqual(std.length(util.toArray([1, 2, 3, 4])), 0)
479 github_issue_summarization/ks-kubeflow/vendor/kubeflow/core/tf-job.libsonnet vendored Normal file
@ -0,0 +1,479 @@
|
|||
{
|
||||
all(params):: [
|
||||
$.parts(params.namespace).tfJobDeploy(params.tfJobImage),
|
||||
$.parts(params.namespace).configMap(params.cloud, params.tfDefaultImage),
|
||||
$.parts(params.namespace).serviceAccount,
|
||||
$.parts(params.namespace).operatorRole,
|
||||
$.parts(params.namespace).operatorRoleBinding,
|
||||
$.parts(params.namespace).crd,
|
||||
$.parts(params.namespace).uiRole,
|
||||
$.parts(params.namespace).uiRoleBinding,
|
||||
$.parts(params.namespace).uiService(params.tfJobUiServiceType),
|
||||
$.parts(params.namespace).uiServiceAccount,
|
||||
$.parts(params.namespace).ui(params.tfJobImage)
|
||||
],
|
||||
|
||||
parts(namespace):: {
|
||||
crd: {
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "tfjobs.kubeflow.org",
|
||||
},
|
||||
spec: {
|
||||
group: "kubeflow.org",
|
||||
version: "v1alpha1",
|
||||
names: {
|
||||
kind: "TFJob",
|
||||
singular: "tfjob",
|
||||
plural: "tfjobs",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
tfJobDeploy(image): {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "tf-job-operator",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
replicas: 1,
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
name: "tf-job-operator",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
command: [
|
||||
"/opt/mlkube/tf-operator",
|
||||
"--controller-config-file=/etc/config/controller_config_file.yaml",
|
||||
"--alsologtostderr",
|
||||
"-v=1",
|
||||
],
|
||||
env: [
|
||||
{
|
||||
name: "MY_POD_NAMESPACE",
|
||||
valueFrom: {
|
||||
fieldRef: {
|
||||
fieldPath: "metadata.namespace",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "MY_POD_NAME",
|
||||
valueFrom: {
|
||||
fieldRef: {
|
||||
fieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
],
|
||||
image: image,
|
||||
name: "tf-job-operator",
|
||||
volumeMounts: [
|
||||
{
|
||||
mountPath: "/etc/config",
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
serviceAccountName: "tf-job-operator",
|
||||
volumes: [
|
||||
{
|
||||
configMap: {
|
||||
name: "tf-job-operator-config",
|
||||
},
|
||||
name: "config-volume",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
}, // tfJobDeploy
|
||||
|
||||
// Default values for the controller config.
|
||||
defaultControllerConfig(tfDefaultImage):: {
|
||||
grpcServerFilePath: "/opt/mlkube/grpc_tensorflow_server/grpc_tensorflow_server.py",
|
||||
}
|
||||
+ if tfDefaultImage != "" && tfDefaultImage != "null" then
|
||||
{
|
||||
tfImage: tfDefaultImage,
|
||||
}
|
||||
else
|
||||
{},
|
||||
|
||||
aksAccelerators:: {
|
||||
accelerators: {
|
||||
"alpha.kubernetes.io/nvidia-gpu": {
|
||||
volumes: [
|
||||
{
|
||||
name: "lib",
|
||||
mountPath: "/usr/local/nvidia/lib64",
|
||||
hostPath: "/usr/lib/nvidia-384",
|
||||
},
|
||||
{
|
||||
name: "bin",
|
||||
mountPath: "/usr/local/nvidia/bin",
|
||||
hostPath: "/usr/lib/nvidia-384/bin",
|
||||
},
|
||||
{
|
||||
name: "libcuda",
|
||||
mountPath: "/usr/lib/x86_64-linux-gnu/libcuda.so.1",
|
||||
hostPath: "/usr/lib/x86_64-linux-gnu/libcuda.so.1",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
acsEngineAccelerators:: {
|
||||
accelerators: {
|
||||
"alpha.kubernetes.io/nvidia-gpu": {
|
||||
volumes: [
|
||||
{
|
||||
name: "nvidia",
|
||||
mountPath: "/usr/local/nvidia",
|
||||
hostPath: "/usr/local/nvidia",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
configData(cloud, tfDefaultImage):: self.defaultControllerConfig(tfDefaultImage) +
|
||||
if cloud == "aks" then
|
||||
self.aksAccelerators
|
||||
else if cloud == "acsengine" then
|
||||
self.acsEngineAccelerators
|
||||
else
|
||||
{},
|
||||
|
||||
configMap(cloud, tfDefaultImage): {
|
||||
apiVersion: "v1",
|
||||
data: {
|
||||
"controller_config_file.yaml": std.manifestJson($.parts(namespace).configData(cloud, tfDefaultImage)),
|
||||
},
|
||||
kind: "ConfigMap",
|
||||
metadata: {
|
||||
name: "tf-job-operator-config",
|
||||
namespace: namespace,
|
||||
},
|
||||
},
|
||||
|
||||
serviceAccount: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "tf-job-operator",
|
||||
},
|
||||
name: "tf-job-operator",
|
||||
namespace: namespace,
|
||||
},
|
||||
},
|
||||
|
||||
operatorRole: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "tf-job-operator",
|
||||
},
|
||||
name: "tf-job-operator",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"tensorflow.org",
|
||||
"kubeflow.org",
|
||||
],
|
||||
resources: [
|
||||
"tfjobs",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"apiextensions.k8s.io",
|
||||
],
|
||||
resources: [
|
||||
"customresourcedefinitions",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"storage.k8s.io",
|
||||
],
|
||||
resources: [
|
||||
"storageclasses",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"batch",
|
||||
],
|
||||
resources: [
|
||||
"jobs",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"configmaps",
|
||||
"pods",
|
||||
"services",
|
||||
"endpoints",
|
||||
"persistentvolumeclaims",
|
||||
"events",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"apps",
|
||||
"extensions",
|
||||
],
|
||||
resources: [
|
||||
"deployments",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
],
|
||||
}, // operator-role
|
||||
|
||||
operatorRoleBinding:: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "tf-job-operator",
|
||||
},
|
||||
name: "tf-job-operator",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "tf-job-operator",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "tf-job-operator",
|
||||
namespace: namespace,
|
||||
},
|
||||
],
|
||||
}, // operator-role binding
|
||||
|
||||
uiService(serviceType):: {
|
||||
apiVersion: "v1",
|
||||
kind: "Service",
|
||||
metadata: {
|
||||
name: "tf-job-dashboard",
|
||||
namespace: namespace,
|
||||
annotations: {
|
||||
"getambassador.io/config":
|
||||
std.join("\n", [
|
||||
"---",
|
||||
"apiVersion: ambassador/v0",
|
||||
"kind: Mapping",
|
||||
"name: tfjobs-ui-mapping",
|
||||
"prefix: /tfjobs/",
|
||||
"rewrite: /tfjobs/",
|
||||
"service: tf-job-dashboard." + namespace,
|
||||
]),
|
||||
}, //annotations
|
||||
},
|
||||
spec: {
|
||||
ports: [
|
||||
{
|
||||
port: 80,
|
||||
targetPort: 8080,
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
name: "tf-job-dashboard",
|
||||
},
|
||||
type: serviceType,
|
||||
},
|
||||
}, // uiService
|
||||
|
||||
uiServiceAccount: {
|
||||
apiVersion: "v1",
|
||||
kind: "ServiceAccount",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "tf-job-dashboard",
|
||||
},
|
||||
name: "tf-job-dashboard",
|
||||
namespace: namespace,
|
||||
},
|
||||
}, // uiServiceAccount
|
||||
|
||||
ui(image):: {
|
||||
apiVersion: "extensions/v1beta1",
|
||||
kind: "Deployment",
|
||||
metadata: {
|
||||
name: "tf-job-dashboard",
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
template: {
|
||||
metadata: {
|
||||
labels: {
|
||||
name: "tf-job-dashboard",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
command: [
|
||||
"/opt/tensorflow_k8s/dashboard/backend",
|
||||
],
|
||||
image: image,
|
||||
name: "tf-job-dashboard",
|
||||
ports: [
|
||||
{
|
||||
containerPort: 8080,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
serviceAccountName: "tf-job-dashboard",
|
||||
},
|
||||
},
|
||||
},
|
||||
}, // ui
|
||||
|
||||
uiRole:: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRole",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "tf-job-dashboard",
|
||||
},
|
||||
name: "tf-job-dashboard",
|
||||
},
|
||||
rules: [
|
||||
{
|
||||
apiGroups: [
|
||||
"tensorflow.org",
|
||||
"kubeflow.org",
|
||||
],
|
||||
resources: [
|
||||
"tfjobs",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"apiextensions.k8s.io",
|
||||
],
|
||||
resources: [
|
||||
"customresourcedefinitions",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"storage.k8s.io",
|
||||
],
|
||||
resources: [
|
||||
"storageclasses",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"batch",
|
||||
],
|
||||
resources: [
|
||||
"jobs",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"",
|
||||
],
|
||||
resources: [
|
||||
"configmaps",
|
||||
"pods",
|
||||
"services",
|
||||
"endpoints",
|
||||
"persistentvolumeclaims",
|
||||
"events",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
{
|
||||
apiGroups: [
|
||||
"apps",
|
||||
"extensions",
|
||||
],
|
||||
resources: [
|
||||
"deployments",
|
||||
],
|
||||
verbs: [
|
||||
"*",
|
||||
],
|
||||
},
|
||||
],
|
||||
}, // uiRole
|
||||
|
||||
uiRoleBinding:: {
|
||||
apiVersion: "rbac.authorization.k8s.io/v1beta1",
|
||||
kind: "ClusterRoleBinding",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "tf-job-dashboard",
|
||||
},
|
||||
name: "tf-job-dashboard",
|
||||
},
|
||||
roleRef: {
|
||||
apiGroup: "rbac.authorization.k8s.io",
|
||||
kind: "ClusterRole",
|
||||
name: "tf-job-dashboard",
|
||||
},
|
||||
subjects: [
|
||||
{
|
||||
kind: "ServiceAccount",
|
||||
name: "tf-job-dashboard",
|
||||
namespace: namespace,
|
||||
},
|
||||
],
|
||||
}, // uiRoleBinding
|
||||
},
|
||||
}
|
||||
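The `configData` helper above is what makes the controller config cloud-aware: the default `grpcServerFilePath` entry is always present, and the AKS / acs-engine accelerator volume mounts are mixed in on top of it. A sketch of the two cases, assuming the vendored `kubeflow/core` package is on the jsonnet search path; the namespace value is arbitrary.

```
// Sketch of the per-cloud controller config produced by tf-job.libsonnet.
// Assumes kubeflow/core/tf-job.libsonnet resolves via the -J search path.
local tfjob = import "kubeflow/core/tf-job.libsonnet";

{
  // No cloud given: only the default grpcServerFilePath entry.
  generic: tfjob.parts("kubeflow").configData("null", "null"),

  // On AKS the nvidia driver volumes are added under accelerators.
  aks: tfjob.parts("kubeflow").configData("aks", "null"),
}
```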
@@ -0,0 +1,33 @@
// Some useful routines.
{
  // Convert a string to upper case.
  upper:: function(x) {
    local cp(c) = std.codepoint(c),
    local upLetter(c) = if cp(c) >= 97 && cp(c) < 123 then
      std.char(cp(c) - 32)
    else c,
    result:: std.join("", std.map(upLetter, std.stringChars(x))),
  }.result,

  // Convert non-boolean types like string,number to a boolean.
  // This is primarily intended for dealing with parameters that should be booleans.
  toBool:: function(x) {
    result::
      if std.type(x) == "boolean" then
        x
      else if std.type(x) == "string" then
        $.upper(x) == "TRUE"
      else if std.type(x) == "number" then
        x != 0
      else
        false,
  }.result,

  // Convert a comma-delimited string to an Array
  toArray:: function(str) {
    result::
      if std.type(str) == "string" && str != "null" && std.length(str) > 0 then
        std.split(str, ",")
      else [],
  }.result,
}
96 github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/core.libsonnet vendored Normal file
@ -0,0 +1,96 @@
|
|||
local k = import "k.libsonnet";
|
||||
local deployment = k.extensions.v1beta1.deployment;
|
||||
local container = k.apps.v1beta1.deployment.mixin.spec.template.spec.containersType;
|
||||
local service = k.core.v1.service.mixin;
|
||||
local serviceAccountMixin = k.core.v1.serviceAccount.mixin;
|
||||
local clusterRoleBindingMixin = k.rbac.v1beta1.clusterRoleBinding.mixin;
|
||||
local clusterRoleBinding = k.rbac.v1beta1.clusterRoleBinding;
|
||||
local serviceAccount = k.core.v1.serviceAccount;
|
||||
local baseApife = import "json/apife-deployment.json";
|
||||
local apifeService = import "json/apife-service.json";
|
||||
local operatorDeployment = import "json/operator-deployment.json";
|
||||
local redisDeployment = import "json/redis-deployment.json";
|
||||
local redisService = import "json/redis-service.json";
|
||||
local rbacServiceAccount = import "json/rbac-service-account.json";
|
||||
local rbacClusterRoleBinding = import "json/rbac-cluster-binding.json";
|
||||
local crdDefn = import "crd.libsonnet";
|
||||
|
||||
{
|
||||
parts(namespace):: {
|
||||
|
||||
apife(apifeImage, withRbac)::
|
||||
|
||||
local c = baseApife.spec.template.spec.containers[0] +
|
||||
container.withImage(apifeImage) +
|
||||
container.withImagePullPolicy("IfNotPresent");
|
||||
|
||||
local apiFeBase =
|
||||
baseApife +
|
||||
deployment.mixin.metadata.withNamespace(namespace) +
|
||||
deployment.mixin.spec.template.spec.withContainers([c]);
|
||||
|
||||
if withRbac == "true" then
|
||||
apiFeBase +
|
||||
deployment.mixin.spec.template.spec.withServiceAccountName("seldon")
|
||||
else
|
||||
apiFeBase,
|
||||
|
||||
|
||||
apifeService(serviceType)::
|
||||
|
||||
apifeService +
|
||||
service.metadata.withNamespace(namespace) +
|
||||
service.spec.withType(serviceType),
|
||||
|
||||
deploymentOperator(engineImage, clusterManagerImage, springOpts, javaOpts, withRbac):
|
||||
local env = [
|
||||
{ name: "JAVA_OPTS", value: javaOpts },
|
||||
{ name: "SPRING_OPTS", value: springOpts },
|
||||
{ name: "ENGINE_CONTAINER_IMAGE_AND_VERSION", value: engineImage },
|
||||
];
|
||||
|
||||
local c = operatorDeployment.spec.template.spec.containers[0] +
|
||||
container.withImage(clusterManagerImage) +
|
||||
container.withEnvMixin(env) +
|
||||
container.withImagePullPolicy("IfNotPresent");
|
||||
|
||||
local depOp = operatorDeployment +
|
||||
deployment.mixin.metadata.withNamespace(namespace) +
|
||||
deployment.mixin.spec.template.spec.withContainers([c]);
|
||||
|
||||
if withRbac == "true" then
|
||||
depOp +
|
||||
deployment.mixin.spec.template.spec.withServiceAccountName("seldon")
|
||||
else
|
||||
depOp,
|
||||
|
||||
redisDeployment():
|
||||
|
||||
redisDeployment +
|
||||
deployment.mixin.metadata.withNamespace(namespace),
|
||||
|
||||
redisService():
|
||||
|
||||
redisService +
|
||||
service.metadata.withNamespace(namespace),
|
||||
|
||||
rbacServiceAccount():
|
||||
|
||||
rbacServiceAccount +
|
||||
serviceAccountMixin.metadata.withNamespace(namespace),
|
||||
|
||||
rbacClusterRoleBinding():
|
||||
|
||||
local subject = rbacClusterRoleBinding.subjects[0]
|
||||
{ namespace: namespace };
|
||||
|
||||
rbacClusterRoleBinding +
|
||||
clusterRoleBindingMixin.metadata.withNamespace(namespace) +
|
||||
clusterRoleBinding.withSubjects([subject]),
|
||||
|
||||
crd():
|
||||
|
||||
crdDefn.crd(),
|
||||
|
||||
}, // parts
|
||||
}
|
||||
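core.libsonnet builds each object by patching the raw JSON manifests with ksonnet-lib mixins, so a component only has to pick the parts it wants. A sketch of that composition, assuming both ksonnet-lib (`k.libsonnet`) and this vendored seldon package are on the search path; the image tags below are placeholders, not versions pinned by this repository.

```
// Sketch: composing the seldon core parts from a component.
// k.libsonnet and kubeflow/seldon must be importable; image tags are placeholders.
local core = import "kubeflow/seldon/core.libsonnet";
local namespace = "kubeflow";

[
  core.parts(namespace).apife("seldonio/apife:0.1", "true"),
  core.parts(namespace).apifeService("ClusterIP"),
  core.parts(namespace).deploymentOperator("seldonio/engine:0.1", "seldonio/cluster-manager:0.1", "", "", "true"),
  core.parts(namespace).redisDeployment(),
  core.parts(namespace).redisService(),
  core.parts(namespace).rbacServiceAccount(),
  core.parts(namespace).rbacClusterRoleBinding(),
  core.parts(namespace).crd(),
]
```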
254 github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/crd.libsonnet vendored Normal file
@ -0,0 +1,254 @@
|
|||
local podTemplateValidation = import "json/pod-template-spec-validation.json";
|
||||
local k = import "k.libsonnet";
|
||||
|
||||
{
|
||||
crd()::
|
||||
{
|
||||
apiVersion: "apiextensions.k8s.io/v1beta1",
|
||||
kind: "CustomResourceDefinition",
|
||||
metadata: {
|
||||
name: "seldondeployments.machinelearning.seldon.io",
|
||||
},
|
||||
spec: {
|
||||
group: "machinelearning.seldon.io",
|
||||
names: {
|
||||
kind: "SeldonDeployment",
|
||||
plural: "seldondeployments",
|
||||
shortNames: [
|
||||
"sdep",
|
||||
],
|
||||
singular: "seldondeployment",
|
||||
},
|
||||
scope: "Namespaced",
|
||||
validation: {
|
||||
openAPIV3Schema: {
|
||||
properties: {
|
||||
spec: {
|
||||
properties: {
|
||||
annotations: {
|
||||
description: "The annotations to be updated to a deployment",
|
||||
type: "object",
|
||||
},
|
||||
name: {
|
||||
type: "string",
|
||||
},
|
||||
oauth_key: {
|
||||
type: "string",
|
||||
},
|
||||
oauth_secret: {
|
||||
type: "string",
|
||||
},
|
||||
predictors: {
|
||||
description: "List of predictors belonging to the deployment",
|
||||
items: {
|
||||
properties: {
|
||||
annotations: {
|
||||
description: "The annotations to be updated to a predictor",
|
||||
type: "object",
|
||||
},
|
||||
graph: {
|
||||
properties: {
|
||||
children: {
|
||||
items: {
|
||||
properties: {
|
||||
children: {
|
||||
items: {
|
||||
properties: {
|
||||
children: {
|
||||
items: {},
|
||||
type: "array",
|
||||
},
|
||||
endpoint: {
|
||||
properties: {
|
||||
service_host: {
|
||||
type: "string",
|
||||
},
|
||||
service_port: {
|
||||
type: "integer",
|
||||
},
|
||||
type: {
|
||||
enum: [
|
||||
"REST",
|
||||
"GRPC",
|
||||
],
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
},
|
||||
name: {
|
||||
type: "string",
|
||||
},
|
||||
implementation: {
|
||||
enum: [
|
||||
"UNKNOWN_IMPLEMENTATION",
|
||||
"SIMPLE_MODEL",
|
||||
"SIMPLE_ROUTER",
|
||||
"RANDOM_ABTEST",
|
||||
"AVERAGE_COMBINER",
|
||||
],
|
||||
type: "string",
|
||||
},
|
||||
type: {
|
||||
enum: [
|
||||
"UNKNOWN_TYPE",
|
||||
"ROUTER",
|
||||
"COMBINER",
|
||||
"MODEL",
|
||||
"TRANSFORMER",
|
||||
"OUTPUT_TRANSFORMER",
|
||||
],
|
||||
type: "string",
|
||||
},
|
||||
methods: {
|
||||
type: "array",
|
||||
items: {
|
||||
enum: [
|
||||
"TRANSFORM_INPUT",
|
||||
"TRANSFORM_OUTPUT",
|
||||
"ROUTE",
|
||||
"AGGREGATE",
|
||||
"SEND_FEEDBACK",
|
||||
],
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
type: "array",
|
||||
},
|
||||
endpoint: {
|
||||
properties: {
|
||||
service_host: {
|
||||
type: "string",
|
||||
},
|
||||
service_port: {
|
||||
type: "integer",
|
||||
},
|
||||
type: {
|
||||
enum: [
|
||||
"REST",
|
||||
"GRPC",
|
||||
],
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
},
|
||||
name: {
|
||||
type: "string",
|
||||
},
|
||||
implementation: {
|
||||
enum: [
|
||||
"UNKNOWN_IMPLEMENTATION",
|
||||
"SIMPLE_MODEL",
|
||||
"SIMPLE_ROUTER",
|
||||
"RANDOM_ABTEST",
|
||||
"AVERAGE_COMBINER",
|
||||
],
|
||||
type: "string",
|
||||
},
|
||||
type: {
|
||||
enum: [
|
||||
"UNKNOWN_TYPE",
|
||||
"ROUTER",
|
||||
"COMBINER",
|
||||
"MODEL",
|
||||
"TRANSFORMER",
|
||||
"OUTPUT_TRANSFORMER",
|
||||
],
|
||||
type: "string",
|
||||
},
|
||||
methods: {
|
||||
type: "array",
|
||||
items: {
|
||||
enum: [
|
||||
"TRANSFORM_INPUT",
|
||||
"TRANSFORM_OUTPUT",
|
||||
"ROUTE",
|
||||
"AGGREGATE",
|
||||
"SEND_FEEDBACK",
|
||||
],
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
type: "array",
|
||||
},
|
||||
endpoint: {
|
||||
properties: {
|
||||
service_host: {
|
||||
type: "string",
|
||||
},
|
||||
service_port: {
|
||||
type: "integer",
|
||||
},
|
||||
type: {
|
||||
enum: [
|
||||
"REST",
|
||||
"GRPC",
|
||||
],
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
},
|
||||
name: {
|
||||
type: "string",
|
||||
},
|
||||
implementation: {
|
||||
enum: [
|
||||
"UNKNOWN_IMPLEMENTATION",
|
||||
"SIMPLE_MODEL",
|
||||
"SIMPLE_ROUTER",
|
||||
"RANDOM_ABTEST",
|
||||
"AVERAGE_COMBINER",
|
||||
],
|
||||
type: "string",
|
||||
},
|
||||
type: {
|
||||
enum: [
|
||||
"UNKNOWN_TYPE",
|
||||
"ROUTER",
|
||||
"COMBINER",
|
||||
"MODEL",
|
||||
"TRANSFORMER",
|
||||
"OUTPUT_TRANSFORMER",
|
||||
],
|
||||
type: "string",
|
||||
},
|
||||
methods: {
|
||||
type: "array",
|
||||
items: {
|
||||
enum: [
|
||||
"TRANSFORM_INPUT",
|
||||
"TRANSFORM_OUTPUT",
|
||||
"ROUTE",
|
||||
"AGGREGATE",
|
||||
"SEND_FEEDBACK",
|
||||
],
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
name: {
|
||||
type: "string",
|
||||
},
|
||||
replicas: {
|
||||
type: "integer",
|
||||
},
|
||||
},
|
||||
},
|
||||
type: "array",
|
||||
},
|
||||
componentSpec: podTemplateValidation,
|
||||
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
version: "v1alpha1",
|
||||
},
|
||||
},
|
||||
|
||||
}
|
||||
52 github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/json/apife-deployment.json vendored Normal file
@ -0,0 +1,52 @@
|
|||
{
|
||||
"apiVersion": "extensions/v1beta1",
|
||||
"kind": "Deployment",
|
||||
"metadata": {
|
||||
"name": "seldon-apiserver"
|
||||
},
|
||||
"spec": {
|
||||
"replicas": 1,
|
||||
"template": {
|
||||
"metadata": {
|
||||
"annotations": {
|
||||
"prometheus.io/path": "/prometheus",
|
||||
"prometheus.io/port": "8080",
|
||||
"prometheus.io/scrape": "true"
|
||||
},
|
||||
"labels": {
|
||||
"app": "seldon-apiserver-container-app",
|
||||
"version": "1"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"containers": [
|
||||
{
|
||||
"env": [
|
||||
{
|
||||
"name": "SELDON_ENGINE_KAFKA_SERVER",
|
||||
"value": "kafka:9092"
|
||||
},
|
||||
{
|
||||
"name": "SELDON_CLUSTER_MANAGER_REDIS_HOST",
|
||||
"value": "redis"
|
||||
}
|
||||
],
|
||||
"image": "seldonio/apife:{{ .Values.apife.image.tag }}",
|
||||
"imagePullPolicy": "{{ .Values.apife.image.pull_policy }}",
|
||||
"name": "seldon-apiserver-container",
|
||||
"ports": [
|
||||
{
|
||||
"containerPort": 8080,
|
||||
"protocol": "TCP"
|
||||
},
|
||||
{
|
||||
"containerPort": 5000,
|
||||
"protocol": "TCP"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
34
github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/json/apife-service.json
vendored
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Service",
|
||||
"metadata": {
|
||||
"labels": {
|
||||
"app": "seldon-apiserver-container-app"
|
||||
},
|
||||
"name": "seldon-apiserver"
|
||||
},
|
||||
"spec": {
|
||||
"ports": [
|
||||
{
|
||||
"name": "http",
|
||||
"port": 8080,
|
||||
"protocol": "TCP",
|
||||
"targetPort": 8080
|
||||
},
|
||||
{
|
||||
"name": "grpc",
|
||||
"port": 5000,
|
||||
"protocol": "TCP",
|
||||
"targetPort": 5000
|
||||
}
|
||||
],
|
||||
"selector": {
|
||||
"app": "seldon-apiserver-container-app"
|
||||
},
|
||||
"sessionAffinity": "None",
|
||||
"type": "{{ .Values.apife_service_type }}"
|
||||
},
|
||||
"status": {
|
||||
"loadBalancer": {}
|
||||
}
|
||||
}
|
||||
73
github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/json/operator-deployment.json
vendored
Normal file
|
|
@ -0,0 +1,73 @@
|
|||
{
|
||||
"kind": "Deployment",
|
||||
"apiVersion": "apps/v1beta1",
|
||||
"metadata": {
|
||||
"name": "seldon-cluster-manager",
|
||||
"creationTimestamp": null,
|
||||
"labels": {
|
||||
"app": "seldon-cluster-manager-server"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"replicas": 1,
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"app": "seldon-cluster-manager-server"
|
||||
}
|
||||
},
|
||||
"template": {
|
||||
"metadata": {
|
||||
"creationTimestamp": null,
|
||||
"labels": {
|
||||
"app": "seldon-cluster-manager-server"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"containers": [
|
||||
{
|
||||
"name": "seldon-cluster-manager-container",
|
||||
"image": "seldonio/cluster-manager:1234",
|
||||
"ports": [
|
||||
{
|
||||
"containerPort": 8080,
|
||||
"protocol": "TCP"
|
||||
}
|
||||
],
|
||||
"env": [
|
||||
{
|
||||
"name": "SELDON_CLUSTER_MANAGER_REDIS_HOST",
|
||||
"value": "redis"
|
||||
},
|
||||
{
|
||||
"name": "SELDON_CLUSTER_MANAGER_POD_NAMESPACE",
|
||||
"valueFrom": {
|
||||
"fieldRef": {
|
||||
"apiVersion": "v1",
|
||||
"fieldPath": "metadata.namespace"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"resources": {},
|
||||
"terminationMessagePath": "/dev/termination-log",
|
||||
"terminationMessagePolicy": "File",
|
||||
"imagePullPolicy": "IfNotPresent"
|
||||
}
|
||||
],
|
||||
"restartPolicy": "Always",
|
||||
"terminationGracePeriodSeconds": 30,
|
||||
"dnsPolicy": "ClusterFirst",
|
||||
"securityContext": {},
|
||||
"schedulerName": "default-scheduler"
|
||||
}
|
||||
},
|
||||
"strategy": {
|
||||
"type": "RollingUpdate",
|
||||
"rollingUpdate": {
|
||||
"maxUnavailable": 1,
|
||||
"maxSurge": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
"status": {}
|
||||
}
|
||||
3336
github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/json/pod-template-spec-validation.json
vendored
Normal file
File diff suppressed because it is too large
20
github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/json/rbac-cluster-binding.json
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
{
|
||||
"apiVersion": "rbac.authorization.k8s.io/v1",
|
||||
"kind": "ClusterRoleBinding",
|
||||
"metadata": {
|
||||
"creationTimestamp": null,
|
||||
"name": "seldon"
|
||||
},
|
||||
"roleRef": {
|
||||
"apiGroup": "rbac.authorization.k8s.io",
|
||||
"kind": "ClusterRole",
|
||||
"name": "cluster-admin"
|
||||
},
|
||||
"subjects": [
|
||||
{
|
||||
"kind": "ServiceAccount",
|
||||
"name": "seldon",
|
||||
"namespace": "default"
|
||||
}
|
||||
]
|
||||
}
|
||||
9
github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/json/rbac-service-account.json
vendored
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "ServiceAccount",
|
||||
"metadata": {
|
||||
"creationTimestamp": null,
|
||||
"name": "seldon"
|
||||
}
|
||||
}
|
||||
|
||||
59
github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/json/redis-deployment.json
vendored
Normal file
|
|
@ -0,0 +1,59 @@
|
|||
{
|
||||
"apiVersion": "apps/v1beta1",
|
||||
"kind": "Deployment",
|
||||
"metadata": {
|
||||
"creationTimestamp": null,
|
||||
"labels": {
|
||||
"app": "redis-app"
|
||||
},
|
||||
"name": "redis"
|
||||
},
|
||||
"spec": {
|
||||
"replicas": 1,
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"app": "redis-app"
|
||||
}
|
||||
},
|
||||
"strategy": {
|
||||
"rollingUpdate": {
|
||||
"maxSurge": 1,
|
||||
"maxUnavailable": 1
|
||||
},
|
||||
"type": "RollingUpdate"
|
||||
},
|
||||
"template": {
|
||||
"metadata": {
|
||||
"creationTimestamp": null,
|
||||
"labels": {
|
||||
"app": "redis-app"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"containers": [
|
||||
{
|
||||
"image": "redis:4.0.1",
|
||||
"imagePullPolicy": "IfNotPresent",
|
||||
"name": "redis-container",
|
||||
"ports": [
|
||||
{
|
||||
"containerPort": 6379,
|
||||
"protocol": "TCP"
|
||||
}
|
||||
],
|
||||
"resources": {},
|
||||
"terminationMessagePath": "/dev/termination-log",
|
||||
"terminationMessagePolicy": "File"
|
||||
}
|
||||
],
|
||||
"dnsPolicy": "ClusterFirst",
|
||||
"restartPolicy": "Always",
|
||||
"schedulerName": "default-scheduler",
|
||||
"securityContext": {},
|
||||
"terminationGracePeriodSeconds": 30
|
||||
}
|
||||
}
|
||||
},
|
||||
"status": {}
|
||||
}
|
||||
|
||||
25
github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/json/redis-service.json
vendored
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Service",
|
||||
"metadata": {
|
||||
"creationTimestamp": null,
|
||||
"name": "redis"
|
||||
},
|
||||
"spec": {
|
||||
"ports": [
|
||||
{
|
||||
"port": 6379,
|
||||
"protocol": "TCP",
|
||||
"targetPort": 6379
|
||||
}
|
||||
],
|
||||
"selector": {
|
||||
"app": "redis-app"
|
||||
},
|
||||
"sessionAffinity": "None",
|
||||
"type": "ClusterIP"
|
||||
},
|
||||
"status": {
|
||||
"loadBalancer": {}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
{
|
||||
"name": "seldon",
|
||||
"apiVersion": "0.0.1",
|
||||
"kind": "ksonnet.io/parts",
|
||||
"description": "Seldon ML Deployment\n",
|
||||
"author": "seldon-core team <devext@seldon.io>",
|
||||
"contributors": [
|
||||
{
|
||||
"name": "Clive Cox",
|
||||
"email": "cc@seldon.io"
|
||||
}
|
||||
],
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/kubeflow/kubeflow"
|
||||
},
|
||||
"bugs": {
|
||||
"url": "https://github.com/SeldonIO/seldon-core/issues"
|
||||
},
|
||||
"keywords": [
|
||||
"kubernetes",
|
||||
"machine learning",
|
||||
"deployment"
|
||||
],
|
||||
"quickStart": {
|
||||
"prototype": "io.ksonnet.pkg.seldon",
|
||||
"componentName": "seldon",
|
||||
"flags": {
|
||||
"name": "seldon",
|
||||
"namespace": "default"
|
||||
},
|
||||
"comment": "Seldon Core Components."
|
||||
},
|
||||
"license": "Apache 2.0"
|
||||
}
|
||||
70
github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/prototypes/core.jsonnet
vendored
Normal file
|
|
@ -0,0 +1,70 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.seldon
|
||||
// @description Seldon Core components. Operator and API FrontEnd.
|
||||
// @shortDescription Seldon Core components.
|
||||
// @param name string seldon Name to give seldon
|
||||
// @optionalParam namespace string null Namespace to use for the components. It is automatically inherited from the environment if not set.
|
||||
// @optionalParam withRbac string true Whether to include RBAC setup
|
||||
// @optionalParam withApife string false Whether to include the built-in API OAuth front end server for ingress
|
||||
// @optionalParam apifeImage string seldonio/apife:0.1.5 Default image for API Front End
|
||||
// @optionalParam apifeServiceType string NodePort API Front End Service Type
|
||||
// @optionalParam operatorImage string seldonio/cluster-manager:0.1.5 Seldon cluster manager image version
|
||||
// @optionalParam operatorSpringOpts string null cluster manager spring opts
|
||||
// @optionalParam operatorJavaOpts string null cluster manager java opts
|
||||
// @optionalParam engineImage string seldonio/engine:0.1.5 Seldon engine image version
|
||||
|
||||
local k = import "k.libsonnet";
|
||||
local core = import "kubeflow/seldon/core.libsonnet";
|
||||
|
||||
// updatedParams uses the environment namespace if
|
||||
// the namespace parameter is not explicitly set
|
||||
local updatedParams = params {
|
||||
namespace: if params.namespace == "null" then env.namespace else params.namespace,
|
||||
};
|
||||
|
||||
local name = import "param://name";
|
||||
local namespace = updatedParams.namespace;
|
||||
local withRbac = import "param://withRbac";
|
||||
local withApife = import "param://withApife";
|
||||
|
||||
// APIFE
|
||||
local apifeImage = import "param://apifeImage";
|
||||
local apifeServiceType = import "param://apifeServiceType";
|
||||
|
||||
// Cluster Manager (The CRD Operator)
|
||||
local operatorImage = import "param://operatorImage";
|
||||
local operatorSpringOptsParam = import "param://operatorSpringOpts";
|
||||
local operatorSpringOpts = if operatorSpringOptsParam != "null" then operatorSpringOptsParam else "";
|
||||
local operatorJavaOptsParam = import "param://operatorJavaOpts";
|
||||
local operatorJavaOpts = if operatorJavaOptsParam != "null" then operatorJavaOptsParam else "";
|
||||
|
||||
// Engine
|
||||
local engineImage = import "param://engineImage";
|
||||
|
||||
// APIFE
|
||||
local apife = [
|
||||
core.parts(namespace).apife(apifeImage, withRbac),
|
||||
core.parts(namespace).apifeService(apifeServiceType),
|
||||
];
|
||||
|
||||
local rbac = [
|
||||
core.parts(namespace).rbacServiceAccount(),
|
||||
core.parts(namespace).rbacClusterRoleBinding(),
|
||||
];
|
||||
|
||||
// Core
|
||||
local coreComponents = [
|
||||
core.parts(namespace).deploymentOperator(engineImage, operatorImage, operatorSpringOpts, operatorJavaOpts, withRbac),
|
||||
core.parts(namespace).redisDeployment(),
|
||||
core.parts(namespace).redisService(),
|
||||
core.parts(namespace).crd(),
|
||||
];
|
||||
|
||||
if withRbac == "true" && withApife == "true" then
|
||||
k.core.v1.list.new(apife + rbac + coreComponents)
|
||||
else if withRbac == "true" && withApife == "false" then
|
||||
k.core.v1.list.new(rbac + coreComponents)
|
||||
else if withRbac == "false" && withApife == "true" then
|
||||
k.core.v1.list.new(apife + coreComponents)
|
||||
else if withRbac == "false" && withApife == "false" then
|
||||
k.core.v1.list.new(coreComponents)
|
||||
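The `io.ksonnet.pkg.seldon` prototype above assembles the cluster manager (operator), Redis, the CRD, and optionally the OAuth API front end, gated by the `withRbac` and `withApife` flags. As a minimal sketch of how it might be used from a ksonnet application (the registry/package install step and flag values are illustrative assumptions, not taken from this repository):

```shell
# Assumes the kubeflow registry and seldon package are already registered
# in the ksonnet app; names here are illustrative.
$ ks pkg install kubeflow/seldon

# Generate the core Seldon components: RBAC on, API front end off.
$ ks prototype use io.ksonnet.pkg.seldon seldon \
  --name seldon \
  --withRbac true \
  --withApife false

# Apply the generated component to the default environment.
$ ks apply default -c seldon
```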
26
github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/prototypes/serve-simple.jsonnet
vendored
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
// @apiVersion 0.1
|
||||
// @name io.ksonnet.pkg.seldon-serve-simple
|
||||
// @description A prototype to serve a single seldon model
|
||||
// @shortDescription A prototype to serve a single seldon model
|
||||
// @param name string Name to give this deployment
|
||||
// @param image string Docker image which contains this model
|
||||
// @optionalParam namespace string null Namespace to use for the components. It is automatically inherited from the environment if not set.
|
||||
// @optionalParam replicas number 1 Number of replicas
|
||||
// @optionalParam endpoint string REST The endpoint type: REST or GRPC
|
||||
|
||||
local k = import "k.libsonnet";
|
||||
local serve = import "kubeflow/seldon/serve-simple.libsonnet";
|
||||
|
||||
// updatedParams uses the environment namespace if
|
||||
// the namespace parameter is not explicitly set
|
||||
local updatedParams = params {
|
||||
namespace: if params.namespace == "null" then env.namespace else params.namespace,
|
||||
};
|
||||
|
||||
local name = import "param://name";
|
||||
local image = import "param://image";
|
||||
local namespace = updatedParams.namespace;
|
||||
local replicas = import "param://replicas";
|
||||
local endpoint = import "param://endpoint";
|
||||
|
||||
k.core.v1.list.new(serve.parts(namespace).serve(name, image, replicas, endpoint))
|
||||
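For the single-model `io.ksonnet.pkg.seldon-serve-simple` prototype above, a minimal invocation might look like the following; the component name and image reference are hypothetical placeholders, not values from this repository:

```shell
# Serve one model behind Seldon with a REST endpoint and a single replica.
# The image below is a placeholder for your own model container.
$ ks prototype use io.ksonnet.pkg.seldon-serve-simple my-model \
  --name my-model \
  --image gcr.io/my-project/my-model:0.1 \
  --replicas 1 \
  --endpoint REST

$ ks apply default -c my-model
```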
54
github_issue_summarization/ks-kubeflow/vendor/kubeflow/seldon/serve-simple.libsonnet
vendored
Normal file
|
|
@ -0,0 +1,54 @@
|
|||
{
|
||||
parts(namespace):: {
|
||||
serve(name, image, replicas, endpoint):: {
|
||||
apiVersion: "machinelearning.seldon.io/v1alpha1",
|
||||
kind: "SeldonDeployment",
|
||||
metadata: {
|
||||
labels: {
|
||||
app: "seldon",
|
||||
},
|
||||
name: name,
|
||||
namespace: namespace,
|
||||
},
|
||||
spec: {
|
||||
annotations: {
|
||||
deployment_version: "v1",
|
||||
project_name: name,
|
||||
},
|
||||
name: name,
|
||||
predictors: [
|
||||
{
|
||||
annotations: {
|
||||
predictor_version: "v1",
|
||||
},
|
||||
componentSpec: {
|
||||
spec: {
|
||||
containers: [
|
||||
{
|
||||
image: image,
|
||||
imagePullPolicy: "Always",
|
||||
name: name,
|
||||
},
|
||||
],
|
||||
terminationGracePeriodSeconds: 1,
|
||||
},
|
||||
},
|
||||
graph: {
|
||||
children: [
|
||||
|
||||
],
|
||||
endpoint: {
|
||||
type: endpoint,
|
||||
},
|
||||
name: name,
|
||||
type: "MODEL",
|
||||
},
|
||||
name: name,
|
||||
replicas: replicas,
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
|
||||
},
|
||||
}
|
||||