Update sample notebook to clean up deployed models. (#622)

* Update sample notebook to clean up deployed models.

Update SDK client to return correct links in local Jupyter with the user's own proxy connection.

* Fix sample tests.
qimingj 2019-01-04 13:07:30 -08:00 committed by Kubernetes Prow Robot
parent d4d4f62056
commit 410f9b979f
3 changed files with 321 additions and 183 deletions
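
For context, a minimal sketch of the connection flow this change supports, assuming a kubectl proxy (or port-forward) exposes the pipeline service at 127.0.0.1:8080; the experiment name, package path, and params are illustrative:

import kfp

# With no host argument, the client targets the in-cluster DNS name, which only
# works from a pod in the same cluster. With an explicit host, the rendered
# experiment/run links now go through the user's own proxy connection.
client = kfp.Client('127.0.0.1:8080/pipeline')

exp = client.create_experiment(name='demo')
run = client.run_pipeline(exp.id, 'my-tfx', 'my-tfx.tar.gz',
                          params={'output': 'gs://my-bucket/output'})
result = client.wait_for_run_completion(run.id, timeout=600)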

View File

@@ -24,7 +24,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {
"tags": [
"parameters"
@@ -38,7 +38,7 @@
"PROJECT_NAME = 'Your-Gcp-Project-Name'\n",
"BASE_IMAGE='gcr.io/%s/pusherbase:dev' % PROJECT_NAME\n",
"TARGET_IMAGE='gcr.io/%s/pusher:dev' % PROJECT_NAME\n",
"KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.4/kfp.tar.gz'\n",
"KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.6/kfp.tar.gz'\n",
"TRAIN_DATA = 'gs://ml-pipeline-playground/tfx/taxi-cab-classification/train.csv'\n",
"EVAL_DATA = 'gs://ml-pipeline-playground/tfx/taxi-cab-classification/eval.csv'\n",
"HIDDEN_LAYER_SIZE = '1500'\n",
@@ -49,13 +49,14 @@
"DATAFLOW_TF_PREDICT_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:85c6413a2e13da4b8f198aeac1abc2f3a74fe789'\n",
"KUBEFLOW_TF_TRAINER_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:85c6413a2e13da4b8f198aeac1abc2f3a74fe789'\n",
"KUBEFLOW_DEPLOYER_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:85c6413a2e13da4b8f198aeac1abc2f3a74fe789'\n",
"DEV_DEPLOYER_MODEL = 'notebook_tfx_devtaxi.beta'\n",
"PROD_DEPLOYER_MODEL = 'notebook_tfx_prodtaxi.beta'"
"DEPLOYER_MODEL = 'notebook_tfx_taxi'\n",
"DEPLOYER_VERSION_DEV = 'dev'\n",
"DEPLOYER_VERSION_PROD = 'prod'"
]
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"metadata": {
"scrolled": false
},
@@ -64,43 +65,42 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Collecting https://storage.googleapis.com/ml-pipeline/release/0.1.4/kfp.tar.gz\n",
"\u001b[?25l Downloading https://storage.googleapis.com/ml-pipeline/release/0.1.4/kfp.tar.gz (68kB)\n",
"\u001b[K 100% |████████████████████████████████| 71kB 9.4MB/s eta 0:00:01\n",
"\u001b[?25hRequirement already satisfied, skipping upgrade: urllib3>=1.15 in /opt/conda/lib/python3.6/site-packages (from kfp==0.1) (1.22)\n",
"Collecting https://storage.googleapis.com/ml-pipeline/release/0.1.6/kfp.tar.gz\n",
" Using cached https://storage.googleapis.com/ml-pipeline/release/0.1.6/kfp.tar.gz\n",
"Requirement already satisfied, skipping upgrade: urllib3>=1.15 in /opt/conda/lib/python3.6/site-packages (from kfp==0.1) (1.22)\n",
"Requirement already satisfied, skipping upgrade: six>=1.10 in /opt/conda/lib/python3.6/site-packages (from kfp==0.1) (1.11.0)\n",
"Requirement already satisfied, skipping upgrade: certifi in /opt/conda/lib/python3.6/site-packages (from kfp==0.1) (2018.10.15)\n",
"Requirement already satisfied, skipping upgrade: python-dateutil in /opt/conda/lib/python3.6/site-packages (from kfp==0.1) (2.7.3)\n",
"Requirement already satisfied, skipping upgrade: PyYAML in /opt/conda/lib/python3.6/site-packages (from kfp==0.1) (3.13)\n",
"Requirement already satisfied, skipping upgrade: google-cloud-storage==1.13.0 in /opt/conda/lib/python3.6/site-packages (from kfp==0.1) (1.13.0)\n",
"Requirement already satisfied, skipping upgrade: kubernetes==8.0.0 in /opt/conda/lib/python3.6/site-packages (from kfp==0.1) (8.0.0)\n",
"Requirement already satisfied, skipping upgrade: google-api-core<2.0.0dev,>=0.1.1 in /opt/conda/lib/python3.6/site-packages (from google-cloud-storage==1.13.0->kfp==0.1) (1.5.2)\n",
"Requirement already satisfied, skipping upgrade: google-api-core<2.0.0dev,>=0.1.1 in /opt/conda/lib/python3.6/site-packages (from google-cloud-storage==1.13.0->kfp==0.1) (1.7.0)\n",
"Requirement already satisfied, skipping upgrade: google-cloud-core<0.29dev,>=0.28.0 in /opt/conda/lib/python3.6/site-packages (from google-cloud-storage==1.13.0->kfp==0.1) (0.28.1)\n",
"Requirement already satisfied, skipping upgrade: google-resumable-media>=0.3.1 in /opt/conda/lib/python3.6/site-packages (from google-cloud-storage==1.13.0->kfp==0.1) (0.3.1)\n",
"Requirement already satisfied, skipping upgrade: requests-oauthlib in /opt/conda/lib/python3.6/site-packages (from kubernetes==8.0.0->kfp==0.1) (1.0.0)\n",
"Requirement already satisfied, skipping upgrade: adal>=1.0.2 in /opt/conda/lib/python3.6/site-packages (from kubernetes==8.0.0->kfp==0.1) (1.2.0)\n",
"Requirement already satisfied, skipping upgrade: requests in /opt/conda/lib/python3.6/site-packages (from kubernetes==8.0.0->kfp==0.1) (2.18.4)\n",
"Requirement already satisfied, skipping upgrade: google-auth>=1.0.1 in /opt/conda/lib/python3.6/site-packages (from kubernetes==8.0.0->kfp==0.1) (1.6.1)\n",
"Requirement already satisfied, skipping upgrade: google-resumable-media>=0.3.1 in /opt/conda/lib/python3.6/site-packages (from google-cloud-storage==1.13.0->kfp==0.1) (0.3.2)\n",
"Requirement already satisfied, skipping upgrade: setuptools>=21.0.0 in /opt/conda/lib/python3.6/site-packages (from kubernetes==8.0.0->kfp==0.1) (38.4.0)\n",
"Requirement already satisfied, skipping upgrade: google-auth>=1.0.1 in /opt/conda/lib/python3.6/site-packages (from kubernetes==8.0.0->kfp==0.1) (1.6.2)\n",
"Requirement already satisfied, skipping upgrade: requests in /opt/conda/lib/python3.6/site-packages (from kubernetes==8.0.0->kfp==0.1) (2.18.4)\n",
"Requirement already satisfied, skipping upgrade: adal>=1.0.2 in /opt/conda/lib/python3.6/site-packages (from kubernetes==8.0.0->kfp==0.1) (1.2.0)\n",
"Requirement already satisfied, skipping upgrade: requests-oauthlib in /opt/conda/lib/python3.6/site-packages (from kubernetes==8.0.0->kfp==0.1) (1.0.0)\n",
"Requirement already satisfied, skipping upgrade: websocket-client!=0.40.0,!=0.41.*,!=0.42.*,>=0.32.0 in /opt/conda/lib/python3.6/site-packages (from kubernetes==8.0.0->kfp==0.1) (0.54.0)\n",
"Requirement already satisfied, skipping upgrade: protobuf>=3.4.0 in /opt/conda/lib/python3.6/site-packages (from google-api-core<2.0.0dev,>=0.1.1->google-cloud-storage==1.13.0->kfp==0.1) (3.6.1)\n",
"Requirement already satisfied, skipping upgrade: pytz in /opt/conda/lib/python3.6/site-packages (from google-api-core<2.0.0dev,>=0.1.1->google-cloud-storage==1.13.0->kfp==0.1) (2018.7)\n",
"Requirement already satisfied, skipping upgrade: googleapis-common-protos!=1.5.4,<2.0dev,>=1.5.3 in /opt/conda/lib/python3.6/site-packages (from google-api-core<2.0.0dev,>=0.1.1->google-cloud-storage==1.13.0->kfp==0.1) (1.5.5)\n",
"Requirement already satisfied, skipping upgrade: oauthlib>=0.6.2 in /opt/conda/lib/python3.6/site-packages (from requests-oauthlib->kubernetes==8.0.0->kfp==0.1) (2.1.0)\n",
"Requirement already satisfied, skipping upgrade: cryptography>=1.1.0 in /opt/conda/lib/python3.6/site-packages (from adal>=1.0.2->kubernetes==8.0.0->kfp==0.1) (2.1.4)\n",
"Requirement already satisfied, skipping upgrade: PyJWT>=1.0.0 in /opt/conda/lib/python3.6/site-packages (from adal>=1.0.2->kubernetes==8.0.0->kfp==0.1) (1.6.4)\n",
"Requirement already satisfied, skipping upgrade: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.6/site-packages (from google-auth>=1.0.1->kubernetes==8.0.0->kfp==0.1) (0.2.3)\n",
"Requirement already satisfied, skipping upgrade: cachetools>=2.0.0 in /opt/conda/lib/python3.6/site-packages (from google-auth>=1.0.1->kubernetes==8.0.0->kfp==0.1) (3.0.0)\n",
"Requirement already satisfied, skipping upgrade: rsa>=3.1.4 in /opt/conda/lib/python3.6/site-packages (from google-auth>=1.0.1->kubernetes==8.0.0->kfp==0.1) (4.0)\n",
"Requirement already satisfied, skipping upgrade: chardet<3.1.0,>=3.0.2 in /opt/conda/lib/python3.6/site-packages (from requests->kubernetes==8.0.0->kfp==0.1) (3.0.4)\n",
"Requirement already satisfied, skipping upgrade: idna<2.7,>=2.5 in /opt/conda/lib/python3.6/site-packages (from requests->kubernetes==8.0.0->kfp==0.1) (2.6)\n",
"Requirement already satisfied, skipping upgrade: cachetools>=2.0.0 in /opt/conda/lib/python3.6/site-packages (from google-auth>=1.0.1->kubernetes==8.0.0->kfp==0.1) (3.0.0)\n",
"Requirement already satisfied, skipping upgrade: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.6/site-packages (from google-auth>=1.0.1->kubernetes==8.0.0->kfp==0.1) (0.2.2)\n",
"Requirement already satisfied, skipping upgrade: rsa>=3.1.4 in /opt/conda/lib/python3.6/site-packages (from google-auth>=1.0.1->kubernetes==8.0.0->kfp==0.1) (4.0)\n",
"Requirement already satisfied, skipping upgrade: cryptography>=1.1.0 in /opt/conda/lib/python3.6/site-packages (from adal>=1.0.2->kubernetes==8.0.0->kfp==0.1) (2.1.4)\n",
"Requirement already satisfied, skipping upgrade: PyJWT>=1.0.0 in /opt/conda/lib/python3.6/site-packages (from adal>=1.0.2->kubernetes==8.0.0->kfp==0.1) (1.7.1)\n",
"Requirement already satisfied, skipping upgrade: oauthlib>=0.6.2 in /opt/conda/lib/python3.6/site-packages (from requests-oauthlib->kubernetes==8.0.0->kfp==0.1) (2.1.0)\n",
"Requirement already satisfied, skipping upgrade: pyasn1<0.5.0,>=0.4.1 in /opt/conda/lib/python3.6/site-packages (from pyasn1-modules>=0.2.1->google-auth>=1.0.1->kubernetes==8.0.0->kfp==0.1) (0.4.5)\n",
"Requirement already satisfied, skipping upgrade: asn1crypto>=0.21.0 in /opt/conda/lib/python3.6/site-packages (from cryptography>=1.1.0->adal>=1.0.2->kubernetes==8.0.0->kfp==0.1) (0.24.0)\n",
"Requirement already satisfied, skipping upgrade: cffi>=1.7 in /opt/conda/lib/python3.6/site-packages (from cryptography>=1.1.0->adal>=1.0.2->kubernetes==8.0.0->kfp==0.1) (1.11.4)\n",
"Requirement already satisfied, skipping upgrade: pyasn1<0.5.0,>=0.4.1 in /opt/conda/lib/python3.6/site-packages (from pyasn1-modules>=0.2.1->google-auth>=1.0.1->kubernetes==8.0.0->kfp==0.1) (0.4.4)\n",
"Requirement already satisfied, skipping upgrade: pycparser in /opt/conda/lib/python3.6/site-packages (from cffi>=1.7->cryptography>=1.1.0->adal>=1.0.2->kubernetes==8.0.0->kfp==0.1) (2.18)\n",
"Building wheels for collected packages: kfp\n",
" Running setup.py bdist_wheel for kfp ... \u001b[?25ldone\n",
"\u001b[?25h Stored in directory: /tmp/pip-ephem-wheel-cache-bctiow25/wheels/f9/43/15/db34c8d6291d495360ac6eea475e6bf473750320547f837caf\n",
"\u001b[?25h Stored in directory: /tmp/pip-ephem-wheel-cache-wex7xrp9/wheels/a5/f2/9b/2abbe11f35b86317d9c1be9022540fd30e06c5595e5d173680\n",
"Successfully built kfp\n",
"Installing collected packages: kfp\n",
" Found existing installation: kfp 0.1\n",
@@ -126,13 +126,13 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"Experiment link <a href=\"/pipeline/#/experiments/details/a13c1b50-93db-40b5-89a2-a72a8129606b\" target=\"_blank\" >here</a>"
"Experiment link <a href=\"/pipeline/#/experiments/details/5ed64e97-8996-4f5a-9c97-7f7dda002bea\" target=\"_blank\" >here</a>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
@@ -150,6 +150,11 @@
"import kfp.dsl as dsl\n",
"import kfp.notebook\n",
"import kfp.gcp as gcp\n",
"\n",
"# If you are using Kubeflow JupyterHub, then no need to set host in Client() constructor.\n",
"# But if you are using your local Jupyter instance, and have a kubectl connection to the cluster,\n",
"# Then do:\n",
"# client = kfp.Client('127.0.0.1:8080/pipeline')\n",
"client = kfp.Client()\n",
"exp = client.create_experiment(name=EXPERIMENT_NAME)"
]
@@ -163,7 +168,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 5,
"metadata": {},
"outputs": [
{
@@ -183,13 +188,13 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"Run link <a href=\"/pipeline/#/runs/details/67b216a8-f1ad-11e8-927c-42010a8000f7\" target=\"_blank\" >here</a>"
"Run link <a href=\"/pipeline/#/runs/details/0e80392d-0fb5-11e9-9e63-42010a8000b6\" target=\"_blank\" >here</a>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
@@ -214,7 +219,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
@@ -366,13 +371,13 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"Run link <a href=\"/pipeline/#/runs/details/d1a380ae-f1ad-11e8-927c-42010a8000f7\" target=\"_blank\" >here</a>"
"Run link <a href=\"/pipeline/#/runs/details/0ea27789-0fb5-11e9-9e63-42010a8000b6\" target=\"_blank\" >here</a>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
@@ -410,23 +415,23 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: google-api-python-client in /opt/conda/lib/python3.6/site-packages (1.7.4)\n",
"Requirement already satisfied: google-auth>=1.4.1 in /opt/conda/lib/python3.6/site-packages (from google-api-python-client) (1.6.1)\n",
"Requirement already satisfied: httplib2<1dev,>=0.9.2 in /opt/conda/lib/python3.6/site-packages (from google-api-python-client) (0.11.3)\n",
"Requirement already satisfied: uritemplate<4dev,>=3.0.0 in /opt/conda/lib/python3.6/site-packages (from google-api-python-client) (3.0.0)\n",
"Requirement already satisfied: six<2dev,>=1.6.1 in /opt/conda/lib/python3.6/site-packages (from google-api-python-client) (1.11.0)\n",
"Requirement already satisfied: google-api-python-client in /opt/conda/lib/python3.6/site-packages (1.7.7)\n",
"Requirement already satisfied: google-auth-httplib2>=0.0.3 in /opt/conda/lib/python3.6/site-packages (from google-api-python-client) (0.0.3)\n",
"Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.6/site-packages (from google-auth>=1.4.1->google-api-python-client) (0.2.2)\n",
"Requirement already satisfied: six<2dev,>=1.6.1 in /opt/conda/lib/python3.6/site-packages (from google-api-python-client) (1.11.0)\n",
"Requirement already satisfied: httplib2<1dev,>=0.9.2 in /opt/conda/lib/python3.6/site-packages (from google-api-python-client) (0.12.0)\n",
"Requirement already satisfied: google-auth>=1.4.1 in /opt/conda/lib/python3.6/site-packages (from google-api-python-client) (1.6.2)\n",
"Requirement already satisfied: uritemplate<4dev,>=3.0.0 in /opt/conda/lib/python3.6/site-packages (from google-api-python-client) (3.0.0)\n",
"Requirement already satisfied: cachetools>=2.0.0 in /opt/conda/lib/python3.6/site-packages (from google-auth>=1.4.1->google-api-python-client) (3.0.0)\n",
"Requirement already satisfied: rsa>=3.1.4 in /opt/conda/lib/python3.6/site-packages (from google-auth>=1.4.1->google-api-python-client) (4.0)\n",
"Requirement already satisfied: pyasn1<0.5.0,>=0.4.1 in /opt/conda/lib/python3.6/site-packages (from pyasn1-modules>=0.2.1->google-auth>=1.4.1->google-api-python-client) (0.4.4)\n"
"Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.6/site-packages (from google-auth>=1.4.1->google-api-python-client) (0.2.3)\n",
"Requirement already satisfied: pyasn1>=0.1.3 in /opt/conda/lib/python3.6/site-packages (from rsa>=3.1.4->google-auth>=1.4.1->google-api-python-client) (0.4.5)\n"
]
}
],
@@ -437,7 +442,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
@@ -446,7 +451,7 @@
" description='deploys a model to GCP CMLE',\n",
" base_image=BASE_IMAGE\n",
")\n",
"def deploy_model(model_dot_version: str, model_path: str, gcp_project: str, runtime: str):\n",
"def deploy_model(model_name: str, version_name: str, model_path: str, gcp_project: str, runtime: str):\n",
"\n",
" from googleapiclient import discovery\n",
" from tensorflow.python.lib.io import file_io\n",
@@ -454,7 +459,6 @@
" \n",
" model_path = file_io.get_matching_files(os.path.join(model_path, 'export', 'export', '*'))[0]\n",
" api = discovery.build('ml', 'v1')\n",
" model_name, version_name = model_dot_version.split('.')\n",
" body = {'name': model_name}\n",
" parent = 'projects/%s' % gcp_project\n",
" try:\n",
@@ -478,7 +482,7 @@
" while True:\n",
" response = api.projects().operations().get(name=response['name']).execute()\n",
" if 'done' not in response or response['done'] is not True:\n",
" time.sleep(5)\n",
" time.sleep(10)\n",
" print('still deploying...')\n",
" else:\n",
" if 'error' in response:\n",
@@ -490,13 +494,34 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"execution_count": 11,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"still deploying...\n",
"still deploying...\n",
"still deploying...\n",
"still deploying...\n",
"still deploying...\n",
"still deploying...\n",
"still deploying...\n",
"still deploying...\n",
"still deploying...\n",
"still deploying...\n",
"still deploying...\n",
"Done.\n"
]
}
],
"source": [
"# Test the function and make sure it works.\n",
"path = 'gs://ml-pipeline-playground/sampledata/taxi/train'\n",
"deploy_model(DEV_DEPLOYER_MODEL, path, PROJECT_NAME, '1.9')"
"deploy_model(DEPLOYER_MODEL, DEPLOYER_VERSION_DEV, path, PROJECT_NAME, '1.9')"
]
},
{
@@ -510,49 +535,33 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"2018-11-26 19:29:13:INFO:Build an image that is based on gcr.io/ml-pipeline-dogfood/pusherbase:dev and push the image to gcr.io/ml-pipeline-dogfood/pusher:dev\n",
"2018-11-26 19:29:13:INFO:Checking path: gs://ngao-bugbash...\n",
"2018-11-26 19:29:13:INFO:Generate entrypoint and serialization codes.\n",
"2018-11-26 19:29:13:INFO:Generate build files.\n",
"2018-11-26 19:29:13:INFO:Start a kaniko job for build.\n",
"2018-11-26 19:29:18:INFO:5 seconds: waiting for job to complete\n",
"2018-11-26 19:29:23:INFO:10 seconds: waiting for job to complete\n",
"2018-11-26 19:29:28:INFO:15 seconds: waiting for job to complete\n",
"2018-11-26 19:29:33:INFO:20 seconds: waiting for job to complete\n",
"2018-11-26 19:29:38:INFO:25 seconds: waiting for job to complete\n",
"2018-11-26 19:29:43:INFO:30 seconds: waiting for job to complete\n",
"2018-11-26 19:29:48:INFO:35 seconds: waiting for job to complete\n",
"2018-11-26 19:29:53:INFO:40 seconds: waiting for job to complete\n",
"2018-11-26 19:29:58:INFO:45 seconds: waiting for job to complete\n",
"2018-11-26 19:30:03:INFO:50 seconds: waiting for job to complete\n",
"2018-11-26 19:30:08:INFO:55 seconds: waiting for job to complete\n",
"2018-11-26 19:30:13:INFO:60 seconds: waiting for job to complete\n",
"2018-11-26 19:30:18:INFO:65 seconds: waiting for job to complete\n",
"2018-11-26 19:30:23:INFO:70 seconds: waiting for job to complete\n",
"2018-11-26 19:30:28:INFO:75 seconds: waiting for job to complete\n",
"2018-11-26 19:30:33:INFO:80 seconds: waiting for job to complete\n",
"2018-11-26 19:30:38:INFO:85 seconds: waiting for job to complete\n",
"2018-11-26 19:30:43:INFO:90 seconds: waiting for job to complete\n",
"2018-11-26 19:30:48:INFO:95 seconds: waiting for job to complete\n",
"2018-11-26 19:30:53:INFO:100 seconds: waiting for job to complete\n",
"2018-11-26 19:30:58:INFO:105 seconds: waiting for job to complete\n",
"2018-11-26 19:31:03:INFO:110 seconds: waiting for job to complete\n",
"2018-11-26 19:31:08:INFO:115 seconds: waiting for job to complete\n",
"2018-11-26 19:31:13:INFO:120 seconds: waiting for job to complete\n",
"2018-11-26 19:31:18:INFO:125 seconds: waiting for job to complete\n",
"2018-11-26 19:31:24:INFO:130 seconds: waiting for job to complete\n",
"2018-11-26 19:31:29:INFO:135 seconds: waiting for job to complete\n",
"2018-11-26 19:31:34:INFO:140 seconds: waiting for job to complete\n",
"2018-11-26 19:31:39:INFO:145 seconds: waiting for job to complete\n",
"2018-11-26 19:31:39:INFO:Kaniko job complete.\n",
"2018-11-26 19:31:39:INFO:Build component complete.\n"
"2019-01-04 00:11:49:INFO:Build an image that is based on tensorflow/tensorflow:1.12.0-py3 and push the image to gcr.io/bradley-playground/pusher:dev\n",
"2019-01-04 00:11:49:INFO:Checking path: gs://bradley-playground...\n",
"2019-01-04 00:11:49:INFO:Generate entrypoint and serialization codes.\n",
"2019-01-04 00:11:49:INFO:Generate build files.\n",
"2019-01-04 00:11:49:INFO:Start a kaniko job for build.\n",
"2019-01-04 00:11:49:INFO:Cannot Find local kubernetes config. Trying in-cluster config.\n",
"2019-01-04 00:11:49:INFO:Initialized with in-cluster config.\n",
"2019-01-04 00:11:54:INFO:5 seconds: waiting for job to complete\n",
"2019-01-04 00:11:59:INFO:10 seconds: waiting for job to complete\n",
"2019-01-04 00:12:04:INFO:15 seconds: waiting for job to complete\n",
"2019-01-04 00:12:09:INFO:20 seconds: waiting for job to complete\n",
"2019-01-04 00:12:14:INFO:25 seconds: waiting for job to complete\n",
"2019-01-04 00:12:19:INFO:30 seconds: waiting for job to complete\n",
"2019-01-04 00:12:24:INFO:35 seconds: waiting for job to complete\n",
"2019-01-04 00:12:29:INFO:40 seconds: waiting for job to complete\n",
"2019-01-04 00:12:34:INFO:45 seconds: waiting for job to complete\n",
"2019-01-04 00:12:39:INFO:50 seconds: waiting for job to complete\n",
"2019-01-04 00:12:45:INFO:55 seconds: waiting for job to complete\n",
"2019-01-04 00:12:45:INFO:Kaniko job complete.\n",
"2019-01-04 00:12:45:INFO:Build component complete.\n"
]
}
],
@@ -577,41 +586,33 @@
},
{
"cell_type": "code",
"execution_count": 35,
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stderr",
"name": "stdout",
"output_type": "stream",
"text": [
"INFO:root:Checking path: gs://bradley-playground...\n",
"INFO:root:Generate build files.\n",
"INFO:root:Start a kaniko job for build.\n",
"INFO:root:5 seconds: waiting for job to complete\n",
"INFO:root:10 seconds: waiting for job to complete\n",
"INFO:root:15 seconds: waiting for job to complete\n",
"INFO:root:20 seconds: waiting for job to complete\n",
"INFO:root:25 seconds: waiting for job to complete\n",
"INFO:root:30 seconds: waiting for job to complete\n",
"INFO:root:35 seconds: waiting for job to complete\n",
"INFO:root:40 seconds: waiting for job to complete\n",
"INFO:root:45 seconds: waiting for job to complete\n",
"INFO:root:50 seconds: waiting for job to complete\n",
"INFO:root:55 seconds: waiting for job to complete\n",
"INFO:root:60 seconds: waiting for job to complete\n",
"INFO:root:65 seconds: waiting for job to complete\n",
"INFO:root:70 seconds: waiting for job to complete\n",
"INFO:root:75 seconds: waiting for job to complete\n",
"INFO:root:80 seconds: waiting for job to complete\n",
"INFO:root:85 seconds: waiting for job to complete\n",
"INFO:root:90 seconds: waiting for job to complete\n",
"INFO:root:95 seconds: waiting for job to complete\n",
"INFO:root:100 seconds: waiting for job to complete\n",
"INFO:root:105 seconds: waiting for job to complete\n",
"INFO:root:110 seconds: waiting for job to complete\n",
"INFO:root:115 seconds: waiting for job to complete\n",
"INFO:root:Kaniko job complete.\n",
"INFO:root:Build image complete.\n"
"2019-01-04 00:12:45:INFO:Checking path: gs://bradley-playground...\n",
"2019-01-04 00:12:45:INFO:Generate build files.\n",
"2019-01-04 00:12:45:INFO:Start a kaniko job for build.\n",
"2019-01-04 00:12:45:INFO:Cannot Find local kubernetes config. Trying in-cluster config.\n",
"2019-01-04 00:12:45:INFO:Initialized with in-cluster config.\n",
"2019-01-04 00:12:50:INFO:5 seconds: waiting for job to complete\n",
"2019-01-04 00:12:55:INFO:10 seconds: waiting for job to complete\n",
"2019-01-04 00:13:00:INFO:15 seconds: waiting for job to complete\n",
"2019-01-04 00:13:05:INFO:20 seconds: waiting for job to complete\n",
"2019-01-04 00:13:10:INFO:25 seconds: waiting for job to complete\n",
"2019-01-04 00:13:15:INFO:30 seconds: waiting for job to complete\n",
"2019-01-04 00:13:20:INFO:35 seconds: waiting for job to complete\n",
"2019-01-04 00:13:25:INFO:40 seconds: waiting for job to complete\n",
"2019-01-04 00:13:30:INFO:45 seconds: waiting for job to complete\n",
"2019-01-04 00:13:35:INFO:50 seconds: waiting for job to complete\n",
"2019-01-04 00:13:40:INFO:55 seconds: waiting for job to complete\n",
"2019-01-04 00:13:45:INFO:60 seconds: waiting for job to complete\n",
"2019-01-04 00:13:50:INFO:65 seconds: waiting for job to complete\n",
"2019-01-04 00:13:51:INFO:Kaniko job complete.\n",
"2019-01-04 00:13:51:INFO:Build image complete.\n"
]
}
],
@@ -630,50 +631,40 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stderr",
"name": "stdout",
"output_type": "stream",
"text": [
"INFO:root:Build an image that is based on gcr.io/bradley-playground/pusher:dev and push the image to gcr.io/bradley-playground/pusher:latest\n",
"INFO:root:Checking path: gs://bradley-playground...\n",
"INFO:root:Generate entrypoint and serialization codes.\n",
"INFO:root:Generate build files.\n",
"INFO:root:Start a kaniko job for build.\n",
"INFO:root:5 seconds: waiting for job to complete\n",
"INFO:root:10 seconds: waiting for job to complete\n",
"INFO:root:15 seconds: waiting for job to complete\n",
"INFO:root:20 seconds: waiting for job to complete\n",
"INFO:root:25 seconds: waiting for job to complete\n",
"INFO:root:30 seconds: waiting for job to complete\n",
"INFO:root:35 seconds: waiting for job to complete\n",
"INFO:root:40 seconds: waiting for job to complete\n",
"INFO:root:45 seconds: waiting for job to complete\n",
"INFO:root:50 seconds: waiting for job to complete\n",
"INFO:root:55 seconds: waiting for job to complete\n",
"INFO:root:60 seconds: waiting for job to complete\n",
"INFO:root:65 seconds: waiting for job to complete\n",
"INFO:root:70 seconds: waiting for job to complete\n",
"INFO:root:75 seconds: waiting for job to complete\n",
"INFO:root:80 seconds: waiting for job to complete\n",
"INFO:root:85 seconds: waiting for job to complete\n",
"INFO:root:90 seconds: waiting for job to complete\n",
"INFO:root:95 seconds: waiting for job to complete\n",
"INFO:root:100 seconds: waiting for job to complete\n",
"INFO:root:105 seconds: waiting for job to complete\n",
"INFO:root:110 seconds: waiting for job to complete\n",
"INFO:root:115 seconds: waiting for job to complete\n",
"INFO:root:120 seconds: waiting for job to complete\n",
"INFO:root:125 seconds: waiting for job to complete\n",
"INFO:root:130 seconds: waiting for job to complete\n",
"INFO:root:135 seconds: waiting for job to complete\n",
"INFO:root:140 seconds: waiting for job to complete\n",
"INFO:root:145 seconds: waiting for job to complete\n",
"INFO:root:150 seconds: waiting for job to complete\n",
"INFO:root:Kaniko job complete.\n",
"INFO:root:Build component complete.\n"
"2019-01-04 00:13:51:INFO:Build an image that is based on gcr.io/bradley-playground/pusherbase:dev and push the image to gcr.io/bradley-playground/pusher:dev\n",
"2019-01-04 00:13:51:INFO:Checking path: gs://bradley-playground...\n",
"2019-01-04 00:13:51:INFO:Generate entrypoint and serialization codes.\n",
"2019-01-04 00:13:51:INFO:Generate build files.\n",
"2019-01-04 00:13:51:INFO:Start a kaniko job for build.\n",
"2019-01-04 00:13:51:INFO:Cannot Find local kubernetes config. Trying in-cluster config.\n",
"2019-01-04 00:13:51:INFO:Initialized with in-cluster config.\n",
"2019-01-04 00:13:56:INFO:5 seconds: waiting for job to complete\n",
"2019-01-04 00:14:01:INFO:10 seconds: waiting for job to complete\n",
"2019-01-04 00:14:06:INFO:15 seconds: waiting for job to complete\n",
"2019-01-04 00:14:11:INFO:20 seconds: waiting for job to complete\n",
"2019-01-04 00:14:16:INFO:25 seconds: waiting for job to complete\n",
"2019-01-04 00:14:21:INFO:30 seconds: waiting for job to complete\n",
"2019-01-04 00:14:26:INFO:35 seconds: waiting for job to complete\n",
"2019-01-04 00:14:31:INFO:40 seconds: waiting for job to complete\n",
"2019-01-04 00:14:36:INFO:45 seconds: waiting for job to complete\n",
"2019-01-04 00:14:41:INFO:50 seconds: waiting for job to complete\n",
"2019-01-04 00:14:46:INFO:55 seconds: waiting for job to complete\n",
"2019-01-04 00:14:51:INFO:60 seconds: waiting for job to complete\n",
"2019-01-04 00:14:56:INFO:65 seconds: waiting for job to complete\n",
"2019-01-04 00:15:01:INFO:70 seconds: waiting for job to complete\n",
"2019-01-04 00:15:06:INFO:75 seconds: waiting for job to complete\n",
"2019-01-04 00:15:11:INFO:80 seconds: waiting for job to complete\n",
"2019-01-04 00:15:16:INFO:85 seconds: waiting for job to complete\n",
"2019-01-04 00:15:21:INFO:90 seconds: waiting for job to complete\n",
"2019-01-04 00:15:21:INFO:Kaniko job complete.\n",
"2019-01-04 00:15:22:INFO:Build component complete.\n"
]
}
],
@@ -709,6 +700,7 @@
" output,\n",
" project,\n",
" model,\n",
" version,\n",
" column_names=dsl.PipelineParam(\n",
" name='column-names',\n",
" value='gs://ml-pipeline-playground/tfx/taxi-cab-classification/column-names.json'),\n",
@@ -739,15 +731,26 @@
" analysis_output = '%s/{{workflow.name}}/analysis' % output\n",
" prediction_output = '%s/{{workflow.name}}/predict' % output\n",
"\n",
" validation = dataflow_tf_data_validation_op(train, evaluation, column_names, key_columns, project, validation_mode, validation_output).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
"\n",
" preprocess = dataflow_tf_transform_op(train, evaluation, validation.outputs['schema'], project, preprocess_mode, preprocess_module, transform_output).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
" training = tf_train_op(preprocess.output, validation.outputs['schema'], learning_rate, hidden_layer_size, steps, target, preprocess_module, training_output).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
" analysis = dataflow_tf_model_analyze_op(training.output, evaluation, validation.outputs['schema'], project, analyze_mode, analyze_slice_column, analysis_output).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
" prediction = dataflow_tf_predict_op(evaluation, validation.outputs['schema'], target, training.output, predict_mode, project, prediction_output).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
" validation = dataflow_tf_data_validation_op(\n",
" train, evaluation, column_names, key_columns, project,\n",
" validation_mode, validation_output).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
" preprocess = dataflow_tf_transform_op(\n",
" train, evaluation, validation.outputs['schema'], project, preprocess_mode,\n",
" preprocess_module, transform_output).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
" training = tf_train_op(\n",
" preprocess.output, validation.outputs['schema'], learning_rate, hidden_layer_size,\n",
" steps, target, preprocess_module, training_output).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
" analysis = dataflow_tf_model_analyze_op(\n",
" training.output, evaluation, validation.outputs['schema'], project,\n",
" analyze_mode, analyze_slice_column, analysis_output).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
" prediction = dataflow_tf_predict_op(\n",
" evaluation, validation.outputs['schema'], target, training.output,\n",
" predict_mode, project, prediction_output).apply(gcp.use_gcp_secret('user-gcp-sa'))\n",
" \n",
" # The new deployer. Note that the DeployerOp interface is similar to the function \"deploy_model\".\n",
" deploy = DeployerOp(gcp_project=project, model_dot_version=model, runtime='1.9', model_path=training.output).apply(gcp.use_gcp_secret('user-gcp-sa'))"
" deploy = DeployerOp(\n",
" gcp_project=project, model_name=model, version_name=version, runtime='1.9',\n",
" model_path=training.output).apply(gcp.use_gcp_secret('user-gcp-sa'))"
]
},
{
@@ -759,13 +762,13 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 16,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"Run link <a href=\"/pipeline/#/runs/details/cb2d51d0-e2be-11e8-93d0-42010a800048\" target=\"_blank\" >here</a>"
"Run link <a href=\"/pipeline/#/runs/details/d3a55fa8-0fb5-11e9-9e63-42010a8000b6\" target=\"_blank\" >here</a>"
],
"text/plain": [
"<IPython.core.display.HTML object>"
@@ -773,6 +776,100 @@
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"2019-01-04 00:15:22:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:15:27:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:15:32:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:15:37:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:15:42:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:15:47:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:15:52:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:15:57:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:16:02:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:16:07:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:16:12:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:16:17:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:16:22:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:16:27:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:16:32:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:16:37:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:16:42:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:16:47:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:16:52:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:16:57:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:17:02:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:17:07:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:17:13:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:17:18:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:17:23:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:17:28:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:17:33:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:17:38:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:17:43:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:17:48:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:17:53:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:17:58:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:18:03:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:18:08:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:18:13:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:18:18:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:18:23:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:18:28:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:18:33:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:18:38:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:18:43:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:18:48:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:18:53:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:18:58:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:19:03:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:19:08:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:19:13:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:19:18:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:19:23:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:19:28:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:19:33:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:19:38:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:19:43:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:19:48:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:19:53:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:19:58:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:20:03:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:20:08:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:20:13:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:20:18:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:20:23:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:20:28:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:20:33:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:20:38:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:20:43:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:20:48:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:20:53:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:20:58:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:21:03:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:21:08:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:21:13:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:21:18:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:21:23:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:21:28:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:21:33:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:21:38:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:21:43:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:21:48:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:21:53:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:21:58:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:22:03:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:22:08:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:22:13:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:22:19:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:22:24:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:22:29:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:22:34:INFO:Waiting for the job to complete...\n",
"2019-01-04 00:22:39:INFO:Waiting for the job to complete...\n"
]
}
],
"source": [
@@ -781,15 +878,44 @@
"run = client.run_pipeline(exp.id, 'my-tfx', 'my-tfx.tar.gz',\n",
" params={'output': OUTPUT_DIR,\n",
" 'project': PROJECT_NAME,\n",
" 'model': PROD_DEPLOYER_MODEL})"
" 'model': DEPLOYER_MODEL,\n",
" 'version': DEPLOYER_VERSION_PROD})\n",
"\n",
"result = client.wait_for_run_completion(run.id, timeout=600)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Clean up"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": []
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Activated service account credentials for: [kubeflow3-user@bradley-playground.iam.gserviceaccount.com]\n",
"Deleting version [prod]......done. \n",
"Deleting version [dev]......done. \n",
"Deleting model [notebook_tfx_taxi]...done. \n"
]
}
],
"source": [
"# the step is only needed if you are using an in-cluster JupyterHub instance.\n",
"!gcloud auth activate-service-account --key-file /var/run/secrets/sa/user-gcp-sa.json\n",
"\n",
"\n",
"!gcloud ml-engine versions delete $DEPLOYER_VERSION_PROD --model $DEPLOYER_MODEL -q\n",
"!gcloud ml-engine versions delete $DEPLOYER_VERSION_DEV --model $DEPLOYER_MODEL -q\n",
"!gcloud ml-engine models delete $DEPLOYER_MODEL -q"
]
}
],
"metadata": {
@@ -808,7 +934,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.4rc1"
"version": "3.6.4"
}
},
"nbformat": 4,

View File

@@ -28,13 +28,15 @@ class Client(object):
  """ API Client for KubeFlow Pipeline.
  """

  def __init__(self, host='ml-pipeline.kubeflow.svc.cluster.local:8888'):
  # in-cluster DNS name of the pipeline service
  IN_CLUSTER_DNS_NAME = 'ml-pipeline.kubeflow.svc.cluster.local:8888'

  def __init__(self, host=None):
    """Create a new instance of kfp client.

    Args:
      host: the host name to use to talk to Kubeflow Pipelines. Default value
          "ml-pipeline.kubeflow.svc.cluster.local:8888" is the in-cluster DNS name
          of the pipeline service. It only works if the current environment is a pod
      host: the host name to use to talk to Kubeflow Pipelines. If not set, the in-cluster
          service DNS name will be used, which only works if the current environment is a pod
          in the same cluster (such as a Jupyter instance spawned by Kubeflow's
          JupyterHub). If you have a different connection to the cluster, such as a kubectl
          proxy connection, then set it to something like "127.0.0.1:8080/pipeline".
@@ -50,13 +52,15 @@ class Client(object):
    except ImportError:
      raise Exception('This module requires installation of kfp_run')

    self._host = host
    config = kfp_run.configuration.Configuration()
    config.host = host
    config.host = host if host else Client.IN_CLUSTER_DNS_NAME
    api_client = kfp_run.api_client.ApiClient(config)
    self._run_api = kfp_run.api.run_service_api.RunServiceApi(api_client)

    config = kfp_experiment.configuration.Configuration()
    config.host = host
    config.host = host if host else Client.IN_CLUSTER_DNS_NAME
    api_client = kfp_experiment.api_client.ApiClient(config)
    self._experiment_api = \
        kfp_experiment.api.experiment_service_api.ExperimentServiceApi(api_client)
@@ -70,6 +74,17 @@ class Client(object):
    return True

  def _get_url_prefix(self):
    if self._host:
      # User's own connection.
      if self._host.startswith('http://'):
        return self._host
      else:
        return 'http://' + self._host
    # In-cluster pod. We could use a relative URL.
    return '/pipeline'

  def create_experiment(self, name):
    """Create a new experiment.

    Args:
@@ -85,8 +100,8 @@ class Client(object):
    if self._is_ipython():
      import IPython
      html = \
          ('Experiment link <a href="/pipeline/#/experiments/details/%s" target="_blank" >here</a>'
          % response.id)
          ('Experiment link <a href="%s/#/experiments/details/%s" target="_blank" >here</a>'
          % (self._get_url_prefix(), response.id))
      IPython.display.display(IPython.display.HTML(html))
    return response
@@ -170,8 +185,8 @@ class Client(object):
    if self._is_ipython():
      import IPython
      html = ('Run link <a href="/pipeline/#/runs/details/%s" target="_blank" >here</a>'
          % response.run.id)
      html = ('Run link <a href="%s/#/runs/details/%s" target="_blank" >here</a>'
          % (self._get_url_prefix(), response.run.id))
      IPython.display.display(IPython.display.HTML(html))
    return response.run

View File

@@ -279,9 +279,6 @@ elif [ "$TEST_NAME" == "notebook-tfx" ]; then
  # CMLE model name format: A name should start with a letter and contain only letters, numbers and underscores.
  DEPLOYER_MODEL=`cat /proc/sys/kernel/random/uuid`
  DEPLOYER_MODEL=A`echo ${DEPLOYER_MODEL//-/_}`
  DEV_DEPLOYER_MODEL=${DEPLOYER_MODEL}_dev
  PROD_DEPLOYER_MODEL=${DEPLOYER_MODEL}_prod
  MODEL_VERSION=beta
  cd ${BASE_DIR}/samples/notebooks
  export LC_ALL=C.UTF-8
@@ -310,9 +307,9 @@ elif [ "$TEST_NAME" == "notebook-tfx" ]; then
  echo "Copy the test results to GCS ${RESULTS_GCS_DIR}/"
  gsutil cp $SAMPLE_NOTEBOOK_TFX_TEST_RESULT ${RESULTS_GCS_DIR}/$SAMPLE_NOTEBOOK_TFX_TEST_RESULT
  #Clean CMLE models
  python3 clean_cmle_models.py --project ml-pipeline-test --model ${DEV_DEPLOYER_MODEL} --version ${MODEL_VERSION}
  python3 clean_cmle_models.py --project ml-pipeline-test --model ${PROD_DEPLOYER_MODEL} --version ${MODEL_VERSION}
  # Clean CMLE models. Not needed because they are cleaned up inside the notebook.
  # python3 clean_cmle_models.py --project ml-pipeline-test --model ${DEV_DEPLOYER_MODEL} --version ${MODEL_VERSION}
  # python3 clean_cmle_models.py --project ml-pipeline-test --model ${PROD_DEPLOYER_MODEL} --version ${MODEL_VERSION}
elif [ "$TEST_NAME" == "notebook-lightweight" ]; then
  SAMPLE_NOTEBOOK_LIGHTWEIGHT_TEST_RESULT=junit_SampleNotebookLightweightOutput.xml
  SAMPLE_NOTEBOOK_LIGHTWEIGHT_TEST_OUTPUT=${RESULTS_GCS_DIR}
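
For reference, the cleanup that the notebook's gcloud cell (and the commented-out clean_cmle_models.py step) performs can also be sketched with the same googleapiclient surface that deploy_model uses. This is an illustrative sketch under that assumption, not the script itself; the project and model names are placeholders:

import time
from googleapiclient import discovery

def _wait_for_op(api, op):
    # Poll the long-running operation until it reports done, as deploy_model does.
    while not api.projects().operations().get(name=op['name']).execute().get('done'):
        time.sleep(10)

def clean_model(project, model, versions):
    api = discovery.build('ml', 'v1')
    model_name = 'projects/%s/models/%s' % (project, model)
    # Versions must be deleted before the model itself can be deleted.
    for version in versions:
        op = api.projects().models().versions().delete(
            name='%s/versions/%s' % (model_name, version)).execute()
        _wait_for_op(api, op)
    _wait_for_op(api, api.projects().models().delete(name=model_name).execute())

clean_model('ml-pipeline-test', 'notebook_tfx_taxi', ['prod', 'dev'])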