Updated component images to version d4960d3379

Kevin Bache 2019-06-17 21:18:07 -07:00
parent b935836c30
commit d0aa15dfb3
27 changed files with 40 additions and 40 deletions
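Every change below is the same mechanical substitution: the pinned component image tag 1d55a27cf8b69696f3ab5c10687edf2fde0068c7 is replaced with d4960d3379af4735fd04dc7167fab5fff82d0f22 across 27 files. A minimal sketch of how such a bulk tag bump could be scripted follows; the directory layout and file extensions are assumptions, and this is illustrative only, not necessarily how this commit was produced.

# Hypothetical helper for bumping the pinned image tag across the repo.
# Paths and extensions are assumptions; adjust to the actual layout.
from pathlib import Path

OLD_TAG = '1d55a27cf8b69696f3ab5c10687edf2fde0068c7'
NEW_TAG = 'd4960d3379af4735fd04dc7167fab5fff82d0f22'

for path in Path('.').rglob('*'):
    if not path.is_file() or path.suffix not in {'.yaml', '.py', '.ipynb'}:
        continue
    text = path.read_text()
    if OLD_TAG in text:
        path.write_text(text.replace(OLD_TAG, NEW_TAG))
        print('updated', path)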

View File

@@ -15,7 +15,7 @@ outputs:
- {name: Predictions dir, type: GCSPath, description: 'GCS or local directory.'} #Will contain prediction_results-* and schema.json files; TODO: Split outputs and replace dir with single file # type: {GCSPath: {path_type: Directory}}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:d4960d3379af4735fd04dc7167fab5fff82d0f22
command: [python2, /ml/predict.py]
args: [
--data, {inputValue: Data file pattern},
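
A component.yaml like this one is consumed by the KFP SDK, and the image: value pinned here is exactly what the step's pod pulls at run time. A small illustrative loading snippet, with the file path assumed from the usual repo layout:

# Illustrative only: turn the tf-predict component definition into a pipeline op.
from kfp import components

dataflow_tf_predict_op = components.load_component_from_file(
    'components/dataflow/predict/component.yaml')  # path assumed
# Steps created from this factory run
# gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:d4960d3379af4735fd04dc7167fab5fff82d0f22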

View File

@@ -18,7 +18,7 @@ outputs:
- {name: Validation result, type: String, description: Indicates whether anomalies were detected or not.}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-dataflow-tfdv:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-dataflow-tfdv:d4960d3379af4735fd04dc7167fab5fff82d0f22
command: [python2, /ml/validate.py]
args: [
--csv-data-for-inference, {inputValue: Inference data},

View File

@@ -17,7 +17,7 @@ outputs:
- {name: Analysis results dir, type: GCSPath, description: GCS or local directory where the analysis results should be written.} # type: {GCSPath: {path_type: Directory}}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-dataflow-tfma:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-dataflow-tfma:d4960d3379af4735fd04dc7167fab5fff82d0f22
command: [python2, /ml/model_analysis.py]
args: [
--model, {inputValue: Model},

View File

@@ -12,7 +12,7 @@ outputs:
- {name: Transformed data dir, type: GCSPath} # type: {GCSPath: {path_type: Directory}}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:d4960d3379af4735fd04dc7167fab5fff82d0f22
command: [python2, /ml/transform.py]
args: [
--train, {inputValue: Training data file pattern},

View File

@@ -54,7 +54,7 @@ outputs:
type: GCSPath
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.bigquery, query,
--query, {inputValue: query},

View File

@@ -48,7 +48,7 @@ outputs:
type: String
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.dataflow, launch_python,
--python_file_path, {inputValue: python_file_path},

View File

@@ -58,7 +58,7 @@ outputs:
type: String
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.dataflow, launch_template,
--project_id, {inputValue: project_id},

View File

@@ -65,7 +65,7 @@ outputs:
type: String
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.dataproc, create_cluster,
--project_id, {inputValue: project_id},

View File

@@ -33,7 +33,7 @@ inputs:
type: Integer
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.dataproc, delete_cluster,
--project_id, {inputValue: project_id},

View File

@@ -75,7 +75,7 @@ outputs:
type: String
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.dataproc, submit_hadoop_job,
--project_id, {inputValue: project_id},

View File

@@ -70,7 +70,7 @@ outputs:
type: String
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.dataproc, submit_hive_job,
--project_id, {inputValue: project_id},

View File

@@ -70,7 +70,7 @@ outputs:
type: String
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.dataproc, submit_pig_job,
--project_id, {inputValue: project_id},

View File

@@ -64,7 +64,7 @@ outputs:
type: String
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.dataproc, submit_pyspark_job,
--project_id, {inputValue: project_id},

View File

@@ -71,7 +71,7 @@ outputs:
type: String
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.dataproc, submit_spark_job,
--project_id, {inputValue: project_id},

View File

@@ -70,7 +70,7 @@ outputs:
type: String
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.dataproc, submit_sparksql_job,
--project_id, {inputValue: project_id},

View File

@@ -64,7 +64,7 @@ outputs:
type: String
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.ml_engine, batch_predict,
--project_id, {inputValue: project_id},

View File

@@ -90,7 +90,7 @@ outputs:
type: String
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.ml_engine, deploy,
--model_uri, {inputValue: model_uri},

View File

@@ -98,7 +98,7 @@ outputs:
type: GCSPath
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-gcp:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-gcp:d4960d3379af4735fd04dc7167fab5fff82d0f22
args: [
kfp_component.google.ml_engine, train,
--project_id, {inputValue: project_id},

View File

@@ -11,7 +11,7 @@ inputs:
# - {name: Endpoint URI, type: Serving URI, description: 'URI of the deployed prediction service.'}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:d4960d3379af4735fd04dc7167fab5fff82d0f22
command: [/bin/deploy.sh]
args: [
--model-export-path, {inputValue: Model dir},

View File

@@ -15,7 +15,7 @@ outputs:
- {name: Training output dir, type: GCSPath, description: 'GCS or local directory.'} # type: {GCSPath: {path_type: Directory}}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:d4960d3379af4735fd04dc7167fab5fff82d0f22
command: [python2, -m, trainer.task]
args: [
--transformed-data-dir, {inputValue: Transformed data dir},

View File

@@ -17,7 +17,7 @@ from kfp import dsl
def kubeflow_tfjob_launcher_op(container_image, command, number_of_workers: int, number_of_parameter_servers: int, tfjob_timeout_minutes: int, output_dir=None, step_name='TFJob-launcher'):
return dsl.ContainerOp(
name = step_name,
image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf:1d55a27cf8b69696f3ab5c10687edf2fde0068c7',
image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf:d4960d3379af4735fd04dc7167fab5fff82d0f22',
arguments = [
'--workers', number_of_workers,
'--pss', number_of_parameter_servers,
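
For reference, a hedged usage sketch of the launcher op above, based only on the signature shown; the trainer image and command mirror values used elsewhere in this commit and are illustrative, not prescribed.

# Illustrative pipeline using kubeflow_tfjob_launcher_op as defined above.
from kfp import dsl

@dsl.pipeline(name='tfjob-launcher-example')
def tfjob_example():
    kubeflow_tfjob_launcher_op(
        container_image='gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:d4960d3379af4735fd04dc7167fab5fff82d0f22',
        command=['python', '-m', 'trainer.task'],  # placeholder command
        number_of_workers=2,
        number_of_parameter_servers=1,
        tfjob_timeout_minutes=60,
    )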

View File

@@ -26,7 +26,7 @@ spec:
spec:
containers:
- name: tensorflow
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:d4960d3379af4735fd04dc7167fab5fff82d0f22
command:
- python
- -m
@@ -49,7 +49,7 @@ spec:
spec:
containers:
- name: tensorflow
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:d4960d3379af4735fd04dc7167fab5fff82d0f22
command:
- python
- -m
@@ -72,7 +72,7 @@ spec:
spec:
containers:
- name: tensorflow
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:d4960d3379af4735fd04dc7167fab5fff82d0f22
command:
- python
- -m

View File

@@ -9,7 +9,7 @@ inputs:
# - {name: Metrics, type: Metrics}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:d4960d3379af4735fd04dc7167fab5fff82d0f22
command: [python2, /ml/confusion_matrix.py]
args: [
--predictions, {inputValue: Predictions},

View File

@@ -11,7 +11,7 @@ inputs:
# - {name: Metrics, type: Metrics}
implementation:
container:
image: gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:1d55a27cf8b69696f3ab5c10687edf2fde0068c7
image: gcr.io/ml-pipeline/ml-pipeline-local-confusion-matrix:d4960d3379af4735fd04dc7167fab5fff82d0f22
command: [python2, /ml/roc.py]
args: [
--predictions, {inputValue: Predictions dir},

View File

@@ -68,7 +68,7 @@ def kubeflow_training(output, project,
).apply(gcp.use_gcp_secret('user-gcp-sa'))
if use_gpu:
training.image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer-gpu:1d55a27cf8b69696f3ab5c10687edf2fde0068c7',
training.image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer-gpu:d4960d3379af4735fd04dc7167fab5fff82d0f22',
training.set_gpu_limit(1)
prediction = dataflow_tf_predict_op(

View File

@@ -44,13 +44,13 @@
"EVAL_DATA = 'gs://ml-pipeline-playground/tfx/taxi-cab-classification/eval.csv'\n",
"HIDDEN_LAYER_SIZE = '1500'\n",
"STEPS = 3000\n",
"DATAFLOW_TFDV_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfdv:1d55a27cf8b69696f3ab5c10687edf2fde0068c7'\n",
"DATAFLOW_TFT_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:1d55a27cf8b69696f3ab5c10687edf2fde0068c7'\n",
"DATAFLOW_TFMA_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfma:1d55a27cf8b69696f3ab5c10687edf2fde0068c7'\n",
"DATAFLOW_TF_PREDICT_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:1d55a27cf8b69696f3ab5c10687edf2fde0068c7'\n",
"KUBEFLOW_TF_TRAINER_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:1d55a27cf8b69696f3ab5c10687edf2fde0068c7'\n",
"KUBEFLOW_TF_TRAINER_GPU_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer-gpu:1d55a27cf8b69696f3ab5c10687edf2fde0068c7'\n",
"KUBEFLOW_DEPLOYER_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:1d55a27cf8b69696f3ab5c10687edf2fde0068c7'\n",
"DATAFLOW_TFDV_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfdv:d4960d3379af4735fd04dc7167fab5fff82d0f22'\n",
"DATAFLOW_TFT_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tft:d4960d3379af4735fd04dc7167fab5fff82d0f22'\n",
"DATAFLOW_TFMA_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tfma:d4960d3379af4735fd04dc7167fab5fff82d0f22'\n",
"DATAFLOW_TF_PREDICT_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-dataflow-tf-predict:d4960d3379af4735fd04dc7167fab5fff82d0f22'\n",
"KUBEFLOW_TF_TRAINER_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer:d4960d3379af4735fd04dc7167fab5fff82d0f22'\n",
"KUBEFLOW_TF_TRAINER_GPU_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer-gpu:d4960d3379af4735fd04dc7167fab5fff82d0f22'\n",
"KUBEFLOW_DEPLOYER_IMAGE = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-deployer:d4960d3379af4735fd04dc7167fab5fff82d0f22'\n",
"DEPLOYER_MODEL = 'notebook_tfx_taxi'\n",
"DEPLOYER_VERSION_DEV = 'dev'\n",
"DEPLOYER_VERSION_PROD = 'prod'\n",

View File

@@ -36,7 +36,7 @@ def dataproc_create_cluster_op(
):
return dsl.ContainerOp(
name='Dataproc - Create cluster',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-create-cluster:1d55a27cf8b69696f3ab5c10687edf2fde0068c7',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-create-cluster:d4960d3379af4735fd04dc7167fab5fff82d0f22',
arguments=[
'--project', project,
'--region', region,
@@ -56,7 +56,7 @@ def dataproc_delete_cluster_op(
):
return dsl.ContainerOp(
name='Dataproc - Delete cluster',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-delete-cluster:1d55a27cf8b69696f3ab5c10687edf2fde0068c7',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-delete-cluster:d4960d3379af4735fd04dc7167fab5fff82d0f22',
arguments=[
'--project', project,
'--region', region,
@@ -76,7 +76,7 @@ def dataproc_analyze_op(
):
return dsl.ContainerOp(
name='Dataproc - Analyze',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-analyze:1d55a27cf8b69696f3ab5c10687edf2fde0068c7',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-analyze:d4960d3379af4735fd04dc7167fab5fff82d0f22',
arguments=[
'--project', project,
'--region', region,
@@ -103,7 +103,7 @@ def dataproc_transform_op(
):
return dsl.ContainerOp(
name='Dataproc - Transform',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-transform:1d55a27cf8b69696f3ab5c10687edf2fde0068c7',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-transform:d4960d3379af4735fd04dc7167fab5fff82d0f22',
arguments=[
'--project', project,
'--region', region,
@@ -141,7 +141,7 @@ def dataproc_train_op(
return dsl.ContainerOp(
name='Dataproc - Train XGBoost model',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-train:1d55a27cf8b69696f3ab5c10687edf2fde0068c7',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-train:d4960d3379af4735fd04dc7167fab5fff82d0f22',
arguments=[
'--project', project,
'--region', region,
@@ -174,7 +174,7 @@ def dataproc_predict_op(
):
return dsl.ContainerOp(
name='Dataproc - Predict with XGBoost model',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-predict:1d55a27cf8b69696f3ab5c10687edf2fde0068c7',
image='gcr.io/ml-pipeline/ml-pipeline-dataproc-predict:d4960d3379af4735fd04dc7167fab5fff82d0f22',
arguments=[
'--project', project,
'--region', region,