diff --git a/docs/guides/on-cluster-build.md b/docs/guides/on-cluster-build.md index bb2c7855..808002d4 100644 --- a/docs/guides/on-cluster-build.md +++ b/docs/guides/on-cluster-build.md @@ -2,8 +2,6 @@ This guide describes how you can build a Function on Cluster with Tekton Pipelines. The on cluster build is enabled by fetching Function source code from a remote Git repository. -> Please note that the following approach requires administrator privileges on the cluster and the build is executed on a privileged container. - ## Prerequisite 1. Install Tekton Pipelines on the cluster. Please refer to [Tekton Pipelines documentation](https://github.com/tektoncd/pipeline/blob/main/docs/install.md) or run the following command: ```bash @@ -16,14 +14,20 @@ In each namespace that you would like to run Pipelines and deploy a Function you ```bash kubectl apply -f https://raw.githubusercontent.com/tektoncd/catalog/master/task/git-clone/0.4/git-clone.yaml ``` -2. Install the Buildpacks Tekton Task to be able to build the Function image: +2. Install the Functions Buildpacks Tekton Task to be able to build the Function image: ```bash -kubectl apply -f https://raw.githubusercontent.com/tektoncd/catalog/master/task/buildpacks/0.3/buildpacks.yaml +kubectl apply -f https://raw.githubusercontent.com/knative-sandbox/kn-plugin-func/main/pipelines/resources/tekton/task/func-buildpacks/0.1/func-buildpacks.yaml ``` 3. Install the `kn func` Deploy Tekton Task to be able to deploy the Function on in the Pipeline: ```bash kubectl apply -f https://raw.githubusercontent.com/knative-sandbox/kn-plugin-func/main/pipelines/resources/tekton/task/func-deploy/0.1/func-deploy.yaml ``` +4. 
Add permission to deploy on Knative to `default` Service Account: (This is not needed on OpenShift) +```bash +export NAMESPACE= +kubectl create clusterrolebinding $NAMESPACE:knative-serving-namespaced-admin \ +--clusterrole=knative-serving-namespaced-admin --serviceaccount=$NAMESPACE:default +``` ## Building a Function on Cluster 1. Create a Function and implement the business logic @@ -57,7 +61,8 @@ git push origin main ```bash kn func deploy ``` -If everything goes fine, you will prompted to provide credentials for the remote container registry that hosts the Function image. You should see output similar to the following: +If you are not logged in the container registry referenced in your function configuration, +you will be prompted to provide credentials for the remote container registry that hosts the Function image. You should see output similar to the following: ```bash $ kn func deploy 🕕 Creating Pipeline resources @@ -74,16 +79,12 @@ Please provide credentials for image registry used by Pipeline. 1. In each namespace where Pipelines and Functions were deployed, uninstall following resources: ```bash export NAMESPACE= -kubectl delete serviceaccount knative-deployer-account -n $NAMESPACE -kubectl delete clusterrolebinding $NAMESPACE:knative-deployer-binding +kubectl delete clusterrolebinding $NAMESPACE:knative-serving-namespaced-admin kubectl delete task.tekton.dev git-clone -kubectl delete task.tekton.dev buildpacks +kubectl delete task.tekton.dev func-buildpacks +kubectl delete task.tekton.dev func-deploy ``` -2. Delete Knative Deployer Cluster Role -```bash -kubectl delete clusterrole kn-deployer -``` -3. Uninstall Tekton Pipelines +2. 
Uninstall Tekton Pipelines ```bash kubectl delete -f https://storage.googleapis.com/tekton-releases/pipeline/latest/release.yaml ``` diff --git a/k8s/secrets.go b/k8s/secrets.go index 2272efa8..0fd8780b 100644 --- a/k8s/secrets.go +++ b/k8s/secrets.go @@ -1,6 +1,7 @@ package k8s import ( + "bytes" "context" "encoding/base64" "encoding/json" @@ -82,29 +83,49 @@ func DeleteSecrets(ctx context.Context, namespaceOverride string, listOptions me return client.CoreV1().Secrets(namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, listOptions) } -func CreateDockerRegistrySecret(ctx context.Context, name, namespaceOverride string, labels map[string]string, username, password, server string) (err error) { +func EnsureDockerRegistrySecretExist(ctx context.Context, name, namespaceOverride string, labels map[string]string, username, password, server string) (err error) { client, namespace, err := NewClientAndResolvedNamespace(namespaceOverride) if err != nil { return } - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: labels, - }, - Type: corev1.SecretTypeDockerConfigJson, - Data: map[string][]byte{}, + // Check whether Secret with specified name exist + createSecret := false + currentSecret, err := GetSecret(ctx, name, namespace) + if err != nil { + if k8serrors.IsNotFound(err) { + createSecret = true + } else { + return + } } dockerConfigJSONContent, err := handleDockerCfgJSONContent(username, password, "", server) if err != nil { return } - secret.Data[corev1.DockerConfigJsonKey] = dockerConfigJSONContent - _, err = client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) + // Check whether we need to create or update the Secret + const secretKey = "config.json" + if createSecret || !bytes.Equal(currentSecret.Data[secretKey], dockerConfigJSONContent) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: labels, + }, + Type: 
corev1.SecretTypeOpaque, + Data: map[string][]byte{}, + } + secret.Data[secretKey] = dockerConfigJSONContent + + // Decide whether create or update + if createSecret { + _, err = client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{}) + } else { + _, err = client.CoreV1().Secrets(namespace).Update(ctx, secret, metav1.UpdateOptions{}) + } + } return } diff --git a/pipelines/resources/tekton/task/func-buildpacks/0.1/func-buildpacks.yaml b/pipelines/resources/tekton/task/func-buildpacks/0.1/func-buildpacks.yaml new file mode 100644 index 00000000..e17edb6f --- /dev/null +++ b/pipelines/resources/tekton/task/func-buildpacks/0.1/func-buildpacks.yaml @@ -0,0 +1,195 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: func-buildpacks + labels: + app.kubernetes.io/version: "0.1" + annotations: + tekton.dev/categories: Image Build + tekton.dev/pipelines.minVersion: "0.17.0" + tekton.dev/tags: image-build + tekton.dev/displayName: "Knative Functions Buildpacks" + tekton.dev/platforms: "linux/amd64" +spec: + description: >- + The Knative Functions Buildpacks task builds source into a container image and pushes it to a registry, + using Cloud Native Buildpacks. This task is based on the Buildpacks Tekton task v 0.4. + + workspaces: + - name: source + description: Directory where application source is located. + - name: cache + description: Directory where cache is stored (when no cache image is provided). + optional: true + - name: dockerconfig + description: >- + An optional workspace that allows providing a .docker/config.json file + for Buildpacks lifecycle binary to access the container registry. + The file should be placed at the root of the Workspace with name config.json. + optional: true + + params: + - name: APP_IMAGE + description: The name of where to store the app image. + - name: BUILDER_IMAGE + description: The image on which builds will run (must include lifecycle and compatible buildpacks). 
+ - name: SOURCE_SUBPATH + description: A subpath within the `source` input where the source to build is located. + default: "" + - name: ENV_VARS + type: array + description: Environment variables to set during _build-time_. + default: [] + - name: PROCESS_TYPE + description: The default process type to set on the image. + default: "web" + - name: RUN_IMAGE + description: Reference to a run image to use. + default: "" + - name: CACHE_IMAGE + description: The name of the persistent app cache image (if no cache workspace is provided). + default: "" + - name: SKIP_RESTORE + description: Do not write layer metadata or restore cached layers. + default: "false" + - name: USER_ID + description: The user ID of the builder image user. + default: "1000" + - name: GROUP_ID + description: The group ID of the builder image user. + default: "1000" + - name: PLATFORM_DIR + description: The name of the platform directory. + default: empty-dir + + results: + - name: APP_IMAGE_DIGEST + description: The digest of the built `APP_IMAGE`. + + stepTemplate: + env: + - name: CNB_PLATFORM_API + value: "0.4" + + steps: + - name: prepare + image: docker.io/library/bash:5.1.4@sha256:b208215a4655538be652b2769d82e576bc4d0a2bb132144c060efc5be8c3f5d6 + args: + - "--env-vars" + - "$(params.ENV_VARS[*])" + script: | + #!/usr/bin/env bash + set -e + + if [[ "$(workspaces.cache.bound)" == "true" ]]; then + echo "> Setting permissions on '$(workspaces.cache.path)'..." + chown -R "$(params.USER_ID):$(params.GROUP_ID)" "$(workspaces.cache.path)" + fi + + for path in "/tekton/home" "/layers" "$(workspaces.source.path)"; do + echo "> Setting permissions on '$path'..." + chown -R "$(params.USER_ID):$(params.GROUP_ID)" "$path" + + if [[ "$path" == "$(workspaces.source.path)" ]]; then + chmod 775 "$(workspaces.source.path)" + fi + done + + echo "> Parsing additional configuration..." 
+ parsing_flag="" + envs=() + for arg in "$@"; do + if [[ "$arg" == "--env-vars" ]]; then + echo "-> Parsing env variables..." + parsing_flag="env-vars" + elif [[ "$parsing_flag" == "env-vars" ]]; then + envs+=("$arg") + fi + done + + echo "> Processing any environment variables..." + ENV_DIR="/platform/env" + + echo "--> Creating 'env' directory: $ENV_DIR" + mkdir -p "$ENV_DIR" + + for env in "${envs[@]}"; do + IFS='=' read -r key value string <<< "$env" + if [[ "$key" != "" && "$value" != "" ]]; then + path="${ENV_DIR}/${key}" + echo "--> Writing ${path}..." + echo -n "$value" > "$path" + fi + done + volumeMounts: + - name: layers-dir + mountPath: /layers + - name: $(params.PLATFORM_DIR) + mountPath: /platform + + - name: create + image: $(params.BUILDER_IMAGE) + imagePullPolicy: Always + command: ["/cnb/lifecycle/creator"] + env: + - name: DOCKER_CONFIG + value: $(workspaces.dockerconfig.path) + args: + - "-app=$(workspaces.source.path)/$(params.SOURCE_SUBPATH)" + - "-cache-dir=$(workspaces.cache.path)" + - "-cache-image=$(params.CACHE_IMAGE)" + - "-uid=$(params.USER_ID)" + - "-gid=$(params.GROUP_ID)" + - "-layers=/layers" + - "-platform=/platform" + - "-report=/layers/report.toml" + - "-process-type=$(params.PROCESS_TYPE)" + - "-skip-restore=$(params.SKIP_RESTORE)" + - "-previous-image=$(params.APP_IMAGE)" + - "-run-image=$(params.RUN_IMAGE)" + - "$(params.APP_IMAGE)" + volumeMounts: + - name: layers-dir + mountPath: /layers + - name: $(params.PLATFORM_DIR) + mountPath: /platform + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + + - name: results + image: docker.io/library/bash:5.1.4@sha256:b208215a4655538be652b2769d82e576bc4d0a2bb132144c060efc5be8c3f5d6 + script: | + #!/usr/bin/env bash + set -e + cat /layers/report.toml | grep "digest" | cut -d'"' -f2 | cut -d'"' -f2 | tr -d '\n' | tee $(results.APP_IMAGE_DIGEST.path) + + ############################################ + ##### Added part for Knative Functions ##### + 
############################################ + + digest=$(cat $(results.APP_IMAGE_DIGEST.path)) + + func_file="$(workspaces.source.path)/func.yaml" + if [ "$(params.SOURCE_SUBPATH)" != "" ]; then + func_file="$(workspaces.source.path)/$(params.SOURCE_SUBPATH)/func.yaml" + fi + + echo "" + sed -i "s|^image:.*$|image: $(params.APP_IMAGE)|" "$func_file" + echo "Function image name: $(params.APP_IMAGE)" + + sed -i "s/^imageDigest:.*$/imageDigest: $digest/" "$func_file" + echo "Function image digest: $digest" + + ############################################ + volumeMounts: + - name: layers-dir + mountPath: /layers + + volumes: + - name: empty-dir + emptyDir: {} + - name: layers-dir + emptyDir: {} diff --git a/pipelines/tekton/client.go b/pipelines/tekton/client.go index 2011cd6b..b46c3026 100644 --- a/pipelines/tekton/client.go +++ b/pipelines/tekton/client.go @@ -14,18 +14,23 @@ const ( DefaultWaitingTimeout = 120 * time.Second ) -func NewTektonClient() (*v1beta1.TektonV1beta1Client, error) { +func NewTektonClientAndResolvedNamespace(defaultNamespace string) (*v1beta1.TektonV1beta1Client, string, error) { + namespace, err := k8s.GetNamespace(defaultNamespace) + if err != nil { + return nil, "", err + } + restConfig, err := k8s.GetClientConfig().ClientConfig() if err != nil { - return nil, fmt.Errorf("failed to create new tekton client: %w", err) + return nil, "", fmt.Errorf("failed to create new tekton client: %w", err) } client, err := v1beta1.NewForConfig(restConfig) if err != nil { - return nil, fmt.Errorf("failed to create new tekton client: %v", err) + return nil, "", fmt.Errorf("failed to create new tekton client: %v", err) } - return client, nil + return client, namespace, nil } func NewTektonClientset() (versioned.Interface, error) { diff --git a/pipelines/tekton/pipeplines_provider.go b/pipelines/tekton/pipeplines_provider.go index 92af1cb9..af46ef2e 100644 --- a/pipelines/tekton/pipeplines_provider.go +++ b/pipelines/tekton/pipeplines_provider.go @@ -73,21 
+73,13 @@ func NewPipelinesProvider(opts ...Opt) *PipelinesProvider { // It ensures that all needed resources are present on the cluster so the PipelineRun can be initialized. // After the PipelineRun is being intitialized, the progress of the PipelineRun is being watched and printed to the output. func (pp *PipelinesProvider) Run(ctx context.Context, f fn.Function) error { - var err error - - if pp.namespace == "" { - pp.namespace, err = k8s.GetNamespace(pp.namespace) - if err != nil { - return err - } - } - pp.progressListener.Increment("Creating Pipeline resources") - client, err := NewTektonClient() + client, namespace, err := NewTektonClientAndResolvedNamespace(pp.namespace) if err != nil { return err } + pp.namespace = namespace // let's specify labels that will be applied to every resouce that is created for a Pipeline labels := map[string]string{labels.FunctionNameKey: f.Name} @@ -114,42 +106,22 @@ func (pp *PipelinesProvider) Run(ctx context.Context, f fn.Function) error { return err } - _, err = k8s.GetSecret(ctx, getPipelineSecretName(f), pp.namespace) - if errors.IsNotFound(err) { - pp.progressListener.Stopping() - creds, err := pp.credentialsProvider(ctx, registry) - if err != nil { - return err - } - pp.progressListener.Increment("Creating Pipeline resources") + pp.progressListener.Stopping() + creds, err := pp.credentialsProvider(ctx, registry) + if err != nil { + return err + } + pp.progressListener.Increment("Creating Pipeline resources") - if registry == name.DefaultRegistry { - registry = authn.DefaultAuthKey - } + if registry == name.DefaultRegistry { + registry = authn.DefaultAuthKey + } - err = k8s.CreateDockerRegistrySecret(ctx, getPipelineSecretName(f), pp.namespace, labels, creds.Username, creds.Password, registry) - if err != nil { - return err - } - } else if err != nil { + err = k8s.EnsureDockerRegistrySecretExist(ctx, getPipelineSecretName(f), pp.namespace, labels, creds.Username, creds.Password, registry) + if err != nil { return 
fmt.Errorf("problem in creating secret: %v", err) } - err = k8s.CreateServiceAccountWithSecret(ctx, getPipelineBuilderServiceAccountName(f), pp.namespace, labels, getPipelineSecretName(f)) - if err != nil { - if !errors.IsAlreadyExists(err) { - return fmt.Errorf("problem in creating service account: %v", err) - } - } - - // using ClusterRole `knative-serving-namespaced-admin` that should be present on the cluster after the installation of Knative Serving - err = k8s.CreateRoleBindingForServiceAccount(ctx, getPipelineDeployerRoleBindingName(f), pp.namespace, labels, getPipelineBuilderServiceAccountName(f), "ClusterRole", "knative-serving-namespaced-admin") - if err != nil { - if !errors.IsAlreadyExists(err) { - return fmt.Errorf("problem in creating role biding: %v", err) - } - } - pp.progressListener.Increment("Running Pipeline with the Function") pr, err := client.PipelineRuns(pp.namespace).Create(ctx, generatePipelineRun(f, labels), metav1.CreateOptions{}) if err != nil { @@ -201,8 +173,6 @@ func (pp *PipelinesProvider) Remove(ctx context.Context, f fn.Function) error { deleteFunctions := []func(context.Context, string, metav1.ListOptions) error{ deletePipelines, deletePipelineRuns, - k8s.DeleteRoleBindings, - k8s.DeleteServiceAccounts, k8s.DeleteSecrets, k8s.DeletePersistentVolumeClaims, } @@ -246,10 +216,9 @@ func (pp *PipelinesProvider) Remove(ctx context.Context, f fn.Function) error { // and prints detailed description of the currently executed Tekton Task. 
func (pp *PipelinesProvider) watchPipelineRunProgress(pr *v1beta1.PipelineRun) error { taskProgressMsg := map[string]string{ - "fetch-repository": "Fetching git repository with the function source code", - "build": "Building function image on the cluster", - "image-digest": "Retrieving digest of the produced function image", - "deploy": "Deploying function to the cluster", + taskNameFetchSources: "Fetching git repository with the function source code", + taskNameBuild: "Building function image on the cluster", + taskNameDeploy: "Deploying function to the cluster", } clientset, err := NewTektonClientset() diff --git a/pipelines/tekton/resources.go b/pipelines/tekton/resources.go index 2e519b48..f3cd053e 100644 --- a/pipelines/tekton/resources.go +++ b/pipelines/tekton/resources.go @@ -12,8 +12,8 @@ import ( fn "knative.dev/kn-plugin-func" ) -func deletePipelines(ctx context.Context, namespace string, listOptions metav1.ListOptions) (err error) { - client, err := NewTektonClient() +func deletePipelines(ctx context.Context, namespaceOverride string, listOptions metav1.ListOptions) (err error) { + client, namespace, err := NewTektonClientAndResolvedNamespace(namespaceOverride) if err != nil { return } @@ -21,8 +21,8 @@ func deletePipelines(ctx context.Context, namespace string, listOptions metav1.L return client.Pipelines(namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, listOptions) } -func deletePipelineRuns(ctx context.Context, namespace string, listOptions metav1.ListOptions) (err error) { - client, err := NewTektonClient() +func deletePipelineRuns(ctx context.Context, namespaceOverride string, listOptions metav1.ListOptions) (err error) { + client, namespace, err := NewTektonClientAndResolvedNamespace(namespaceOverride) if err != nil { return } @@ -60,14 +60,14 @@ func generatePipeline(f fn.Function, labels map[string]string) *pplnv1beta1.Pipe workspaces := []pplnv1beta1.PipelineWorkspaceDeclaration{ {Name: "source-workspace", Description: "Directory where 
function source is located."}, - {Name: "cache-workspace", Description: "Directory where Buildpacks cache is stored"}, + {Name: "cache-workspace", Description: "Directory where Buildpacks cache is stored."}, + {Name: "dockerconfig-workspace", Description: "Directory containing image registry credentials stored in `config.json` file.", Optional: true}, } tasks := pplnv1beta1.PipelineTaskList{ - taskFetchRepository(), - taskBuild("fetch-repository"), - taskImageDigest("build"), - taskFuncDeploy("image-digest"), + taskFetchSources(), + taskBuild(taskNameFetchSources), + taskDeploy(taskNameBuild), } return &pplnv1beta1.Pipeline{ @@ -105,8 +105,6 @@ func generatePipelineRun(f fn.Function, labels map[string]string) *pplnv1beta1.P Name: getPipelineName(f), }, - ServiceAccountName: getPipelineBuilderServiceAccountName(f), - Params: []pplnv1beta1.Param{ { Name: "gitRepository", @@ -145,6 +143,12 @@ func generatePipelineRun(f fn.Function, labels map[string]string) *pplnv1beta1.P }, SubPath: "cache", }, + { + Name: "dockerconfig-workspace", + Secret: &corev1.SecretVolumeSource{ + SecretName: getPipelineSecretName(f), + }, + }, }, }, } @@ -161,11 +165,3 @@ func getPipelineSecretName(f fn.Function) string { func getPipelinePvcName(f fn.Function) string { return fmt.Sprintf("%s-pvc", getPipelineName(f)) } - -func getPipelineBuilderServiceAccountName(f fn.Function) string { - return fmt.Sprintf("%s-builder-secret", getPipelineName(f)) -} - -func getPipelineDeployerRoleBindingName(f fn.Function) string { - return fmt.Sprintf("%s-deployer-binding", getPipelineName(f)) -} diff --git a/pipelines/tekton/tasks.go b/pipelines/tekton/tasks.go index 485f4575..5e714b21 100644 --- a/pipelines/tekton/tasks.go +++ b/pipelines/tekton/tasks.go @@ -2,12 +2,17 @@ package tekton import ( pplnv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - corev1 "k8s.io/api/core/v1" ) -func taskFetchRepository() pplnv1beta1.PipelineTask { +const ( + taskNameFetchSources = "fetch-sources" + 
taskNameBuild = "build" + taskNameDeploy = "deploy" +) + +func taskFetchSources() pplnv1beta1.PipelineTask { return pplnv1beta1.PipelineTask{ - Name: "fetch-repository", + Name: taskNameFetchSources, TaskRef: &pplnv1beta1.TaskRef{ Name: "git-clone", }, @@ -24,9 +29,9 @@ func taskFetchRepository() pplnv1beta1.PipelineTask { func taskBuild(runAfter string) pplnv1beta1.PipelineTask { return pplnv1beta1.PipelineTask{ - Name: "build", + Name: taskNameBuild, TaskRef: &pplnv1beta1.TaskRef{ - Name: "buildpacks", + Name: "func-buildpacks", }, RunAfter: []string{runAfter}, Workspaces: []pplnv1beta1.WorkspacePipelineTaskBinding{ @@ -37,6 +42,10 @@ func taskBuild(runAfter string) pplnv1beta1.PipelineTask { { Name: "cache", Workspace: "cache-workspace", + }, + { + Name: "dockerconfig", + Workspace: "dockerconfig-workspace", }}, Params: []pplnv1beta1.Param{ {Name: "APP_IMAGE", Value: *pplnv1beta1.NewArrayOrString("$(params.imageName)")}, @@ -46,61 +55,9 @@ func taskBuild(runAfter string) pplnv1beta1.PipelineTask { } } -// TODO this should be part of the future func-build Tekton Task as a post-build step -func taskImageDigest(runAfter string) pplnv1beta1.PipelineTask { - script := `#!/usr/bin/env bash -set -e - -func_file="/workspace/source/func.yaml" -if [ "$(params.contextDir)" != "" ]; then - func_file="/workspace/source/$(params.contextDir)/func.yaml" -fi - -sed -i "s|^image:.*$|image: $(params.image)|" "$func_file" -echo "Function image name: $(params.image)" - -sed -i "s/^imageDigest:.*$/imageDigest: $(params.digest)/" "$func_file" -echo "Function image digest: $(params.digest)" - ` - +func taskDeploy(runAfter string) pplnv1beta1.PipelineTask { return pplnv1beta1.PipelineTask{ - Name: "image-digest", - TaskSpec: &pplnv1beta1.EmbeddedTask{ - TaskSpec: pplnv1beta1.TaskSpec{ - Workspaces: []pplnv1beta1.WorkspaceDeclaration{ - {Name: "source"}, - }, - Steps: []pplnv1beta1.Step{ - { - Container: corev1.Container{ - Image: 
"docker.io/library/bash:5.1.4@sha256:b208215a4655538be652b2769d82e576bc4d0a2bb132144c060efc5be8c3f5d6", - }, - Script: script, - }, - }, - Params: []pplnv1beta1.ParamSpec{ - {Name: "image"}, - {Name: "digest"}, - {Name: "contextDir"}, - }, - }, - }, - RunAfter: []string{runAfter}, - Workspaces: []pplnv1beta1.WorkspacePipelineTaskBinding{{ - Name: "source", - Workspace: "source-workspace", - }}, - Params: []pplnv1beta1.Param{ - {Name: "image", Value: *pplnv1beta1.NewArrayOrString("$(params.imageName)")}, - {Name: "digest", Value: *pplnv1beta1.NewArrayOrString("$(tasks.build.results.APP_IMAGE_DIGEST)")}, - {Name: "contextDir", Value: *pplnv1beta1.NewArrayOrString("$(params.contextDir)")}, - }, - } -} - -func taskFuncDeploy(runAfter string) pplnv1beta1.PipelineTask { - return pplnv1beta1.PipelineTask{ - Name: "deploy", + Name: taskNameDeploy, TaskRef: &pplnv1beta1.TaskRef{ Name: "func-deploy", },