diff --git a/Gopkg.lock b/Gopkg.lock
index 66763ac64..14fe30ca4 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -70,6 +70,14 @@
packages = ["pkg/event"]
revision = "2b0383b8e4d67ffac446b17a7922bf7e5d9f5362"
+[[projects]]
+ branch = "master"
+ digest = "1:d6415e6b744ec877c21fe734067636b9ee149af77276b08a3d33dd8698abf947"
+ name = "github.com/knative/test-infra"
+ packages = ["."]
+ pruneopts = "T"
+ revision = "4a4a682ee1fd31f33e450406393c3553b9ec5c2a"
+
[[projects]]
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
diff --git a/Gopkg.toml b/Gopkg.toml
index a80bbf0ac..74b37931f 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -1,6 +1,10 @@
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
+required = [
+ "github.com/knative/test-infra",
+]
+
ignored = [
"github.com/knative/docs/serving/samples/grpc-ping-go*",
]
@@ -9,3 +13,8 @@ ignored = [
go-tests = true
unused-packages = true
non-go = true
+
+[[prune.project]]
+ name = "github.com/knative/test-infra"
+ unused-packages = false
+ non-go = false
diff --git a/build/builds.md b/build/builds.md
index 7fbf2eb3b..91829be9b 100644
--- a/build/builds.md
+++ b/build/builds.md
@@ -17,6 +17,7 @@ A build runs until all `steps` have completed or until a failure occurs.
* [Source](#source)
* [Service Account](#service-account)
* [Volumes](#volumes)
+ * [Timeout](#timeout)
* [Examples](#examples)
---
@@ -47,6 +48,7 @@ following fields:
authentication information.
* [`volumes`](#volumes) - Specifies one or more volumes that you want to make
available to your build.
+ * [`timeout`](#timeout) - Specifies the timeout after which the build fails.
[kubernetes-overview]: https://kubernetes.io/docs/concepts/overview/working-with-objects/kubernetes-objects/#required-fields
@@ -156,7 +158,7 @@ complement the volumes that are implicitly
For example, use volumes to accomplish one of the following common tasks:
- * [Mount a Kubernetes secrets(./auth.md).
+ * [Mount a Kubernetes secret](./auth.md).
* Create an `emptyDir` volume to act as a cache for use across multiple build
steps. Consider using a persistent volume for inter-build caching.
@@ -164,6 +166,12 @@ For example, use volumes to accomplish one of the following common tasks:
* Mount a host's Docker socket to use a `Dockerfile` for container image
builds.
+#### Timeout
+
+Optional. Specifies the timeout for the build, including the time required to allocate resources and execute the build.
+
+* Defaults to 10 minutes.
+* Refer to [Go's ParseDuration documentation](https://golang.org/pkg/time/#ParseDuration) for the expected format.
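+* For example, `20m`, `90s`, and `1h30m` are all valid values.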
### Examples
@@ -179,6 +187,7 @@ additional code samples, including working copies of the following snippets:
* [Mounting extra volumes](#using-an-extra-volume)
* [Pushing an image](#using-steps-to-push-images)
* [Authenticating with `ServiceAccount`](#using-a-serviceaccount)
+* [Timeout](#using-timeout)
#### Using `git`
@@ -331,6 +340,22 @@ Note: For a working copy of this `ServiceAccount` example, see the
[build/test/git-ssh](https://github.com/knative/build/tree/master/test/git-ssh)
code sample.
+#### Using `timeout`
+
+Specifying `timeout` for your `build`:
+
+```yaml
+spec:
+ timeout: 20m
+ source:
+ git:
+ url: https://github.com/knative/build.git
+ revision: master
+ steps:
+ - image: ubuntu
+ args: ["cat", "README.md"]
+```
+
---
Except as otherwise noted, the content of this page is licensed under the
diff --git a/community/REVIEWING.md b/community/REVIEWING.md
index ec2118aa7..7915a1c8c 100644
--- a/community/REVIEWING.md
+++ b/community/REVIEWING.md
@@ -92,34 +92,13 @@ A PR can be merged only after the following criteria are met:
This project uses
[Prow](https://github.com/kubernetes/test-infra/tree/master/prow) to
-automatically run tests for every PR. PRs with failing tests might not b
-e merged. If necessary, you can rerun the tests by simply adding the comment
+automatically run tests for every PR. PRs with failing tests might not be
+merged. If necessary, you can rerun the tests by simply adding the comment
`/retest` to your PR.
Prow has several other features that make PR management easier, like running the
go linter or assigning labels. For a full list of commands understood by Prow,
-see the [command help
-page](https://prow-internal.gcpnode.com/command-help?repo=knative%2Fknative).
-
-### Viewing test logs
-
-The Prow instance is internal to Google, which means that only Google
-employees can access the "Details" link of the test job (provided by
-Prow in the PR thread). However, if you're a Knative team member outside
-Google, and you are a member of the
-[knative-dev@](https://groups.google.com/forum/#!forum/knative-dev)
-Google group, you can see the test logs by following these instructions:
-
-1. Wait for Prow to finish the test execution. Note the PR number.
-
-2. Open the URL http://gcsweb.k8s.io/gcs/knative-prow/pr-logs/pull/knative_serving/###/pull-knative-serving-@@@-tests/
-where ### is the PR number and @@@ is the test type (_build_, _unit_ or _integration_).
-
-3. You'll see one or more numbered directories. The highest number is the latest
-test execution (called "build" by Prow).
-
-4. The raw test log is the text file named `build-log.txt` inside each numbered
-directory.
+see the [command help page](https://prow.knative.dev/command-help).
---
diff --git a/community/ROLES.md b/community/ROLES.md
index 7059fb3c5..e34c31ee7 100644
--- a/community/ROLES.md
+++ b/community/ROLES.md
@@ -46,7 +46,7 @@ table describes:
Member
Regular active contributor in the community
-Sponsored by 2 reviewers
+Sponsored by two members
Has made multiple contributions to the project
diff --git a/community/SLACK-GUIDELINES.md b/community/SLACK-GUIDELINES.md
index 84b6744e4..637e7f350 100644
--- a/community/SLACK-GUIDELINES.md
+++ b/community/SLACK-GUIDELINES.md
@@ -12,6 +12,9 @@ video recording or in another public space. Please be courteous to others.
from these commands and we are a global project - please be kind.
Note: `@all` is only to be used by admins.
+You can join the Knative Slack instance at
+[https://slack.knative.dev](https://slack.knative.dev).
+
## Code of Conduct
The Knative [Code of Conduct](./CODE-OF-CONDUCT.md) applies throughout the
project, and includes all communication mediums.
diff --git a/community/WORKING-GROUPS.md b/community/WORKING-GROUPS.md
index aa1c349e9..04d06438e 100644
--- a/community/WORKING-GROUPS.md
+++ b/community/WORKING-GROUPS.md
@@ -23,8 +23,10 @@ The current working groups are:
* [API Core](#api-core)
* [Build](#build)
* [Events](#events)
+* [Networking](#networking)
+* [Observability](#observability)
+* [Productivity](#productivity)
* [Scaling](#scaling)
-* [Serving](#serving)
## API Core
@@ -51,8 +53,8 @@ Slack Channel | [#api](https://knative.slack.com)
Artifact | Link
-------------------------- | ----
Forum | [knative-dev@](https://groups.google.com/forum/#!forum/knative-dev)
-Community Meeting VC | [build-crd](https://hangouts.google.com/hangouts/_/google.com/build-crd)
-Community Meeting Calendar | Wednesdays 10:00a-10:30a PST [Calendar Invitation](https://calendar.google.com/event?action=TEMPLATE&tmeid=MTBkb3MwYnVrbDd0djE0a2kzcmpmbjZndm9fMjAxODA3MTFUMTcwMDAwWiBtYXR0bW9vckBnb29nbGUuY29t&tmsrc=mattmoor%40google.com)
+Community Meeting VC | [meet.google.com/hau-nwak-tgm](https://meet.google.com/hau-nwak-tgm) or dial in: (US) +1 219-778-6103 PIN: 573 000#
+Community Meeting Calendar | Wednesdays 10:00a-10:30a PST [Calendar Invitation](https://calendar.google.com/event?action=TEMPLATE&tmeid=MTBkb3MwYnVrbDd0djE0a2kzcmpmbjZndm9fMjAxODA4MTVUMTcwMDAwWiBqYXNvbmhhbGxAZ29vZ2xlLmNvbQ&tmsrc=jasonhall%40google.com&scp=ALL)
Meeting Notes | [Notes](https://docs.google.com/document/d/1e7gMVFlJfkFdTcaWj2qETeRD9kSBG2Vh8mASPmQMYC0/edit)
Document Folder | [Folder](https://drive.google.com/corp/drive/folders/1ov16HvPam-v_FXAGEaUdHok6_hUAoIoe)
Slack Channel | [#build-crd](https://knative.slack.com)
@@ -79,6 +81,42 @@ Slack Channel | [#eventing](https://knative.slack.com/messages/C9JP
------------------------------------------------------------- | ----------- | ------- | -------
| Ville Aikas | Google | [vaikas-google](https://github.com/vaikas-google)
+## Networking
+
+Inbound and outbound network connectivity for [serving](https://github.com/knative/serving) workloads.
+Specific areas of interest include: load balancing, routing, DNS configuration and TLS support.
+
+Artifact | Link
+-------------------------- | ----
+Forum | [knative-dev@](https://groups.google.com/forum/#!forum/knative-dev)
+Community Meeting VC | Coming soon
+Community Meeting Calendar | Coming soon
+Meeting Notes | [Notes](https://drive.google.com/open?id=1EE1t5mTfnTir2lEasdTMRNtuPEYuPqQCZbU3NC9mHOI)
+Document Folder | [Folder](https://drive.google.com/corp/drive/folders/1oVDYbcEDdQ9EpUmkK6gE4C7aZ8u6ujsN)
+Slack Channel | [#networking](https://knative.slack.com)
+
+ | Leads | Company | Profile
+--------------------------------------------------------- | ---------------- | ------- | -------
+ | Nghia Tran | Google | [tcnghia](https://github.com/tcnghia)
+ | Mustafa Demirhan | Google | [mdemirhan](https://github.com/mdemirhan)
+
+## Observability
+
+Logging, monitoring & tracing infrastructure
+
+Artifact | Link
+-------------------------- | ----
+Forum | [knative-dev@](https://groups.google.com/forum/#!forum/knative-dev)
+Community Meeting VC | [meet.google.com/kik-btis-sqz](https://meet.google.com/kik-btis-sqz) or dial in: (US) +1 515-705-3725 PIN: 704 774#
+Community Meeting Calendar | [Calendar Invitation](https://calendar.google.com/event?action=TEMPLATE&tmeid=MDc4ZnRkZjFtbzZhZzBmdDMxYXBrM3B1YTVfMjAxODA4MDJUMTczMDAwWiBtZGVtaXJoYW5AZ29vZ2xlLmNvbQ&tmsrc=mdemirhan%40google.com&scp=ALL)
+Meeting Notes | [Notes](https://drive.google.com/open?id=1vWEpjf093Jsih3mKkpIvmWWbUQPxFkcyDxzNH15rQgE)
+Document Folder | [Folder](https://drive.google.com/corp/drive/folders/10HcpZlI1PbFyzinO6HjfHbzCkBXrqXMy)
+Slack Channel | [#observability](https://knative.slack.com)
+
+ | Leads | Company | Profile
+--------------------------------------------------------- | ---------------- | ------- | -------
+ | Mustafa Demirhan | Google | [mdemirhan](https://github.com/mdemirhan)
+
## Scaling
Autoscaling
@@ -96,23 +134,23 @@ Slack Channel | [#autoscaling](https://knative.slack.com)
------------------------------------------------------------- | -------------- | ------- | -------
| Joseph Burnett | Google | [josephburnett](https://github.com/josephburnett)
-## Serving
+## Productivity
-Logging infra, Monitoring infra, Trace, Load balancing/Istio, Domain names,
-Routing, Observability
+Project health, test framework, continuous integration & deployment, release, performance/scale/load testing infrastructure
Artifact | Link
-------------------------- | ----
Forum | [knative-dev@](https://groups.google.com/forum/#!forum/knative-dev)
-Community Meeting VC | [TODO](TODO)
-Community Meeting Calendar | [Calendar Invitation](TODO)
-Meeting Notes | [Notes](TODO)
-Document Folder | [Folder](https://drive.google.com/corp/drive/folders/1pfcc6z8oQl6S7bOl1MnfZJ2o32FtgvRB)
-Slack Channel | [#metrics](https://knative.slack.com)
+Community Meeting VC | [Hangouts](https://meet.google.com/sps-vbhg-rfx)
+Community Meeting Calendar | [Calendar Invitation](https://calendar.google.com/event?action=TEMPLATE&tmeid=NW5zM21rbHVwZWgyNHFoMGpyY2JhMjB2bHRfMjAxODA4MzBUMjEwMDAwWiBnb29nbGUuY29tXzE4dW40ZnVoNnJva3FmOGhtZmZ0bTVvcXE0QGc&tmsrc=google.com_18un4fuh6rokqf8hmfftm5oqq4%40group.calendar.google.com&scp=ALL)
+Meeting Notes | [Notes](https://docs.google.com/document/d/1aPRwYGD4XscRIqlBzbNsSB886PJ0G-vZYUAAUjoydko)
+Document Folder | [Folder](https://drive.google.com/corp/drive/folders/1oMYB4LQHjySuMChmcWYCyhH7-CSkz2r_)
+Slack Channel | [#productivity](https://knative.slack.com)
- | Leads | Company | Profile
---------------------------------------------------------- | ---------------- | ------- | -------
- | Mustafa Demirhan | Google | [mdemirhan](https://github.com/mdemirhan)
+ | Leads | Company | Profile
+--------------------------------------------------------- | -------------- | ------- | -------
+ | Jessie Zhu | Google | [jessiezcc](https://github.com/jessiezcc)
+ | Adriano Cunha | Google | [adrcunha](https://github.com/adrcunha)
---
diff --git a/eventing/samples/event-flow/README.md b/eventing/samples/event-flow/README.md
index ea32ada3a..cc7f1a304 100644
--- a/eventing/samples/event-flow/README.md
+++ b/eventing/samples/event-flow/README.md
@@ -88,8 +88,8 @@ is a random number 1-10.
Now we want to consume these IoT events, so let's create the function to handle the events:
```shell
-kubectl apply -f event-flow/route.yaml
-kubectl apply -f event-flow/configuration.yaml
+kubectl apply -f route.yaml
+kubectl apply -f configuration.yaml
```
## Create an event source
@@ -103,10 +103,10 @@ in Pull mode to poll for the events from this topic.
Then let's create a GCP PubSub as an event source that we can bind to.
```shell
-kubectl apply -f event-flow/serviceaccount.yaml
-kubectl apply -f event-flow/serviceaccountbinding.yaml
-kubectl apply -f event-flow/eventsource.yaml
-kubectl apply -f event-flow/eventtype.yaml
+kubectl apply -f serviceaccount.yaml
+kubectl apply -f serviceaccountbinding.yaml
+kubectl apply -f eventsource.yaml
+kubectl apply -f eventtype.yaml
```
## Bind IoT events to our function
@@ -115,5 +115,5 @@ We have now created a function that we want to consume our IoT events, and we ha
source that's sending events via GCP PubSub, so let's wire the two together:
```shell
-kubectl apply -f event-flow/flow.yaml
+kubectl apply -f flow.yaml
```
diff --git a/eventing/samples/github-events/Dockerfile b/eventing/samples/github-events/Dockerfile
new file mode 100644
index 000000000..9f71b8506
--- /dev/null
+++ b/eventing/samples/github-events/Dockerfile
@@ -0,0 +1,27 @@
+# Copyright 2018 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM golang AS builder
+
+WORKDIR /go/src/github.com/knative/docs/
+ADD . /go/src/github.com/knative/docs/
+
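+# Disable cgo to produce a self-contained static binary for the minimal runtime image.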
+RUN CGO_ENABLED=0 go build ./eventing/samples/github-events
+
+FROM gcr.io/distroless/base
+
+COPY --from=builder /go/src/github.com/knative/docs/github-events /sample
+
+ENTRYPOINT ["/sample"]
+EXPOSE 8080
diff --git a/eventing/samples/github-events/README.md b/eventing/samples/github-events/README.md
new file mode 100644
index 000000000..49f5f1e4a
--- /dev/null
+++ b/eventing/samples/github-events/README.md
@@ -0,0 +1,180 @@
+# Reacting to GitHub Events
+
+In response to a pull request event, the sample's _legit_ `Service` will add
+`(looks pretty legit)` to the PR title.
+
+A GitHub webhook will be created on a repository and a Knative `Service` will be
+deployed to receive the webhook's event deliveries and forward them into a
+`Channel`, through a `Bus`, and out to the consumer via a `Subscription`. The
+`Flow` resource takes care of provisioning the webhook, the `Service`, the
+`Channel`, and the `Subscription`.
+
+## Prerequisites
+
+You will need:
+
+- A Kubernetes cluster with Knative serving installed. Follow the
+ [installation instructions](https://github.com/knative/docs/blob/master/install/README.md)
+ if you need to create one.
+- [Docker](https://www.docker.com/) installed and running on your local machine,
+ and a Docker Hub account configured (you'll use it for a container registry).
+- Knative eventing core installed on your Kubernetes cluster. You can install
+ with:
+ ```shell
+ kubectl apply -f https://storage.googleapis.com/knative-releases/eventing/latest/release.yaml
+ ```
+- A domain name that allows GitHub to call into the cluster: Follow the
+ [assign a static IP address](https://github.com/knative/docs/blob/master/serving/gke-assigning-static-ip-address.md)
+ and
+ [configure a custom domain](https://github.com/knative/docs/blob/master/serving/using-a-custom-domain.md)
+ instructions.
+
+## Configuring Knative
+
+To use this sample, you'll need to install the `stub` ClusterBus and the
+`github` EventSource:
+
+```shell
+# Installs ClusterBus
+kubectl apply -f https://storage.googleapis.com/knative-releases/eventing/latest/release-clusterbus-stub.yaml
+# Installs EventSource
+kubectl apply -f https://storage.googleapis.com/knative-releases/eventing/latest/release-source-github.yaml
+```
+
+## Granting permissions
+
+Because the `github` EventSource needs to create a Knative Service, you'll need
+to provision a special ServiceAccount with the necessary permissions.
+
+The `auth.yaml` file provisions a service
+account and creates a role that can create a Knative Service in the `default`
+namespace. In a production environment, you might want to limit the access of
+this service account to only specific namespaces.
+
+```shell
+kubectl apply -f auth.yaml
+```
+
+## Building and deploying the sample
+
+1. Use Docker to build the sample code into a container. To build and push with
+ Docker Hub, run the following commands, replacing `{username}` with your
+ Docker Hub username:
+
+ ```shell
+ # Build the container on your local machine
+ # Note: The relative path points to the _root_ of the `knative/docs` repo
+ docker build -t {username}/github-events --file=Dockerfile ../../../
+
+ # Push the container to docker registry
+ docker push {username}/github-events
+ ```
+
+1. After the build has completed and the container is pushed to Docker Hub, you
+ can deploy the function into your cluster. **Ensure that the container image
+ value in `function.yaml` matches the container you built in the previous
+ step.** Apply the configuration using `kubectl`:
+
+ ```shell
+ kubectl apply -f function.yaml
+ ```
+
+1. Check that your service is running using:
+
+ ```shell
+ kubectl get ksvc -o "custom-columns=NAME:.metadata.name,READY:.status.conditions[2].status,REASON:.status.conditions[2].message"
+ NAME READY REASON
+ legit True
+ ```
+
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
+
+1. Create a [personal access token](https://github.com/settings/tokens) for
+ the GitHub repo that the GitHub source can use to register webhooks with the
+ GitHub API (*accessToken*). Also decide on a token that your code will use to
+ authenticate the incoming webhooks from GitHub (*secretToken*).
+
+ The token can be named anything you find convenient. This sample requires
+ full `repo` control to be able to update the title of the _Pull Request_.
+ The Source requires `admin:repo_hook`, which allows it to create webhooks
+ in any repo that your account has access to. Copy and save this token;
+ GitHub will force you to generate a new one if this one is misplaced.
+
+ Here I named my token "EventingSample" and selected the recommended
+ scopes:
+
+ 
+
+ Update `githubsecret.yaml` with those
+ values. If your generated access token is `'asdfasfdsaf'` and you choose
+ your *secretToken* as `'personal_access_token_value'`, you'd modify
+ `githubsecret.yaml` like so:
+
+ ```yaml
+ apiVersion: v1
+ kind: Secret
+ metadata:
+ name: githubsecret
+ type: Opaque
+ stringData:
+ githubCredentials: >
+ {
+ "accessToken": "asdfasfdsaf",
+ "secretToken": "personal_access_token_value"
+ }
+ ```
+
+ Hint: you can make up a random *secretToken* with:
+
+ ```shell
+ head -c 8 /dev/urandom | base64
+ ```
+
+ Then, apply the githubsecret using `kubectl`:
+
+ ```shell
+ kubectl apply -f githubsecret.yaml
+ ```
+
+1. Update the `resource` field inside `flow.yaml` to the
+ org/repo of your choosing. Note that the personal access token must be valid
+ for the chosen org/repo.
+
+ Then create the flow sending GitHub Events to the service:
+
+ ```shell
+ kubectl apply -f flow.yaml
+ ```
+
+1. Create a PR for the repo you configured the webhook for, and you'll see that
+ the title is modified with the suffix `(looks pretty legit)`.
+
+
+## Understanding what happened
+
+`TODO: similar to k8s-events.`
+
+
+
+## Cleaning up
+
+To clean up the function, `Flow`, auth, and secret:
+
+```shell
+kubectl delete -f function.yaml
+kubectl delete -f flow.yaml
+kubectl delete -f auth.yaml
+kubectl delete -f githubsecret.yaml
+```
+
+And then delete the [personal access token](https://github.com/settings/tokens)
+created from GitHub.
diff --git a/eventing/samples/github-events/auth.yaml b/eventing/samples/github-events/auth.yaml
new file mode 100644
index 000000000..d4ed3e811
--- /dev/null
+++ b/eventing/samples/github-events/auth.yaml
@@ -0,0 +1,43 @@
+# Copyright 2018 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: feed-sa
+ namespace: default
+---
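+# This role grants full control over Knative Services in the default namespace.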
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: create-knative-service
+ namespace: default
+rules:
+- apiGroups: ["serving.knative.dev"]
+ resources: ["services"]
+ verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
+---
+# This enables the feed-sa to deploy the receive adapter.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: feed-sa-deploy
+ namespace: default
+subjects:
+ - kind: ServiceAccount
+ name: feed-sa
+ namespace: default
+roleRef:
+ kind: Role
+ name: create-knative-service
+ apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
diff --git a/eventing/samples/github-events/flow.yaml b/eventing/samples/github-events/flow.yaml
new file mode 100644
index 000000000..cdf114386
--- /dev/null
+++ b/eventing/samples/github-events/flow.yaml
@@ -0,0 +1,37 @@
+# Copyright 2018 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: flows.knative.dev/v1alpha1
+kind: Flow
+metadata:
+ name: github-flow
+ namespace: default
+spec:
+ serviceAccountName: feed-sa
+ trigger:
+ eventType: dev.knative.github.pullrequest
+ resource: / # TODO: update this
+ service: github
+ parameters:
+ secretName: githubsecret
+ secretKey: githubCredentials
+ parametersFrom:
+ - secretKeyRef:
+ name: githubsecret
+ key: githubCredentials
+ action:
+ target:
+ kind: Service
+ apiVersion: serving.knative.dev/v1alpha1
+ name: legit
diff --git a/eventing/samples/github-events/function.go b/eventing/samples/github-events/function.go
new file mode 100644
index 000000000..831f48f33
--- /dev/null
+++ b/eventing/samples/github-events/function.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "context"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "strings"
+
+ ghclient "github.com/google/go-github/github"
+ "github.com/knative/eventing/pkg/event"
+ "golang.org/x/oauth2"
+ "gopkg.in/go-playground/webhooks.v3/github"
+)
+
+const (
+ // Environment variable containing json credentials
+ envSecret = "GITHUB_SECRET"
+ // this is what we tack onto each PR title if not there already
+ titleSuffix = "looks pretty legit"
+)
+
+// GithubHandler holds necessary objects for communicating with GitHub.
+type GithubHandler struct {
+ client *ghclient.Client
+ ctx context.Context
+}
+
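+// GithubSecrets holds the GitHub credentials parsed from the GITHUB_SECRET environment variable.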
+type GithubSecrets struct {
+ AccessToken string `json:"accessToken"`
+ SecretToken string `json:"secretToken"`
+}
+
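+// newPullRequestPayload handles a pull request event, appending the title suffix to the PR title if it is not already present.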
+func (h *GithubHandler) newPullRequestPayload(ctx context.Context, pl *github.PullRequestPayload) {
+
+ title := pl.PullRequest.Title
+ log.Printf("GOT PR with Title: %q", title)
+
+ // Check the title and if it contains 'looks pretty legit' leave it alone
+ if strings.Contains(title, titleSuffix) {
+ // already modified, leave it alone.
+ return
+ }
+
+ newTitle := fmt.Sprintf("%s (%s)", title, titleSuffix)
+ updatedPR := ghclient.PullRequest{
+ Title: &newTitle,
+ }
+ newPR, response, err := h.client.PullRequests.Edit(h.ctx,
+ pl.Repository.Owner.Login, pl.Repository.Name, int(pl.Number), &updatedPR)
+ if err != nil {
+ log.Printf("Failed to update PR: %s\n%s", err, response)
+ return
+ }
+ if newPR.Title != nil {
+ log.Printf("New PR Title: %q", *newPR.Title)
+ } else {
+ log.Printf("New PR title is nil")
+ }
+}
+
+func main() {
+ flag.Parse()
+ githubSecrets := os.Getenv(envSecret)
+
+ var credentials GithubSecrets
+ err := json.Unmarshal([]byte(githubSecrets), &credentials)
+ if err != nil {
+ log.Fatalf("Failed to unmarshal credentials: %s", err)
+ return
+ }
+
+ // Set up the auth for being able to talk to Github.
+ ctx := context.Background()
+ ts := oauth2.StaticTokenSource(
+ &oauth2.Token{AccessToken: credentials.AccessToken},
+ )
+ tc := oauth2.NewClient(ctx, ts)
+
+ client := ghclient.NewClient(tc)
+
+ h := &GithubHandler{
+ client: client,
+ ctx: ctx,
+ }
+
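+ // Serve on port 8080, passing each incoming pull request event to the handler.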
+ log.Fatal(http.ListenAndServe(":8080", event.Handler(h.newPullRequestPayload)))
+}
diff --git a/eventing/samples/github-events/function.yaml b/eventing/samples/github-events/function.yaml
new file mode 100644
index 000000000..b553f2708
--- /dev/null
+++ b/eventing/samples/github-events/function.yaml
@@ -0,0 +1,34 @@
+# Copyright 2018 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: serving.knative.dev/v1alpha1
+kind: Service
+metadata:
+ name: legit
+spec:
+ runLatest:
+ configuration:
+ revisionTemplate:
+ metadata:
+ labels:
+ knative.dev/type: function
+ spec:
+ container:
+ image: docker.io/{username}/github-events # TODO: fill username out
+ env:
+ - name: GITHUB_SECRET
+ valueFrom:
+ secretKeyRef:
+ key: githubCredentials
+ name: githubsecret
diff --git a/eventing/samples/github-events/githubsecret.yaml b/eventing/samples/github-events/githubsecret.yaml
new file mode 100644
index 000000000..0a7f3da5e
--- /dev/null
+++ b/eventing/samples/github-events/githubsecret.yaml
@@ -0,0 +1,25 @@
+# Copyright 2018 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Secret
+metadata:
+ name: githubsecret
+type: Opaque
+stringData:
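+ # Fill in accessToken with your GitHub personal access token and secretToken with the webhook secret you chose.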
+ githubCredentials: >
+ {
+ "accessToken": "",
+ "secretToken": ""
+ }
diff --git a/eventing/samples/github-events/personal_access_token.png b/eventing/samples/github-events/personal_access_token.png
new file mode 100644
index 000000000..8f03ecc1d
Binary files /dev/null and b/eventing/samples/github-events/personal_access_token.png differ
diff --git a/eventing/samples/k8s-events/README.md b/eventing/samples/k8s-events/README.md
index d033646dd..6e56ecb4a 100644
--- a/eventing/samples/k8s-events/README.md
+++ b/eventing/samples/k8s-events/README.md
@@ -46,11 +46,12 @@ kubectl apply -f serviceaccount.yaml
1. Use Docker to build the sample code into a container. To build and push with
Docker Hub, run these commands replacing `{username}` with your Docker Hub
- username. Run the following from the _root_ of the `knative/docs` repo:
+ username:
```shell
# Build the container on your local machine
- docker build -t {username}/k8s-events --file=eventing/samples/k8s-events/Dockerfile .
+ # Note: The relative path points to the _root_ of the `knative/docs` repo
+ docker build -t {username}/k8s-events --file Dockerfile ../../../
# Push the container to docker registry
docker push {username}/k8s-events
@@ -62,21 +63,26 @@ kubectl apply -f serviceaccount.yaml
step.** Apply the configuration using `kubectl`:
```shell
- kubectl apply -f eventing/samples/k8s-events/function.yaml
+ kubectl apply -f function.yaml
```
1. Check that your service is running using:
```shell
- kubectl get services.serving.knative.dev -o "custom-columns=NAME:.metadata.name,READY:.status.conditions[2].status,REASON:.status.conditions[2].message"
+ kubectl get ksvc -o "custom-columns=NAME:.metadata.name,READY:.status.conditions[2].status,REASON:.status.conditions[2].message"
NAME READY REASON
read-k8s-events True
```
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
1. Create the flow sending Kubernetes Events to the service:
```shell
- kubectl apply -f eventing/samples/k8s-events/flow.yaml
+ kubectl apply -f flow.yaml
```
1. If you have the full knative install, you can read the function logs using
diff --git a/eventing/samples/k8s-events/function.yaml b/eventing/samples/k8s-events/function.yaml
index 46383e64f..59c966b90 100644
--- a/eventing/samples/k8s-events/function.yaml
+++ b/eventing/samples/k8s-events/function.yaml
@@ -24,4 +24,4 @@ spec:
spec:
container:
# Replace this image with your {username}/k8s-events container
- image: github.com/knative/docs/eventing/sample/k8s-events
+ image: github.com/knative/docs/eventing/samples/k8s-events
diff --git a/hack/update-deps.sh b/hack/update-deps.sh
new file mode 100644
index 000000000..987503fed
--- /dev/null
+++ b/hack/update-deps.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Copyright 2018 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/library.sh
+
+cd ${REPO_ROOT_DIR}
+
+# Ensure we have everything we need under vendor/
+dep ensure
+
+# Keep the only dir in knative/test-infra we're interested in
+find vendor/github.com/knative/test-infra -mindepth 1 -maxdepth 1 ! -name scripts -exec rm -fr {} \;
diff --git a/images/knative-version.png b/images/knative-version.png
new file mode 100644
index 000000000..f3b95ea8e
Binary files /dev/null and b/images/knative-version.png differ
diff --git a/install/Knative-with-AKS.md b/install/Knative-with-AKS.md
index 5c72d22d8..42e6fb392 100644
--- a/install/Knative-with-AKS.md
+++ b/install/Knative-with-AKS.md
@@ -69,7 +69,7 @@ environment variables. First determine which region you'd like to run AKS in, al
1. Set `RESOURCE_GROUP` and `LOCATION` variables:
```bash
- export LOCATION=east-us
+ export LOCATION=eastus
export RESOURCE_GROUP=knative-group
export CLUSTER_NAME=knative-cluster
```
@@ -123,7 +123,7 @@ Knative depends on Istio.
1. Install Istio:
```bash
- kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.0/third_party/istio-0.8.0/istio.yaml
+ kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/istio-0.8.0/istio.yaml
```
1. Label the default namespace with `istio-injection=enabled`:
```bash
@@ -142,26 +142,45 @@ rerun the command to see the current status.
> Note: Instead of rerunning the command, you can add `--watch` to the above
command to view the component's status updates in real time. Use CTRL + C to exit watch mode.
-## Installing Knative Serving
+## Installing Knative components
-1. Next, we will install [Knative Serving](https://github.com/knative/serving)
-and its dependencies:
+You can install the Knative Serving and Build components together, or Build on its own.
+
+### Installing Knative Serving and Build components
+
+1. Run the `kubectl apply` command to install Knative and its dependencies:
```bash
- kubectl apply -f https://github.com/knative/serving/releases/download/v0.1.0/release.yaml
+ kubectl apply -f https://github.com/knative/serving/releases/download/v0.1.1/release.yaml
```
-1. Monitor the Knative components, until all of the components show a `STATUS` of
-`Running`:
+1. Monitor the Knative components until all of the components show a
+ `STATUS` of `Running`:
```bash
kubectl get pods -n knative-serving
+ kubectl get pods -n knative-build
```
+### Installing Knative Build only
+
+1. Run the `kubectl apply` command to install
+ [Knative Build](https://github.com/knative/build) and its dependencies:
+ ```bash
+ kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/config/build/release.yaml
+ ```
+1. Monitor the Knative Build components until all of the components show a
+ `STATUS` of `Running`:
+ ```bash
+ kubectl get pods -n knative-build
+ ```
+
Just as with the Istio components, it will take a few seconds for the Knative
-components to be up and running; you can rerun the command to see the current status.
+components to be up and running; you can rerun the `kubectl get` command to see
+the current status.
> Note: Instead of rerunning the command, you can add `--watch` to the above
- command to view the component's status updates in real time. Use CTRL + C to exit watch mode.
+ command to view the component's status updates in real time. Use CTRL + C to
+ exit watch mode.
-You are now ready to deploy an app to your new Knative cluster.
+You are now ready to deploy an app or create a build in your new Knative
+cluster.
## Deploying an app
diff --git a/install/Knative-with-GKE.md b/install/Knative-with-GKE.md
index 25ffbb6ae..24abd4656 100644
--- a/install/Knative-with-GKE.md
+++ b/install/Knative-with-GKE.md
@@ -14,12 +14,14 @@ specifications for Knative on Google Cloud Platform.
This guide assumes you are using bash in a Mac or Linux environment; some
commands will need to be adjusted for use in a Windows environment.
-### Installing the Google Cloud SDK
+### Installing the Google Cloud SDK and `kubectl`
-1. If you already have `kubectl`, run `kubectl version` to check your client version.
-
-1. If you already have `gcloud` installed with the `kubectl` component later than
- v1.10, you can skip these steps.
+1. If you already have `gcloud` installed with `kubectl` version 1.10 or newer,
+ you can skip these steps.
+ > Tip: To check which version of `kubectl` you have installed, enter:
+ ```
+ kubectl version
+ ```
1. Download and install the `gcloud` command line tool:
https://cloud.google.com/sdk/install
@@ -117,7 +119,7 @@ Knative depends on Istio.
1. Install Istio:
```bash
- kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.0/third_party/istio-0.8.0/istio.yaml
+ kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/istio-0.8.0/istio.yaml
```
1. Label the default namespace with `istio-injection=enabled`:
```bash
@@ -137,30 +139,27 @@ rerun the command to see the current status.
## Installing Knative components
-You have the option to install and use only the Knative components that you
-want. You can install only the component of Knative if you need that
-functionality, for example Knative serving is not required to create and run
-builds.
+You can install the Knative Serving and Build components together, or Build on its own.
-### Installing Knative Serving
+### Installing Knative Serving and Build components
-1. Run the `kubectl apply` command to install
- [Knative Serving](https://github.com/knative/serving) and its dependencies:
+1. Run the `kubectl apply` command to install Knative and its dependencies:
```bash
- kubectl apply -f https://github.com/knative/serving/releases/download/v0.1.0/release.yaml
+ kubectl apply -f https://github.com/knative/serving/releases/download/v0.1.1/release.yaml
```
-1. Monitor the Knative serving components until all of the components show a
+1. Monitor the Knative components until all of the components show a
`STATUS` of `Running`:
```bash
kubectl get pods -n knative-serving
+ kubectl get pods -n knative-build
```
-### Installing Knative Build
+### Installing Knative Build only
1. Run the `kubectl apply` command to install
[Knative Build](https://github.com/knative/build) and its dependencies:
```bash
- kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.0/third_party/config/build/release.yaml
+ kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/config/build/release.yaml
```
1. Monitor the Knative Build components until all of the components show a
`STATUS` of `Running`:
@@ -178,7 +177,7 @@ the current status.
You are now ready to deploy an app or create a build in your new Knative
cluster.
-## Deploying apps or builds
+## What's next
Now that your cluster has Knative installed, you're ready to deploy an app or
create a build.
@@ -193,8 +192,8 @@ for getting started:
* You can view the available [sample apps](../serving/samples/README.md) and
deploy one of your choosing.
-* To get started by creating a build, see
- [Creating a simple Knative Build](../build/creating-builds.md)
+* You can follow the step-by-step
+ [Creating a simple Knative Build](../build/creating-builds.md) guide.
## Cleaning up
diff --git a/install/Knative-with-Gardener.md b/install/Knative-with-Gardener.md
index 68790b3ea..84d76852a 100644
--- a/install/Knative-with-Gardener.md
+++ b/install/Knative-with-Gardener.md
@@ -71,7 +71,7 @@ Knative depends on Istio.
1. Install Istio:
```bash
- kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.0/third_party/istio-0.8.0/istio.yaml
+ kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/istio-0.8.0/istio.yaml
```
2. Label the default namespace with `istio-injection=enabled`:
```bash
@@ -87,23 +87,45 @@ rerun the command to see the current status.
> command to view the component's status updates in real time. Use CTRL + C to
> exit watch mode.
-## Installing Knative Serving
+## Installing Knative components
-1. Next, we will install [Knative Serving](https://github.com/knative/serving)
- and its dependencies:
- `bash kubectl apply -f https://github.com/knative/serving/releases/download/v0.1.0/release.yaml`
-1. Monitor the Knative components, until all of the components show a `STATUS`
- of `Running`: `bash kubectl get pods -n knative-serving`
+You can install the Knative Serving and Build components together, or Build on its own.
+
+### Installing Knative Serving and Build components
+
+1. Run the `kubectl apply` command to install Knative and its dependencies:
+ ```bash
+ kubectl apply -f https://github.com/knative/serving/releases/download/v0.1.1/release.yaml
+ ```
+1. Monitor the Knative components until all of the components show a
+ `STATUS` of `Running`:
+ ```bash
+ kubectl get pods -n knative-serving
+ kubectl get pods -n knative-build
+ ```
+
+### Installing Knative Build only
+
+1. Run the `kubectl apply` command to install
+ [Knative Build](https://github.com/knative/build) and its dependencies:
+ ```bash
+ kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/config/build/release.yaml
+ ```
+1. Monitor the Knative Build components until all of the components show a
+ `STATUS` of `Running`:
+ ```bash
+ kubectl get pods -n knative-build
+ ```
+
Just as with the Istio components, it will take a few seconds for the Knative
-components to be up and running; you can rerun the command to see the current
-status.
+components to be up and running; you can rerun the `kubectl get` command to see
+the current status.
> Note: Instead of rerunning the command, you can add `--watch` to the above
-> command to view the component's status updates in real time. Use CTRL + C to
-> exit watch mode.
+ command to view the component's status updates in real time. Use CTRL + C to
+ exit watch mode.
-You are now ready to deploy an app to your new Knative cluster.
+You are now ready to deploy an app or create a build in your new Knative
+cluster.
## Alternative way to enable Knative with Gardener
@@ -137,10 +159,10 @@ spec:
And of course create the respective `ConfigMaps`:
```
-curl https://raw.githubusercontent.com/knative/serving/v0.1.0/third_party/istio-0.8.0/istio.yaml
+curl https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/istio-0.8.0/istio.yaml
kubectl create configmap istio-chart-080 --from-file=istio.yaml
-curl https://github.com/knative/serving/releases/download/v0.1.0/release.yaml
+curl https://github.com/knative/serving/releases/download/v0.1.1/release.yaml
kubectl create configmap knative-chart-001 --from-file=release.yaml
```
diff --git a/install/Knative-with-IKS.md b/install/Knative-with-IKS.md
index fe76404ea..3d16c75ff 100644
--- a/install/Knative-with-IKS.md
+++ b/install/Knative-with-IKS.md
@@ -126,7 +126,7 @@ Knative depends on Istio.
1. Install Istio:
```bash
- kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.0/third_party/istio-0.8.0/istio.yaml
+ kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/istio-0.8.0/istio.yaml
```
1. Label the default namespace with `istio-injection=enabled`:
```bash
@@ -145,28 +145,45 @@ rerun the command to see the current status.
> command to view the component's status updates in real time. Use CTRL+C to
> exit watch mode.
-## Installing Knative Serving
+## Installing Knative components
-1. Next, we will install [Knative Serving](https://github.com/knative/serving)
- and its dependencies:
+You can install the Knative Serving and Build components together, or Build on its own.
+
+### Installing Knative Serving and Build components
+
+1. Run the `kubectl apply` command to install Knative and its dependencies:
```bash
- kubectl apply -f https://github.com/knative/serving/releases/download/v0.1.0/release.yaml
+ kubectl apply -f https://github.com/knative/serving/releases/download/v0.1.1/release.yaml
```
-1. Monitor the Knative components until all of the components show a `STATUS`
- of `Running`:
+1. Monitor the Knative components until all of the components show a
+ `STATUS` of `Running`:
```bash
kubectl get pods -n knative-serving
+ kubectl get pods -n knative-build
```
+### Installing Knative Build only
+
+1. Run the `kubectl apply` command to install
+ [Knative Build](https://github.com/knative/build) and its dependencies:
+ ```bash
+ kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/config/build/release.yaml
+ ```
+1. Monitor the Knative Build components until all of the components show a
+ `STATUS` of `Running`:
+ ```bash
+ kubectl get pods -n knative-build
+ ```
+
Just as with the Istio components, it will take a few seconds for the Knative
-components to be up and running; you can rerun the command to see the current
-status.
+components to be up and running; you can rerun the `kubectl get` command to see
+the current status.
-> Note: Instead of re-running the command, you can add `--watch` to the above
-> command to view the component's status updates in real time. Use CTRL+C to
-> exit watch mode.
+> Note: Instead of rerunning the command, you can add `--watch` to the above
+ command to view the component's status updates in real time. Use CTRL + C to
+ exit watch mode.
-You are now ready to deploy an app to your new Knative cluster.
+You are now ready to deploy an app or create a build in your new Knative
+cluster.
## Deploying an app
diff --git a/install/Knative-with-Minikube.md b/install/Knative-with-Minikube.md
index 57a7b822c..45a5ee807 100644
--- a/install/Knative-with-Minikube.md
+++ b/install/Knative-with-Minikube.md
@@ -59,7 +59,7 @@ Knative depends on Istio. Run the following to install Istio. (We are changing
`LoadBalancer` to `NodePort` for the `istio-ingress` service).
```shell
-curl -L https://raw.githubusercontent.com/knative/serving/v0.1.0/third_party/istio-0.8.0/istio.yaml \
+curl -L https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/istio-0.8.0/istio.yaml \
| sed 's/LoadBalancer/NodePort/' \
| kubectl apply -f -
@@ -85,12 +85,12 @@ rerun the command to see the current status.
Next, install [Knative Serving](https://github.com/knative/serving):
Because you have limited resources available, use the
-`https://github.com/knative/serving/releases/download/v0.1.0/release-lite.yaml`
+`https://github.com/knative/serving/releases/download/v0.1.1/release-lite.yaml`
file, which omits some of the monitoring components to reduce the memory used by
the Knative components. To use the provided `release-lite.yaml` release, run:
```shell
-curl -L https://github.com/knative/serving/releases/download/v0.1.0/release-lite.yaml \
+curl -L https://github.com/knative/serving/releases/download/v0.1.1/release-lite.yaml \
| sed 's/LoadBalancer/NodePort/' \
| kubectl apply -f -
```
diff --git a/install/Knative-with-OpenShift.md b/install/Knative-with-OpenShift.md
new file mode 100644
index 000000000..da45eb504
--- /dev/null
+++ b/install/Knative-with-OpenShift.md
@@ -0,0 +1,212 @@
+# Knative Install on OpenShift
+
+This guide walks you through the installation of the latest version of [Knative
+Serving](https://github.com/knative/serving) on an
+[OpenShift](https://github.com/openshift/origin) cluster using pre-built images and
+demonstrates creating and deploying an image of a sample "hello world" app onto
+the newly created Knative cluster.
+
+You can find [guides for other platforms here](README.md).
+
+## Before you begin
+
+These instructions will run an OpenShift 3.10 (Kubernetes 1.10) cluster on your
+local machine using [`oc cluster up`](https://docs.openshift.org/latest/getting_started/administrators.html#running-in-a-docker-container)
+to test-drive Knative.
+
+## Install `oc` (the OpenShift CLI)
+
+You can install the latest version of `oc`, the OpenShift CLI, into your local
+directory by downloading the right release tarball for your OS from the
+[releases page](https://github.com/openshift/origin/releases/tag/v3.10.0).
+
+```shell
+export OS=
+curl -L https://github.com/openshift/origin/releases/download/v3.10.0/openshift-origin-client-tools-v3.10.0-dd10d17-$OS-64bit.tar.gz -o oc.tar.gz
+tar zvf oc.tar.gz -x openshift-origin-client-tools-v3.10.0-dd10d17-$OS-64bit/oc --strip=1
+
+# You will now have the oc binary in your local directory
+```
+
+## Scripted cluster setup and installation
+
+For Linux and Mac, you can optionally run a
+[script](scripts/knative-with-openshift.sh) that automates the steps on this
+page.
+
+Once you have `oc` present on your machine and in your `PATH`, you can simply
+run [this script](scripts/knative-with-openshift.sh); it will:
+
+- Create a new OpenShift cluster on your local machine with `oc cluster up`
+- Install Istio and Knative serving
+- Log you in as the cluster administrator
+- Set up the default namespace for istio autoinjection
+
+Once the script completes, you'll be ready to test out Knative!
+
+## Creating a new OpenShift cluster
+
+Create a new OpenShift cluster on your local machine using `oc cluster up`:
+
+```shell
+oc cluster up --write-config
+
+# Enable admission webhooks
+sed -i -e 's/"admissionConfig":{"pluginConfig":null}/"admissionConfig": {\
+ "pluginConfig": {\
+ "ValidatingAdmissionWebhook": {\
+ "configuration": {\
+ "apiVersion": "v1",\
+ "kind": "DefaultAdmissionConfig",\
+ "disable": false\
+ }\
+ },\
+ "MutatingAdmissionWebhook": {\
+ "configuration": {\
+ "apiVersion": "v1",\
+ "kind": "DefaultAdmissionConfig",\
+ "disable": false\
+ }\
+ }\
+ }\
+}/' openshift.local.clusterup/kube-apiserver/master-config.yaml
+
+oc cluster up --server-loglevel=5
+```
+
+Once the cluster is up, login as the cluster administrator:
+
+```shell
+oc login -u system:admin
+```
+
+Now, we'll set up the default project for use with Knative.
+
+```shell
+oc project default
+
+# SCCs (Security Context Constraints) are the precursor to the PSP (Pod
+# Security Policy) mechanism in Kubernetes.
+oc adm policy add-scc-to-user privileged -z default -n default
+
+oc label namespace default istio-injection=enabled
+```
+
+## Installing Istio
+
+Knative depends on Istio. First, run the following to grant the necessary
+privileges to the service accounts Istio will use:
+
+```shell
+oc adm policy add-scc-to-user anyuid -z istio-ingress-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z default -n istio-system
+oc adm policy add-scc-to-user anyuid -z prometheus -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-egressgateway-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-citadel-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-ingressgateway-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-cleanup-old-ca-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-mixer-post-install-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-mixer-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-pilot-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-sidecar-injector-service-account -n istio-system
+oc adm policy add-cluster-role-to-user cluster-admin -z istio-galley-service-account -n istio-system
+```
+
+Run the following to install Istio:
+
+```shell
+curl -L https://storage.googleapis.com/knative-releases/serving/latest/istio.yaml \
+ | sed 's/LoadBalancer/NodePort/' \
+ | oc apply -f -
+```
+
+Monitor the Istio components until all of the components show a `STATUS` of
+`Running` or `Completed`:
+
+```shell
+oc get pods -n istio-system
+```
+
+It will take a few minutes for all the components to be up and running; you can
+rerun the command to see the current status.
+
+> Note: Instead of rerunning the command, you can add `--watch` to the above
+ command to view the component's status updates in real time. Use CTRL+C to exit watch mode.
+
+## Installing Knative Serving
+
+Next, we'll install [Knative Serving](https://github.com/knative/serving).
+
+First, run the following to grant the necessary privileges to the service
+accounts that Knative will use:
+
+```shell
+oc adm policy add-scc-to-user anyuid -z build-controller -n knative-build
+oc adm policy add-scc-to-user anyuid -z controller -n knative-serving
+oc adm policy add-scc-to-user anyuid -z autoscaler -n knative-serving
+oc adm policy add-scc-to-user anyuid -z kube-state-metrics -n monitoring
+oc adm policy add-scc-to-user anyuid -z node-exporter -n monitoring
+oc adm policy add-scc-to-user anyuid -z prometheus-system -n monitoring
+oc adm policy add-cluster-role-to-user cluster-admin -z build-controller -n knative-build
+oc adm policy add-cluster-role-to-user cluster-admin -z controller -n knative-serving
+```
+
+Next, install Knative:
+
+```shell
+curl -L https://storage.googleapis.com/knative-releases/serving/latest/release-lite.yaml \
+ | sed 's/LoadBalancer/NodePort/' \
+ | oc apply -f -
+```
+
+Monitor the Knative components until all of the components show a `STATUS` of
+`Running`:
+
+```shell
+oc get pods -n knative-serving
+```
+
+Just as with the Istio components, it will take a few seconds for the Knative
+components to be up and running; you can rerun the command to see the current status.
+
+> Note: Instead of rerunning the command, you can add `--watch` to the above
+ command to view the component's status updates in real time. Use CTRL+C to exit watch mode.
+
+Now you can deploy an app to your newly created Knative cluster.
+
+## Deploying an app
+
+Now that your cluster has Knative installed, you're ready to deploy an app.
+
+If you'd like to follow a step-by-step guide for deploying your first app on
+Knative, check out the
+[Getting Started with Knative App Deployment](getting-started-knative-app.md)
+guide.
+
+If you'd like to view the available sample apps and deploy one of your choosing,
+head to the [sample apps](../serving/samples/README.md) repo.
+
+> Note: When looking up the IP address to use for accessing your app, you need to look up
+ the NodePort for the `knative-ingressgateway` as well as the IP address used for OpenShift.
+ You can use the following command to look up the value to use for the {IP_ADDRESS} placeholder
+ used in the samples:
+
+ ```shell
+ export IP_ADDRESS=$(oc get node -o 'jsonpath={.items[0].status.addresses[0].address}'):$(oc get svc knative-ingressgateway -n istio-system -o 'jsonpath={.spec.ports[?(@.port==80)].nodePort}')
+ ```
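+
+ For example, if you deployed the `helloworld-go` sample app and are using the
+ default `example.com` domain (adjust the host header for your own app and
+ domain), you could then reach the app with:
+
+ ```shell
+ curl -H "Host: helloworld-go.default.example.com" http://${IP_ADDRESS}
+ ```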
+
+## Cleaning up
+
+Delete your test cluster by running:
+
+```shell
+oc cluster down
+rm -rf openshift.local.clusterup
+```
+
+---
+
+Except as otherwise noted, the content of this page is licensed under the
+[Creative Commons Attribution 4.0 License](https://creativecommons.org/licenses/by/4.0/),
+and code samples are licensed under the
+[Apache 2.0 License](https://www.apache.org/licenses/LICENSE-2.0).
diff --git a/install/Knative-with-PKS.md b/install/Knative-with-PKS.md
index 11eb219eb..a78189710 100644
--- a/install/Knative-with-PKS.md
+++ b/install/Knative-with-PKS.md
@@ -32,7 +32,7 @@ Knative depends on Istio. Istio workloads require privileged mode for Init Conta
1. Install Istio:
```bash
- kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.0/third_party/istio-0.8.0/istio.yaml
+ kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/istio-0.8.0/istio.yaml
```
1. Label the default namespace with `istio-injection=enabled`:
```bash
@@ -50,26 +50,45 @@ rerun the command to see the current status.
> Note: Instead of rerunning the command, you can add `--watch` to the above
command to view the component's status updates in real time. Use CTRL + C to exit watch mode.
-## Installing Knative Serving
+## Installing Knative components
-1. Next, we will install [Knative Serving](https://github.com/knative/serving)
-and its dependencies:
+You can install the Knative Serving and Build components together, or Build on its own.
+
+### Installing Knative Serving and Build components
+
+1. Run the `kubectl apply` command to install Knative and its dependencies:
```bash
- kubectl apply -f https://github.com/knative/serving/releases/download/v0.1.0/release.yaml
+ kubectl apply -f https://github.com/knative/serving/releases/download/v0.1.1/release.yaml
```
-1. Monitor the Knative components, until all of the components show a `STATUS` of
-`Running`:
+1. Monitor the Knative components until all of the components show a
+ `STATUS` of `Running`:
```bash
kubectl get pods -n knative-serving
+ kubectl get pods -n knative-build
```
+### Installing Knative Build only
+
+1. Run the `kubectl apply` command to install
+ [Knative Build](https://github.com/knative/build) and its dependencies:
+ ```bash
+ kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/config/build/release.yaml
+ ```
+1. Monitor the Knative Build components until all of the components show a
+ `STATUS` of `Running`:
+ ```bash
+ kubectl get pods -n knative-build
+   ```
+
Just as with the Istio components, it will take a few seconds for the Knative
-components to be up and running; you can rerun the command to see the current status.
+components to be up and running; you can rerun the `kubectl get` command to see
+the current status.
> Note: Instead of rerunning the command, you can add `--watch` to the above
- command to view the component's status updates in real time. Use CTRL + C to exit watch mode.
+ command to view the component's status updates in real time. Use CTRL + C to
+ exit watch mode.
-You are now ready to deploy an app to your new Knative cluster.
+You are now ready to deploy an app or create a build in your new Knative
+cluster.
## Deploying an app
diff --git a/install/Knative-with-any-k8s.md b/install/Knative-with-any-k8s.md
index fbcf20678..5c2ea6467 100644
--- a/install/Knative-with-any-k8s.md
+++ b/install/Knative-with-any-k8s.md
@@ -19,7 +19,7 @@ Containers
1. Install Istio:
```bash
- kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.0/third_party/istio-0.8.0/istio.yaml
+ kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/istio-0.8.0/istio.yaml
```
1. Label the default namespace with `istio-injection=enabled`:
```bash
@@ -35,28 +35,45 @@ rerun the command to see the current status.
> command to view the component's status updates in real time. Use CTRL + C to
> exit watch mode.
-## Installing Knative Serving
+## Installing Knative components
-1. Next, we will install [Knative Serving](https://github.com/knative/serving)
- and its dependencies:
+You can install the Knative Serving and Build components together, or Build on its own.
+
+### Installing Knative Serving and Build components
+
+1. Run the `kubectl apply` command to install Knative and its dependencies:
```bash
- kubectl apply -f https://github.com/knative/serving/releases/download/v0.1.0/release.yaml
+ kubectl apply -f https://github.com/knative/serving/releases/download/v0.1.1/release.yaml
```
-1. Monitor the Knative components, until all of the components show a `STATUS`
- of `Running`:
+1. Monitor the Knative components until all of the components show a
+ `STATUS` of `Running`:
```bash
kubectl get pods -n knative-serving
+ kubectl get pods -n knative-build
```
+### Installing Knative Build only
+
+1. Run the `kubectl apply` command to install
+ [Knative Build](https://github.com/knative/build) and its dependencies:
+ ```bash
+ kubectl apply -f https://raw.githubusercontent.com/knative/serving/v0.1.1/third_party/config/build/release.yaml
+ ```
+1. Monitor the Knative Build components until all of the components show a
+ `STATUS` of `Running`:
+ ```bash
+ kubectl get pods -n knative-build
+   ```
+
Just as with the Istio components, it will take a few seconds for the Knative
-components to be up and running; you can rerun the command to see the current
-status.
+components to be up and running; you can rerun the `kubectl get` command to see
+the current status.
> Note: Instead of rerunning the command, you can add `--watch` to the above
-> command to view the component's status updates in real time. Use CTRL + C to
-> exit watch mode.
+ command to view the component's status updates in real time. Use CTRL + C to
+ exit watch mode.
-You are now ready to deploy an app to your new Knative cluster.
+You are now ready to deploy an app or create a build in your new Knative
+cluster.
## Deploying an app
diff --git a/install/README.md b/install/README.md
index 8b4218bdd..da3d29337 100644
--- a/install/README.md
+++ b/install/README.md
@@ -9,7 +9,7 @@ sure which Kubernetes platform is right for you, see
[Picking the Right Solution](https://kubernetes.io/docs/setup/pick-right-solution/).
We provide information for installing Knative on
-[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/docs/), [IBM Cloud Kubernetes Service](https://www.ibm.com/cloud/container-service), [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/), [Minikube](https://kubernetes.io/docs/setup/minikube/) and [Pivotal Container Service](https://pivotal.io/platform/pivotal-container-service) clusters.
+[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/docs/), [IBM Cloud Kubernetes Service](https://www.ibm.com/cloud/container-service), [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/aks/), [Minikube](https://kubernetes.io/docs/setup/minikube/), [OpenShift](https://github.com/openshift/origin) and [Pivotal Container Service](https://pivotal.io/platform/pivotal-container-service) clusters.
## Installing Knative
@@ -21,6 +21,7 @@ Knative components on the following platforms:
* [Knative Install on Google Kubernetes Engine](Knative-with-GKE.md)
* [Knative Install on IBM Cloud Kubernetes Service](Knative-with-IKS.md)
* [Knative Install on Minikube](Knative-with-Minikube.md)
+* [Knative Install on OpenShift](Knative-with-OpenShift.md)
* [Knative Install on Pivotal Container Service](Knative-with-PKS.md)
If you already have a Kubernetes cluster you're comfortable installing
@@ -50,6 +51,10 @@ and set up an Istio IP range for outbound network access:
* [Configure outbound network access](../serving/outbound-network-access.md)
* [Configuring HTTPS with a custom certificate](../serving/using-an-ssl-cert.md)
+## Checking the Version of Your Knative Serving Installation
+
+* [Checking the version of your Knative Serving installation](check-install-version.md)
+
---
Except as otherwise noted, the content of this page is licensed under the
diff --git a/install/check-install-version.md b/install/check-install-version.md
new file mode 100644
index 000000000..bb15e7952
--- /dev/null
+++ b/install/check-install-version.md
@@ -0,0 +1,37 @@
+# Checking the Version of Your Knative Serving Installation
+
+If you want to check what version of Knative Serving you have installed,
+enter the following command:
+
+```bash
+kubectl describe deploy controller -n knative-serving
+```
+
+This will return the description for the `knative-serving` controller; this
+information contains the link to the container that was used to install Knative:
+
+```yaml
+...
+Pod Template:
+ Labels: app=controller
+ Annotations: sidecar.istio.io/inject=false
+ Service Account: controller
+ Containers:
+ controller:
+ # Link to container used for Knative install
+ Image: gcr.io/knative-releases/github.com/knative/serving/cmd/controller@sha256:59abc8765d4396a3fc7cac27a932a9cc151ee66343fa5338fb7146b607c6e306
+...
+```
+
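+If you only want the image reference itself, a JSONPath query is a handy
+shortcut; this is a sketch that assumes the same `controller` deployment
+described above:
+
+```bash
+# Print just the controller image used for the Knative install.
+kubectl get deploy controller -n knative-serving \
+  -o jsonpath='{.spec.template.spec.containers[0].image}'
+```
+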
+Copy the full `gcr.io` link to the container and paste it into your browser.
+If you are already signed in to a Google account, you'll be taken to the Google
+Container Registry page for that container in the Google Cloud Platform console.
+If you aren't already signed in, you'll need to sign in to a Google account
+before you can view the container details.
+
+On the container details page, you'll see a section titled
+"Container classification," and in that section is a list of tags. The versions
+of Knative you have installed will appear in the list as `v0.1.1`, or whatever
+version you have installed:
+
+
\ No newline at end of file
diff --git a/install/getting-started-knative-app.md b/install/getting-started-knative-app.md
index dd91fb7cd..56f4b1ce7 100644
--- a/install/getting-started-knative-app.md
+++ b/install/getting-started-knative-app.md
@@ -77,16 +77,26 @@ Now that your service is created, Knative will perform the following steps:
To see if your app has been deployed succesfully, you need the host URL and
IP address created by Knative.
-1. To find the IP address for your service, enter
- `kubectl get svc knative-ingressgateway -n istio-system`. If your cluster is
- new, it can take sometime for the service to get asssigned an external IP address.
+Note: If your cluster is new, it can take some time before the service is
+assigned an external IP address.
+
+1. To find the IP address for your service, enter:
+
+ ```shell
+ kubectl get svc knative-ingressgateway -n istio-system
+
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ knative-ingressgateway LoadBalancer 10.23.247.74 35.203.155.229 80:32380/TCP,443:32390/TCP,32400:32400/TCP 2d
+
+ ```
+ Take note of the `EXTERNAL-IP` address.
+
+ You can also export the IP address as a variable with the following command:
```shell
export IP_ADDRESS=$(kubectl get svc knative-ingressgateway -n istio-system -o 'jsonpath={.status.loadBalancer.ingress[0].ip}')
```
-
-
> Note: if you use minikube or a baremetal cluster that has no external load balancer, the
`EXTERNAL-IP` field is shown as ``. You need to use `NodeIP` and `NodePort` to
interact your app instead. To get your app's `NodeIP` and `NodePort`, enter the following command:
@@ -97,25 +107,47 @@ IP address created by Knative.
1. To find the host URL for your service, enter:
```shell
- export HOST_URL=$(kubectl get services.serving.knative.dev helloworld-go -o jsonpath='{.status.domain}')
+ kubectl get ksvc helloworld-go -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
+ NAME DOMAIN
+ helloworld-go helloworld-go.default.example.com
+ ```
+
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](check-install-version.md)
+ to learn how to see what version you have installed.
+
+ You can also export the host URL as a variable using the following command:
+
+ ```shell
+ export HOST_URL=$(kubectl get ksvc helloworld-go -o jsonpath='{.status.domain}')
```
If you changed the name from `helloworld-go` to something else when creating
- the the `.yaml` file, replace `helloworld-go` in the above command with the
+ the `.yaml` file, replace `helloworld-go` in the above commands with the
name you entered.
-1. Now you can make a request to your app to see the results. Replace
+1. Now you can make a request to your app and see the results. Replace
`IP_ADDRESS` with the `EXTERNAL-IP` you wrote down, and replace
`helloworld-go.default.example.com` with the domain returned in the previous
step.
-
- If you deployed your own app, you may want to customize this cURL
- request to interact with your application.
+
+ ```shell
+ curl -H "Host: helloworld-go.default.example.com" http://IP_ADDRESS
+ Hello World: Go Sample v1!
+ ```
+
+ If you exported the host URL and IP address as variables in the previous steps, you
+ can use those variables to simplify your cURL request:
```shell
curl -H "Host: ${HOST_URL}" http://${IP_ADDRESS}
Hello World: Go Sample v1!
```
+
+ If you deployed your own app, you might want to customize this cURL
+ request to interact with your application.
It can take a few seconds for Knative to scale up your application and return
a response.
diff --git a/install/scripts/knative-with-openshift.sh b/install/scripts/knative-with-openshift.sh
new file mode 100755
index 000000000..7bacbad70
--- /dev/null
+++ b/install/scripts/knative-with-openshift.sh
@@ -0,0 +1,94 @@
+#!/usr/bin/env bash
+
+# Turn colors in this script off by setting the NO_COLOR variable in your
+# environment to any value:
+#
+# $ NO_COLOR=1 test.sh
+NO_COLOR=${NO_COLOR:-""}
+if [ -z "$NO_COLOR" ]; then
+ header=$'\e[1;33m'
+ reset=$'\e[0m'
+else
+ header=''
+ reset=''
+fi
+
+function header_text {
+ echo "$header$*$reset"
+}
+
+header_text "Starting Knative test-drive on OpenShift!"
+
+echo "Using oc version:"
+oc version
+
+header_text "Writing config"
+oc cluster up --write-config
+sed -i -e 's/"admissionConfig":{"pluginConfig":null}/"admissionConfig": {\
+ "pluginConfig": {\
+ "ValidatingAdmissionWebhook": {\
+ "configuration": {\
+ "apiVersion": "v1",\
+ "kind": "DefaultAdmissionConfig",\
+ "disable": false\
+ }\
+ },\
+ "MutatingAdmissionWebhook": {\
+ "configuration": {\
+ "apiVersion": "v1",\
+ "kind": "DefaultAdmissionConfig",\
+ "disable": false\
+ }\
+ }\
+ }\
+}/' openshift.local.clusterup/kube-apiserver/master-config.yaml
+
+header_text "Starting OpenShift with 'oc cluster up'"
+oc cluster up --server-loglevel=5
+
+header_text "Logging in as system:admin and setting up default namespace"
+oc login -u system:admin
+oc project default
+oc adm policy add-scc-to-user privileged -z default -n default
+oc label namespace default istio-injection=enabled
+
+header_text "Setting up security policy for istio"
+oc adm policy add-scc-to-user anyuid -z istio-ingress-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z default -n istio-system
+oc adm policy add-scc-to-user anyuid -z prometheus -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-egressgateway-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-citadel-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-ingressgateway-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-cleanup-old-ca-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-mixer-post-install-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-mixer-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-pilot-service-account -n istio-system
+oc adm policy add-scc-to-user anyuid -z istio-sidecar-injector-service-account -n istio-system
+oc adm policy add-cluster-role-to-user cluster-admin -z istio-galley-service-account -n istio-system
+
+header_text "Installing istio"
+curl -L https://storage.googleapis.com/knative-releases/serving/latest/istio.yaml \
+ | sed 's/LoadBalancer/NodePort/' \
+ | oc apply -f -
+
+header_text "Waiting for istio to become ready"
+sleep 5; while echo && oc get pods -n istio-system | grep -v -E "(Running|Completed|STATUS)"; do sleep 5; done
+
+header_text "Setting up security policy for knative"
+oc adm policy add-scc-to-user anyuid -z build-controller -n knative-build
+oc adm policy add-scc-to-user anyuid -z controller -n knative-serving
+oc adm policy add-scc-to-user anyuid -z autoscaler -n knative-serving
+oc adm policy add-scc-to-user anyuid -z kube-state-metrics -n monitoring
+oc adm policy add-scc-to-user anyuid -z node-exporter -n monitoring
+oc adm policy add-scc-to-user anyuid -z prometheus-system -n monitoring
+oc adm policy add-cluster-role-to-user cluster-admin -z build-controller -n knative-build
+oc adm policy add-cluster-role-to-user cluster-admin -z controller -n knative-serving
+
+header_text "Installing Knative"
+curl -L https://storage.googleapis.com/knative-releases/serving/latest/release-lite.yaml \
+ | sed 's/LoadBalancer/NodePort/' \
+ | oc apply -f -
+
+header_text "Waiting for Knative to become ready"
+sleep 5; while echo && oc get pods -n knative-serving | grep -v -E "(Running|Completed|STATUS)"; do sleep 5; done
+
diff --git a/resources.md b/resources.md
new file mode 100644
index 000000000..ff0955fa9
--- /dev/null
+++ b/resources.md
@@ -0,0 +1,131 @@
+# Resources
+
+This page contains information about various tools and technologies
+that are useful to anyone developing on Knative.
+
+## Community Resources
+
+This section contains tools and technologies developed by members of the
+Knative community specifically for use with Knative.
+
+### [`knctl`](https://github.com/cppforlife/knctl)
+
+`knctl` is an under-development CLI for working with Knative.
+
+## Other Resources
+
+This section contains other tools and technologies that are useful when
+working with Knative.
+
+### [`go-containerregistry`](https://github.com/google/go-containerregistry/)
+
+`go-containerregistry` is a Go library used by `ko`, `kaniko`, `skaffold` and
+others, which enables support for pushing, pulling and managing images in a
+container image registry, without requiring Docker to be installed.
+
+It also provides packages to interact with images in a local Docker daemon,
+which does require that Docker be installed.
+
+This library also provides a CLI tool called
+[`crane`](https://github.com/google/go-containerregistry/blob/master/cmd/crane/doc/crane.md),
+which can be used to interact with and inspect images in a registry.
+
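+As a rough illustration of the CLI, `crane` can inspect a repository directly;
+the repository name below is a placeholder:
+
+```shell
+# List the tags that have been pushed to a repository.
+crane ls gcr.io/my-project/my-image
+
+# Print the digest that a tag currently points to.
+crane digest gcr.io/my-project/my-image:latest
+```
+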
+### [`jib`](https://github.com/GoogleContainerTools/jib)
+
+`jib` is a tool, packaged as a Maven plugin and a Gradle plugin, that
+efficiently builds container images from Java source, without a Dockerfile,
+without requiring access to the Docker daemon.
+
+Like `ko`, when `jib` is invoked, it builds your Java source and pushes an
+image with that built source atop a
+[distroless](https://github.com/GoogleContainerTools/distroless) base image to
+produce small images that support fast incremental image builds.
+
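+As a sketch, the Maven flavor is typically invoked like this, where
+`gcr.io/my-project/my-app` is a placeholder for the image you want to push:
+
+```shell
+# Compile the project and push the resulting image, no Docker daemon needed.
+mvn compile jib:build -Dimage=gcr.io/my-project/my-app
+```
+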
+There are `BuildTemplate`s that wrap `jib` for use with Maven and Gradle, at
+https://github.com/knative/build-templates/blob/master/jib/. They expect that
+your `pom.xml` or `build.gradle` describes to `jib` where to push your image.
+The build templates take no parameters.
+
+### [`kaniko`](https://github.com/GoogleContainerTools/kaniko)
+
+`kaniko` is a tool that enables building a container image from source using
+the Dockerfile format, without requiring access to a Docker daemon. Removing
+this requirement means that `kaniko` is [safe to run on a Kubernetes
+cluster](https://github.com/kubernetes/kubernetes/issues/1806).
+
+By contrast, building an image using `docker build` necessarily requires the
+Docker daemon, which would give the build complete access to your entire
+cluster. So that's a very bad idea.
+
+`kaniko` expects to run inside a container, so it's a natural fit for the Build
+CRD [builder contract](...). `kaniko` is available as a builder at
+`gcr.io/kaniko-project/executor:latest`, and there's a `BuildTemplate` that
+wraps it at
+https://github.com/knative/build-templates/blob/master/kaniko/kaniko.yaml. It
+exposes one required parameter, `IMAGE`, which describes the name of the image
+to push to.
+
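+As a sketch of how the template might be wired up (assuming the kaniko
+`BuildTemplate` is already installed in your cluster, and with placeholder
+repository and image names), a `Build` could look like this:
+
+```shell
+cat <<EOF | kubectl apply -f -
+apiVersion: build.knative.dev/v1alpha1
+kind: Build
+metadata:
+  name: kaniko-build
+spec:
+  source:
+    git:
+      url: https://github.com/my-user/my-app
+      revision: master
+  template:
+    name: kaniko
+    arguments:
+    - name: IMAGE
+      value: gcr.io/my-project/my-app
+EOF
+```
+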
+More information here:
+https://github.com/knative/build-templates/tree/master/kaniko
+
+`kaniko` is unrelated to `ko`.
+
+### [`ko`](https://github.com/google/go-containerregistry/tree/master/cmd/ko)
+
+`ko` is a tool designed to make development of Go apps on Kubernetes easier, by
+abstracting away the container image being used, and instead referring to Go
+packages by their [import paths](https://golang.org/doc/code.html#ImportPaths)
+(e.g., `github.com/knative/serving/cmd/controller`).
+
+The typical usage is `ko apply -f config.yaml`, which reads in the config YAML,
+and looks for Go import paths representing runnable commands (i.e., `package
+main`). When it finds a matching import path, `ko` builds the package using `go
+build` then pushes a container image containing that binary on top of a base
+image (by default, `gcr.io/distroless/base`) to
+`$KO_DOCKER_REPO/unique-string`. After pushing those images, `ko` replaces
+instances of matched import paths with fully-qualified references to the images
+it pushed.
+
+So if `ko apply` was passed this config:
+
+```yaml
+...
+image: github.com/my/repo/cmd/foo
+...
+```
+
+...it would produce YAML like:
+
+```yaml
+...
+image: gcr.io/my-docker-repo/foo-zyxwvut@sha256:abcdef # image by digest
+...
+```
+
+(This assumes that you have set the environment variable
+`KO_DOCKER_REPO=gcr.io/my-docker-repo`)
+
+`ko apply` then passes this generated YAML config to `kubectl apply`.
+
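+Putting it together, a minimal session might look like the following, assuming
+`gcr.io/my-docker-repo` is a registry you can push to:
+
+```shell
+# Tell ko where to push the images it builds.
+export KO_DOCKER_REPO=gcr.io/my-docker-repo
+
+# Build and push images for all main packages referenced by the configs,
+# then kubectl apply the rewritten YAML.
+ko apply -f config/
+```
+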
+`ko` also supports:
+* `ko publish` to simply push images and not produce configs.
+* `ko resolve` to push images and output the generated configs, but not
+`kubectl apply` them.
+* `ko delete` to simply passthrough to `kubectl delete` for convenience.
+
+`ko` is used during development and release of Knative components, but is not
+intended to be required for _users_ of Knative -- they should only need to
+`kubectl apply` released configs generated by `ko`.
+
+### [`skaffold`](https://github.com/GoogleContainerTools/skaffold)
+
+`skaffold` is a CLI tool to aid in iterative development for Kubernetes.
+Typically, you would write a [YAML
+config](https://github.com/GoogleContainerTools/skaffold/blob/master/examples/annotated-skaffold.yaml)
+describing to Skaffold how to build and deploy your app, then run `skaffold
+dev`, which watches your local source tree for changes and continuously
+builds and deploys based on your config when changes are detected.
+
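+For example, once a `skaffold.yaml` is in place, the iterative loop and a
+one-off deployment look like this:
+
+```shell
+# Watch the source tree and rebuild/redeploy whenever something changes.
+skaffold dev
+
+# Do a single build-and-deploy pass instead of watching.
+skaffold run
+```
+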
+Skaffold supports many pluggable implementations for building and deploying.
+Skaffold contributors are working on support for Knative Build as a build
+plugin, and could support Knative Serving as a deployment plugin.
diff --git a/serving/accessing-logs.md b/serving/accessing-logs.md
index 2575ee10e..9a7f4473e 100644
--- a/serving/accessing-logs.md
+++ b/serving/accessing-logs.md
@@ -1,110 +1,116 @@
# Accessing logs
-If you have not yet installed the logging and monitoring components, go through the
-[installation instructions](./installing-logging-metrics-traces.md) to set up the
+If you have not yet installed the logging and monitoring components, go through the
+[installation instructions](./installing-logging-metrics-traces.md) to set up the
necessary components first.
## Kibana and Elasticsearch
-To open the Kibana UI (the visualization tool for [Elasticsearch](https://info.elastic.co),
-enter the following command:
+* To open the Kibana UI (the visualization tool for [Elasticsearch](https://info.elastic.co)),
+start a local proxy with the following command:
+ ```shell
+ kubectl proxy
+ ```
-```shell
-kubectl proxy
-```
+ This command starts a local proxy of Kibana on port 8001. For security reasons,
+ the Kibana UI is exposed only within the cluster.
-This command starts a local proxy of Kibana on port 8001. For security reasons, the
-Kibana UI is exposed only within the cluster.
+* Navigate to the
+[Kibana UI](http://localhost:8001/api/v1/namespaces/monitoring/services/kibana-logging/proxy/app/kibana).
+*It might take a couple of minutes for the proxy to work*.
-Navigate to the
-[Kibana UI](http://localhost:8001/api/v1/namespaces/monitoring/services/kibana-logging/proxy/app/kibana)
- (*It might take a couple of minutes for the proxy to work*).
+ The Discover tab of the Kibana UI looks like this:
-The Discover tab of the Kibana UI looks like this:
+ 
-
+ You can change the time frame of logs Kibana displays in the upper right corner
+ of the screen. The main search bar is across the top of the Discover page.
-You can change the time frame of logs Kibana displays in the upper right corner
-of the screen. The main search bar is across the top of the Discover page.
-
-As more logs are ingested, new fields will be discovered. To have them indexed,
-go to Management > Index Patterns > Refresh button (on top right) > Refresh
-fields.
+* As more logs are ingested, new fields will be discovered. To have them indexed,
+go to "Management" > "Index Patterns" > Refresh button (on top right) > "Refresh
+fields".
### Accessing configuration and revision logs
-To access the logs for a configuration, enter the following search query in Kibana:
+To access the logs for a configuration:
-```text
-kubernetes.labels.serving_knative_dev\/configuration: "configuration-example"
+* Find the configuration's name with the following command:
```
-
-Replace `configuration-example` with your configuration's name. Enter the following
-command to get your configuration's name:
-
-```shell
kubectl get configurations
```
-To access logs for a revision, enter the following search query in Kibana:
-
-```text
-kubernetes.labels.serving_knative_dev\/revision: "configuration-example-00001"
+* Replace `<CONFIGURATION_NAME>` and enter the following search query in Kibana:
+```
+kubernetes.labels.serving_knative_dev\/configuration: <CONFIGURATION_NAME>
```
-Replace `configuration-example-00001` with your revision's name.
+To access logs for a revision:
+* Find the revision's name with the following command:
+```
+kubectl get revisions
+```
+
+* Replace `<REVISION_NAME>` and enter the following search query in Kibana:
+```
+kubernetes.labels.serving_knative_dev\/revision: <REVISION_NAME>
+```
### Accessing build logs
-To access the logs for a build, enter the following search query in Kibana:
+To access logs for a [Knative Build](../build/README.md):
-```text
-kubernetes.labels.build\-name: "test-build"
+* Find the build's name specified in the `.yaml` file:
+ ```yaml
+ apiVersion: build.knative.dev/v1alpha1
+ kind: Build
+ metadata:
+ name: <BUILD_NAME>
+ ```
+ Or find build names with the following command:
+ ```
+ kubectl get builds
+ ```
+
+* Replace `<BUILD_NAME>` and enter the following search query in Kibana:
```
-
-Replace `test-build` with your build's name. The build name is specified in the `.yaml` file as follows:
-
-```yaml
-apiVersion: build.knative.dev/v1alpha1
-kind: Build
-metadata:
- name: test-build
+kubernetes.labels.build\-name: <BUILD_NAME>
```
### Accessing request logs
-To access to request logs, enter the following search in Kibana:
+To access the request logs, enter the following search in Kibana:
```text
tag: "requestlog.logentry.istio-system"
```
-Request logs contain details about requests served by the revision. Below is a sample request log:
+ Request logs contain details about requests served by the revision. Below is
+ a sample request log:
-```text
-@timestamp July 10th 2018, 10:09:28.000
-destinationConfiguration configuration-example
-destinationNamespace default
-destinationRevision configuration-example-00001
-destinationService configuration-example-00001-service.default.svc.cluster.local
-latency 1.232902ms
-method GET
-protocol http
-referer unknown
-requestHost route-example.default.example.com
-requestSize 0
-responseCode 200
-responseSize 36
-severity Info
-sourceNamespace istio-system
-sourceService unknown
-tag requestlog.logentry.istio-system
-traceId 986d6faa02d49533
-url /
-userAgent curl/7.60.0
-```
+ ```text
+ @timestamp July 10th 2018, 10:09:28.000
+ destinationConfiguration configuration-example
+ destinationNamespace default
+ destinationRevision configuration-example-00001
+ destinationService configuration-example-00001-service.default.svc.cluster.local
+ latency 1.232902ms
+ method GET
+ protocol http
+ referer unknown
+ requestHost route-example.default.example.com
+ requestSize 0
+ responseCode 200
+ responseSize 36
+ severity Info
+ sourceNamespace istio-system
+ sourceService unknown
+ tag requestlog.logentry.istio-system
+ traceId 986d6faa02d49533
+ url /
+ userAgent curl/7.60.0
+ ```
### Accessing end to end request traces
diff --git a/serving/accessing-metrics.md b/serving/accessing-metrics.md
index f1a1f86aa..374977e24 100644
--- a/serving/accessing-metrics.md
+++ b/serving/accessing-metrics.md
@@ -1,30 +1,31 @@
# Accessing metrics
-You access metrics through the [Grafana](https://grafana.com/) UI. Grafana is the visualization tool
-for [Prometheus](https://prometheus.io/). To open Grafana, enter the following command:
+You access metrics through the [Grafana](https://grafana.com/) UI. Grafana is
+the visualization tool for [Prometheus](https://prometheus.io/).
-```shell
+1. To open Grafana, enter the following command:
+```
kubectl port-forward -n monitoring $(kubectl get pods -n monitoring --selector=app=grafana --output=jsonpath="{.items..metadata.name}") 3000
```
-This starts a local proxy of Grafana on port 3000. For security reasons, the Grafana UI is exposed only within
-the cluster.
+ * This starts a local proxy of Grafana on port 3000. For security reasons, the Grafana UI is exposed only within the cluster.
-Navigate to the Grafana UI at [http://localhost:3000](http://localhost:3000).
-Select the **Home** button on the top of the page to see the list of pre-installed dashboards (screenshot below):
+2. Navigate to the Grafana UI at [http://localhost:3000](http://localhost:3000).
+
+3. Select the **Home** button on the top of the page to see the list of pre-installed dashboards (screenshot below):

-The following dashboards are pre-installed with Knative Serving:
+ The following dashboards are pre-installed with Knative Serving:
-* **Revision HTTP Requests:** HTTP request count, latency, and size metrics per revision and per configuration
-* **Nodes:** CPU, memory, network, and disk metrics at node level
-* **Pods:** CPU, memory, and network metrics at pod level
-* **Deployment:** CPU, memory, and network metrics aggregated at deployment level
-* **Istio, Mixer and Pilot:** Detailed Istio mesh, Mixer, and Pilot metrics
-* **Kubernetes:** Dashboards giving insights into cluster health, deployments, and capacity usage
+ * **Revision HTTP Requests:** HTTP request count, latency, and size metrics per revision and per configuration
+ * **Nodes:** CPU, memory, network, and disk metrics at node level
+ * **Pods:** CPU, memory, and network metrics at pod level
+ * **Deployment:** CPU, memory, and network metrics aggregated at deployment level
+ * **Istio, Mixer and Pilot:** Detailed Istio mesh, Mixer, and Pilot metrics
+ * **Kubernetes:** Dashboards giving insights into cluster health, deployments, and capacity usage
-To sign in as an administrator and modify or add dashboards, sign in with username `admin` and password `admin`.
-Before you expose the Grafana UI outside the cluster, make sure to change the password.
+4. Set up an administrator account to modify or add dashboards by signing in with username: `admin` and password: `admin`.
+ * Before you expose the Grafana UI outside the cluster, make sure to change the password.
---
diff --git a/serving/debugging-application-issues.md b/serving/debugging-application-issues.md
index 66d9e1724..51aa1ffc6 100644
--- a/serving/debugging-application-issues.md
+++ b/serving/debugging-application-issues.md
@@ -38,7 +38,7 @@ kubectl get route -o yaml
The `conditions` in `status` provide the reason if there is any failure. For
details, see Knative
-[Error Conditions and Reporting](../spec/errors.md)(currently some of them
+[Error Conditions and Reporting](https://github.com/knative/serving/blob/master/docs/spec/errors.md)(currently some of them
are not implemented yet).
## Check Revision status
@@ -77,7 +77,7 @@ If you see this condition, check the following to continue debugging:
If you see other conditions, to debug further:
* Look up the meaning of the conditions in Knative
- [Error Conditions and Reporting](../spec/errors.md). Note: some of them
+ [Error Conditions and Reporting](https://github.com/knative/serving/blob/master/docs/spec/errors.md). Note: some of them
are not implemented yet. An alternative is to
[check Pod status](#check-pod-status).
* If you are using `BUILD` to deploy and the `BuidComplete` condition is not
diff --git a/serving/installing-logging-metrics-traces.md b/serving/installing-logging-metrics-traces.md
index 18f22c0b3..1568c3936 100644
--- a/serving/installing-logging-metrics-traces.md
+++ b/serving/installing-logging-metrics-traces.md
@@ -1,72 +1,112 @@
# Monitoring, Logging and Tracing Installation
-Knative Serving offers two different monitoring setups:
-One that uses Elasticsearch, Kibana, Prometheus and Grafana and
-another that uses Stackdriver, Prometheus and Grafana. See below
-for installation instructions for these two setups. You can install
-only one of these two setups and side-by-side installation of these two are not supported.
+Knative Serving offers two different monitoring setups:
+[Elasticsearch, Kibana, Prometheus and Grafana](#elasticsearch-kibana-prometheus--grafana-setup)
+or
+[Stackdriver, Prometheus and Grafana](#stackdriver-prometheus--grafana-setup).
+You can install only one of these two setups; side-by-side installation of the
+two setups is not supported.
## Elasticsearch, Kibana, Prometheus & Grafana Setup
-*If you installed Knative Serving using [Easy Install](../install/README.md#Installing-Knative) guide,
-skip this step and continue to [Create Elasticsearch Indices](#Create-Elasticsearch-Indices)*
+If you installed the
+[full Knative release](../install/README.md#installing-knative),
+skip this step and continue to
+[Create Elasticsearch Indices](#create-elasticsearch-indices).
+- Install Knative monitoring components from the root of the [Serving repository](https://github.com/knative/serving):
-Run:
+ ```shell
+ kubectl apply -R -f config/monitoring/100-common \
+ -f config/monitoring/150-elasticsearch \
+ -f third_party/config/monitoring/common \
+ -f third_party/config/monitoring/elasticsearch \
+ -f config/monitoring/200-common \
+ -f config/monitoring/200-common/100-istio.yaml
+ ```
-```shell
-kubectl apply -R -f config/monitoring/100-common \
- -f config/monitoring/150-elasticsearch \
- -f third_party/config/monitoring/common \
- -f third_party/config/monitoring/elasticsearch \
- -f config/monitoring/200-common \
- -f config/monitoring/200-common/100-istio.yaml
-```
+- The installation is complete when logging & monitoring components are all
+ reported `Running` or `Completed`:
-Monitor logging & monitoring components, until all of the components report Running or Completed:
+ ```shell
+ kubectl get pods -n monitoring --watch
+ ```
-```shell
-kubectl get pods -n monitoring --watch
-```
+ ```
+ NAME READY STATUS RESTARTS AGE
+ elasticsearch-logging-0 1/1 Running 0 2d
+ elasticsearch-logging-1 1/1 Running 0 2d
+ fluentd-ds-5kc85 1/1 Running 0 2d
+ fluentd-ds-vhrcq 1/1 Running 0 2d
+ fluentd-ds-xghk9 1/1 Running 0 2d
+ grafana-798cf569ff-v4q74 1/1 Running 0 2d
+ kibana-logging-7d474fbb45-6qb8x 1/1 Running 0 2d
+ kube-state-metrics-75bd4f5b8b-8t2h2 4/4 Running 0 2d
+ node-exporter-cr6bh 2/2 Running 0 2d
+ node-exporter-mf6k7 2/2 Running 0 2d
+ node-exporter-rhzr7 2/2 Running 0 2d
+ prometheus-system-0 1/1 Running 0 2d
+ prometheus-system-1 1/1 Running 0 2d
+ ```
-CTRL+C when it's done.
+ CTRL+C to exit watch.
### Create Elasticsearch Indices
-We will create two indexes in ElasticSearch - one for application logs and one for request traces.
-To create the indexes, open Kibana Index Management UI at this [link](http://localhost:8001/api/v1/namespaces/monitoring/services/kibana-logging/proxy/app/kibana#/management/kibana/index)
-(*it might take a couple of minutes for the proxy to work the first time after the installation*).
-Within the "Configure an index pattern" page, enter `logstash-*` to `Index pattern` and select `@timestamp`
-from `Time Filter field name` and click on `Create` button. See below for a screenshot:
+To visualize logs with Kibana, you need to set which Elasticsearch indices to explore. We will create two indices in Elasticsearch using `Logstash` for application logs and `Zipkin`
+for request traces.
+
+- To open the Kibana UI (the visualization tool for
+ [Elasticsearch](https://info.elastic.co)), start a local proxy with the
+ following command:
+
+ ```shell
+ kubectl proxy
+ ```
+
+ This command starts a local proxy of Kibana on port 8001. For security
+ reasons, the Kibana UI is exposed only within the cluster.
+
+- Navigate to the
+ [Kibana UI](http://localhost:8001/api/v1/namespaces/monitoring/services/kibana-logging/proxy/app/kibana).
+ _It might take a couple of minutes for the proxy to work_.
+
+- Within the "Configure an index pattern" page, enter `logstash-*` in the
+  `Index pattern` field, select `@timestamp` from `Time Filter field name`,
+  and click the `Create` button.

-To create the second index, select `Create Index Pattern` button on top left of the page.
-Enter `zipkin*` to `Index pattern` and select `timestamp_millis` from `Time Filter field name`
-and click on `Create` button.
+- To create the second index, select the `Create Index Pattern` button at the
+  top left of the page. Enter `zipkin*` in the `Index pattern` field, select
+  `timestamp_millis` from `Time Filter field name`, and click the `Create`
+  button.
-Next, visit instructions below to access to logs, metrics and traces:
-* [Accessing Logs](./accessing-logs.md)
-* [Accessing Metrics](./accessing-metrics.md)
-* [Accessing Traces](./accessing-traces.md)
+## Stackdriver, Prometheus & Grafana Setup
-## Stackdriver(logs), Prometheus & Grafana Setup
+If your Knative Serving is not built on a Google Cloud Platform (GCP) based
+cluster or you want to send logs to another GCP project, you need to build your
+own Fluentd image and modify the configuration first:
-If your Knative Serving is not built on a GCP based cluster or you want to send logs to
-another GCP project, you need to build your own Fluentd image and modify the
-configuration first. See
+1. Install
+ [Fluentd image on Knative Serving](https://github.com/knative/serving/blob/master/image/fluentd/README.md).
+2. [Set up a logging plugin](setting-up-a-logging-plugin.md).
+3. Install Knative monitoring components:
-1. [Fluentd image on Knative Serving](/image/fluentd/README.md)
-2. [Setting up a logging plugin](setting-up-a-logging-plugin.md)
+ ```shell
+ kubectl apply -R -f config/monitoring/100-common \
+ -f config/monitoring/150-stackdriver-prod \
+ -f third_party/config/monitoring/common \
+ -f config/monitoring/200-common \
+ -f config/monitoring/200-common/100-istio.yaml
+ ```
-```shell
-kubectl apply -R -f config/monitoring/100-common \
- -f config/monitoring/150-stackdriver \
- -f third_party/config/monitoring/common \
- -f config/monitoring/200-common \
- -f config/monitoring/200-common/100-istio.yaml
-```
+## Learn More
+
+- Learn more about accessing logs, metrics, and traces:
+ - [Accessing Logs](./accessing-logs.md)
+ - [Accessing Metrics](./accessing-metrics.md)
+ - [Accessing Traces](./accessing-traces.md)
---
diff --git a/serving/outbound-network-access.md b/serving/outbound-network-access.md
index f7588d974..ff8b197a0 100644
--- a/serving/outbound-network-access.md
+++ b/serving/outbound-network-access.md
@@ -14,7 +14,7 @@ depending on your platform:
* For Google Container Engine (GKE) run the following command to determine the scope. Make sure
to replace the variables or export these values first.
```shell
- gcloud container clusters describe ${CLUSTER_ID} \
+ gcloud container clusters describe ${CLUSTER_ID} \
--zone=${GCP_ZONE} | grep -e clusterIpv4Cidr -e servicesIpv4Cidr
```
* For IBM Cloud Private run the following command:
diff --git a/serving/samples/gitwebhook-go/README.md b/serving/samples/gitwebhook-go/README.md
index 458015103..178fd04c0 100644
--- a/serving/samples/gitwebhook-go/README.md
+++ b/serving/samples/gitwebhook-go/README.md
@@ -102,13 +102,19 @@ service "gitwebhook" created
1. Retrieve the hostname for this service, using the following command:
```shell
- $ kubectl get services.serving.knative.dev gitwebhook \
+ $ kubectl get ksvc gitwebhook \
-o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
NAME DOMAIN
gitwebhook gitwebhook.default.example.com
```
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
+
1. Browse on GitHub to the repository where you want to create a webhook.
1. Click **Settings**, then **Webhooks**, then **Add webhook**.
1. Enter the **Payload URL** as `http://{DOMAIN}`, with the value of DOMAIN listed above.
diff --git a/serving/samples/helloworld-csharp/README.md b/serving/samples/helloworld-csharp/README.md
index a804bf17c..13725f995 100644
--- a/serving/samples/helloworld-csharp/README.md
+++ b/serving/samples/helloworld-csharp/README.md
@@ -130,11 +130,17 @@ folder) you're ready to build and deploy the sample app.
1. To find the URL for your service, use
```
- kubectl get services.serving.knative.dev helloworld-csharp -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
+ kubectl get ksvc helloworld-csharp -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
NAME DOMAIN
helloworld-csharp helloworld-csharp.default.example.com
```
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
+
1. Now you can make a request to your app to see the result. Replace
`{IP_ADDRESS}` with the address you see returned in the previous step.
diff --git a/serving/samples/helloworld-elixir/.gitignore b/serving/samples/helloworld-elixir/.gitignore
new file mode 100644
index 000000000..3064370c9
--- /dev/null
+++ b/serving/samples/helloworld-elixir/.gitignore
@@ -0,0 +1,28 @@
+# App artifacts
+/_build
+/db
+/deps
+/*.ez
+
+# Generated on crash by the VM
+erl_crash.dump
+
+# Generated on crash by NPM
+npm-debug.log
+/assets/package-lock.json
+
+# Static artifacts
+/assets/node_modules
+
+# Since we are building assets from assets/,
+# we ignore priv/static. You may want to comment
+# this depending on your deployment strategy.
+/priv/static/
+
+# Files matching config/*.secret.exs pattern contain sensitive
+# data and you should not commit them into version control.
+#
+# Alternatively, you may comment the line below and commit the
+# secrets files as long as you replace their contents by environment
+# variables.
+/config/*.secret.exs
diff --git a/serving/samples/helloworld-elixir/Dockerfile b/serving/samples/helloworld-elixir/Dockerfile
new file mode 100644
index 000000000..20f684162
--- /dev/null
+++ b/serving/samples/helloworld-elixir/Dockerfile
@@ -0,0 +1,27 @@
+FROM elixir:alpine
+ARG APP_NAME=hello
+ARG PHOENIX_SUBDIR=.
+ENV MIX_ENV=prod REPLACE_OS_VARS=true TERM=xterm
+WORKDIR /opt/app
+RUN apk update \
+ && apk --no-cache --update add nodejs nodejs-npm \
+ && mix local.rebar --force \
+ && mix local.hex --force
+COPY . .
+RUN mix do deps.get, deps.compile, compile
+RUN cd ${PHOENIX_SUBDIR}/assets \
+ && npm install \
+ && ./node_modules/brunch/bin/brunch build -p \
+ && cd .. \
+ && mix phx.digest
+RUN mix release --env=prod --verbose \
+ && mv _build/prod/rel/${APP_NAME} /opt/release \
+ && mv /opt/release/bin/${APP_NAME} /opt/release/bin/start_server
+FROM alpine:latest
+RUN apk update && apk --no-cache --update add bash openssl-dev
+ENV PORT=8080 MIX_ENV=prod REPLACE_OS_VARS=true
+WORKDIR /opt/app
+EXPOSE 8080
+COPY --from=0 /opt/release .
+ENV RUNNER_LOG_DIR /var/log
+CMD ["/opt/app/bin/start_server", "foreground", "boot_var=/tmp"]
diff --git a/serving/samples/helloworld-elixir/README.md b/serving/samples/helloworld-elixir/README.md
new file mode 100644
index 000000000..477ae7d3e
--- /dev/null
+++ b/serving/samples/helloworld-elixir/README.md
@@ -0,0 +1,300 @@
+# Hello World - Elixir Sample
+
+A simple web application written in [Elixir](https://elixir-lang.org/) using the
+[Phoenix Framework](https://phoenixframework.org/).
+The application prints all environment variables to the main page.
+
+# Set up Elixir and Phoenix Locally
+
+Following the [Phoenix Installation Guide](https://hexdocs.pm/phoenix/installation.html)
+is the best way to get your computer set up for developing,
+building, running, and packaging Elixir Web applications.
+
+# Running Locally
+
+To start your Phoenix server:
+
+ * Install dependencies with `mix deps.get`
+ * Install Node.js dependencies with `cd assets && npm install`
+ * Start Phoenix endpoint with `mix phx.server`
+
+Now you can visit [`localhost:4000`](http://localhost:4000) from your browser.
+
+# Recreating the sample code
+
+1. Generate a new project.
+
+ ```shell
+ mix phoenix.new helloelixir
+ ```
+
+ When asked if you want to `Fetch and install dependencies? [Yn]`, select `y`.
+
+1. Follow the directions in the output to change into your new project directory
+ and start your local server with `mix phoenix.server`.
+
+1. In the new directory, create a new Dockerfile for packaging
+ your application for deployment
+
+ ```docker
+ # Start from a base image for elixir
+ FROM elixir:alpine
+
+ # Set up Elixir and Phoenix
+ ARG APP_NAME=hello
+ ARG PHOENIX_SUBDIR=.
+ ENV MIX_ENV=prod REPLACE_OS_VARS=true TERM=xterm
+ WORKDIR /opt/app
+
+ # Compile assets.
+ RUN apk update \
+ && apk --no-cache --update add nodejs nodejs-npm \
+ && mix local.rebar --force \
+ && mix local.hex --force
+ COPY . .
+
+ # Download and compile dependencies, then compile Web app.
+ RUN mix do deps.get, deps.compile, compile
+ RUN cd ${PHOENIX_SUBDIR}/assets \
+ && npm install \
+ && ./node_modules/brunch/bin/brunch build -p \
+ && cd .. \
+ && mix phx.digest
+
+ # Create a release version of the application
+ RUN mix release --env=prod --verbose \
+ && mv _build/prod/rel/${APP_NAME} /opt/release \
+ && mv /opt/release/bin/${APP_NAME} /opt/release/bin/start_server
+
+ # Prepare final layer
+ FROM alpine:latest
+ RUN apk update && apk --no-cache --update add bash openssl-dev
+ ENV PORT=8080 MIX_ENV=prod REPLACE_OS_VARS=true
+ WORKDIR /opt/app
+
+ # Document that the service listens on port 8080.
+ EXPOSE 8080
+ COPY --from=0 /opt/release .
+ ENV RUNNER_LOG_DIR /var/log
+
+ # Command to execute the application.
+ CMD ["/opt/app/bin/start_server", "foreground", "boot_var=/tmp"]
+ ```
+
+1. Create a new file, `service.yaml` and copy the following Service
+ definition into the file. Make sure to replace `{username}` with
+ your Docker Hub username.
+
+ ```yaml
+ apiVersion: serving.knative.dev/v1alpha1
+ kind: Service
+ metadata:
+ name: helloworld-elixir
+ namespace: default
+ spec:
+ runLatest:
+ configuration:
+ revisionTemplate:
+ spec:
+ container:
+ image: docker.io/{username}/helloworld-elixir
+ env:
+ - name: TARGET
+ value: "elixir Sample v1"
+ ```
+
+# Building and deploying the sample
+
+The sample in this directory is ready to build and deploy without changes.
+You can deploy the sample as is, or use a version you created by following the
+directions above.
+
+1. Generate a new `secret_key_base` in the `config/prod.secret.exs` file.
+ Phoenix applications use a secrets file on production deployments and, by
+ default, that file is not checked into source control. We provide a
+ sample at `config/prod.secret.exs.sample`, and you can use the
+ following command to generate a new prod secrets file.
+
+ ```shell
+ SECRET_KEY_BASE=$(elixir -e ":crypto.strong_rand_bytes(48) |> Base.encode64 |> IO.puts")
+ sed "s|SECRET+KEY+BASE|$SECRET_KEY_BASE|" config/prod.secret.exs.sample >config/prod.secret.exs
+ ```
+
+1. Use Docker to build the sample code into a container. To build and push
+ with Docker Hub, run these commands replacing `{username}` with your Docker
+ Hub username:
+
+ ```shell
+ # Build the container on your local machine
+ docker build -t {username}/helloworld-elixir .
+
+ # Push the container to docker registry
+ docker push {username}/helloworld-elixir
+ ```
+
+1. After the build has completed and the container is pushed to Docker Hub, you
+ can deploy the app into your cluster. Ensure that the container image value
+ in `service.yaml` matches the container you built in
+ the previous step. Apply the configuration using `kubectl`:
+
+ ```shell
+ kubectl apply -f service.yaml
+ ```
+
+1. Now that your service is created, Knative will perform the following steps:
+ * Create a new immutable revision for this version of the app.
+ * Network programming to create a route, ingress, service, and load balancer for your app.
+ * Automatically scale your pods up and down (including to zero active pods).
+
+1. To find the IP address for your service, use
+ `kubectl get svc knative-ingressgateway -n istio-system` to get the ingress IP for your
+ cluster. If your cluster is new, it may take some time for the service to get assigned
+ an external IP address.
+
+ ```
+ kubectl get svc knative-ingressgateway -n istio-system
+
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+knative-ingressgateway LoadBalancer 10.35.254.218 35.225.171.32 80:32380/TCP,443:32390/TCP,32400:32400/TCP 1h
+ ```
+
+1. To find the URL for your service, use
+
+ ```
+ kubectl get ksvc helloworld-elixir -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
+
+ NAME DOMAIN
+ helloworld-elixir helloworld-elixir.default.example.com
+ ```
+
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
+
+1. Now you can make a request to your app to see the results. Replace
+ `{IP_ADDRESS}` with the address you see returned in the previous step.
+
+ ```shell
+ curl -H "Host: helloworld-elixir.default.example.com" http://{IP_ADDRESS}
+
+ ...
+ # HTML from your application is returned.
+ ```
+
+ Here is the HTML returned from our deployed sample application:
+
+ ```HTML
+
+  Hello Knative
+
+  <%= for key <- Enum.sort(Map.keys(@env)) do %>
+    <%= key %> = <%= Map.get(@env, key) %>
+  <% end %>
+  ```
+
diff --git a/serving/samples/helloworld-elixir/lib/hello_web/views/error_helpers.ex b/serving/samples/helloworld-elixir/lib/hello_web/views/error_helpers.ex
new file mode 100644
index 000000000..fa06eae03
--- /dev/null
+++ b/serving/samples/helloworld-elixir/lib/hello_web/views/error_helpers.ex
@@ -0,0 +1,44 @@
+defmodule HelloWeb.ErrorHelpers do
+ @moduledoc """
+ Conveniences for translating and building error messages.
+ """
+
+ use Phoenix.HTML
+
+ @doc """
+ Generates tag for inlined form input errors.
+ """
+ def error_tag(form, field) do
+ Enum.map(Keyword.get_values(form.errors, field), fn (error) ->
+ content_tag :span, translate_error(error), class: "help-block"
+ end)
+ end
+
+ @doc """
+ Translates an error message using gettext.
+ """
+ def translate_error({msg, opts}) do
+ # When using gettext, we typically pass the strings we want
+ # to translate as a static argument:
+ #
+ # # Translate "is invalid" in the "errors" domain
+ # dgettext "errors", "is invalid"
+ #
+ # # Translate the number of files with plural rules
+ # dngettext "errors", "1 file", "%{count} files", count
+ #
+ # Because the error messages we show in our forms and APIs
+ # are defined inside Ecto, we need to translate them dynamically.
+ # This requires us to call the Gettext module passing our gettext
+ # backend as first argument.
+ #
+ # Note we use the "errors" domain, which means translations
+ # should be written to the errors.po file. The :count option is
+ # set by Ecto and indicates we should also apply plural rules.
+ if count = opts[:count] do
+ Gettext.dngettext(HelloWeb.Gettext, "errors", msg, msg, count, opts)
+ else
+ Gettext.dgettext(HelloWeb.Gettext, "errors", msg, opts)
+ end
+ end
+end
diff --git a/serving/samples/helloworld-elixir/lib/hello_web/views/error_view.ex b/serving/samples/helloworld-elixir/lib/hello_web/views/error_view.ex
new file mode 100644
index 000000000..90f183947
--- /dev/null
+++ b/serving/samples/helloworld-elixir/lib/hello_web/views/error_view.ex
@@ -0,0 +1,16 @@
+defmodule HelloWeb.ErrorView do
+ use HelloWeb, :view
+
+ # If you want to customize a particular status code
+ # for a certain format, you may uncomment below.
+ # def render("500.html", _assigns) do
+ # "Internal Server Error"
+ # end
+
+ # By default, Phoenix returns the status message from
+ # the template name. For example, "404.html" becomes
+ # "Not Found".
+ def template_not_found(template, _assigns) do
+ Phoenix.Controller.status_message_from_template(template)
+ end
+end
diff --git a/serving/samples/helloworld-elixir/lib/hello_web/views/hello_view.ex b/serving/samples/helloworld-elixir/lib/hello_web/views/hello_view.ex
new file mode 100644
index 000000000..dc29df264
--- /dev/null
+++ b/serving/samples/helloworld-elixir/lib/hello_web/views/hello_view.ex
@@ -0,0 +1,3 @@
+defmodule HelloWeb.HelloView do
+ use HelloWeb, :view
+end
diff --git a/serving/samples/helloworld-elixir/lib/hello_web/views/layout_view.ex b/serving/samples/helloworld-elixir/lib/hello_web/views/layout_view.ex
new file mode 100644
index 000000000..59c874284
--- /dev/null
+++ b/serving/samples/helloworld-elixir/lib/hello_web/views/layout_view.ex
@@ -0,0 +1,3 @@
+defmodule HelloWeb.LayoutView do
+ use HelloWeb, :view
+end
diff --git a/serving/samples/helloworld-elixir/lib/hello_web/views/page_view.ex b/serving/samples/helloworld-elixir/lib/hello_web/views/page_view.ex
new file mode 100644
index 000000000..9a271179a
--- /dev/null
+++ b/serving/samples/helloworld-elixir/lib/hello_web/views/page_view.ex
@@ -0,0 +1,3 @@
+defmodule HelloWeb.PageView do
+ use HelloWeb, :view
+end
diff --git a/serving/samples/helloworld-elixir/mix.exs b/serving/samples/helloworld-elixir/mix.exs
new file mode 100644
index 000000000..6127c4c80
--- /dev/null
+++ b/serving/samples/helloworld-elixir/mix.exs
@@ -0,0 +1,44 @@
+defmodule Hello.Mixfile do
+ use Mix.Project
+
+ def project do
+ [
+ app: :hello,
+ version: "0.0.1",
+ elixir: "~> 1.4",
+ elixirc_paths: elixirc_paths(Mix.env),
+ compilers: [:phoenix, :gettext] ++ Mix.compilers,
+ start_permanent: Mix.env == :prod,
+ deps: deps()
+ ]
+ end
+
+ # Configuration for the OTP application.
+ #
+ # Type `mix help compile.app` for more information.
+ def application do
+ [
+ mod: {Hello.Application, []},
+ extra_applications: [:logger, :runtime_tools]
+ ]
+ end
+
+ # Specifies which paths to compile per environment.
+ defp elixirc_paths(:test), do: ["lib", "test/support"]
+ defp elixirc_paths(_), do: ["lib"]
+
+ # Specifies your project dependencies.
+ #
+ # Type `mix help deps` for examples and options.
+ defp deps do
+ [
+ {:phoenix, "~> 1.3.2"},
+ {:phoenix_pubsub, "~> 1.0"},
+ {:phoenix_html, "~> 2.10"},
+ {:phoenix_live_reload, "~> 1.0", only: :dev},
+ {:gettext, "~> 0.11"},
+ {:cowboy, "~> 1.0"},
+ {:distillery, "~> 1.5"}
+ ]
+ end
+end
diff --git a/serving/samples/helloworld-elixir/mix.lock b/serving/samples/helloworld-elixir/mix.lock
new file mode 100644
index 000000000..f87e0b7e0
--- /dev/null
+++ b/serving/samples/helloworld-elixir/mix.lock
@@ -0,0 +1,15 @@
+%{
+ "cowboy": {:hex, :cowboy, "1.1.2", "61ac29ea970389a88eca5a65601460162d370a70018afe6f949a29dca91f3bb0", [:rebar3], [{:cowlib, "~> 1.0.2", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, "~> 1.3.2", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm"},
+ "cowlib": {:hex, :cowlib, "1.0.2", "9d769a1d062c9c3ac753096f868ca121e2730b9a377de23dec0f7e08b1df84ee", [:make], [], "hexpm"},
+ "distillery": {:hex, :distillery, "1.5.2", "eec18b2d37b55b0bcb670cf2bcf64228ed38ce8b046bb30a9b636a6f5a4c0080", [:mix], [], "hexpm"},
+ "file_system": {:hex, :file_system, "0.2.5", "a3060f063b116daf56c044c273f65202e36f75ec42e678dc10653056d3366054", [:mix], [], "hexpm"},
+ "gettext": {:hex, :gettext, "0.15.0", "40a2b8ce33a80ced7727e36768499fc9286881c43ebafccae6bab731e2b2b8ce", [:mix], [], "hexpm"},
+ "mime": {:hex, :mime, "1.3.0", "5e8d45a39e95c650900d03f897fbf99ae04f60ab1daa4a34c7a20a5151b7a5fe", [:mix], [], "hexpm"},
+ "phoenix": {:hex, :phoenix, "1.3.2", "2a00d751f51670ea6bc3f2ba4e6eb27ecb8a2c71e7978d9cd3e5de5ccf7378bd", [:mix], [{:cowboy, "~> 1.0", [hex: :cowboy, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 1.0", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:plug, "~> 1.3.3 or ~> 1.4", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 2.2 or ~> 3.0", [hex: :poison, repo: "hexpm", optional: false]}], "hexpm"},
+ "phoenix_html": {:hex, :phoenix_html, "2.11.2", "86ebd768258ba60a27f5578bec83095bdb93485d646fc4111db8844c316602d6", [:mix], [{:plug, "~> 1.5", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm"},
+ "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.1.5", "8d4c9b1ef9ca82deee6deb5a038d6d8d7b34b9bb909d99784a49332e0d15b3dc", [:mix], [{:file_system, "~> 0.2.1 or ~> 0.3", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.0 or ~> 1.2 or ~> 1.3", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm"},
+ "phoenix_pubsub": {:hex, :phoenix_pubsub, "1.0.2", "bfa7fd52788b5eaa09cb51ff9fcad1d9edfeb68251add458523f839392f034c1", [:mix], [], "hexpm"},
+ "plug": {:hex, :plug, "1.5.1", "1ff35bdecfb616f1a2b1c935ab5e4c47303f866cb929d2a76f0541e553a58165", [:mix], [{:cowboy, "~> 1.0.1 or ~> 1.1 or ~> 2.3", [hex: :cowboy, repo: "hexpm", optional: true]}, {:mime, "~> 1.0", [hex: :mime, repo: "hexpm", optional: false]}], "hexpm"},
+ "poison": {:hex, :poison, "3.1.0", "d9eb636610e096f86f25d9a46f35a9facac35609a7591b3be3326e99a0484665", [:mix], [], "hexpm"},
+ "ranch": {:hex, :ranch, "1.3.2", "e4965a144dc9fbe70e5c077c65e73c57165416a901bd02ea899cfd95aa890986", [:rebar3], [], "hexpm"},
+}
diff --git a/serving/samples/helloworld-elixir/priv/gettext/en/LC_MESSAGES/errors.po b/serving/samples/helloworld-elixir/priv/gettext/en/LC_MESSAGES/errors.po
new file mode 100644
index 000000000..cdec3a113
--- /dev/null
+++ b/serving/samples/helloworld-elixir/priv/gettext/en/LC_MESSAGES/errors.po
@@ -0,0 +1,11 @@
+## `msgid`s in this file come from POT (.pot) files.
+##
+## Do not add, change, or remove `msgid`s manually here as
+## they're tied to the ones in the corresponding POT file
+## (with the same domain).
+##
+## Use `mix gettext.extract --merge` or `mix gettext.merge`
+## to merge POT files into PO files.
+msgid ""
+msgstr ""
+"Language: en\n"
diff --git a/serving/samples/helloworld-elixir/priv/gettext/errors.pot b/serving/samples/helloworld-elixir/priv/gettext/errors.pot
new file mode 100644
index 000000000..6988141a6
--- /dev/null
+++ b/serving/samples/helloworld-elixir/priv/gettext/errors.pot
@@ -0,0 +1,10 @@
+## This file is a PO Template file.
+##
+## `msgid`s here are often extracted from source code.
+## Add new translations manually only if they're dynamic
+## translations that can't be statically extracted.
+##
+## Run `mix gettext.extract` to bring this file up to
+## date. Leave `msgstr`s empty as changing them here has no
+## effect: edit them in PO (`.po`) files instead.
+
diff --git a/serving/samples/helloworld-elixir/rel/config.exs b/serving/samples/helloworld-elixir/rel/config.exs
new file mode 100644
index 000000000..7452b3f5f
--- /dev/null
+++ b/serving/samples/helloworld-elixir/rel/config.exs
@@ -0,0 +1,53 @@
+# Import all plugins from `rel/plugins`
+# They can then be used by adding `plugin MyPlugin` to
+# either an environment, or release definition, where
+# `MyPlugin` is the name of the plugin module.
+Path.join(["rel", "plugins", "*.exs"])
+|> Path.wildcard()
+|> Enum.map(&Code.eval_file(&1))
+
+use Mix.Releases.Config,
+ # This sets the default release built by `mix release`
+ default_release: :default,
+ # This sets the default environment used by `mix release`
+ default_environment: Mix.env()
+
+# For a full list of config options for both releases
+# and environments, visit https://hexdocs.pm/distillery/configuration.html
+
+
+# You may define one or more environments in this file,
+# an environment's settings will override those of a release
+# when building in that environment, this combination of release
+# and environment configuration is called a profile
+
+environment :dev do
+ # If you are running Phoenix, you should make sure that
+ # server: true is set and the code reloader is disabled,
+ # even in dev mode.
+ # It is recommended that you build with MIX_ENV=prod and pass
+ # the --env flag to Distillery explicitly if you want to use
+ # dev mode.
+ set dev_mode: true
+ set include_erts: false
+ set cookie: :"Bps5@RVvPgL9c~C~D(DCQ5*Iu! Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
+
1. Now you can make a request to your app to see the results. Replace
`{IP_ADDRESS}` with the address you see returned in the previous step.
diff --git a/serving/samples/helloworld-haskell/.gitignore b/serving/samples/helloworld-haskell/.gitignore
new file mode 100644
index 000000000..9a6cf65ad
--- /dev/null
+++ b/serving/samples/helloworld-haskell/.gitignore
@@ -0,0 +1,6 @@
+.stack-work/
+*.cabal
+*~
+/.idea/
+/dist/
+/out/
diff --git a/serving/samples/helloworld-haskell/Dockerfile b/serving/samples/helloworld-haskell/Dockerfile
new file mode 100644
index 000000000..8a9626b4c
--- /dev/null
+++ b/serving/samples/helloworld-haskell/Dockerfile
@@ -0,0 +1,21 @@
+# Use the existing Haskell image as our base
+FROM haskell:8.2.2 as builder
+
+# Checkout our code onto the Docker container
+WORKDIR /app
+ADD . /app
+
+# Build and test our code, then install the "helloworld-haskell-exe" executable
+RUN stack setup
+RUN stack build --copy-bins
+
+# Copy the "helloworld-haskell-exe" executable to the image using docker multi stage build
+FROM fpco/haskell-scratch:integer-gmp
+WORKDIR /root/
+COPY --from=builder /root/.local/bin/helloworld-haskell-exe .
+
+# Expose a port to run our application
+EXPOSE 8080
+
+# Run the server command
+CMD ["./helloworld-haskell-exe"]
diff --git a/serving/samples/helloworld-haskell/README.md b/serving/samples/helloworld-haskell/README.md
new file mode 100644
index 000000000..0cd30e662
--- /dev/null
+++ b/serving/samples/helloworld-haskell/README.md
@@ -0,0 +1,204 @@
+# Hello World - Haskell sample
+
+A simple web app written in Haskell that you can use for testing.
+It reads in an env variable `TARGET` and prints "Hello world: ${TARGET}". If
+TARGET is not specified, it will use "NOT SPECIFIED" as the TARGET.
+
+## Prerequisites
+
+* A Kubernetes cluster with Knative installed. Follow the
+ [installation instructions](https://github.com/knative/docs/blob/master/install/README.md) if you need
+ to create one.
+* [Docker](https://www.docker.com) installed and running on your local machine,
+ and a Docker Hub account configured (we'll use it for a container registry).
+
+## Recreating the sample code
+
+While you can clone all of the code from this directory, hello world
+apps are generally more useful if you build them step-by-step. The
+following instructions recreate the source files from this folder.
+
+1. Create a new file named `stack.yaml` and paste the following code:
+
+ ```yaml
+ flags: {}
+ packages:
+ - .
+ extra-deps: []
+ resolver: lts-10.7
+ ```
+1. Create a new file named `package.yaml` and paste the following code:
+
+ ```yaml
+ name: helloworld-haskell
+ version: 0.1.0.0
+ dependencies:
+ - base >= 4.7 && < 5
+ - scotty
+ - text
+
+ executables:
+ helloworld-haskell-exe:
+ main: Main.hs
+ source-dirs: app
+ ghc-options:
+ - -threaded
+ - -rtsopts
+ - -with-rtsopts=-N
+ ```
+
+1. Create an `app` folder, then create a new file named `Main.hs` in that folder
+ and paste the following code. This code creates a basic web server which
+ listens on port 8080:
+
+ ```haskell
+ {-# LANGUAGE OverloadedStrings #-}
+
+ import Data.Maybe
+ import Data.Monoid ((<>))
+ import Data.Text.Lazy (Text)
+ import Data.Text.Lazy
+ import System.Environment (lookupEnv)
+ import Web.Scotty (ActionM, ScottyM, scotty)
+ import Web.Scotty.Trans
+
+ main :: IO ()
+ main = do
+ t <- fromMaybe "NOT SPECIFIED" <$> lookupEnv "TARGET"
+ scotty 8080 (route t)
+
+ route :: String -> ScottyM()
+ route t = get "/" $ hello t
+
+ hello :: String -> ActionM()
+ hello t = text $ pack ("Hello world: " ++ t)
+ ```
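+
+ If you have the [Haskell Stack](https://docs.haskellstack.org) tool installed
+ locally, you can try the server before containerizing it (an optional sketch;
+ it assumes the files created above are already in place):
+
+ ```shell
+ stack setup && stack build
+ TARGET="local test" stack exec helloworld-haskell-exe
+ # In another terminal:
+ curl http://localhost:8080
+ ```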
+
+1. In your project directory, create a file named `Dockerfile` and copy the code
+ block below into it.
+
+ ```docker
+ # Use the existing Haskell image as our base
+ FROM haskell:8.2.2 as builder
+
+ # Checkout our code onto the Docker container
+ WORKDIR /app
+ ADD . /app
+
+ # Build and test our code, then install the "helloworld-haskell-exe" executable
+ RUN stack setup
+ RUN stack build --copy-bins
+
+ # Copy the "helloworld-haskell-exe" executable to the image using docker multi stage build
+ FROM fpco/haskell-scratch:integer-gmp
+ WORKDIR /root/
+ COPY --from=builder /root/.local/bin/helloworld-haskell-exe .
+
+ # Expose a port to run our application
+ EXPOSE 8080
+
+ # Run the server command
+ CMD ["./helloworld-haskell-exe"]
+ ```
+
+1. Create a new file named `service.yaml` and copy the following service definition
+ into the file. Make sure to replace `{username}` with your Docker Hub username.
+
+```yaml
+apiVersion: serving.knative.dev/v1alpha1
+kind: Service
+metadata:
+ name: helloworld-haskell
+ namespace: default
+spec:
+ runLatest:
+ configuration:
+ revisionTemplate:
+ spec:
+ container:
+ image: docker.io/{username}/helloworld-haskell
+ env:
+ - name: TARGET
+ value: "Haskell Sample v1"
+```
+
+## Build and deploy this sample
+
+Once you have recreated the sample code files (or used the files in the sample
+folder) you're ready to build and deploy the sample app.
+
+1. Use Docker to build the sample code into a container. To build and push with
+ Docker Hub, enter these commands replacing `{username}` with your
+ Docker Hub username:
+
+ ```shell
+ # Build the container on your local machine
+ docker build -t {username}/helloworld-haskell .
+
+ # Push the container to docker registry
+ docker push {username}/helloworld-haskell
+ ```
+
+1. After the build has completed and the container is pushed to Docker Hub, you
+ can deploy the app into your cluster. Ensure that the container image value
+ in `service.yaml` matches the container you built in
+ the previous step. Apply the configuration using `kubectl`:
+
+ ```shell
+ kubectl apply -f service.yaml
+ ```
+
+1. Now that your service is created, Knative will perform the following steps:
+ * Create a new immutable revision for this version of the app.
+ * Network programming to create a route, ingress, service, and load balancer for your app.
+ * Automatically scale your pods up and down (including to zero active pods).
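+
+ To watch Knative work through these steps, you can poll the resources it
+ creates (a quick sketch; it assumes `kubectl` is pointed at the cluster where
+ you deployed the sample):
+
+ ```shell
+ # The Revision created for this version of the app
+ kubectl get revisions
+
+ # The Route and Configuration behind the Service
+ kubectl get routes,configurations
+
+ # Pods scaling up and down with traffic
+ kubectl get pods --watch
+ ```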
+
+1. To find the IP address for your service, enter
+ `kubectl get svc knative-ingressgateway -n istio-system` to get the ingress IP for your
+ cluster. If your cluster is new, it may take some time for the service to get assigned
+ an external IP address.
+
+ ```shell
+ kubectl get svc knative-ingressgateway -n istio-system
+
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ knative-ingressgateway LoadBalancer 10.23.247.74 35.203.155.229 80:32380/TCP,443:32390/TCP,32400:32400/TCP 2d
+
+ ```
+
+ For minikube or bare-metal, get IP_ADDRESS by running the following command:
+
+ ```shell
+ echo $(kubectl get node -o 'jsonpath={.items[0].status.addresses[0].address}'):$(kubectl get svc knative-ingressgateway -n istio-system -o 'jsonpath={.spec.ports[?(@.port==80)].nodePort}')
+
+ ```
+
+1. To find the URL for your service, enter:
+ ```
+ kubectl get ksvc helloworld-haskell -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
+ NAME DOMAIN
+ helloworld-haskell helloworld-haskell.default.example.com
+ ```
+
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
+
+1. Now you can make a request to your app and see the result. Replace
+ `{IP_ADDRESS}` with the address you see returned in the previous step.
+
+ ```shell
+ curl -H "Host: helloworld-haskell.default.example.com" http://{IP_ADDRESS}
+ Hello world: Haskell Sample v1
+ ```
+
+## Removing the sample app deployment
+
+To remove the sample app from your cluster, delete the service record:
+
+```shell
+kubectl delete -f service.yaml
+```
+
diff --git a/serving/samples/helloworld-haskell/app/Main.hs b/serving/samples/helloworld-haskell/app/Main.hs
new file mode 100644
index 000000000..6f69ab9ce
--- /dev/null
+++ b/serving/samples/helloworld-haskell/app/Main.hs
@@ -0,0 +1,20 @@
+{-# LANGUAGE OverloadedStrings #-}
+
+import Data.Maybe
+import Data.Monoid ((<>))
+import Data.Text.Lazy (Text)
+import Data.Text.Lazy
+import System.Environment (lookupEnv)
+import Web.Scotty (ActionM, ScottyM, scotty)
+import Web.Scotty.Trans
+
+main :: IO ()
+main = do
+ t <- fromMaybe "NOT SPECIFIED" <$> lookupEnv "TARGET"
+ scotty 8080 (route t)
+
+route :: String -> ScottyM()
+route t = get "/" $ hello t
+
+hello :: String -> ActionM()
+hello t = text $ pack ("Hello world: " ++ t)
diff --git a/serving/samples/helloworld-haskell/package.yaml b/serving/samples/helloworld-haskell/package.yaml
new file mode 100644
index 000000000..12178e943
--- /dev/null
+++ b/serving/samples/helloworld-haskell/package.yaml
@@ -0,0 +1,15 @@
+name: helloworld-haskell
+version: 0.1.0.0
+dependencies:
+- base >= 4.7 && < 5
+- scotty
+- text
+
+executables:
+ helloworld-haskell-exe:
+ main: Main.hs
+ source-dirs: app
+ ghc-options:
+ - -threaded
+ - -rtsopts
+ - -with-rtsopts=-N
diff --git a/serving/samples/helloworld-haskell/service.yaml b/serving/samples/helloworld-haskell/service.yaml
new file mode 100644
index 000000000..07a7e0a83
--- /dev/null
+++ b/serving/samples/helloworld-haskell/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: serving.knative.dev/v1alpha1
+kind: Service
+metadata:
+ name: helloworld-haskell
+ namespace: default
+spec:
+ runLatest:
+ configuration:
+ revisionTemplate:
+ spec:
+ container:
+ image: docker.io/{username}/helloworld-haskell
+ env:
+ - name: TARGET
+ value: "Haskell Sample v1"
diff --git a/serving/samples/helloworld-haskell/stack.yaml b/serving/samples/helloworld-haskell/stack.yaml
new file mode 100644
index 000000000..e63cd13e1
--- /dev/null
+++ b/serving/samples/helloworld-haskell/stack.yaml
@@ -0,0 +1,5 @@
+flags: {}
+packages:
+- .
+extra-deps: []
+resolver: lts-10.7
diff --git a/serving/samples/helloworld-java/README.md b/serving/samples/helloworld-java/README.md
index f8190cf55..2b31a5bd9 100644
--- a/serving/samples/helloworld-java/README.md
+++ b/serving/samples/helloworld-java/README.md
@@ -154,11 +154,17 @@ folder) you're ready to build and deploy the sample app.
1. To find the URL for your service, use
```
- kubectl get services.serving.knative.dev helloworld-java -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
+ kubectl get ksvc helloworld-java -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
NAME DOMAIN
helloworld-java helloworld-java.default.example.com
```
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
+
1. Now you can make a request to your app to see the result. Replace
`{IP_ADDRESS}` with the address you see returned in the previous step.
diff --git a/serving/samples/helloworld-nodejs/README.md b/serving/samples/helloworld-nodejs/README.md
index 5b5448b86..5a0a27ced 100644
--- a/serving/samples/helloworld-nodejs/README.md
+++ b/serving/samples/helloworld-nodejs/README.md
@@ -172,11 +172,17 @@ folder) you're ready to build and deploy the sample app.
1. To find the URL for your service, use
```
- kubectl get services.serving.knative.dev helloworld-nodejs -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
+ kubectl get ksvc helloworld-nodejs -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
NAME DOMAIN
helloworld-nodejs helloworld-nodejs.default.example.com
```
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
+
1. Now you can make a request to your app to see the result. Replace
`{IP_ADDRESS}` with the address you see returned in the previous step.
diff --git a/serving/samples/helloworld-php/README.md b/serving/samples/helloworld-php/README.md
index 5a1fc5007..667271a4f 100644
--- a/serving/samples/helloworld-php/README.md
+++ b/serving/samples/helloworld-php/README.md
@@ -113,11 +113,17 @@ you're ready to build and deploy the sample app.
1. To find the URL for your service, use
```
- kubectl get services.serving.knative.dev helloworld-php -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
+ kubectl get ksvc helloworld-php -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
NAME DOMAIN
helloworld-php helloworld-php.default.example.com
```
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
+
1. Now you can make a request to your app to see the result. Replace
`{IP_ADDRESS}` with the address you see returned in the previous step.
diff --git a/serving/samples/helloworld-python/README.md b/serving/samples/helloworld-python/README.md
index f1b3640b6..298864ef9 100644
--- a/serving/samples/helloworld-python/README.md
+++ b/serving/samples/helloworld-python/README.md
@@ -125,11 +125,17 @@ folder) you're ready to build and deploy the sample app.
1. To find the URL for your service, use
```
- kubectl get services.serving.knative.dev helloworld-python -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
+ kubectl get ksvc helloworld-python -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
NAME DOMAIN
helloworld-python helloworld-python.default.example.com
```
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
+
1. Now you can make a request to your app to see the result. Replace `{IP_ADDRESS}`
with the address you see returned in the previous step.
diff --git a/serving/samples/helloworld-ruby/README.md b/serving/samples/helloworld-ruby/README.md
index 403b4c064..53732b9d4 100644
--- a/serving/samples/helloworld-ruby/README.md
+++ b/serving/samples/helloworld-ruby/README.md
@@ -140,10 +140,15 @@ you're ready to build and deploy the sample app.
1. To find the URL for your service, use
```
- kubectl get services.serving.knative.dev helloworld-ruby -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
+ kubectl get ksvc helloworld-ruby -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
NAME DOMAIN
helloworld-ruby helloworld-ruby.default.example.com
```
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
1. Now you can make a request to your app to see the result. Replace `{IP_ADDRESS}`
with the address you see returned in the previous step.
diff --git a/serving/samples/helloworld-rust/README.md b/serving/samples/helloworld-rust/README.md
index 9a8ee1d8e..0010c984d 100644
--- a/serving/samples/helloworld-rust/README.md
+++ b/serving/samples/helloworld-rust/README.md
@@ -156,11 +156,17 @@ folder) you're ready to build and deploy the sample app.
1. To find the URL for your service, enter:
```
- kubectl get services.serving.knative.dev helloworld-rust -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
+ kubectl get ksvc helloworld-rust -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
NAME DOMAIN
helloworld-rust helloworld-rust.default.example.com
```
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
+
1. Now you can make a request to your app and see the result. Replace
`{IP_ADDRESS}` with the address you see returned in the previous step.
diff --git a/serving/samples/source-to-url-go/README.md b/serving/samples/source-to-url-go/README.md
index c0f468a0e..e4286e7f8 100644
--- a/serving/samples/source-to-url-go/README.md
+++ b/serving/samples/source-to-url-go/README.md
@@ -189,16 +189,6 @@ container for the application.
revisionName: app-from-source-00007
```
-
-1. After the build has completed and the container is pushed to Docker Hub, you
- can deploy the app into your cluster. Ensure that the container image value
- in `service.yaml` matches the container you built in
- the previous step. Apply the configuration using `kubectl`:
-
- ```shell
- kubectl apply -f service.yaml
- ```
-
1. Now that your service is created, Knative will perform the following steps:
* Fetch the revision specified from GitHub and build it into a container
* Push the container to Docker Hub
@@ -220,11 +210,17 @@ container for the application.
1. To find the URL for your service, type:
```shell
- $ kubectl get services.serving.knative.dev app-from-source -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
+ $ kubectl get ksvc app-from-source -o=custom-columns=NAME:.metadata.name,DOMAIN:.status.domain
NAME DOMAIN
app-from-source app-from-source.default.example.com
```
+ > Note: `ksvc` is an alias for `services.serving.knative.dev`. If you have
+ an older version (version 0.1.0) of Knative installed, you'll need to use
+ the long name until you upgrade to version 0.1.1 or higher. See
+ [Checking Knative Installation Version](../../../install/check-install-version.md)
+ to learn how to see what version you have installed.
+
1. Now you can make a request to your app to see the result. Replace
`{IP_ADDRESS}` with the address that you got in the previous step:
diff --git a/serving/samples/telemetry-go/README.md b/serving/samples/telemetry-go/README.md
index f904c0e1c..aee717105 100644
--- a/serving/samples/telemetry-go/README.md
+++ b/serving/samples/telemetry-go/README.md
@@ -2,100 +2,163 @@
This sample runs a simple web server that makes calls to other in-cluster services
and responds to requests with "Hello World!".
-The purpose of this sample is to show generating metrics, logs and distributed traces
-(see [Logs](../../accessing-logs.md), [Metrics](../../accessing-metrics.md), and [Traces](../../accessing-traces.md) for more information).
-This sample also creates a dedicated Prometheus instances rather than using the one
-that is installed by default as a showcase of installing dedicated Prometheus instances.
+The purpose of this sample is to show generating [metrics](../../accessing-metrics.md),
+[logs](../../accessing-logs.md) and distributed [traces](../../accessing-traces.md).
+This sample also shows how to create a dedicated Prometheus instance rather than
+using the default installation.
## Prerequisites
-1. [Install Knative Serving](https://github.com/knative/docs/blob/master/install/README.md)
-2. [Install Knative monitoring component](../../installing-logging-metrics-traces.md)
-3. Install [docker](https://www.docker.com/)
-
+1. A Kubernetes cluster with [Knative Serving](https://github.com/knative/docs/blob/master/install/README.md)
+installed.
+2. Check if Knative monitoring components are installed:
+```
+kubectl get pods -n monitoring
+```
+ * If pods aren't found, install the [Knative monitoring components](../../installing-logging-metrics-traces.md).
+3. Install [Docker](https://docs.docker.com/get-started/#prepare-your-docker-environment).
+4. Check out the code:
+```
+go get -d github.com/knative/docs/serving/samples/telemetry-go
+```
## Setup
-Build the app container and publish it to your registry of choice:
+Build the application container and publish it to a container registry:
-```shell
-REPO="gcr.io/"
+1. Move into the sample directory:
+```
+cd $GOPATH/src/github.com/knative/docs
+```
-# Build and publish the container, run from the root directory.
+2. Set your preferred container registry:
+```
+export REPO="gcr.io/"
+```
+ This example shows how to use Google Container Registry (GCR). You will need
+ a Google Cloud Project and to enable the [Google Container Registry
+API](https://console.cloud.google.com/apis/library/containerregistry.googleapis.com).
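+
+ You can also enable the API from the command line with the Cloud SDK
+ (a minimal sketch; it assumes `gcloud` is installed and set to the project
+ you want to use):
+```
+gcloud services enable containerregistry.googleapis.com
+```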
+
+3. Use Docker to build your application container:
+```
docker build \
--tag "${REPO}/serving/samples/telemetry-go" \
--file=serving/samples/telemetry-go/Dockerfile .
+```
+
+4. Push your container to a container registry:
+```
docker push "${REPO}/serving/samples/telemetry-go"
+```
-# Replace the image reference with our published image.
-perl -pi -e "s@github.com/knative/docs/serving/samples/telemetry-go@${REPO}/serving/samples/telemetry-go@g" serving/samples/telemetry-go/*.yaml
+5. Replace the image reference path with our published image path in the
+configuration file (`serving/samples/telemetry-go/sample.yaml`):
+ * Manually replace:
+ `image: github.com/knative/docs/serving/samples/telemetry-go` with
+ `image: /serving/samples/telemetry-go`
-# Deploy the Knative Serving sample
+ Or
+
+ * Run this command:
+ ```
+ perl -pi -e "s@github.com/knative/docs@${REPO}@g" serving/samples/telemetry-go/sample.yaml
+ ```
+
+## Deploy the Service
+
+Deploy this application to Knative Serving:
+```
kubectl apply -f serving/samples/telemetry-go/
```
-## Exploring
+## Explore the Service
-Once deployed, you can inspect the created resources with `kubectl` commands:
+Inspect the created resources with the `kubectl` commands:
-```shell
-# This will show the route that we created:
-kubectl get route -o yaml
+ * View the created Route resource:
+ ```
+ kubectl get route -o yaml
+ ```
-# This will show the configuration that we created:
-kubectl get configurations -o yaml
+ * View the created Configuration resource:
+ ```
+ kubectl get configurations -o yaml
+ ```
-# This will show the Revision that was created by our configuration:
-kubectl get revisions -o yaml
+ * View the Revision that was created by the Configuration:
+ ```
+ kubectl get revisions -o yaml
+ ```
+
+## Access the Service
+
+To access this service via `curl`, you need to determine its ingress address.
+
+1. To determine if your service is ready:
+ Check the status of your Knative gateway:
+ ```
+ kubectl get svc knative-ingressgateway -n istio-system --watch
+ ```
+
+ When the service is ready, you'll see an IP address in the `EXTERNAL-IP` field:
+ ```
+ NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
+ knative-ingressgateway LoadBalancer 10.23.247.74 35.203.155.229 80:32380/TCP,443:32390/TCP,32400:32400/TCP 2d
+ ```
+ Press CTRL+C to end the watch.
+
+ Check the status of your route:
+ ```
+ kubectl get route -o yaml
+ ```
+ When the route is ready, you'll see the following fields reported as:
+ ```YAML
+ status:
+ conditions:
+ ...
+ status: "True"
+ type: Ready
+ domain: telemetrysample-route.default.example.com
+ ```
+
+2. Export the ingress hostname and IP as environment
+variables:
```
-
-To access this service via `curl`, we first need to determine its ingress address:
-```shell
-watch kubectl get svc knative-ingressgateway -n istio-system
-NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-knative-ingressgateway LoadBalancer 10.23.247.74 35.203.155.229 80:32380/TCP,443:32390/TCP,32400:32400/TCP 2d
-```
-
-Once the `EXTERNAL-IP` gets assigned to the cluster, you can run:
-
-```shell
-# Put the Host name into an environment variable.
export SERVICE_HOST=`kubectl get route telemetrysample-route -o jsonpath="{.status.domain}"`
-
-# Put the ingress IP into an environment variable.
export SERVICE_IP=`kubectl get svc knative-ingressgateway -n istio-system -o jsonpath="{.status.loadBalancer.ingress[*].ip}"`
+```
-# Curl the ingress IP "as-if" DNS were properly configured.
+3. Make a request to the service to see the `Hello World!` message:
+```
curl --header "Host:$SERVICE_HOST" http://${SERVICE_IP}
-Hello World!
```
-Generate some logs to STDOUT and files under `/var/log` in `Json` or plain text formats.
-
-```shell
+4. Make a request to the `/log` endpoint to generate logs to `stdout` and to
+files under `/var/log` in both `JSON` and plain text formats:
+```
curl --header "Host:$SERVICE_HOST" http://${SERVICE_IP}/log
-Sending logs done.
```
-## Accessing logs
-You can access to the logs from Kibana UI - see [Logs](../../accessing-logs.md) for more information.
+## Access Logs
+You can access the logs from the Kibana UI - see [Logs](../../accessing-logs.md)
+for more information.
-## Accessing per request traces
-You can access to per request traces from Zipkin UI - see [Traces](../../accessing-traces.md) for more information.
+## Access per-Request Traces
+You can access per-request traces from the Zipkin UI - see [Traces](../../accessing-traces.md)
+for more information.
-## Accessing custom metrics
-You can see published metrics using Prometheus UI. To access to the UI, forward the Prometheus server to your machine:
-
-```bash
+## Access Custom Metrics
+You can see published metrics using the Prometheus UI. To access the UI, forward
+the Prometheus server to your machine:
+```
kubectl port-forward $(kubectl get pods --selector=app=prometheus,prometheus=test --output=jsonpath="{.items[0].metadata.name}") 9090
```
Then browse to http://localhost:9090.
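+
+If you prefer the command line, you can query the same data through the
+Prometheus HTTP API over the port-forward (a quick sketch; `up` is a built-in
+metric that simply confirms scraping is working):
+```
+curl 'http://localhost:9090/api/v1/query?query=up'
+```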
-## Cleaning up
+## Clean up
To clean up the sample service:
-
-```shell
-kubectl delete -f serving/samples/telemetrysample-go/
+```
+kubectl delete -f serving/samples/telemetry-go/
```
diff --git a/serving/samples/telemetry-go/configuration.yaml b/serving/samples/telemetry-go/sample.yaml
similarity index 100%
rename from serving/samples/telemetry-go/configuration.yaml
rename to serving/samples/telemetry-go/sample.yaml
diff --git a/serving/samples/thumbnailer-go/README.md b/serving/samples/thumbnailer-go/README.md
index 6fce3399d..8f1c27bcd 100644
--- a/serving/samples/thumbnailer-go/README.md
+++ b/serving/samples/thumbnailer-go/README.md
@@ -109,8 +109,12 @@ perl -pi -e "s@DOCKER_REPO_OVERRIDE@$REPO@g" sample.yaml
# Install the Kaniko build template used to build this sample (in the
# build-templates repo).
kubectl apply -f https://raw.githubusercontent.com/knative/build-templates/master/kaniko/kaniko.yaml
+
+# Create the Knative route and configuration for the application
+kubectl apply -f sample.yaml
```
+
Now, if you look at the `status` of the revision, you will see that a build is in progress:
```shell
@@ -201,4 +205,4 @@ curl -H "Host: $SERVICE_HOST" \
Although this demo uses an external application, the Knative Serving deployment
steps would be similar for any 'dockerized' app you may already have.
-Just copy the `thumbnailer.yaml` and change a few variables.
+Just copy the `sample.yaml` and change a few variables.
diff --git a/serving/using-external-dns.md b/serving/using-external-dns.md
new file mode 100644
index 000000000..e0806023b
--- /dev/null
+++ b/serving/using-external-dns.md
@@ -0,0 +1,161 @@
+# Using ExternalDNS to automate DNS setup
+
+[ExternalDNS](https://github.com/kubernetes-incubator/external-dns) is a tool
+that synchronizes exposed Kubernetes Services and Ingresses with DNS providers.
+
+This doc explains how to set up ExternalDNS within a Knative cluster using
+[Google Cloud DNS](https://cloud.google.com/dns/) to automate the process
+of publishing the Knative domain.
+
+## Prerequisites
+
+1. A Google Kubernetes Engine cluster with Cloud DNS scope.
+ You can create a GKE cluster with Cloud DNS scope by entering the following command:
+ ```shell
+ gcloud container clusters create "external-dns" \
+ --scopes "https://www.googleapis.com/auth/ndev.clouddns.readwrite"
+ ```
+1. [Knative Serving](https://github.com/knative/docs/blob/master/install/README.md) installed on your cluster.
+1. A public domain that will be used in Knative.
+1. Knative configured to use your custom domain.
+```shell
+kubectl edit cm config-domain -n knative-serving
+```
+This command opens your default text editor and allows you to edit the config
+map.
+```
+apiVersion: v1
+data:
+ example.com: ""
+kind: ConfigMap
+[...]
+```
+Edit the file to replace `example.com` with the domain you'd like to use and
+save your changes. In this example, we use the domain `external-dns-test.my-org.do`
+for all routes:
+```
+apiVersion: v1
+data:
+ external-dns-test.my-org.do: ""
+kind: ConfigMap
+[...]
+```
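+
+To make sure the change took effect, you can read the config map back
+(an optional check):
+```shell
+kubectl get cm config-domain -n knative-serving -o yaml
+```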
+
+## Setup steps
+
+This guide uses Google Cloud Platform as an example to show how to set up
+ExternalDNS. You can find detailed instructions for other cloud providers in the
+[ExternalDNS documentation](https://github.com/kubernetes-incubator/external-dns#deploying-to-a-cluster).
+
+### Choose a DNS provider
+
+Skip this step if you already have a DNS provider for your domain.
+
+Here is a [list](https://github.com/kubernetes-incubator/external-dns#the-latest-release-v05)
+of DNS providers supported by ExternalDNS. Choose a DNS provider from the list.
+
+### Create a DNS zone for managing DNS records
+
+Skip this step if you already have a zone for managing the DNS records of your
+custom domain.
+
+You need to create a DNS zone to contain the managed DNS records. This example
+assumes your custom domain is `external-dns-test.my-org.do`.
+
+Use the following command to create a DNS zone with [Google Cloud DNS](https://cloud.google.com/dns/):
+```shell
+gcloud dns managed-zones create "external-dns-zone" \
+ --dns-name "external-dns-test.my-org.do." \
+ --description "Automatically managed zone by kubernetes.io/external-dns"
+```
+Make a note of the nameservers that were assigned to your new zone.
+```shell
+gcloud dns record-sets list \
+ --zone "external-dns-zone" \
+ --name "external-dns-test.my-org.do." \
+ --type NS
+```
+You should see output similar to the following:
+```
+NAME TYPE TTL DATA
+external-dns-test.my-org.do. NS 21600 ns-cloud-e1.googledomains.com.,ns-cloud-e2.googledomains.com.,ns-cloud-e3.googledomains.com.,ns-cloud-e4.googledomains.com.
+```
+In this case, the DNS nameservers are `ns-cloud-{e1-e4}.googledomains.com`.
+Yours could differ slightly, for example `{a1-a4}` or `{b1-b4}`.
+
+If this zone has a parent zone, you need to add the NS records of this zone to
+the parent zone so that this zone can be found from the parent.
+Assuming the parent zone is `my-org-do`, the parent domain is `my-org.do`,
+and the parent zone is also hosted at Google Cloud DNS, you can follow these
+steps to add the NS records of this zone to the parent zone:
+```shell
+gcloud dns record-sets transaction start --zone "my-org-do"
+gcloud dns record-sets transaction add ns-cloud-e{1..4}.googledomains.com. \
+ --name "external-dns-test.my-org.do." --ttl 300 --type NS --zone "my-org-do"
+gcloud dns record-sets transaction execute --zone "my-org-do"
+```
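+
+To verify that the records landed in the parent zone, list them using the same
+pattern as above (an optional sanity check):
+```shell
+gcloud dns record-sets list \
+  --zone "my-org-do" \
+  --name "external-dns-test.my-org.do." \
+  --type NS
+```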
+
+### Deploy ExternalDNS
+
+Use the following command to apply the [manifest](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/gke.md#manifest-for-clusters-without-rbac-enabled) and install ExternalDNS:
+```shell
+cat <<EOF | kubectl apply -f -
+[...]
+EOF
+```
+Note that you need to set the argument `domain-filter` to your custom domain.
+
+You can verify that ExternalDNS is installed by running:
+```shell
+kubectl get deployment external-dns
+```
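+
+To double-check that `domain-filter` points at your custom domain, you can
+inspect the deployment's container arguments (a minimal sketch):
+```shell
+kubectl get deployment external-dns \
+  -o jsonpath='{.spec.template.spec.containers[0].args}'
+```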
+
+### Configure the Knative Gateway service
+
+To publish the Knative Gateway service, the annotation
+`external-dns.alpha.kubernetes.io/hostname: '*.external-dns-test.my-org.do'`
+needs to be added to the Knative gateway service:
+```shell
+kubectl edit svc knative-ingressgateway -n istio-system
+```
+This command opens your default text editor and allows you to add the
+annotation to the `knative-ingressgateway` service. After you've added your
+annotation, your file may look similar to this:
+```
+apiVersion: v1
+kind: Service
+metadata:
+ annotations:
+ external-dns.alpha.kubernetes.io/hostname: '*.external-dns-test.my-org.do'
+ ...
+```
+
+### Verify ExternalDNS works
+
+After roughly two minutes, check that a corresponding DNS record for your
+service was created.
+
+```shell
+gcloud dns record-sets list --zone "external-dns-zone" --name "*.external-dns-test.my-org.do."
+```
+You should see output similar to:
+
+```
+NAME TYPE TTL DATA
+*.external-dns-test.my-org.do. A 300 35.231.248.30
+*.external-dns-test.my-org.do. TXT 300 "heritage=external-dns,external-dns/owner=my-identifier,external-dns/resource=service/istio-system/knative-ingressgateway"
+```
+
+### Verify the domain has been published
+
+You can check whether the domain has been published to the Internet by entering
+the following command:
+```shell
+host test.external-dns-test.my-org.do
+```
+You should see the following result once the domain has been published:
+```
+test.external-dns-test.my-org.do has address 35.231.248.30
+```
+> Note: The process of publishing the domain to the Internet can take several
+minutes.
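+
+Once the domain resolves, requests to your Knative services no longer need a
+`Host` header override. For example, if you had a service named `helloworld-go`
+running in the `default` namespace (a hypothetical service name used only for
+illustration), you could reach it directly:
+```shell
+curl http://helloworld-go.default.external-dns-test.my-org.do
+```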
diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh
index 7798fe58d..12e5c86be 100755
--- a/test/e2e-tests.sh
+++ b/test/e2e-tests.sh
@@ -23,11 +23,7 @@
# Calling this script without arguments will create a new cluster in
# project $PROJECT_ID, run the tests and delete the cluster.
-# Load github.com/knative/test-infra/images/prow-tests/scripts/e2e-tests.sh
-[ -f /workspace/e2e-tests.sh ] \
- && source /workspace/e2e-tests.sh \
- || eval "$(docker run --entrypoint sh gcr.io/knative-tests/test-infra/prow-tests -c 'cat e2e-tests.sh')"
-[ -v KNATIVE_TEST_INFRA ] || exit 1
+source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
# Script entry point.
diff --git a/test/presubmit-tests.sh b/test/presubmit-tests.sh
index 4ede9b244..0aec68778 100755
--- a/test/presubmit-tests.sh
+++ b/test/presubmit-tests.sh
@@ -18,11 +18,7 @@
# It is started by prow for each PR.
# For convenience, it can also be executed manually.
-# Load github.com/knative/test-infra/images/prow-tests/scripts/presubmit-tests.sh
-[ -f /workspace/presubmit-tests.sh ] \
- && source /workspace/presubmit-tests.sh \
- || eval "$(docker run --entrypoint sh gcr.io/knative-tests/test-infra/prow-tests -c 'cat presubmit-tests.sh')"
-[ -v KNATIVE_TEST_INFRA ] || exit 1
+source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
function build_tests() {
header "TODO(#67): Write build tests"
diff --git a/vendor/github.com/knative/test-infra/scripts/README.md b/vendor/github.com/knative/test-infra/scripts/README.md
new file mode 100644
index 000000000..5ff9ccbce
--- /dev/null
+++ b/vendor/github.com/knative/test-infra/scripts/README.md
@@ -0,0 +1,3 @@
+# Helper scripts
+
+This directory contains helper scripts used by Prow test jobs, as well as local development scripts.
diff --git a/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh b/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
new file mode 100755
index 000000000..1262f9821
--- /dev/null
+++ b/vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
@@ -0,0 +1,313 @@
+#!/bin/bash
+
+# Copyright 2018 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This is a helper script for Knative E2E test scripts. To use it:
+# 1. Source this script.
+# 2. [optional] Write the teardown() function, which will tear down your test
+# resources.
+# 3. [optional] Write the dump_extra_cluster_state() function. It will be called
+# when a test fails, and can dump extra information about the current state of
+# the cluster (typically using kubectl).
+# 4. Call the initialize() function passing $@ (without quotes).
+# 5. Write logic for the end-to-end tests. Run all go tests using report_go_test()
+# and call fail_test() or success() if any of them failed. The environment
+# variables DOCKER_REPO_OVERRIDE, K8S_CLUSTER_OVERRIDE and K8S_USER_OVERRIDE
+# will be set accordingly to the test cluster. You can also use the following
+# boolean (0 is false, 1 is true) environment variables for the logic:
+# EMIT_METRICS: true if --emit-metrics is passed.
+# USING_EXISTING_CLUSTER: true if the test cluster is an already existing one,
+# and not a temporary cluster created by kubetest.
+# All environment variables above are marked read-only.
+# Notes:
+# 1. Calling your script without arguments will create a new cluster in the GCP
+# project $PROJECT_ID and run the tests against it.
+# 2. Calling your script with --run-tests and the variables K8S_CLUSTER_OVERRIDE,
+# K8S_USER_OVERRIDE and DOCKER_REPO_OVERRIDE set will immediately start the
+# tests against the cluster.
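+#
+# Minimal usage sketch (the paths and test package below are placeholders,
+# not part of this script):
+#
+#   source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/e2e-tests.sh
+#   function teardown() { kubectl delete --ignore-not-found=true -f test/config.yaml; }
+#   initialize $@
+#   report_go_test ./test/e2e/... || fail_test "e2e tests failed"
+#   success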
+
+source $(dirname ${BASH_SOURCE})/library.sh
+
+# Build a resource name based on $E2E_BASE_NAME, a suffix and $BUILD_NUMBER.
+# Restricts the name length to 40 chars (the limit for resource names in GCP).
+# Name will have the form $E2E_BASE_NAME-$BUILD_NUMBER.
+# Parameters: $1 - name suffix
+function build_resource_name() {
+ local prefix=${E2E_BASE_NAME}-$1
+ local suffix=${BUILD_NUMBER}
+ # Restrict suffix length to 20 chars
+ if [[ -n "${suffix}" ]]; then
+ suffix=${suffix:${#suffix}<20?0:-20}
+ fi
+ echo "${prefix:0:20}${suffix}"
+}
+
+# Test cluster parameters
+readonly E2E_BASE_NAME=k$(basename ${REPO_ROOT_DIR})
+readonly E2E_CLUSTER_NAME=$(build_resource_name e2e-cls)
+readonly E2E_NETWORK_NAME=$(build_resource_name e2e-net)
+readonly E2E_CLUSTER_REGION=us-central1
+readonly E2E_CLUSTER_ZONE=${E2E_CLUSTER_REGION}-a
+readonly E2E_CLUSTER_NODES=3
+readonly E2E_CLUSTER_MACHINE=n1-standard-4
+readonly TEST_RESULT_FILE=/tmp/${E2E_BASE_NAME}-e2e-result
+
+# Tear down the test resources.
+function teardown_test_resources() {
+ header "Tearing down test environment"
+ # Free resources in GCP project.
+ if (( ! USING_EXISTING_CLUSTER )) && [[ "$(type -t teardown)" == "function" ]]; then
+ teardown
+ fi
+
+ # Delete Knative Serving images when using prow.
+ if (( IS_PROW )); then
+ echo "Images in ${DOCKER_REPO_OVERRIDE}:"
+ gcloud container images list --repository=${DOCKER_REPO_OVERRIDE}
+ delete_gcr_images ${DOCKER_REPO_OVERRIDE}
+ else
+ # Delete the kubernetes source downloaded by kubetest
+ rm -fr kubernetes kubernetes.tar.gz
+ fi
+}
+
+# Exit test, dumping current state info.
+# Parameters: $1 - error message (optional).
+function fail_test() {
+ [[ -n $1 ]] && echo "ERROR: $1"
+ dump_cluster_state
+ exit 1
+}
+
+# Download the k8s binaries required by kubetest.
+function download_k8s() {
+ local version=${SERVING_GKE_VERSION}
+ if [[ "${version}" == "latest" ]]; then
+ # Fetch latest valid version
+ local versions="$(gcloud container get-server-config \
+ --project=${GCP_PROJECT} \
+ --format='value(validMasterVersions)' \
+ --region=${E2E_CLUSTER_REGION})"
+ local gke_versions=(`echo -n ${versions//;/ /}`)
+ # Get first (latest) version, excluding the "-gke.#" suffix
+ version="${gke_versions[0]%-*}"
+ echo "Latest GKE is ${version}, from [${versions//;/, /}]"
+ elif [[ "${version}" == "default" ]]; then
+ echo "ERROR: `default` GKE version is not supported yet"
+ return 1
+ fi
+ # Download k8s to staging dir
+ version=v${version}
+ local staging_dir=${GOPATH}/src/k8s.io/kubernetes/_output/gcs-stage
+ rm -fr ${staging_dir}
+ staging_dir=${staging_dir}/${version}
+ mkdir -p ${staging_dir}
+ pushd ${staging_dir}
+ export KUBERNETES_PROVIDER=gke
+ export KUBERNETES_RELEASE=${version}
+ curl -fsSL https://get.k8s.io | bash
+ local result=$?
+ if [[ ${result} -eq 0 ]]; then
+ mv kubernetes/server/kubernetes-server-*.tar.gz .
+ mv kubernetes/client/kubernetes-client-*.tar.gz .
+ rm -fr kubernetes
+ # Create an empty kubernetes test tarball; we don't use it but kubetest will fetch it
+ tar -czf kubernetes-test.tar.gz -T /dev/null
+ fi
+ popd
+ return ${result}
+}
+
+# Dump info about the test cluster. If dump_extra_cluster_info() is defined, calls it too.
+# This is intended to be called when a test fails to provide debugging information.
+function dump_cluster_state() {
+ echo "***************************************"
+ echo "*** TEST FAILED ***"
+ echo "*** Start of information dump ***"
+ echo "***************************************"
+ echo ">>> All resources:"
+ kubectl get all --all-namespaces
+ echo ">>> Services:"
+ kubectl get services --all-namespaces
+ echo ">>> Events:"
+ kubectl get events --all-namespaces
+ [[ "$(type -t dump_extra_cluster_state)" == "function" ]] && dump_extra_cluster_state
+ echo "***************************************"
+ echo "*** TEST FAILED ***"
+ echo "*** End of information dump ***"
+ echo "***************************************"
+}
+
+# Create a test cluster with kubetest and call the current script again.
+function create_test_cluster() {
+ header "Creating test cluster"
+ # Smallest cluster required to run the end-to-end-tests
+ local CLUSTER_CREATION_ARGS=(
+ --gke-create-args="--enable-autoscaling --min-nodes=1 --max-nodes=${E2E_CLUSTER_NODES} --scopes=cloud-platform"
+ --gke-shape={\"default\":{\"Nodes\":${E2E_CLUSTER_NODES}\,\"MachineType\":\"${E2E_CLUSTER_MACHINE}\"}}
+ --provider=gke
+ --deployment=gke
+ --cluster="${E2E_CLUSTER_NAME}"
+ --gcp-zone="${E2E_CLUSTER_ZONE}"
+ --gcp-network="${E2E_NETWORK_NAME}"
+ --gke-environment=prod
+ )
+ if (( ! IS_PROW )); then
+ CLUSTER_CREATION_ARGS+=(--gcp-project=${PROJECT_ID:?"PROJECT_ID must be set to the GCP project where the tests are run."})
+ else
+ CLUSTER_CREATION_ARGS+=(--gcp-service-account=/etc/service-account/service-account.json)
+ fi
+ # SSH keys are not used, but kubetest checks for their existence.
+ # Touch them so that if they don't exist, empty files are created to satisfy the check.
+ touch $HOME/.ssh/google_compute_engine.pub
+ touch $HOME/.ssh/google_compute_engine
+ # Clear user and cluster variables, so they'll be set to the test cluster.
+ # DOCKER_REPO_OVERRIDE is not touched because when running locally it must
+ # be a writeable docker repo.
+ export K8S_USER_OVERRIDE=
+ export K8S_CLUSTER_OVERRIDE=
+ # Get the current GCP project
+ export GCP_PROJECT=${PROJECT_ID}
+ [[ -z ${GCP_PROJECT} ]] && export GCP_PROJECT=$(gcloud config get-value project)
+ # Assume test failed (see more details at the end of this script).
+ echo -n "1"> ${TEST_RESULT_FILE}
+ local test_cmd_args="--run-tests"
+ (( EMIT_METRICS )) && test_cmd_args+=" --emit-metrics"
+ echo "Test script is ${E2E_SCRIPT}"
+ download_k8s || return 1
+ kubetest "${CLUSTER_CREATION_ARGS[@]}" \
+ --up \
+ --down \
+ --extract local \
+ --gcp-node-image ${SERVING_GKE_IMAGE} \
+ --test-cmd "${E2E_SCRIPT}" \
+ --test-cmd-args "${test_cmd_args}"
+ echo "Test subprocess exited with code $?"
+ # Delete target pools and health checks that might have leaked.
+ # See https://github.com/knative/serving/issues/959 for details.
+ # TODO(adrcunha): Remove once the leak issue is resolved.
+ local http_health_checks="$(gcloud compute target-pools list \
+ --project=${GCP_PROJECT} --format='value(healthChecks)' --filter="instances~-${E2E_CLUSTER_NAME}-" | \
+ grep httpHealthChecks | tr '\n' ' ')"
+ local target_pools="$(gcloud compute target-pools list \
+ --project=${GCP_PROJECT} --format='value(name)' --filter="instances~-${E2E_CLUSTER_NAME}-" | \
+ tr '\n' ' ')"
+ if [[ -n "${target_pools}" ]]; then
+ echo "Found leaked target pools, deleting"
+ gcloud compute forwarding-rules delete -q --project=${GCP_PROJECT} --region=${E2E_CLUSTER_REGION} ${target_pools}
+ gcloud compute target-pools delete -q --project=${GCP_PROJECT} --region=${E2E_CLUSTER_REGION} ${target_pools}
+ fi
+ if [[ -n "${http_health_checks}" ]]; then
+ echo "Found leaked health checks, deleting"
+ gcloud compute http-health-checks delete -q --project=${GCP_PROJECT} ${http_health_checks}
+ fi
+ local result="$(cat ${TEST_RESULT_FILE})"
+ echo "Test result code is $result"
+ exit ${result}
+}
+
+# Setup the test cluster for running the tests.
+function setup_test_cluster() {
+ # Fail fast during setup.
+ set -o errexit
+ set -o pipefail
+
+ # Set the required variables if necessary.
+ if [[ -z ${K8S_USER_OVERRIDE} ]]; then
+ export K8S_USER_OVERRIDE=$(gcloud config get-value core/account)
+ fi
+
+ if [[ -z ${K8S_CLUSTER_OVERRIDE} ]]; then
+ USING_EXISTING_CLUSTER=0
+ export K8S_CLUSTER_OVERRIDE=$(kubectl config current-context)
+ acquire_cluster_admin_role ${K8S_USER_OVERRIDE} ${E2E_CLUSTER_NAME} ${E2E_CLUSTER_ZONE}
+ # Make sure we're in the default namespace. Currently kubetest switches to
+ # test-pods namespace when creating the cluster.
+ kubectl config set-context $K8S_CLUSTER_OVERRIDE --namespace=default
+ fi
+ readonly USING_EXISTING_CLUSTER
+
+ if [[ -z ${DOCKER_REPO_OVERRIDE} ]]; then
+ export DOCKER_REPO_OVERRIDE=gcr.io/$(gcloud config get-value project)/${E2E_BASE_NAME}-e2e-img
+ fi
+
+ echo "- Cluster is ${K8S_CLUSTER_OVERRIDE}"
+ echo "- User is ${K8S_USER_OVERRIDE}"
+ echo "- Docker is ${DOCKER_REPO_OVERRIDE}"
+
+ trap teardown_test_resources EXIT
+
+ if (( USING_EXISTING_CLUSTER )) && [[ "$(type -t teardown)" == "function" ]]; then
+ echo "Deleting any previous SUT instance"
+ teardown
+ fi
+
+ readonly K8S_CLUSTER_OVERRIDE
+ readonly K8S_USER_OVERRIDE
+ readonly DOCKER_REPO_OVERRIDE
+
+ # Handle failures ourselves, so we can dump useful info.
+ set +o errexit
+ set +o pipefail
+}
+
+function success() {
+ # kubetest teardown might fail and thus incorrectly report failure of the
+ # script, even if the tests pass.
+ # We store the real test result to return it later, ignoring any teardown
+ # failure in kubetest.
+ # TODO(adrcunha): Get rid of this workaround.
+ echo -n "0"> ${TEST_RESULT_FILE}
+ echo "**************************************"
+ echo "*** ALL TESTS PASSED ***"
+ echo "**************************************"
+ exit 0
+}
+
+RUN_TESTS=0
+EMIT_METRICS=0
+USING_EXISTING_CLUSTER=1
+E2E_SCRIPT=""
+
+# Parse flags and initialize the test cluster.
+function initialize() {
+ # Normalize calling script path; we can't use readlink because it's not available everywhere
+ E2E_SCRIPT=$0
+ [[ ${E2E_SCRIPT} =~ ^[\./].* ]] || E2E_SCRIPT="./$0"
+ E2E_SCRIPT="$(cd ${E2E_SCRIPT%/*} && echo $PWD/${E2E_SCRIPT##*/})"
+ readonly E2E_SCRIPT
+
+ cd ${REPO_ROOT_DIR}
+ for parameter in $@; do
+ case $parameter in
+ --run-tests) RUN_TESTS=1 ;;
+ --emit-metrics) EMIT_METRICS=1 ;;
+ *)
+ echo "error: unknown option ${parameter}"
+ echo "usage: $0 [--run-tests][--emit-metrics]"
+ exit 1
+ ;;
+ esac
+ shift
+ done
+ readonly RUN_TESTS
+ readonly EMIT_METRICS
+
+ if (( ! RUN_TESTS )); then
+ create_test_cluster
+ else
+ setup_test_cluster
+ fi
+}
diff --git a/vendor/github.com/knative/test-infra/scripts/library.sh b/vendor/github.com/knative/test-infra/scripts/library.sh
new file mode 100755
index 000000000..d9a2e0bdb
--- /dev/null
+++ b/vendor/github.com/knative/test-infra/scripts/library.sh
@@ -0,0 +1,309 @@
+#!/bin/bash
+
+# Copyright 2018 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This is a collection of useful bash functions and constants, intended
+# to be used in test scripts and the like. It doesn't do anything when
+# called from command line.
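+#
+# Typical usage is to source this file and call its helpers directly, e.g.
+# (a sketch; the namespace below is only an example):
+#
+#   source $(dirname $0)/../vendor/github.com/knative/test-infra/scripts/library.sh
+#   header "Running smoke tests"
+#   wait_until_pods_running knative-serving || exit 1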
+
+# Default GKE version to be used with Knative Serving
+readonly SERVING_GKE_VERSION=latest
+readonly SERVING_GKE_IMAGE=cos
+
+# Public images and yaml files.
+readonly KNATIVE_ISTIO_YAML=https://storage.googleapis.com/knative-releases/serving/latest/istio.yaml
+readonly KNATIVE_SERVING_RELEASE=https://storage.googleapis.com/knative-releases/serving/latest/release.yaml
+readonly KNATIVE_BUILD_RELEASE=https://storage.googleapis.com/knative-releases/build/latest/release.yaml
+readonly KNATIVE_EVENTING_RELEASE=https://storage.googleapis.com/knative-releases/eventing/latest/release.yaml
+
+# Useful environment variables
+[[ -n "${PROW_JOB_ID}" ]] && IS_PROW=1 || IS_PROW=0
+readonly IS_PROW
+readonly REPO_ROOT_DIR="$(git rev-parse --show-toplevel)"
+
+# Display a box banner.
+# Parameters: $1 - character to use for the box.
+# $2 - banner message.
+function make_banner() {
+ local msg="$1$1$1$1 $2 $1$1$1$1"
+ local border="${msg//[-0-9A-Za-z _.,]/$1}"
+ echo -e "${border}\n${msg}\n${border}"
+}
+
+# Simple header for logging purposes.
+function header() {
+ local upper="$(echo $1 | tr a-z A-Z)"
+ make_banner "=" "${upper}"
+}
+
+# Simple subheader for logging purposes.
+function subheader() {
+ make_banner "-" "$1"
+}
+
+# Simple warning banner for logging purposes.
+function warning() {
+ make_banner "!" "$1"
+}
+
+# Remove ALL images in the given GCR repository.
+# Parameters: $1 - GCR repository.
+function delete_gcr_images() {
+ for image in $(gcloud --format='value(name)' container images list --repository=$1); do
+ echo "Checking ${image} for removal"
+ delete_gcr_images ${image}
+ for digest in $(gcloud --format='get(digest)' container images list-tags ${image} --limit=99999); do
+ local full_image="${image}@${digest}"
+ echo "Removing ${full_image}"
+ gcloud container images delete -q --force-delete-tags ${full_image}
+ done
+ done
+}
+
+# Waits until the given object doesn't exist.
+# Parameters: $1 - the kind of the object.
+# $2 - object's name.
+# $3 - namespace (optional).
+function wait_until_object_does_not_exist() {
+ local KUBECTL_ARGS="get $1 $2"
+ local DESCRIPTION="$1 $2"
+
+ if [[ -n $3 ]]; then
+ KUBECTL_ARGS="get -n $3 $1 $2"
+ DESCRIPTION="$1 $3/$2"
+ fi
+ echo -n "Waiting until ${DESCRIPTION} does not exist"
+ for i in {1..150}; do # timeout after 5 minutes
+ kubectl ${KUBECTL_ARGS} 2>&1 > /dev/null || return 0
+ echo -n "."
+ sleep 2
+ done
+ echo -e "\n\nERROR: timeout waiting for ${DESCRIPTION} not to exist"
+ kubectl ${KUBECTL_ARGS}
+ return 1
+}
+
+# Waits until all pods are running in the given namespace.
+# Parameters: $1 - namespace.
+function wait_until_pods_running() {
+ echo -n "Waiting until all pods in namespace $1 are up"
+ for i in {1..150}; do # timeout after 5 minutes
+ local pods="$(kubectl get pods --no-headers -n $1 2>/dev/null)"
+ # All pods must be running
+ local not_running=$(echo "${pods}" | grep -v Running | grep -v Completed | wc -l)
+ if [[ -n "${pods}" && ${not_running} -eq 0 ]]; then
+ local all_ready=1
+ while read pod ; do
+ local status=(`echo -n ${pod} | cut -f2 -d' ' | tr '/' ' '`)
+ # All containers must be ready
+ [[ -z ${status[0]} ]] && all_ready=0 && break
+ [[ -z ${status[1]} ]] && all_ready=0 && break
+ [[ ${status[0]} -lt 1 ]] && all_ready=0 && break
+ [[ ${status[1]} -lt 1 ]] && all_ready=0 && break
+ [[ ${status[0]} -ne ${status[1]} ]] && all_ready=0 && break
+ done <<< $(echo "${pods}" | grep -v Completed)
+ if (( all_ready )); then
+ echo -e "\nAll pods are up:\n${pods}"
+ return 0
+ fi
+ fi
+ echo -n "."
+ sleep 2
+ done
+ echo -e "\n\nERROR: timeout waiting for pods to come up\n${pods}"
+ kubectl get pods -n $1
+ return 1
+}
+
+# Waits until the given service has an external IP address.
+# Parameters: $1 - namespace.
+# $2 - service name.
+function wait_until_service_has_external_ip() {
+ echo -n "Waiting until service $2 in namespace $1 has an external IP"
+ for i in {1..150}; do # timeout after 15 minutes
+ local ip=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
+ if [[ -n "${ip}" ]]; then
+ echo -e "\nService $2.$1 has IP $ip"
+ return 0
+ fi
+ echo -n "."
+ sleep 6
+ done
+ echo -e "\n\nERROR: timeout waiting for service $svc.$ns to have an external IP"
+ kubectl get pods -n $1
+ return 1
+}
+
+# Returns the name of the pod of the given app.
+# Parameters: $1 - app name.
+# $2 - namespace (optional).
+function get_app_pod() {
+ local namespace=""
+ [[ -n $2 ]] && namespace="-n $2"
+ kubectl get pods ${namespace} --selector=app=$1 --output=jsonpath="{.items[0].metadata.name}"
+}
+
+# Sets the given user as cluster admin.
+# Parameters: $1 - user
+# $2 - cluster name
+# $3 - cluster zone
+function acquire_cluster_admin_role() {
+ # Get the password of the admin and use it, as the service account (or the user)
+ # might not have the necessary permission.
+ local password=$(gcloud --format="value(masterAuth.password)" \
+ container clusters describe $2 --zone=$3)
+ kubectl config set-credentials cluster-admin \
+ --username=admin --password=${password}
+ kubectl config set-context $(kubectl config current-context) \
+ --user=cluster-admin
+ kubectl create clusterrolebinding cluster-admin-binding \
+ --clusterrole=cluster-admin \
+ --user=$1
+ # Reset back to the default account
+ gcloud container clusters get-credentials \
+ $2 --zone=$3 --project $(gcloud config get-value project)
+}
+
+# Runs go tests and generates a junit summary through bazel.
+# Parameters: $1... - parameters to go test
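+# Example call (the test path and timeout below are illustrative placeholders):
+#   report_go_test -timeout=20m ./test/e2e/...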
+function report_go_test() {
+ # Just run regular go tests if not on Prow.
+ if (( ! IS_PROW )); then
+ go test $@
+ return
+ fi
+ local report=$(mktemp)
+ local summary=$(mktemp)
+ local failed=0
+ # Run tests in verbose mode to capture details.
+ # go doesn't like repeating -v, so remove if passed.
+ local args=("${@/-v}")
+ go test -race -v ${args[@]} > ${report} || failed=$?
+ # Tests didn't run.
+ [[ ! -s ${report} ]] && return 1
+ # Create WORKSPACE file, required to use bazel
+ touch WORKSPACE
+ local targets=""
+ # Parse the report and generate fake tests for each passing/failing test.
+ while read line ; do
+ local fields=(`echo -n ${line}`)
+ local field0="${fields[0]}"
+ local field1="${fields[1]}"
+ local name=${fields[2]}
+ # Ignore subtests (those containing slashes)
+ if [[ -n "${name##*/*}" ]]; then
+ if [[ ${field1} == PASS: || ${field1} == FAIL: ]]; then
+ # Populate BUILD.bazel
+ local src="${name}.sh"
+ echo "exit 0" > ${src}
+ if [[ ${field1} == "FAIL:" ]]; then
+ read error
+          echo "cat <<ERROR-EOF" > ${src}
+ echo "${error}" >> ${src}
+ echo "ERROR-EOF" >> ${src}
+ echo "exit 1" >> ${src}
+ fi
+ chmod +x ${src}
+ echo "sh_test(name=\"${name}\", srcs=[\"${src}\"])" >> BUILD.bazel
+ elif [[ ${field0} == FAIL || ${field0} == ok ]]; then
+ # Update the summary with the result for the package
+ echo "${line}" >> ${summary}
+ # Create the package structure, move tests and BUILD file
+ local package=${field1/github.com\//}
+ mkdir -p ${package}
+ targets="${targets} //${package}/..."
+ mv *.sh BUILD.bazel ${package}
+ fi
+ fi
+ done < ${report}
+ # If any test failed, show the detailed report.
+ # Otherwise, just show the summary.
+ # Exception: when emitting metrics, dump the full report.
+ if (( failed )) || [[ "$@" == *" -emitmetrics"* ]]; then
+ cat ${report}
+ else
+ cat ${summary}
+ fi
+ # Always generate the junit summary.
+ bazel test ${targets} > /dev/null 2>&1
+ return ${failed}
+}
+
+# Install the latest stable Knative/serving in the current cluster.
+function start_latest_knative_serving() {
+ header "Starting Knative Serving"
+ subheader "Installing Istio"
+ kubectl apply -f ${KNATIVE_ISTIO_YAML} || return 1
+ wait_until_pods_running istio-system || return 1
+ kubectl label namespace default istio-injection=enabled || return 1
+ subheader "Installing Knative Serving"
+ kubectl apply -f ${KNATIVE_SERVING_RELEASE} || return 1
+ wait_until_pods_running knative-serving || return 1
+ wait_until_pods_running knative-build || return 1
+}
+
+# Install the latest stable Knative/build in the current cluster.
+function start_latest_knative_build() {
+ header "Starting Knative Build"
+ subheader "Installing Istio"
+ kubectl apply -f ${KNATIVE_ISTIO_YAML} || return 1
+ wait_until_pods_running istio-system || return 1
+ subheader "Installing Knative Build"
+ kubectl apply -f ${KNATIVE_BUILD_RELEASE} || return 1
+ wait_until_pods_running knative-build || return 1
+}
+
+# Run dep-collector, installing it first if necessary.
+# Parameters: $1..$n - parameters passed to dep-collector.
+function run_dep_collector() {
+ local local_dep_collector="$(which dep-collector)"
+ if [[ -z ${local_dep_collector} ]]; then
+ go get -u github.com/mattmoor/dep-collector
+ fi
+ dep-collector $@
+}
+
+# Run dep-collector to update licenses.
+# Parameters: $1 - output file, relative to repo root dir.
+# $2...$n - directories and files to inspect.
+function update_licenses() {
+ cd ${REPO_ROOT_DIR} || return 1
+ local dst=$1
+ shift
+ run_dep_collector $@ > ./${dst}
+}
+
+# Run dep-collector to check for forbidden licenses.
+# Parameters: $1...$n - directories and files to inspect.
+function check_licenses() {
+ # Fetch the google/licenseclassifier for its license db
+ go get -u github.com/google/licenseclassifier
+ # Check that we don't have any forbidden licenses in our images.
+ run_dep_collector -check $@
+}
+
+# Check links in all .md files in the repo.
+function check_links_in_markdown() {
+ local checker="markdown-link-check"
+ if ! hash ${checker} 2>/dev/null; then
+ warning "${checker} not installed, not checking links in .md files"
+ return 0
+ fi
+ local failed=0
+ for md_file in $(find ${REPO_ROOT_DIR} -name \*.md); do
+ ${checker} -q ${md_file} || failed=1
+ done
+ return ${failed}
+}
diff --git a/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh b/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
new file mode 100755
index 000000000..384e3c5ae
--- /dev/null
+++ b/vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+
+# Copyright 2018 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This is a helper script to run the presubmit tests. To use it:
+# 1. Source this script.
+# 2. Define the functions build_tests(), unit_tests() and
+# integration_tests(). They should run all tests (i.e., not fail
+# fast), and return 0 if all passed, 1 if a failure occurred.
+# The environment variables RUN_BUILD_TESTS, RUN_UNIT_TESTS and
+# RUN_INTEGRATION_TESTS are set to 0 (false) or 1 (true) accordingly.
+# If --emit-metrics is passed, EMIT_METRICS will be set to 1.
+# 3. Call the main() function passing $@ (without quotes).
+#
+# Running the script without parameters, or with the --all-tests
+# flag, causes all tests to be executed, in the right order.
+# Use the flags --build-tests, --unit-tests and --integration-tests
+# to run a specific set of tests. The flag --emit-metrics is used
+# to emit metrics when running the tests.
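+#
+# A minimal caller might look like the sketch below; the script path and the
+# function bodies are illustrative placeholders, not part of this library:
+#
+#   source "$(dirname $0)/../vendor/github.com/knative/test-infra/scripts/presubmit-tests.sh"
+#
+#   function build_tests() { go build ./... ; }
+#   function unit_tests() { report_go_test ./... ; }
+#   function integration_tests() { ./test/e2e-tests.sh ; }
+#
+#   main $@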
+
+source $(dirname ${BASH_SOURCE})/library.sh
+
+# Extensions or file patterns that don't require presubmit tests.
+readonly NO_PRESUBMIT_FILES=(\.md \.png ^OWNERS)
+
+# Options set by command-line flags.
+RUN_BUILD_TESTS=0
+RUN_UNIT_TESTS=0
+RUN_INTEGRATION_TESTS=0
+EMIT_METRICS=0
+
+# Exit presubmit tests if only documentation files were changed.
+function exit_if_presubmit_not_required() {
+ if [[ -n "${PULL_PULL_SHA}" ]]; then
+ # On a presubmit job
+ local changes="$(git diff --name-only ${PULL_PULL_SHA} ${PULL_BASE_SHA})"
+ local no_presubmit_pattern="${NO_PRESUBMIT_FILES[*]}"
+ local no_presubmit_pattern="\(${no_presubmit_pattern// /\\|}\)$"
+ echo -e "Changed files in commit ${PULL_PULL_SHA}:\n${changes}"
+ if [[ -z "$(echo "${changes}" | grep -v ${no_presubmit_pattern})" ]]; then
+ # Nothing changed other than files that don't require presubmit tests
+ header "Commit only contains changes that don't affect tests, skipping"
+ exit 0
+ fi
+ fi
+}
+
+# Process flags and run tests accordingly.
+function main() {
+ exit_if_presubmit_not_required
+
+ local all_parameters=$@
+ [[ -z $1 ]] && all_parameters="--all-tests"
+
+ for parameter in ${all_parameters}; do
+ case ${parameter} in
+ --all-tests)
+ RUN_BUILD_TESTS=1
+ RUN_UNIT_TESTS=1
+ RUN_INTEGRATION_TESTS=1
+ shift
+ ;;
+ --build-tests)
+ RUN_BUILD_TESTS=1
+ shift
+ ;;
+ --unit-tests)
+ RUN_UNIT_TESTS=1
+ shift
+ ;;
+ --integration-tests)
+ RUN_INTEGRATION_TESTS=1
+ shift
+ ;;
+ --emit-metrics)
+ EMIT_METRICS=1
+ shift
+ ;;
+ *)
+ echo "error: unknown option ${parameter}"
+ exit 1
+ ;;
+ esac
+ done
+
+ readonly RUN_BUILD_TESTS
+ readonly RUN_UNIT_TESTS
+ readonly RUN_INTEGRATION_TESTS
+ readonly EMIT_METRICS
+
+ cd ${REPO_ROOT_DIR}
+
+ # Tests to be performed, in the right order if --all-tests is passed.
+
+ local result=0
+ if (( RUN_BUILD_TESTS )); then
+ build_tests || result=1
+ fi
+ if (( RUN_UNIT_TESTS )); then
+ unit_tests || result=1
+ fi
+ if (( RUN_INTEGRATION_TESTS )); then
+ integration_tests || result=1
+ fi
+ exit ${result}
+}
diff --git a/vendor/github.com/knative/test-infra/scripts/release.sh b/vendor/github.com/knative/test-infra/scripts/release.sh
new file mode 100755
index 000000000..abf89ddf4
--- /dev/null
+++ b/vendor/github.com/knative/test-infra/scripts/release.sh
@@ -0,0 +1,118 @@
+#!/bin/bash
+
+# Copyright 2018 The Knative Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This is a helper script for Knative release scripts. To use it:
+# 1. Source this script.
+# 2. Call the parse_flags() function passing $@ (without quotes).
+# 3. Call the run_validation_tests() function, passing the script or
+#    executable that runs the release validation tests.
+# 4. Write logic for the release process. Use the following boolean (0 is
+# false, 1 is true) environment variables for the logic:
+# SKIP_TESTS: true if --skip-tests is passed. This is handled automatically
+# by the run_validation_tests() function.
+#   TAG_RELEASE: true if --tag-release is passed. In this case, the
+#                environment variable TAG will contain the release tag in the
+#                form vYYYYMMDD-<commit_short_hash>.
+#   PUBLISH_RELEASE: true if --publish is passed. In this case, the -L option
+#                is removed from the KO_FLAGS environment variable.
+# SKIP_TESTS, TAG_RELEASE and PUBLISH_RELEASE default to false for safety.
+# All environment variables above, except KO_FLAGS, are marked read-only once
+# parse_flags() is called.
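+#
+# A minimal caller might look like the sketch below; the script path, yaml
+# file, registry, bucket and the ko invocation are illustrative placeholders:
+#
+#   source "$(dirname $0)/../vendor/github.com/knative/test-infra/scripts/release.sh"
+#
+#   parse_flags $@
+#   run_validation_tests ./test/presubmit-tests.sh
+#   ko resolve ${KO_FLAGS} -f config/ > release.yaml
+#   tag_images_in_yaml release.yaml gcr.io/my-project ${TAG}
+#   (( PUBLISH_RELEASE )) && publish_yaml release.yaml my-release-bucket ${TAG}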
+
+source $(dirname ${BASH_SOURCE})/library.sh
+
+# Simple banner for logging purposes.
+function banner() {
+ make_banner "@" "$1"
+}
+
+# Tag images in the yaml file with a tag. If no tag is passed, does nothing.
+# Parameters: $1 - yaml file to parse for images.
+# $2 - registry where the images are stored.
+# $3 - tag to apply (optional).
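+# Example call (yaml file, registry and tag below are illustrative placeholders):
+#   tag_images_in_yaml release.yaml gcr.io/my-project v20180920-abcdef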
+function tag_images_in_yaml() {
+ [[ -z $3 ]] && return 0
+ echo "Tagging images with $3"
+ for image in $(grep -o "$2/[a-z\./-]\+@sha256:[0-9a-f]\+" $1); do
+ gcloud -q container images add-tag ${image} ${image%%@*}:$3
+ done
+}
+
+# Copy the given yaml file to a GCS bucket, under /latest/ and, if a tag is given, also under /previous/<tag>/.
+# Parameters: $1 - yaml file to copy.
+# $2 - destination bucket name.
+# $3 - tag to apply (optional).
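+# Example call (yaml file, bucket and tag below are illustrative placeholders):
+#   publish_yaml release.yaml my-release-bucket v20180920-abcdef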
+function publish_yaml() {
+ gsutil cp $1 gs://$2/latest/
+ [[ -n $3 ]] && gsutil cp $1 gs://$2/previous/$3/
+}
+
+SKIP_TESTS=0
+TAG_RELEASE=0
+PUBLISH_RELEASE=0
+TAG=""
+KO_FLAGS="-P -L"
+
+# Parses flags and sets environment variables accordingly.
+function parse_flags() {
+ cd ${REPO_ROOT_DIR}
+ for parameter in $@; do
+ case $parameter in
+ --skip-tests) SKIP_TESTS=1 ;;
+ --tag-release) TAG_RELEASE=1 ;;
+ --notag-release) TAG_RELEASE=0 ;;
+ --publish)
+ PUBLISH_RELEASE=1
+ # Remove -L from ko flags
+ KO_FLAGS="${KO_FLAGS/-L}"
+ ;;
+ --nopublish)
+ PUBLISH_RELEASE=0
+ # Add -L to ko flags
+ KO_FLAGS="-L ${KO_FLAGS}"
+ shift
+ ;;
+ *)
+ echo "error: unknown option ${parameter}"
+ exit 1
+ ;;
+ esac
+ shift
+ done
+
+ TAG=""
+ if (( TAG_RELEASE )); then
+ # Currently we're not considering the tags in refs/tags namespace.
+ commit=$(git describe --always --dirty)
+ # Like kubernetes, image tag is vYYYYMMDD-commit
+ TAG="v$(date +%Y%m%d)-${commit}"
+ fi
+
+ readonly SKIP_TESTS
+ readonly TAG_RELEASE
+ readonly PUBLISH_RELEASE
+ readonly TAG
+}
+
+# Run the release validation tests (unless --skip-tests was passed), displaying a banner beforehand.
+# Parameters: $1 - executable that runs the tests.
+function run_validation_tests() {
+ if (( ! SKIP_TESTS )); then
+ banner "Running release validation tests"
+ # Run tests.
+ $1
+ fi
+}