Migrate envoy api v3 (#116)

Signed-off-by: Marcos Yacob <marcos.yacob@hpe.com>
This commit is contained in:
Marcos Yacob 2023-06-02 12:42:20 -03:00 committed by GitHub
parent 5c56598fba
commit fc623ee0ad
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
57 changed files with 1555 additions and 422 deletions

View File

@ -11,7 +11,7 @@ env:
TERM: xterm
jobs:
test-all:
runs-on: ubuntu-18.04
runs-on: ubuntu-20.04
timeout-minutes: 30
steps:
- name: Checkout

6
k8s/envoy-jwt-auth-helper/.gitignore vendored Normal file
View File

@ -0,0 +1,6 @@
# Binary
envoy-jwt-auth-helper
# Editor specific configuration
.idea
.vscode

View File

@ -0,0 +1,15 @@
# Build stage: compile the helper binary with the full Go toolchain.
FROM golang:latest as build-stage
WORKDIR /app
COPY . .
# Download module dependencies before compiling.
RUN go mod download
# Produces ./envoy-jwt-auth-helper (module name) in /app.
RUN go build
# Production stage: slim Debian runtime without the Go toolchain.
FROM debian:buster-slim as production-stage
# dumb-init gives proper PID-1 signal handling; ping/curl/procps are debug utilities.
RUN apt update && DEBIAN_FRONTEND=noninteractive apt full-upgrade -y && \
apt install -y dumb-init iputils-ping curl procps
RUN mkdir /opt/helper
# Copy only the compiled binary from the build stage.
COPY --from=build-stage /app/envoy-jwt-auth-helper /opt/helper
ENTRYPOINT ["/usr/bin/dumb-init", "/opt/helper/envoy-jwt-auth-helper"]
CMD []

View File

@ -0,0 +1,70 @@
# Envoy JWT Auth Helper
Simple gRPC service that implements [Envoy's External Authorization Filter](https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/http/ext_authz/v3/ext_authz.proto#envoy-v3-api-msg-extensions-filters-http-ext-authz-v3-extauthz).
_Envoy JWT Auth Helper_ needs to be configured as an External Authorization filter for Envoy. Then, for every HTTP request sent to the Envoy forward proxy, it obtains a JWT-SVID from the SPIRE Agent and injects it as a new request header. Finally, the request is sent back to Envoy.
On the other side, when the HTTP request arrives at the reverse proxy, the Envoy External Authorization module sends the request to the _Envoy JWT Auth Helper_, which extracts the JWT-SVID from the header and connects to the SPIRE Agent to perform the validation. Once validated, the request is sent back to Envoy. If validation fails, the request is denied.
## Modes
This simple authentication server supports 2 modes:
### jwt_injection
Connects to the SPIRE Agent to fetch a JWT-SVID, which is then injected into the request as a new header.
### jwt_svid_validator
Extracts the added header from the request and connects to the SPIRE Agent to validate it.
## Build
```console
go build
```
## Run:
```console
./envoy-jwt-auth-helper -config envoy-jwt-auth-helper.conf
```
## Configuration example:
```
socket_path = "unix:///tmp/agent.sock"
host = "127.0.0.1"
port = 9010
jwt_mode = "jwt_svid_validator"
audience = "spiffe://example.org/myservice"
```
## As Envoy External Authorization filter
Include an External Authorization Filter in the Envoy configuration that connects to the service. This is accomplished by adding a new HTTP filter:
``` console
http_filters:
- name: envoy.ext_authz
config:
grpc_service:
envoy_grpc:
cluster_name: ext-authz
timeout: 0.5s
```
And the corresponding cluster:
``` console
- name: ext-authz
connect_timeout: 1s
type: strict_dns
http2_protocol_options: {}
hosts:
- socket_address:
address: 127.0.0.1
port_value: 9010
```
Note that the cluster is configured to talk to `127.0.0.1:9010`, the host and port set on the [configuration example](#configuration-example).

View File

@ -0,0 +1,16 @@
# Path to the domain socket used to communicate with the Workload API
socket_path = "unix:///run/spire/sockets/agent.sock"
# Host where the app will be listening
host = "127.0.0.1"
# Port where the app will be listening
port = 9010
# Options: "jwt_injection", "jwt_svid_validator"
jwt_mode = "jwt_svid_validator"
# JWT audience value
# Used in:
# - AUTH module: jwt_injection (for JWT injection, set in the JWT-SVID)
# - AUTH module: jwt_svid_validator (for JWT validation, compared against the JWT-SVID)
audience = "spiffe://example.org/myservice"

View File

@ -0,0 +1,29 @@
module github.com/spiffe/envoy-jwt-auth-helper
go 1.20
require (
github.com/envoyproxy/go-control-plane v0.11.0
github.com/gogo/googleapis v1.4.1
github.com/golang/protobuf v1.5.3
github.com/hashicorp/hcl v1.0.0
github.com/spiffe/go-spiffe/v2 v2.1.3
google.golang.org/genproto v0.0.0-20230327215041-6ac7f18bb9d5
google.golang.org/grpc v1.54.0
)
require (
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 // indirect
github.com/envoyproxy/protoc-gen-validate v0.10.1 // indirect
github.com/go-jose/go-jose/v3 v3.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/zeebo/errs v1.3.0 // indirect
golang.org/x/crypto v0.7.0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/tools v0.7.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
)

View File

@ -0,0 +1,125 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195 h1:58f1tJ1ra+zFINPlwLWvQsR9CzAKt2e+EWV2yX9oXQ4=
github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.11.0 h1:jtLewhRR2vMRNnq2ZZUoCjUlgut+Y0+sDDWPOfwOi1o=
github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8=
github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=
github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/spiffe/go-spiffe/v2 v2.1.3 h1:P5L9Ixo5eqJiHnktAU0UD/6UfHsQs7yAtc8a/FFUi9M=
github.com/spiffe/go-spiffe/v2 v2.1.3/go.mod h1:eVDqm9xFvyqao6C+eQensb9ZPkyNEeaUbqbBpOhBnNk=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs=
github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20230327215041-6ac7f18bb9d5 h1:Kd6tRRHXw8z4TlPlWi+NaK10gsePL6GdZBQChptOLGA=
google.golang.org/genproto v0.0.0-20230327215041-6ac7f18bb9d5/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

View File

@ -0,0 +1,52 @@
package main
import (
"context"
"flag"
"log"
"net"
"strconv"
auth "github.com/envoyproxy/go-control-plane/envoy/service/auth/v3"
authExternal "github.com/spiffe/envoy-jwt-auth-helper/pkg/auth"
"github.com/spiffe/envoy-jwt-auth-helper/pkg/config"
"github.com/spiffe/go-spiffe/v2/workloadapi"
"google.golang.org/grpc"
)
// main wires the helper together: it parses the configuration file, creates
// a JWTSource bound to the configured Workload API socket, and serves the
// Envoy External Authorization gRPC API on the configured host:port.
func main() {
	configFilePath := flag.String("config", "envoy-jwt-auth-helper.conf", "Path to configuration file")
	flag.Parse()

	c, err := config.ParseConfigFile(*configFilePath)
	if err != nil {
		log.Fatalf("Error parsing configuration file: %v", err)
	}

	lis, err := net.Listen("tcp", net.JoinHostPort(c.Host, strconv.Itoa(c.Port)))
	if err != nil {
		log.Fatalf("Failed to listen: %v", err)
	}

	// Cap concurrent streams per connection to bound resource usage.
	s := grpc.NewServer(grpc.MaxConcurrentStreams(10))

	// Create options to configure Sources to use socket path passed via config file.
	clientOptions := workloadapi.WithClientOptions(workloadapi.WithAddr(c.SocketPath))

	// Create a JWTSource to validate provided tokens from clients.
	jwtSource, err := workloadapi.NewJWTSource(context.Background(), clientOptions)
	if err != nil {
		log.Fatalf("Unable to create JWTSource: %v", err)
	}
	defer jwtSource.Close()

	// Renamed from "authExternal" so the local no longer shadows the
	// imported authExternal package.
	authServer, err := authExternal.NewAuthServer(c.SocketPath, c.Audience, c.JWTMode, jwtSource)
	if err != nil {
		log.Fatalf("Error creating AuthServer: %v", err)
	}
	auth.RegisterAuthorizationServer(s, authServer)

	log.Printf("Starting gRPC Server at %d", c.Port)
	// Serve blocks until the listener fails; report the terminal error
	// instead of silently discarding it as the previous version did.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("gRPC server stopped: %v", err)
	}
}

View File

@ -0,0 +1,184 @@
package auth
import (
"context"
"fmt"
"log"
"strings"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
auth "github.com/envoyproxy/go-control-plane/envoy/service/auth/v3"
envoy_type "github.com/envoyproxy/go-control-plane/envoy/type/v3"
"github.com/gogo/googleapis/google/rpc"
"github.com/golang/protobuf/ptypes/wrappers"
"github.com/spiffe/go-spiffe/v2/svid/jwtsvid"
"github.com/spiffe/go-spiffe/v2/workloadapi"
rpcstatus "google.golang.org/genproto/googleapis/rpc/status"
)
// Mode type will define how this service will behave
type Mode int

const (
	// JWTInjection Mode will insert JWT in header
	JWTInjection Mode = 1 + iota
	// JWTSvidValidator Mode will validate JWT header
	JWTSvidValidator
)

// modeNames maps each known Mode to the configuration string that selects it.
var modeNames = map[Mode]string{
	JWTInjection:     "jwt_injection",
	JWTSvidValidator: "jwt_svid_validator",
}

// String returns the configuration-file spelling of the mode, or an
// "UNKNOWN(n)" marker for values outside the declared constants.
func (m Mode) String() string {
	if name, ok := modeNames[m]; ok {
		return name
	}
	return fmt.Sprintf("UNKNOWN(%d)", m)
}
// Config auth server config. It holds the immutable settings the server
// uses for every Check call.
type Config struct {
	// JWT Source used to verify token (and to fetch one in injection mode).
	jwtSource *workloadapi.JWTSource
	// Expected audiences: set into fetched JWT-SVIDs and required when
	// validating incoming tokens.
	audience string
	// Defines how this service will behave (JWTInjection or JWTSvidValidator).
	mode Mode
}
// AuthServer implements auth.AuthorizationServer interface. It is the gRPC
// service Envoy's ext_authz filter calls for each request.
type AuthServer struct {
	// config carries the JWT source, audience, and operating mode.
	config *Config
}
// NewAuthServer creates a new Auth server according to the given config.
// mode must be "jwt_injection" or "jwt_svid_validator" (case-insensitive).
//
// socketPath is not used by this function: the supplied jwtSource is already
// bound to a Workload API address by the caller. The parameter is kept for
// backward compatibility with existing callers.
func NewAuthServer(socketPath string, audience string, mode string, jwtSource *workloadapi.JWTSource) (*AuthServer, error) {
	// Previously an empty mode was silently accepted, leaving the server in
	// the invalid zero Mode so every Check call failed. Fail fast instead.
	if mode == "" {
		return nil, fmt.Errorf("mode must be one of: jwt_injection, jwt_svid_validator")
	}
	parsedMode, err := parseJWTMode(mode)
	if err != nil {
		return nil, err
	}

	config := &Config{
		jwtSource: jwtSource,
		audience:  audience,
		mode:      parsedMode,
	}

	log.Printf("Auth Server running in %s mode", config.mode)

	return &AuthServer{
		config: config,
	}, nil
}
// Check implements auth.AuthorizationServer. Depending on the configured
// mode it either injects a JWT-SVID into the request (JWTInjection) or
// validates the bearer token in the "authorization" header
// (JWTSvidValidator). Authorization failures are reported to Envoy through
// a denied CheckResponse with a nil error; a non-nil error is returned only
// for server misconfiguration.
func (a *AuthServer) Check(ctx context.Context, req *auth.CheckRequest) (*auth.CheckResponse, error) {
	authHeader, ok := req.Attributes.Request.Http.Headers["authorization"]
	switch a.config.mode {
	case JWTInjection:
		if authHeader != "" {
			log.Printf("%v", fmt.Errorf("Request already contains an authorization header. Verify mode if expected mode is %s", a.config.mode))
			return forbiddenResponse("PERMISSION_DENIED"), nil
		}
		return a.injectJWTSVID(ctx)
	case JWTSvidValidator:
		// Require the exact "Bearer <token>" scheme. The previous
		// strings.Split(authHeader, "Bearer ") accepted malformed headers
		// such as "xyzBearer <token>" and allowed an empty token through.
		const bearerPrefix = "Bearer "
		if !ok || !strings.HasPrefix(authHeader, bearerPrefix) {
			// Do not log the header value: it may contain a credential.
			log.Printf("Invalid or unsupported authorization header")
			return forbiddenResponse("Invalid or unsupported authorization header"), nil
		}
		token := strings.TrimPrefix(authHeader, bearerPrefix)
		if token == "" {
			log.Printf("Invalid or unsupported authorization header")
			return forbiddenResponse("Invalid or unsupported authorization header"), nil
		}
		return a.validateJWTSVID(ctx, token)
	default:
		err := fmt.Errorf("Unknown server mode: %s", a.config.mode)
		log.Printf("Error selecting server mode. %v", err)
		return nil, err
	}
}
// validateJWTSVID parses the token and verifies it (signature and audience)
// against the bundle fetched through the configured jwtSource. An invalid
// token yields a forbidden response with a nil error so Envoy denies the
// request instead of treating the filter as failed.
func (a *AuthServer) validateJWTSVID(ctx context.Context, token string) (*auth.CheckResponse, error) {
	if _, err := jwtsvid.ParseAndValidate(token, a.config.jwtSource, []string{a.config.audience}); err != nil {
		log.Printf("Invalid token: %v\n", err)
		return forbiddenResponse("PERMISSION_DENIED"), nil
	}
	log.Printf("Token is valid")
	return okResponse(), nil
}
// injectJWTSVID fetches a JWT-SVID for the configured audience from the
// Workload API and returns an OK response instructing Envoy to set it as
// the request's "authorization" bearer header. A fetch failure produces a
// forbidden response with a nil error.
func (a *AuthServer) injectJWTSVID(ctx context.Context) (*auth.CheckResponse, error) {
	svid, err := a.config.jwtSource.FetchJWTSVID(ctx, jwtsvid.Params{
		Audience: a.config.audience,
	})
	if err != nil {
		log.Printf("Unable to fetch SVID: %v", err)
		return forbiddenResponse("PERMISSION_DENIED"), nil
	}

	authorizationHeader := &core.HeaderValueOption{
		// Replace any existing value rather than appending to it.
		Append: &wrappers.BoolValue{
			Value: false, //Default is true
		},
		Header: &core.HeaderValue{
			Key:   "authorization",
			Value: fmt.Sprintf("Bearer %s", svid.Marshal()),
		},
	}

	response := &auth.CheckResponse{
		HttpResponse: &auth.CheckResponse_OkResponse{
			OkResponse: &auth.OkHttpResponse{
				Headers: []*core.HeaderValueOption{authorizationHeader},
			},
		},
	}

	log.Printf("JWT-SVID injected. Sending response with %v new headers\n", len(response.GetOkResponse().Headers))
	return response, nil
}
// parseJWTMode maps a configuration string (case-insensitive) to its Mode.
// It returns an error for any value other than "jwt_injection" or
// "jwt_svid_validator".
func parseJWTMode(mode string) (Mode, error) {
	switch strings.ToLower(mode) {
	case "jwt_injection":
		return JWTInjection, nil
	case "jwt_svid_validator":
		return JWTSvidValidator, nil
	}
	// Lowercase error text (Go convention, staticcheck ST1005) and %q so an
	// empty or whitespace value is visible in the message.
	return 0, fmt.Errorf("unknown mode %q, must be one of: jwt_injection, jwt_svid_validator", mode)
}
// okResponse builds a CheckResponse with an OK status that lets the request
// proceed unmodified.
func okResponse() *auth.CheckResponse {
	resp := &auth.CheckResponse{}
	resp.Status = &rpcstatus.Status{Code: int32(rpc.OK)}
	resp.HttpResponse = &auth.CheckResponse_OkResponse{
		OkResponse: &auth.OkHttpResponse{},
	}
	return resp
}
// forbiddenResponse builds a CheckResponse that denies the request with
// HTTP 403 Forbidden; the printf-style arguments become the response body.
func forbiddenResponse(format string, args ...interface{}) *auth.CheckResponse {
	denied := &auth.DeniedHttpResponse{
		Status: &envoy_type.HttpStatus{
			Code: envoy_type.StatusCode_Forbidden,
		},
		Body: fmt.Sprintf(format, args...),
	}
	return &auth.CheckResponse{
		Status:       &rpcstatus.Status{Code: int32(rpc.PERMISSION_DENIED)},
		HttpResponse: &auth.CheckResponse_DeniedResponse{DeniedResponse: denied},
	}
}

View File

@ -0,0 +1,43 @@
package config
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/hashicorp/hcl"
)
// Config available configurations. Fields map one-to-one to the HCL keys
// in the helper's configuration file (see envoy-jwt-auth-helper.conf).
type Config struct {
	// Workload API socket address, e.g. "unix:///run/spire/sockets/agent.sock".
	SocketPath string `hcl:"socket_path"`
	// Host the gRPC server listens on.
	Host string `hcl:"host"`
	// Port the gRPC server listens on.
	Port int `hcl:"port"`
	// Operating mode: "jwt_injection" or "jwt_svid_validator".
	JWTMode string `hcl:"jwt_mode"`
	// JWT audience used when fetching/validating JWT-SVIDs.
	Audience string `hcl:"audience"`
}
// ParseConfigFile reads the HCL configuration file at filePath and decodes
// it into a Config. When the file does not exist, the returned error points
// at the absolute path (falling back to the given path) and suggests the
// -config flag. Decode errors are wrapped so callers can unwrap them.
func ParseConfigFile(filePath string) (*Config, error) {
	data, err := ioutil.ReadFile(filePath)
	if err != nil {
		if os.IsNotExist(err) {
			msg := "could not find config file %s: please use the -config flag"
			// absErr (not err) avoids shadowing the outer read error.
			p, absErr := filepath.Abs(filePath)
			if absErr != nil {
				p = filePath
				msg = "config file not found at %s: use -config"
			}
			return nil, fmt.Errorf(msg, p)
		}
		return nil, err
	}

	c := new(Config)
	if err := hcl.Decode(c, string(data)); err != nil {
		// %w preserves the underlying HCL error for errors.Is/As.
		return nil, fmt.Errorf("unable to decode configuration: %w", err)
	}
	return c, nil
}

View File

@ -6,7 +6,7 @@
This tutorial builds on the [SPIRE Envoy-JWT Tutorial](../envoy-jwt/README.md) to demonstrate how to combine SPIRE, Envoy and OPA to perform JWT SVID authentication and request authorization. The changes required to implement request authorization with OPA are shown here as a delta to that tutorial, so you should run, or at least read through, the SPIRE Envoy-JWT tutorial first.
To illustrate request authorization with OPA, we add a new sidecar to the backend service used in the SPIRE Envoy JWT tutorial. The new sidecar acts as a new [External Authorization Filter](https://www.envoyproxy.io/docs/envoy/v1.14.1/intro/arch_overview/security/ext_authz_filter#arch-overview-ext-authz) for Envoy.
To illustrate request authorization with OPA, we add a new sidecar to the backend service used in the SPIRE Envoy JWT tutorial. The new sidecar acts as a new [External Authorization Filter](https://www.envoyproxy.io/docs/envoy/v1.25.1/intro/arch_overview/security/ext_authz_filter#arch-overview-ext-authz) for Envoy.
![SPIRE Envoy-JWT with OPA integration diagram][diagram]
@ -22,9 +22,41 @@ In this tutorial you will learn how to:
* Add an External Authorization Filter to the Envoy configuration that connects Envoy to OPA
* Test successful JWT authentication using SPIRE plus OPA authorization
# Prerequisites
## External IP support
This tutorial requires a LoadBalancer that can assign an external IP (e.g., [metallb](https://metallb.universe.tf/))
```console
$ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.7/config/manifests/metallb-native.yaml
```
Wait until metallb has started
```console
$ kubectl wait --namespace metallb-system \
--for=condition=ready pod \
--selector=app=metallb \
--timeout=90s
```
Apply metallb configuration
```console
$ kubectl apply -f ../envoy-x509/metallb-config.yaml
```
## Auth helper image
An External Authorization filter is implemented using [Envoy-jwt-auth-helper](../envoy-jwt-auth-helper),
A script is provided to facilitate building and importing the image using `kind` or `minikube`
``` console
$ bash ./scripts/build-helper.sh kind
```
## Previous SPIRE installation
Before proceeding, review the following:
* You'll need access to the Kubernetes environment configured when going through the [SPIRE Envoy-JWT Tutorial](../envoy-jwt/README.md). Optionally, you can create the Kubernetes environment with the `pre-set-env.sh` script described just below.
@ -40,7 +72,6 @@ The script will create all the resources needed for the SPIRE Server and SPIRE A
**Note:** The configuration changes needed to enable Envoy and OPA to work with SPIRE are shown as snippets in this tutorial. However, all of these settings have already been configured. You don't have to edit any configuration files.
# Part 1: Deploy Updated and New Resources
Assuming the SPIRE Envoy JWT Tutorial as a starting point, there are some resources that need to be created.
@ -50,12 +81,12 @@ The solution applied in this tutorial consists of adding a new External Authoriz
## Update Deployments
In order to let OPA authorize or reject requests coming to the `backend` service it is necessary to add OPA as a sidecar to the deployment.
We use the `openpolicyagent/opa:0.24.0-envoy-5` image which extends OPA with a gRPC server that implements the Envoy External Authorization API so OPA can communicate policy decisions with Envoy. The new container is added and configured as follows in [`backend-deployment.yaml`](k8s/backend/backend-deployment.yaml):
We use the `openpolicyagent/opa:0.50.2-envoy` image which extends OPA with a gRPC server that implements the Envoy External Authorization API so OPA can communicate policy decisions with Envoy. The new container is added and configured as follows in [`backend-deployment.yaml`](k8s/backend/backend-deployment.yaml):
```console
- name: opa
image: openpolicyagent/opa:0.24.0-envoy-5
image: openpolicyagent/opa:0.50.2-envoy
imagePullPolicy: IfNotPresent
ports:
- name: opa-envoy
@ -64,15 +95,15 @@ We use the `openpolicyagent/opa:0.24.0-envoy-5` image which extends OPA with a g
- name: opa-api-port
containerPort: 8181
protocol: TCP
args:
- "run"
- "--server"
- "--config-file=/run/opa/opa-config.yaml"
- "/run/opa/opa-policy.rego"
volumeMounts:
- name: backend-opa-policy
mountPath: /run/opa
readOnly: true
args:
- "run"
- "--server"
- "--config-file=/run/opa/opa-config.yaml"
- "/run/opa/opa-policy.rego"
volumeMounts:
- name: backend-opa-policy
mountPath: /run/opa
readOnly: true
```
The ConfigMap `backend-opa-policy` needs to be added into the `volumes` section, like this:
@ -171,11 +202,12 @@ Envoy needs to know how to contact the OPA Agent just configured to perform the
```console
- name: envoy.filters.http.ext_authz
typed_config:
"@type": type.googleapis.com/envoy.config.filter.http.ext_authz.v2.ExtAuthz
"@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
with_request_body:
max_request_bytes: 8192
allow_partial_message: true
failure_mode_allow: false
transport_api_version: V3
grpc_service:
google_grpc:
target_uri: 127.0.0.1:8182
@ -217,13 +249,13 @@ The first test will demonstrate how a request that satisfies the policy allows f
$ kubectl get services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
backend-envoy ClusterIP None <none> 9001/TCP 6m53s
frontend LoadBalancer 10.8.14.117 35.222.164.221 3000:32586/TCP 6m52s
frontend-2 LoadBalancer 10.8.7.57 35.222.190.182 3002:32056/TCP 6m53s
kubernetes ClusterIP 10.8.0.1 <none> 443/TCP 59m
backend-envoy ClusterIP None <none> 9001/TCP 5m56s
frontend LoadBalancer 10.96.194.108 172.18.255.200 3000:30824/TCP 5m56s
frontend-2 LoadBalancer 10.96.61.216 172.18.255.201 3002:31960/TCP 5m56s
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 14m
```
The `frontend` service will be available at the `EXTERNAL-IP` value and port `3000`, which was configured for our container. In the sample output shown above, the URL to navigate to is `http://35.222.164.221:3000`. Open your browser and navigate to the IP address shown for `frontend` in your environment, adding the port `:3000`. Once the page is loaded, you'll see the account details for user _Jacob Marley_.
The `frontend` service will be available at the `EXTERNAL-IP` value and port `3000`, which was configured for our container. In the sample output shown above, the URL to navigate to is `http://172.18.255.200:3000`. Open your browser and navigate to the IP address shown for `frontend` in your environment, adding the port `:3000`. Once the page is loaded, you'll see the account details for user _Jacob Marley_.
![Frontend][frontend-view]
@ -285,7 +317,7 @@ Note the presence of the `authorization` header containing the JWT. As explained
## Testing Invalid Requests
On the other hand, when you connect to the URL for the `frontend-2` service (e.g. `http://35.222.190.182:3002`), the browser only displays the title without any account details. This is because the SPIFFE ID of the `frontend-2` service (`spiffe://example.org/ns/default/sa/default/frontend-2`) does not satisfy the policy for the OPA Agent.
On the other hand, when you connect to the URL for the `frontend-2` service (e.g. `http://172.18.255.201:3002`), the browser only displays the title without any account details. This is because the SPIFFE ID of the `frontend-2` service (`spiffe://example.org/ns/default/sa/default/frontend-2`) does not satisfy the policy for the OPA Agent.
![Frontend-2-no-details][frontend-2-view-no-details]
@ -315,13 +347,13 @@ svc_spiffe_id == "spiffe://example.org/ns/default/sa/default/frontend-2"
Save the changes and exit. The `backend-update-policy.sh` script resumes. The script applies new version of the ConfigMap and then restarts the `backend` pod to pick up the new rule.
Wait some seconds for the deployment to propagate before trying to view the `frontend-2` service in your browser again.
Once the pod is ready, refresh the browser using the correct URL for the `frontend-2` service (e.g. `http://35.222.190.182:3002`). As a result, now the page shows the account details for user _Alex Fergus_.
Once the pod is ready, refresh the browser using the correct URL for the `frontend-2` service (e.g. `http://172.18.255.201:3002`). As a result, now the page shows the account details for user _Alex Fergus_.
![Frontend-2][frontend-2-view]
[frontend-2-view]: images/frontend-2_view.png "Frontend-2 view"
On the other hand, if you now connect to the URL for the `frontend` service (e.g. `http://35.222.164.221:3000`), the browser only displays the title without any account details. This is the expected behaviour as the policy was updated and now the SPIFFE ID of the `frontend` service does not satisfy the policy anymore.
On the other hand, if you now connect to the URL for the `frontend` service (e.g. `http://172.18.255.200:3000`), the browser only displays the title without any account details. This is the expected behaviour as the policy was updated and now the SPIFFE ID of the `frontend` service does not satisfy the policy anymore.
# Cleanup

View File

@ -18,7 +18,7 @@ spec:
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: envoy
image: envoyproxy/envoy-alpine:v1.14.1
image: envoyproxy/envoy:v1.25.1
imagePullPolicy: IfNotPresent
args: ["-l", "debug", "--local-address-ip-version", "v4", "-c", "/run/envoy/envoy.yaml"]
ports:
@ -31,7 +31,7 @@ spec:
mountPath: /run/spire/sockets
readOnly: true
- name: auth-helper
image: us.gcr.io/scytale-registry/envoy-jwt-auth-helper@sha256:e55ce5fd42f13c5475b285a848c5f83bc4a335a93d7f934d3ac884920045fc96
image: envoy-jwt-auth-helper:latest
imagePullPolicy: IfNotPresent
args: ["-config", "/run/envoy-jwt-auth-helper/config/envoy-jwt-auth-helper.conf"]
ports:
@ -58,7 +58,7 @@ spec:
mountPath: "/usr/share/nginx/html/transactions"
readOnly: true
- name: opa
image: openpolicyagent/opa:0.24.0-envoy-5
image: openpolicyagent/opa:0.50.2-envoy
imagePullPolicy: IfNotPresent
ports:
- name: opa-envoy

View File

@ -10,15 +10,16 @@ static_resources:
port_value: 9001
filter_chains:
- filters:
- name: envoy.http_connection_manager
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
common_http_protocol_options:
idle_timeout: 1s
codec_type: auto
access_log:
- name: envoy.file_access_log
config:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/inbound-proxy.log"
stat_prefix: ingress_http
route_config:
@ -34,71 +35,101 @@ static_resources:
http_filters:
- name: envoy.filters.http.ext_authz
typed_config:
"@type": type.googleapis.com/envoy.config.filter.http.ext_authz.v2.ExtAuthz
"@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
transport_api_version: V3
grpc_service:
envoy_grpc:
cluster_name: ext-authz
timeout: 0.5s
- name: envoy.filters.http.ext_authz
typed_config:
"@type": type.googleapis.com/envoy.config.filter.http.ext_authz.v2.ExtAuthz
"@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
with_request_body:
max_request_bytes: 8192
allow_partial_message: true
failure_mode_allow: false
transport_api_version: V3
grpc_service:
google_grpc:
target_uri: 127.0.0.1:8182
stat_prefix: ext_authz
timeout: 0.5s
- name: envoy.router
tls_context:
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/backend"
sds_config:
api_config_source:
api_type: GRPC
grpc_services:
envoy_grpc:
cluster_name: spire_agent
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
verify_subject_alt_name:
- "spiffe://example.org/ns/default/sa/default/frontend"
- "spiffe://example.org/ns/default/sa/default/frontend-2"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
- name: envoy.filters.http.router
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/backend"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_typed_subject_alt_names:
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend"
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend-2"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
clusters:
- name: spire_agent
connect_timeout: 0.25s
http2_protocol_options: {}
hosts:
- pipe:
path: /run/spire/sockets/agent.sock
load_assignment:
cluster_name: spire_agent
endpoints:
- lb_endpoints:
- endpoint:
address:
pipe:
path: /run/spire/sockets/agent.sock
- name: local_service
connect_timeout: 1s
type: strict_dns
hosts:
- socket_address:
address: 127.0.0.1
port_value: 80
load_assignment:
cluster_name: local_service
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 80
- name: ext-authz
connect_timeout: 1s
type: strict_dns
http2_protocol_options: {}
hosts:
- socket_address:
address: 127.0.0.1
port_value: 9010
load_assignment:
cluster_name: ext-authz
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 9010

0
k8s/envoy-jwt-opa/scripts/backend-opa-logs.sh Normal file → Executable file
View File

0
k8s/envoy-jwt-opa/scripts/backend-update-policy.sh Normal file → Executable file
View File

View File

@ -0,0 +1,26 @@
#!/bin/bash
# Builds the envoy-jwt-auth-helper Docker image and, optionally, loads it
# into a local minikube or kind cluster.
#
# Usage: build-helper.sh [minikube|kind]
#   minikube - load the built image into minikube
#   kind     - load the built image into kind
#   (other)  - build only
set -e

# Resolve the repository layout relative to this script's location.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
EXAMPLEDIR="$(dirname "$DIR")"
K8SDIR="$(dirname "$EXAMPLEDIR")"

DOCKER_IMAGE="envoy-jwt-auth-helper"
# NOTE(review): SERVICE_VERSION is currently unused; the image is tagged "latest".
SERVICE_VERSION="1.0.0"

echo "Building ${DOCKER_IMAGE}"
(cd "${K8SDIR}/envoy-jwt-auth-helper" && docker build --no-cache --tag "${DOCKER_IMAGE}" .)

case $1 in
    "minikube")
        echo "Loading image into minikube"
        minikube image load "${DOCKER_IMAGE}:latest";;
    "kind")
        echo "Load image into kind"
        kind load docker-image "${DOCKER_IMAGE}:latest";;
    *)
        echo "Image built successfully";;
esac

0
k8s/envoy-jwt-opa/scripts/clean-env.sh Normal file → Executable file
View File

0
k8s/envoy-jwt-opa/scripts/pre-set-env.sh Normal file → Executable file
View File

0
k8s/envoy-jwt-opa/scripts/set-env.sh Normal file → Executable file
View File

View File

@ -19,6 +19,9 @@ trap clean-env EXIT
echo "${bb}Preparing environment...${nm}"
clean-env
# Build helper image
bash "${DIR}"/scripts/build-helper.sh minikube
# Creates Envoy JWT OPA scenario
bash "${DIR}"/scripts/set-env.sh

View File

@ -4,7 +4,7 @@
This tutorial builds on the [SPIRE Envoy-X.509 Tutorial](../envoy-x509/) to demonstrate how to use SPIRE to perform JWT SVID authentication on a workload's behalf instead of X.509 SVID authentication. The changes required to implement JWT SVID authentication are shown here as a delta to that tutorial, so you should run, or at least read through, the X.509 tutorial first.
To illustrate JWT authentication, we add sidecars to each of the services used in the Envoy X.509 tutorial. Each sidecar acts as an [external authorization filter](https://www.envoyproxy.io/docs/envoy/v1.14.1/intro/arch_overview/security/ext_authz_filter#arch-overview-ext-authz) for Envoy.
To illustrate JWT authentication, we add sidecars to each of the services used in the Envoy X.509 tutorial. Each sidecar acts as an [external authorization filter](https://www.envoyproxy.io/docs/envoy/v1.25.1/intro/arch_overview/security/ext_authz_filter#arch-overview-ext-authz) for Envoy.
![SPIRE Envoy integration diagram][diagram]
@ -24,6 +24,39 @@ In this tutorial you will learn how to:
# Prerequisites
## External IP support
This tutorial requires a LoadBalancer that can assign an external IP (e.g., [metallb](https://metallb.universe.tf/))
```console
$ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.7/config/manifests/metallb-native.yaml
```
Wait until metallb has started
```console
$ kubectl wait --namespace metallb-system \
--for=condition=ready pod \
--selector=app=metallb \
--timeout=90s
```
Apply metallb configuration
```console
$ kubectl apply -f ../envoy-x509/metallb-config.yaml
```
## Auth helper image
An External Authorization filter is implemented using [Envoy-jwt-auth-helper](../envoy-jwt-auth-helper).
A script is provided to facilitate building the image and importing it into a `kind` or `minikube` cluster:
``` console
$ bash ./scripts/build-helper.sh kind
```
## Previous SPIRE installation
Before proceeding, review the following:
* You'll need access to the Kubernetes environment configured when going through the [SPIRE Envoy-X.509 Tutorial](../envoy-x509/README.md). Optionally, you can create the Kubernetes environment with the `pre-set-env.sh` script described just below.
@ -38,7 +71,6 @@ $ bash scripts/pre-set-env.sh
The script will create all the resources needed for the SPIRE Server and SPIRE Agent to be available in the cluster and then will create all the resources for the SPIRE Envoy X.509 tutorial, which is the base scenario for this SPIRE Envoy JWT Tutorial.
# Part 1: Deploy Updated and New Resources
Assuming the SPIRE Envoy X.509 Tutorial as a starting point, there are some resources that need to be updated and others must be created.
@ -97,7 +129,7 @@ This new `auth-helper` service must be added as a sidecar and must be configured
```console
- name: auth-helper
image: envoy-jwt-auth-helper:1.0.0
image: envoy-jwt-auth-helper:latest
imagePullPolicy: IfNotPresent
args: ["-config", "/run/envoy-jwt-auth-helper/config/envoy-jwt-auth-helper.conf"]
ports:
@ -125,12 +157,14 @@ Next, this setup requires an External Authorization Filter in the Envoy configur
```console
http_filters:
- name: envoy.ext_authz
config:
- name: envoy.filters.http.ext_authz
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
transport_api_version: V3
grpc_service:
envoy_grpc:
cluster_name: ext-authz
timeout: 0.5s
envoy_grpc:
cluster_name: ext-authz
timeout: 0.5s
```
Here's the corresponding cluster configuration for the External Authorization Filter:
@ -140,10 +174,15 @@ Heres the corresponding cluster configuration for the External Authorization
connect_timeout: 1s
type: strict_dns
http2_protocol_options: {}
hosts:
- socket_address:
address: 127.0.0.1
port_value: 9010
load_assignment:
cluster_name: ext-authz
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 9010
```
@ -219,19 +258,19 @@ The first set of testing will demonstrate how valid JWT-SVIDs allow for the disp
$ kubectl get services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
backend-envoy ClusterIP None <none> 9001/TCP 6m53s
frontend LoadBalancer 10.8.14.117 35.222.164.221 3000:32586/TCP 6m52s
frontend-2 LoadBalancer 10.8.7.57 35.222.190.182 3002:32056/TCP 6m53s
kubernetes ClusterIP 10.8.0.1 <none> 443/TCP 59m
backend-envoy ClusterIP None <none> 9001/TCP 10m
frontend LoadBalancer 10.96.226.176 172.18.255.200 3000:32314/TCP 10m
frontend-2 LoadBalancer 10.96.33.198 172.18.255.201 3002:31797/TCP 10m
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 55m
```
The `frontend` service will be available at the `EXTERNAL-IP` value and port `3000`, which was configured for our container. In the sample output shown above, the URL to navigate is `http://35.222.164.221:3000`. Open your browser and navigate to the IP address shown for `frontend` in your environment, adding the port `:3000`. Once the page is loaded, you'll see the account details for user _Jacob Marley_.
The `frontend` service will be available at the `EXTERNAL-IP` value and port `3000`, which was configured for our container. In the sample output shown above, the URL to navigate is `http://172.18.255.200:3000`. Open your browser and navigate to the IP address shown for `frontend` in your environment, adding the port `:3000`. Once the page is loaded, you'll see the account details for user _Jacob Marley_.
![Frontend][frontend-view]
[frontend-view]: images/frontend_view.png "Frontend view"
On the other hand, when you connect to the URL for the `frontend-2` service (e.g. `http://35.222.190.182:3002`), the browser only displays the title without any account details. This is because the `frontend-2` service was not updated to include a JWT token in the request. The lack of a valid token on the request makes the Envoy instance in front of the `backend` reject it.
On the other hand, when you connect to the URL for the `frontend-2` service (e.g. `http://172.18.255.201:3002`), the browser only displays the title without any account details. This is because the `frontend-2` service was not updated to include a JWT token in the request. The lack of a valid token on the request makes the Envoy instance in front of the `backend` reject it.
![Frontend-2-no-details][frontend-2-view-no-details]

0
k8s/envoy-jwt/create-registration-entries.sh Normal file → Executable file
View File

View File

@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: envoy
image: envoyproxy/envoy-alpine:v1.14.1
image: envoyproxy/envoy:v1.25.1
imagePullPolicy: IfNotPresent
args: ["-l", "debug", "--local-address-ip-version", "v4", "-c", "/run/envoy/envoy.yaml"]
ports:
@ -28,7 +28,7 @@ spec:
mountPath: /run/spire/sockets
readOnly: true
- name: auth-helper
image: us.gcr.io/scytale-registry/envoy-jwt-auth-helper@sha256:e55ce5fd42f13c5475b285a848c5f83bc4a335a93d7f934d3ac884920045fc96
image: envoy-jwt-auth-helper:latest
imagePullPolicy: IfNotPresent
args: ["-config", "/run/envoy-jwt-auth-helper/config/envoy-jwt-auth-helper.conf"]
ports:

View File

@ -10,15 +10,16 @@ static_resources:
port_value: 9001
filter_chains:
- filters:
- name: envoy.http_connection_manager
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
common_http_protocol_options:
idle_timeout: 1s
codec_type: auto
access_log:
- name: envoy.file_access_log
config:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/inbound-proxy.log"
stat_prefix: ingress_http
route_config:
@ -32,60 +33,90 @@ static_resources:
route:
cluster: local_service
http_filters:
- name: envoy.ext_authz
config:
- name: envoy.filters.http.ext_authz
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
transport_api_version: V3
grpc_service:
envoy_grpc:
cluster_name: ext-authz
timeout: 0.5s
- name: envoy.router
tls_context:
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/backend"
sds_config:
api_config_source:
api_type: GRPC
grpc_services:
envoy_grpc:
cluster_name: spire_agent
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_subject_alt_names:
- exact: "spiffe://example.org/ns/default/sa/default/frontend"
- exact: "spiffe://example.org/ns/default/sa/default/frontend-2"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
- name: envoy.filters.http.router
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/backend"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_typed_subject_alt_names:
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend"
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend-2"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
clusters:
- name: spire_agent
connect_timeout: 0.25s
http2_protocol_options: {}
hosts:
- pipe:
path: /run/spire/sockets/agent.sock
load_assignment:
cluster_name: spire_agent
endpoints:
- lb_endpoints:
- endpoint:
address:
pipe:
path: /run/spire/sockets/agent.sock
- name: local_service
connect_timeout: 1s
type: strict_dns
hosts:
- socket_address:
address: 127.0.0.1
port_value: 80
load_assignment:
cluster_name: local_service
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 80
- name: ext-authz
connect_timeout: 1s
type: strict_dns
http2_protocol_options: {}
hosts:
- socket_address:
address: 127.0.0.1
port_value: 9010
load_assignment:
cluster_name: ext-authz
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 9010

View File

@ -2,7 +2,11 @@ node:
id: "frontend-2"
cluster: "demo-cluster-spire"
admin:
access_log_path: /tmp/admin_access0.log
access_log:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/admin_access0.log"
address:
socket_address:
protocol: TCP
@ -17,15 +21,16 @@ static_resources:
port_value: 3003
filter_chains:
- filters:
- name: envoy.http_connection_manager
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
common_http_protocol_options:
idle_timeout: 1s
codec_type: auto
access_log:
- name: envoy.file_access_log
config:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/outbound-proxy.log"
stat_prefix: ingress_http
route_config:
@ -39,59 +44,88 @@ static_resources:
route:
cluster: backend
http_filters:
- name: envoy.ext_authz
config:
- name: envoy.filters.http.ext_authz
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
grpc_service:
envoy_grpc:
cluster_name: ext-authz
timeout: 0.5s
- name: envoy.router
transport_api_version: V3
- name: envoy.filters.http.router
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
clusters:
- name: spire_agent
connect_timeout: 0.25s
http2_protocol_options: {}
hosts:
- pipe:
path: /run/spire/sockets/agent.sock
load_assignment:
cluster_name: spire_agent
endpoints:
- lb_endpoints:
- endpoint:
address:
pipe:
path: /run/spire/sockets/agent.sock
- name: ext-authz
connect_timeout: 1s
type: strict_dns
http2_protocol_options: {}
hosts:
- socket_address:
address: 127.0.0.1
port_value: 9012
load_assignment:
cluster_name: ext-authz
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 9012
- name: backend
connect_timeout: 0.25s
type: strict_dns
lb_policy: ROUND_ROBIN
hosts:
- socket_address:
address: backend-envoy
port_value: 9001
tls_context:
common_tls_context:
tls_certificate_sds_secret_configs:
load_assignment:
cluster_name: backend
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: backend-envoy
port_value: 9001
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/frontend-2"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
combined_validation_context:
# validate the SPIFFE ID of the server (recommended)
default_validation_context:
match_subject_alt_names:
- exact: "spiffe://example.org/ns/default/sa/default/backend"
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
api_config_source:
api_type: GRPC
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
combined_validation_context:
# validate the SPIFFE ID of the server (recommended)
default_validation_context:
match_typed_subject_alt_names:
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/backend"
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384

View File

View File

@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: envoy
image: envoyproxy/envoy-alpine:v1.14.1
image: envoyproxy/envoy:v1.25.1
imagePullPolicy: Always
args: ["-l", "debug", "--local-address-ip-version", "v4", "-c", "/run/envoy/envoy.yaml", "--base-id", "2"]
volumeMounts:
@ -26,7 +26,7 @@ spec:
mountPath: /run/spire/sockets
readOnly: true
- name: auth-helper
image: us.gcr.io/scytale-registry/envoy-jwt-auth-helper@sha256:e55ce5fd42f13c5475b285a848c5f83bc4a335a93d7f934d3ac884920045fc96
image: envoy-jwt-auth-helper:latest
imagePullPolicy: IfNotPresent
args: ["-config", "/run/envoy-jwt-auth-helper/config/envoy-jwt-auth-helper.conf"]
ports:

View File

@ -2,7 +2,11 @@ node:
id: "frontend"
cluster: "demo-cluster-spire"
admin:
access_log_path: /tmp/admin_access0.log
access_log:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/admin_access0.log"
address:
socket_address:
protocol: TCP
@ -17,15 +21,16 @@ static_resources:
port_value: 3001
filter_chains:
- filters:
- name: envoy.http_connection_manager
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
common_http_protocol_options:
idle_timeout: 1s
codec_type: auto
access_log:
- name: envoy.file_access_log
config:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/outbound-proxy.log"
stat_prefix: ingress_http
route_config:
@ -39,59 +44,88 @@ static_resources:
route:
cluster: backend
http_filters:
- name: envoy.ext_authz
config:
- name: envoy.filters.http.ext_authz
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
grpc_service:
envoy_grpc:
cluster_name: ext-authz
timeout: 0.5s
- name: envoy.router
transport_api_version: V3
- name: envoy.filters.http.router
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
clusters:
- name: spire_agent
connect_timeout: 0.25s
http2_protocol_options: {}
hosts:
- pipe:
path: /run/spire/sockets/agent.sock
load_assignment:
cluster_name: spire_agent
endpoints:
- lb_endpoints:
- endpoint:
address:
pipe:
path: /run/spire/sockets/agent.sock
- name: ext-authz
connect_timeout: 1s
type: strict_dns
http2_protocol_options: {}
hosts:
- socket_address:
address: 127.0.0.1
port_value: 9011
load_assignment:
cluster_name: ext-authz
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 9011
- name: backend
connect_timeout: 0.25s
type: strict_dns
lb_policy: ROUND_ROBIN
hosts:
- socket_address:
address: backend-envoy
port_value: 9001
tls_context:
common_tls_context:
tls_certificate_sds_secret_configs:
load_assignment:
cluster_name: backend
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: backend-envoy
port_value: 9001
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/frontend"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
combined_validation_context:
# validate the SPIFFE ID of the server (recommended)
default_validation_context:
match_subject_alt_names:
- exact: "spiffe://example.org/ns/default/sa/default/backend"
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
api_config_source:
api_type: GRPC
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
combined_validation_context:
# validate the SPIFFE ID of the server (recommended)
default_validation_context:
match_typed_subject_alt_names:
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/backend"
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384

View File

@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: envoy
image: envoyproxy/envoy-alpine:v1.14.1
image: envoyproxy/envoy:v1.25.1
imagePullPolicy: Always
args: ["-l", "debug", "--local-address-ip-version", "v4", "-c", "/run/envoy/envoy.yaml", "--base-id", "1"]
volumeMounts:
@ -26,7 +26,7 @@ spec:
mountPath: /run/spire/sockets
readOnly: true
- name: auth-helper
image: us.gcr.io/scytale-registry/envoy-jwt-auth-helper@sha256:e55ce5fd42f13c5475b285a848c5f83bc4a335a93d7f934d3ac884920045fc96
image: envoy-jwt-auth-helper:latest
imagePullPolicy: IfNotPresent
args: ["-config", "/run/envoy-jwt-auth-helper/config/envoy-jwt-auth-helper.conf"]
ports:

View File

@ -0,0 +1,26 @@
#!/bin/bash
# Builds the envoy-jwt-auth-helper Docker image and, optionally, loads it
# into a local minikube or kind cluster.
#
# Usage: build-helper.sh [minikube|kind]
#   minikube - load the built image into minikube
#   kind     - load the built image into kind
#   (other)  - build only
set -e

# Resolve the repository layout relative to this script's location.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
EXAMPLEDIR="$(dirname "$DIR")"
K8SDIR="$(dirname "$EXAMPLEDIR")"

DOCKER_IMAGE="envoy-jwt-auth-helper"
# NOTE(review): SERVICE_VERSION is currently unused; the image is tagged "latest".
SERVICE_VERSION="1.0.0"

echo "Building ${DOCKER_IMAGE}"
(cd "${K8SDIR}/envoy-jwt-auth-helper" && docker build --no-cache --tag "${DOCKER_IMAGE}" .)

case $1 in
    "minikube")
        echo "Loading image into minikube"
        minikube image load "${DOCKER_IMAGE}:latest";;
    "kind")
        echo "Load image into kind"
        kind load docker-image "${DOCKER_IMAGE}:latest";;
    *)
        echo "Image built successfully";;
esac

0
k8s/envoy-jwt/scripts/clean-env.sh Normal file → Executable file
View File

0
k8s/envoy-jwt/scripts/pre-set-env.sh Normal file → Executable file
View File

0
k8s/envoy-jwt/scripts/set-env.sh Normal file → Executable file
View File

View File

@ -19,6 +19,9 @@ trap clean-env EXIT
echo "${bb}Preparing environment...${nm}"
clean-env
# Build helper image
bash "${DIR}"/scripts/build-helper.sh minikube
# Creates Envoy JWT scenario
bash "${DIR}"/scripts/set-env.sh

View File

@ -38,6 +38,27 @@ The script will create all the resources needed for the SPIRE Server and SPIRE A
**Note:** The configuration changes needed to enable Envoy and OPA to work with SPIRE are shown as snippets in this tutorial. However, all of these settings have already been configured. You don't have to edit any configuration files.
## External IP support
This tutorial requires a LoadBalancer that can assign an external IP (e.g., [metallb](https://metallb.universe.tf/))
```console
$ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.7/config/manifests/metallb-native.yaml
```
Wait until metallb has started
```console
$ kubectl wait --namespace metallb-system \
--for=condition=ready pod \
--selector=app=metallb \
--timeout=90s
```
Apply metallb configuration
```console
$ kubectl apply -f ../envoy-x509/metallb-config.yaml
```
# Part 1: Deploy Updated and New Resources
@ -51,26 +72,26 @@ The new container is added and configured as follows in [`backend-deployment.yam
```console
- name: opa
image: openpolicyagent/opa:0.24.0-envoy-5
imagePullPolicy: Always
ports:
- name: opa-envoy
image: openpolicyagent/opa:0.50.2-envoy
imagePullPolicy: IfNotPresent
ports:
- name: opa-envoy
containerPort: 8182
protocol: TCP
- name: opa-api-port
- name: opa-api-port
containerPort: 8181
protocol: TCP
args:
- "run"
- "--server"
- "--config-file=/run/opa/opa-config.yaml"
- "/run/opa/opa-policy.rego"
volumeMounts:
- name: backend-opa-policy
args:
- "run"
- "--server"
- "--config-file=/run/opa/opa-config.yaml"
- "/run/opa/opa-policy.rego"
volumeMounts:
- name: backend-opa-policy
mountPath: /run/opa
readOnly: true
```
One thing to note is the use of the `openpolicyagent/opa:0.24.0-envoy-5` image. This image extends OPA with a gRPC server that implements the Envoy External Authorization API so OPA can communicate policy decisions with Envoy.
One thing to note is the use of the `openpolicyagent/opa:0.50.2-envoy` image. This image extends OPA with a gRPC server that implements the Envoy External Authorization API so OPA can communicate policy decisions with Envoy.
The ConfigMap `backend-opa-policy` needs to be added into the `volumes` section, like this:
@ -192,7 +213,8 @@ Finally, this setup requires an External Authorization Filter that connects to t
```console
- name: envoy.ext_authz
typed_config:
"@type": type.googleapis.com/envoy.config.filter.http.ext_authz.v2.ExtAuthz
"@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
transport_api_version: V3
with_request_body:
max_request_bytes: 8192
allow_partial_message: true

View File

@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: envoy
image: envoyproxy/envoy-alpine:v1.14.1
image: envoyproxy/envoy:v1.25.1
imagePullPolicy: Always
args: ["-l", "debug", "--local-address-ip-version", "v4", "-c", "/run/envoy/envoy.yaml"]
ports:
@ -42,7 +42,7 @@ spec:
mountPath: "/usr/share/nginx/html/transactions"
readOnly: true
- name: opa
image: openpolicyagent/opa:0.24.0-envoy-5
image: openpolicyagent/opa:0.50.2-envoy
imagePullPolicy: IfNotPresent
ports:
- name: opa-envoy

View File

@ -10,9 +10,9 @@ static_resources:
port_value: 9001
filter_chains:
- filters:
- name: envoy.http_connection_manager
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
common_http_protocol_options:
idle_timeout: 1s
forward_client_cert_details: sanitize_set
@ -20,8 +20,9 @@ static_resources:
uri: true
codec_type: auto
access_log:
- name: envoy.file_access_log
config:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/inbound-proxy.log"
stat_prefix: ingress_http
route_config:
@ -35,9 +36,10 @@ static_resources:
route:
cluster: local_service
http_filters:
- name: envoy.ext_authz
- name: envoy.filters.http.ext_authz
typed_config:
"@type": type.googleapis.com/envoy.config.filter.http.ext_authz.v2.ExtAuthz
"@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz
transport_api_version: V3
with_request_body:
max_request_bytes: 8192
allow_partial_message: true
@ -47,46 +49,69 @@ static_resources:
target_uri: 127.0.0.1:8182
stat_prefix: ext_authz
timeout: 0.5s
- name: envoy.router
tls_context:
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/backend"
sds_config:
api_config_source:
api_type: GRPC
grpc_services:
envoy_grpc:
cluster_name: spire_agent
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_subject_alt_names:
- exact: "spiffe://example.org/ns/default/sa/default/frontend"
- exact: "spiffe://example.org/ns/default/sa/default/frontend-2"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
- name: envoy.filters.http.router
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/backend"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_typed_subject_alt_names:
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend"
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend-2"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
clusters:
- name: spire_agent
connect_timeout: 0.25s
http2_protocol_options: {}
hosts:
- pipe:
path: /run/spire/sockets/agent.sock
load_assignment:
cluster_name: spire_agent
endpoints:
- lb_endpoints:
- endpoint:
address:
pipe:
path: /run/spire/sockets/agent.sock
- name: local_service
connect_timeout: 1s
type: strict_dns
hosts:
- socket_address:
address: 127.0.0.1
port_value: 80
load_assignment:
cluster_name: local_service
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 80

0
k8s/envoy-opa/scripts/backend-opa-logs.sh Normal file → Executable file
View File

0
k8s/envoy-opa/scripts/backend-update-policy.sh Normal file → Executable file
View File

0
k8s/envoy-opa/scripts/clean-env.sh Normal file → Executable file
View File

0
k8s/envoy-opa/scripts/pre-set-env.sh Normal file → Executable file
View File

0
k8s/envoy-opa/scripts/set-env.sh Normal file → Executable file
View File

View File

@ -35,6 +35,27 @@ $ bash scripts/pre-set-env.sh
The script will create all the resources needed for the SPIRE Server and SPIRE Agent to be available in the cluster.
## External IP support
This tutorial requires a LoadBalancer that can assign an external IP (e.g., [metallb](https://metallb.universe.tf/)).
```console
$ kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.7/config/manifests/metallb-native.yaml
```
Wait until metallb has started
```console
$ kubectl wait --namespace metallb-system \
--for=condition=ready pod \
--selector=app=metallb \
--timeout=90s
```
Apply metallb configuration
```console
$ kubectl apply -f metallb-config.yaml
```
# Envoy SDS Support
@ -89,9 +110,14 @@ clusters:
- name: spire_agent
connect_timeout: 0.25s
http2_protocol_options: {}
hosts:
- pipe:
path: /run/spire/sockets/agent.sock
load_assignment:
cluster_name: spire_agent
endpoints:
- lb_endpoints:
- endpoint:
address:
pipe:
path: /run/spire/sockets/agent.sock
```
### TLS Certificates
@ -100,31 +126,42 @@ To obtain a TLS certificate and private key from SPIRE, you set up an SDS config
Furthermore, SPIRE provides a validation context per trust domain that Envoy uses to verify peer certificates.
```console
tls_context:
common_tls_context:
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/backend"
sds_config:
api_config_source:
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
envoy_grpc:
cluster_name: spire_agent
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_subject_alt_names:
- "spiffe://example.org/ns/default/sa/default/frontend"
- "spiffe://example.org/ns/default/sa/default/frontend-2"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_typed_subject_alt_names:
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend"
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend-2"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
grpc_services:
envoy_grpc:
cluster_name: spire_agent
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
```
Similar configurations are set on both frontend services to establish mTLS communication. Check the configuration of the cluster named `backend` in `k8s/frontend/config/envoy.yaml` and `k8s/frontend-2/config/envoy.yaml`.
@ -205,7 +242,7 @@ Following the same steps, when you connect to the URL for the `frontend-2` servi
## Update the TLS Configuration So Only One Frontend Can Access the Backend
The Envoy configuration for the `backend` service uses the TLS configuration to filter incoming connections by validating the Subject Alternative Name (SAN) of the certificate presented on the TLS connection. For SVIDs, the SAN field of the certificate is set with the SPIFFE ID associated with the service. So by specifying the SPIFFE IDs in the `match_subject_alt_names` filter we indicate to Envoy which services can establish a connection.
The Envoy configuration for the `backend` service uses the TLS configuration to filter incoming connections by validating the Subject Alternative Name (SAN) of the certificate presented on the TLS connection. For SVIDs, the SAN field of the certificate is set with the SPIFFE ID associated with the service. So by specifying the SPIFFE IDs in the `match_typed_subject_alt_names` filter we indicate to Envoy which services can establish a connection.
Let's now update the Envoy configuration for the `backend` service to allow requests from the `frontend` service only. This is achieved by removing the SPIFFE ID of the `frontend-2` service from the `combined_validation_context` section at the [Envoy configuration](k8s/backend/config/envoy.yaml#L49). The updated configuration looks like this:
@ -213,9 +250,10 @@ Let's now update the Envoy configuration for the `backend` service to allow requ
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_subject_alt_names:
- exact: "spiffe://example.org/ns/default/sa/default/frontend"
match_typed_subject_alt_names:
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend"
```
## Apply the New Configuration for Envoy
@ -256,7 +294,8 @@ The following snippet can be added to the Envoy configuration for the `backend`
```console
- name: envoy.filters.http.rbac
config:
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC
rules:
action: ALLOW
policies:

View File

@ -0,0 +1,124 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: backend-envoy
data:
envoy.yaml: |
node:
id: "backend"
cluster: "demo-cluster-spire"
static_resources:
listeners:
- name: local_service
address:
socket_address:
address: 0.0.0.0
port_value: 9001
filter_chains:
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
common_http_protocol_options:
idle_timeout: 1s
forward_client_cert_details: sanitize_set
set_current_client_cert_details:
uri: true
codec_type: auto
access_log:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/inbound-proxy.log"
stat_prefix: ingress_http
route_config:
name: local_route
virtual_hosts:
- name: local_service
domains: ["*"]
routes:
- match:
prefix: "/"
route:
cluster: local_service
http_filters:
- name: envoy.filters.http.rbac
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBAC
rules:
action: ALLOW
policies:
"general-rules":
permissions:
- and_rules:
rules:
- header: { name: ":method", exact_match: "GET" }
- url_path:
path: { prefix: "/profiles" }
principals:
- authenticated:
principal_name:
exact: "spiffe://example.org/ns/default/sa/default/frontend"
- name: envoy.filters.http.router
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/backend"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_typed_subject_alt_names:
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
clusters:
- name: spire_agent
connect_timeout: 0.25s
http2_protocol_options: {}
load_assignment:
cluster_name: spire_agent
endpoints:
- lb_endpoints:
- endpoint:
address:
pipe:
path: /run/spire/sockets/agent.sock
- name: local_service
connect_timeout: 1s
type: strict_dns
load_assignment:
cluster_name: local_service
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 80

View File

@ -16,9 +16,9 @@ data:
port_value: 9001
filter_chains:
- filters:
- name: envoy.http_connection_manager
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
common_http_protocol_options:
idle_timeout: 1s
forward_client_cert_details: sanitize_set
@ -26,8 +26,9 @@ data:
uri: true
codec_type: auto
access_log:
- name: envoy.file_access_log
config:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/inbound-proxy.log"
stat_prefix: ingress_http
route_config:
@ -41,45 +42,66 @@ data:
route:
cluster: local_service
http_filters:
- name: envoy.router
tls_context:
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/backend"
sds_config:
api_config_source:
api_type: GRPC
grpc_services:
envoy_grpc:
cluster_name: spire_agent
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_subject_alt_names:
- exact: "spiffe://example.org/ns/default/sa/default/frontend"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
- name: envoy.filters.http.router
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/backend"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_typed_subject_alt_names:
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
clusters:
- name: spire_agent
connect_timeout: 0.25s
http2_protocol_options: {}
hosts:
- pipe:
path: /run/spire/sockets/agent.sock
load_assignment:
cluster_name: spire_agent
endpoints:
- lb_endpoints:
- endpoint:
address:
pipe:
path: /run/spire/sockets/agent.sock
- name: local_service
connect_timeout: 1s
type: strict_dns
hosts:
- socket_address:
address: 127.0.0.1
port_value: 80
load_assignment:
cluster_name: local_service
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 80

0
k8s/envoy-x509/create-registration-entries.sh Normal file → Executable file
View File

View File

@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: envoy
image: envoyproxy/envoy-alpine:v1.14.1
image: envoyproxy/envoy:v1.25.1
imagePullPolicy: Always
args: ["-l", "debug", "--local-address-ip-version", "v4", "-c", "/run/envoy/envoy.yaml"]
ports:

View File

@ -10,9 +10,9 @@ static_resources:
port_value: 9001
filter_chains:
- filters:
- name: envoy.http_connection_manager
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
common_http_protocol_options:
idle_timeout: 1s
forward_client_cert_details: sanitize_set
@ -20,8 +20,9 @@ static_resources:
uri: true
codec_type: auto
access_log:
- name: envoy.file_access_log
config:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/inbound-proxy.log"
stat_prefix: ingress_http
route_config:
@ -35,46 +36,69 @@ static_resources:
route:
cluster: local_service
http_filters:
- name: envoy.router
tls_context:
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/backend"
sds_config:
api_config_source:
api_type: GRPC
grpc_services:
envoy_grpc:
cluster_name: spire_agent
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_subject_alt_names:
- exact: "spiffe://example.org/ns/default/sa/default/frontend"
- exact: "spiffe://example.org/ns/default/sa/default/frontend-2"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
- name: envoy.filters.http.router
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/backend"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
combined_validation_context:
# validate the SPIFFE ID of incoming clients (optionally)
default_validation_context:
match_typed_subject_alt_names:
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend"
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/frontend-2"
# obtain the trust bundle from SDS
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
clusters:
- name: spire_agent
connect_timeout: 0.25s
http2_protocol_options: {}
hosts:
- pipe:
path: /run/spire/sockets/agent.sock
load_assignment:
cluster_name: spire_agent
endpoints:
- lb_endpoints:
- endpoint:
address:
pipe:
path: /run/spire/sockets/agent.sock
- name: local_service
connect_timeout: 1s
type: strict_dns
hosts:
- socket_address:
address: 127.0.0.1
port_value: 80
load_assignment:
cluster_name: local_service
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: 127.0.0.1
port_value: 80

View File

@ -2,7 +2,11 @@ node:
id: "frontend-2"
cluster: "demo-cluster-spire"
admin:
access_log_path: /tmp/admin_access0.log
access_log:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/admin_access0.log"
address:
socket_address:
protocol: TCP
@ -17,15 +21,16 @@ static_resources:
port_value: 3003
filter_chains:
- filters:
- name: envoy.http_connection_manager
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
common_http_protocol_options:
idle_timeout: 1s
codec_type: auto
access_log:
- name: envoy.file_access_log
config:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/outbound-proxy.log"
stat_prefix: ingress_http
route_config:
@ -39,45 +44,66 @@ static_resources:
route:
cluster: backend
http_filters:
- name: envoy.router
- name: envoy.filters.http.router
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
clusters:
- name: spire_agent
connect_timeout: 0.25s
http2_protocol_options: {}
hosts:
- pipe:
path: /run/spire/sockets/agent.sock
load_assignment:
cluster_name: spire_agent
endpoints:
- lb_endpoints:
- endpoint:
address:
pipe:
path: /run/spire/sockets/agent.sock
- name: backend
connect_timeout: 0.25s
type: strict_dns
lb_policy: ROUND_ROBIN
hosts:
- socket_address:
address: backend-envoy
port_value: 9001
tls_context:
common_tls_context:
tls_certificate_sds_secret_configs:
load_assignment:
cluster_name: backend
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: backend-envoy
port_value: 9001
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/frontend-2"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
combined_validation_context:
# validate the SPIFFE ID of the server (recommended)
default_validation_context:
match_subject_alt_names:
exact: "spiffe://example.org/ns/default/sa/default/backend"
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
api_config_source:
api_type: GRPC
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
combined_validation_context:
# validate the SPIFFE ID of the server (recommended)
default_validation_context:
match_typed_subject_alt_names:
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/backend"
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384

View File

@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: envoy
image: envoyproxy/envoy-alpine:v1.14.1
image: envoyproxy/envoy:v1.25.1
imagePullPolicy: Always
args: ["-l", "debug", "--local-address-ip-version", "v4", "-c", "/run/envoy/envoy.yaml", "--base-id", "2"]
volumeMounts:
@ -44,4 +44,4 @@ spec:
type: DirectoryOrCreate
- name: symbank-webapp-2-config
configMap:
name: symbank-webapp-2-config
name: symbank-webapp-2-config

View File

@ -2,7 +2,11 @@ node:
id: "frontend"
cluster: "demo-cluster-spire"
admin:
access_log_path: /tmp/admin_access0.log
access_log:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/admin_access0.log"
address:
socket_address:
protocol: TCP
@ -17,15 +21,16 @@ static_resources:
port_value: 3001
filter_chains:
- filters:
- name: envoy.http_connection_manager
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
common_http_protocol_options:
idle_timeout: 1s
codec_type: auto
access_log:
- name: envoy.file_access_log
config:
- name: envoy.access_loggers.file
typed_config:
"@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog
path: "/tmp/outbound-proxy.log"
stat_prefix: ingress_http
route_config:
@ -39,45 +44,66 @@ static_resources:
route:
cluster: backend
http_filters:
- name: envoy.router
- name: envoy.filters.http.router
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
clusters:
- name: spire_agent
connect_timeout: 0.25s
http2_protocol_options: {}
hosts:
- pipe:
path: /run/spire/sockets/agent.sock
load_assignment:
cluster_name: spire_agent
endpoints:
- lb_endpoints:
- endpoint:
address:
pipe:
path: /run/spire/sockets/agent.sock
- name: backend
connect_timeout: 0.25s
type: strict_dns
lb_policy: ROUND_ROBIN
hosts:
- socket_address:
address: backend-envoy
port_value: 9001
tls_context:
common_tls_context:
tls_certificate_sds_secret_configs:
load_assignment:
cluster_name: backend
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: backend-envoy
port_value: 9001
transport_socket:
name: envoy.transport_sockets.tls
typed_config:
"@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
common_tls_context:
tls_certificate_sds_secret_configs:
- name: "spiffe://example.org/ns/default/sa/default/frontend"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
combined_validation_context:
# validate the SPIFFE ID of the server (recommended)
default_validation_context:
match_subject_alt_names:
exact: "spiffe://example.org/ns/default/sa/default/backend"
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
api_config_source:
api_type: GRPC
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384
combined_validation_context:
# validate the SPIFFE ID of the server (recommended)
default_validation_context:
match_typed_subject_alt_names:
- san_type: URI
matcher:
exact: "spiffe://example.org/ns/default/sa/default/backend"
validation_context_sds_secret_config:
name: "spiffe://example.org"
sds_config:
resource_api_version: V3
api_config_source:
api_type: GRPC
transport_api_version: V3
grpc_services:
envoy_grpc:
cluster_name: spire_agent
tls_params:
ecdh_curves:
- X25519:P-256:P-521:P-384

View File

@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: envoy
image: envoyproxy/envoy-alpine:v1.14.1
image: envoyproxy/envoy:v1.25.1
imagePullPolicy: Always
args: ["-l", "debug", "--local-address-ip-version", "v4", "-c", "/run/envoy/envoy.yaml", "--base-id", "1"]
volumeMounts:
@ -44,4 +44,4 @@ spec:
type: DirectoryOrCreate
- name: symbank-webapp-config
configMap:
name: symbank-webapp-config
name: symbank-webapp-config

View File

@ -0,0 +1,12 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: sandbox
namespace: metallb-system
spec:
addresses:
# The sandbox environment uses private IP space, which is free and
# plentiful. We give this address pool a generous range of IPs so that
# developers can spin up as many sandboxes as they need.
- 192.168.144.0/20

View File

@ -0,0 +1,14 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: example
namespace: metallb-system
spec:
addresses:
- 172.18.255.200-172.18.255.250
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: empty
namespace: metallb-system

0
k8s/envoy-x509/scripts/clean-env.sh Normal file → Executable file
View File

0
k8s/envoy-x509/scripts/pre-set-env.sh Normal file → Executable file
View File

0
k8s/envoy-x509/scripts/set-env.sh Normal file → Executable file
View File